From d3f11de75662a7a2e8b621fe86614012a3f6d7b0 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 21 May 2018 16:43:24 +0000 Subject: [PATCH 001/909] Added initial environment file for ecaccess.ecmwf.int (ECMWF); added case for this machine in env.sh . --- env.sh | 3 +++ env/ecaccess.ecmwf.int/shell | 1 + 2 files changed, 4 insertions(+) create mode 100644 env/ecaccess.ecmwf.int/shell diff --git a/env.sh b/env.sh index ea29a7eba..a15583711 100755 --- a/env.sh +++ b/env.sh @@ -35,6 +35,9 @@ elif [[ $LOGINHOST =~ ^b[A-Za-z0-9]+\.hsn\.hlrn\.de$ ]]; then STRATEGY="blogin.hlrn.de" elif [[ $LOGINHOST =~ \.hww\.de$ ]] || [[ $LOGINHOST =~ ^nid[0-9]{5}$ ]]; then STRATEGY="hazelhen.hww.de" +elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then + STRATEGY="ecaccess.ecmwf.int" + echo $STRATEGY else echo "can not determine environment for host: "$LOGINHOST [ $BEING_EXECUTED = true ] && exit 1 diff --git a/env/ecaccess.ecmwf.int/shell b/env/ecaccess.ecmwf.int/shell new file mode 100644 index 000000000..736ac32b0 --- /dev/null +++ b/env/ecaccess.ecmwf.int/shell @@ -0,0 +1 @@ +module load cmake/3.5.1 From e4e90f7e074b1593b91cece1dd253bb1a639cc70 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 23 May 2018 07:51:31 +0000 Subject: [PATCH 002/909] Removed comma after write(...,...) statement, which prevented the cray compiler on the ECMWF machine to compile fesom2. --- src/io_blowup.F90 | 2 +- src/io_meandata.F90 | 2 +- src/io_restart.F90 | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 3a2bdfc33..214933ea4 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -204,7 +204,7 @@ subroutine create_new_file(id) att_text='time' id%error_status(c) = nf_put_att_text(id%ncid, id%tID, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 - write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)'), 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0' + write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)') 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0' id%error_status(c) = nf_put_att_text(id%ncid, id%tID, 'units', len_trim(att_text), trim(att_text)); c=c+1 att_text='iteration_count' diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 21ced45ca..72871edcb 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -190,7 +190,7 @@ subroutine create_new_file(entry) att_text='time' entry%error_status(c) = nf_put_att_text(entry%ncid, entry%tID, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 - write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)'), 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0' + write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)') 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0' entry%error_status(c) = nf_put_att_text(entry%ncid, entry%tID, 'units', len_trim(att_text), trim(att_text)); c=c+1 entry%error_status(c) = nf_def_var(entry%ncid, trim(entry%name), NF_DOUBLE, entry%ndim+1, & diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 9d1c77836..2426e94e3 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -275,7 +275,7 @@ subroutine create_new_file(id) att_text='time' id%error_status(c) = nf_put_att_text(id%ncid, id%tID, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 - write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)'), 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0' + write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)') 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0' id%error_status(c) = nf_put_att_text(id%ncid, id%tID, 'units', len_trim(att_text), trim(att_text)); c=c+1 att_text='iteration_count' From eda69fd656e9a0bdc559e2f6c230b1f8f3993337 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 23 May 2018 07:59:01 +0000 Subject: [PATCH 003/909] interface for exchange_nod expects type WP, cray compiler on ECMWF stopped compilation here. --- src/oce_dyn.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 9274a05e3..c6790c4e3 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -1021,7 +1021,7 @@ SUBROUTINE h_viscosity_leith real(kind=WP) :: dz, div_elem(3), xe, ye, vi integer :: elem, nl1, nz, elnodes(3),n,k, nt real(kind=WP) :: leithx, leithy -real, allocatable :: aux(:,:) +real(kind=WP), allocatable :: aux(:,:) ! if(mom_adv<4) call relative_vorticity !!! vorticity array should be allocated ! Fill in viscosity: From d9faeff4387e256f21efc0e63049f235ca7558ab Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 30 May 2018 09:42:25 +0000 Subject: [PATCH 004/909] =?UTF-8?q?changed=20fileID=20from=2010+mype=20to?= =?UTF-8?q?=20103+mype=20since=20the=20program=20will=20crash=20on=20runti?= =?UTF-8?q?me=20at=20the=20ECMWF=20computer.=20The=20unit=20number=20must?= =?UTF-8?q?=20be=20outside=20the=20range=20of=201=C3=9F00=20through=20102.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/oce_local.F90 | 8 ++++---- src/oce_mesh.F90 | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/oce_local.F90 b/src/oce_local.F90 index f78780c5e..7035af9dd 100755 --- a/src/oce_local.F90 +++ b/src/oce_local.F90 @@ -107,7 +107,7 @@ SUBROUTINE save_dist_mesh if(mype==0) then file_name=trim(dist_mesh_dir)//'rpart.out' - fileID=10+mype + fileID=103+mype !skip unit range 100--102 open(fileID, file=file_name) ncount=0; DO n=1, nod2D @@ -121,7 +121,7 @@ SUBROUTINE save_dist_mesh file_name=trim(dist_mesh_dir)//'my_list'//trim(mype_string)//'.out' - fileID=10+mype + fileID=103+mype !skip unit range 100--102 ! ============================= ! lists of owned nodes and elements ! ============================= @@ -147,7 +147,7 @@ SUBROUTINE save_dist_mesh ! ========================= call com_global2local file_name=trim(dist_mesh_dir)//'com_info'//trim(mype_string)//'.out' - fileID=10+mype + fileID=103+mype !skip unit range 100--102 open(fileID, file=file_name) write(fileID,*) mype write(fileID,*) com_nod2D%rPEnum @@ -200,7 +200,7 @@ SUBROUTINE save_dist_mesh ! ================================ if(mype==0) then file_name=trim(dist_mesh_dir)//'rpart.out' - fileID=10+mype + fileID=103+mype !skip unit range 100--102 open(fileID, file=file_name) ncount=0 DO n=1, nod2D diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 3a2f9f31d..c587c4141 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -102,7 +102,7 @@ SUBROUTINE read_mesh !=========================== file_name=trim(dist_mesh_dir)//'my_list'//trim(mype_string)//'.out' - fileID=10+mype + fileID=103+mype !skip unit range 100--102 open(fileID, file=trim(file_name)) read(fileID,*) n @@ -331,7 +331,7 @@ SUBROUTINE read_mesh ! every proc reads its file ! ============================== file_name=trim(dist_mesh_dir)//'com_info'//trim(mype_string)//'.out' - fileID=10+mype + fileID=103+mype !skip unit range 100--102 open(fileID, file=file_name) read(fileID,*) n read(fileID,*) com_nod2D%rPEnum From a9ea4bfc131f905ec1d7b01e074c733aea573dd7 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 30 May 2018 09:50:00 +0000 Subject: [PATCH 005/909] Add initial runscript for ECMWF that already works. --- work/job_ecmwf | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100755 work/job_ecmwf diff --git a/work/job_ecmwf b/work/job_ecmwf new file mode 100755 index 000000000..48368abb8 --- /dev/null +++ b/work/job_ecmwf @@ -0,0 +1,43 @@ +#!/bin/bash +#PBS -S /usr/bin/ksh +#PBS -N fesom2-test +#PBS -q np +#PBS -l EC_total_tasks=288 + +# optionally, specifiy that no OpenMP is used +#PBS -l EC_threads_per_task=1 + +#PBS -l EC_hyperthreading=1 +#PBS -l EC_user_defined_priority=99 +#PBS -l walltime=00:17:00 +##PBS -l walltime=00:57:00 + +##PBS -j oe #join out and err +#PBD -n +#PBS -o /scratch/rd/natr/run/pbs.out +#PBS -e /scratch/rd/natr/run/pbs.err + +#PBS -m abe +#PBS -M thomas.rackow@awi.de + +#queue suitable for target processors min/max processors per node memory limit wall-clock +#np parallel MOM+CN 1/72 not shared 72 120 GB 48 hours + +path=`pwd` +echo Initial path: $path + +cd /scratch/rd/natr/run + +# debug +set -x + +ln -s $HOME/fesom2/bin/fesom.x . #../bin/fesom.x . # cp -n ../bin/fesom.x +cp -n $HOME/fesom2/config/namelist.config . #../config/namelist.config . +cp -n $HOME/fesom2/config/namelist.forcing . #../config/namelist.forcing . +cp -n $HOME/fesom2/config/namelist.oce . #../config/namelist.oce . +cp -n $HOME/fesom2/config/namelist.ice . #../config/namelist.ice . + +date +echo tasks_per_node, total_tasks, HT: $EC_tasks_per_node $EC_total_tasks $EC_hyperthreads +aprun -N $EC_tasks_per_node -n $EC_total_tasks -j $EC_hyperthreads ./fesom.x > "fesom2.out" +date From ba2d4d406410b42f2e82167c4fc88e4bef938112 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 30 May 2018 09:54:09 +0000 Subject: [PATCH 006/909] Add namelist.config file for the ECMWF machine. --- config/namelist.config.ecmwf | 85 ++++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100755 config/namelist.config.ecmwf diff --git a/config/namelist.config.ecmwf b/config/namelist.config.ecmwf new file mode 100755 index 000000000..4544d3293 --- /dev/null +++ b/config/namelist.config.ecmwf @@ -0,0 +1,85 @@ +! This is the namelist file for model general configuration + +&modelname +runid='fesom' +/ + +×tep +step_per_day=32 +run_length=1 +run_length_unit='y' ! y, m, d, s +/ + +&clockinit ! the model starts at +timenew=0.0 +daynew=1 +yearnew=1948 +/ + +&paths +MeshPath='/fwsm/lb/project/fesom2/core2/' +OpbndPath='' +ClimateDataPath='/fwsm/lb/project/fesom2/hydrography/' +ForcingDataPath='/fwsm/lb/project/fesom2/forcing/' +TideForcingPath='/fwsm/lb/project/fesom2/tide_forcing/' +ResultPath='/scratch/rd/natr/results/' +/ + +&initialization +OceClimaDataName='Winter_PHC3_ts.out' !which T/S data to initial. ocean + !'Winter_PHC3_ts.out' + !'Annual_PHC3_ts.out' +use_prepared_init_ice=.false. !how to init. ice; runid.initial_ice.nc +/ + +&inout +restartflag='last' !restart from which saved record,'last','#' +output_length=1 !only required for d,h,s cases, y, m take 1 +output_length_unit='m' !output period: y, d, h, s +output_offset=64 +restart_length=1 !only required for d,h,s cases, y, m take 1 +restart_length_unit='y' !output period: y, d, h, s +restart_offset=64 +logfile_outfreq=64 !in logfile info. output frequency, # steps +use_means=.true. !average output, if false prints out snapshots +/ + +&mesh_def +grid_type=1 !1 z-level, 2 sigma, 3 z+sigma +use_ALE=.true. ! switch on/off ALE +which_ALE='linfs' ! 'linfs','zlevel', 'zstar','zstar-weight', 'ztilde' +use_partial_cell=.false. +min_hnode=0.25 +lzstar_lev=3 +/ + +&geometry +cartesian=.false. +fplane=.false. +betaplane=.false. +f_fplane=-1.4e-4 ![1/s] +beta_betaplane=2.0e-11 ![1/s/m] +cyclic_length=360. ![degree] +rotated_grid=.true. !option only valid for coupled model case now +alphaEuler=50. ![degree] Euler angles, convention: +betaEuler=15. ![degree] first around z, then around new x, +gammaEuler=-90. ![degree] then around new z. +force_rotation=.false. +/ + +&calendar +include_fleapyear=.true. +/ + +&run_config +use_ice=.true. ! ocean+ice +use_floatice = .false. +use_sw_pene=.true. +toy_ocean=.false. ! use toy forcing/initialization +/ + +&machine +! system_arch=1 +n_levels=3 +n_part= 2, 4, 36 ! number of partitions on each hierarchy level +/ From cceef7f7b7df65252b4eba87b81135ba0dd1f790 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 1 Jun 2018 09:12:01 +0000 Subject: [PATCH 007/909] Update environment files for ECMWF: env.sh and shell file. --- env.sh | 1 - env/ecaccess.ecmwf.int/shell | 10 +++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/env.sh b/env.sh index a15583711..d16f51a0b 100755 --- a/env.sh +++ b/env.sh @@ -37,7 +37,6 @@ elif [[ $LOGINHOST =~ \.hww\.de$ ]] || [[ $LOGINHOST =~ ^nid[0-9]{5}$ ]]; then STRATEGY="hazelhen.hww.de" elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then STRATEGY="ecaccess.ecmwf.int" - echo $STRATEGY else echo "can not determine environment for host: "$LOGINHOST [ $BEING_EXECUTED = true ] && exit 1 diff --git a/env/ecaccess.ecmwf.int/shell b/env/ecaccess.ecmwf.int/shell index 736ac32b0..0ddf47b0c 100644 --- a/env/ecaccess.ecmwf.int/shell +++ b/env/ecaccess.ecmwf.int/shell @@ -1 +1,9 @@ -module load cmake/3.5.1 +export PATH=/home/rd/natr/cmake-3.11.2-Linux-x86_64/bin:$PATH + +module unload cray-hdf5 +module load cray-netcdf +module load cray-hdf5 + +#export CRAYPE_LINK_TYPE=dynamic + +export FC=ftn CC=cc CXX=CC From 9def08719c6a4b7cfda4219caf78233436c45e44 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 1 Jun 2018 09:13:50 +0000 Subject: [PATCH 008/909] Update namelist.config.ecmwf --- config/namelist.config.ecmwf | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/config/namelist.config.ecmwf b/config/namelist.config.ecmwf index 4544d3293..9048d35e4 100755 --- a/config/namelist.config.ecmwf +++ b/config/namelist.config.ecmwf @@ -5,9 +5,9 @@ runid='fesom' / ×tep -step_per_day=32 -run_length=1 -run_length_unit='y' ! y, m, d, s +step_per_day=32 !288 !32 +run_length=1 !2 !1 +run_length_unit='y' ! y, m, d, s / &clockinit ! the model starts at @@ -40,7 +40,7 @@ output_offset=64 restart_length=1 !only required for d,h,s cases, y, m take 1 restart_length_unit='y' !output period: y, d, h, s restart_offset=64 -logfile_outfreq=64 !in logfile info. output frequency, # steps +logfile_outfreq=960 !in logfile info. output frequency, # steps use_means=.true. !average output, if false prints out snapshots / @@ -68,7 +68,7 @@ force_rotation=.false. / &calendar -include_fleapyear=.true. +include_fleapyear=.false. / &run_config From 3b8803837d25cc41c33458c53e7ce1a19e038bd1 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Thu, 7 Jun 2018 14:12:29 +0100 Subject: [PATCH 009/909] Initial commit. --- Makefile | 53 +++ interinfo.F90 | 23 + nctools.F90 | 40 ++ nemogcmcoup_coupinit.F90 | 224 +++++++++ nemogcmcoup_exflds_get.F90 | 28 ++ nemogcmcoup_final.F90 | 11 + nemogcmcoup_get.F90 | 30 ++ nemogcmcoup_get_1way.F90 | 23 + nemogcmcoup_init.F90 | 38 ++ nemogcmcoup_init_ioserver.F90 | 12 + nemogcmcoup_init_ioserver_2.F90 | 11 + nemogcmcoup_lim2_get.F90 | 324 +++++++++++++ nemogcmcoup_lim2_update.F90 | 669 ++++++++++++++++++++++++++ nemogcmcoup_mlflds_get.F90 | 26 + nemogcmcoup_mlinit.F90 | 26 + nemogcmcoup_step.F90 | 33 ++ nemogcmcoup_update.F90 | 32 ++ nemogcmcoup_update_add.F90 | 32 ++ nemogcmcoup_wam_coupinit.F90 | 25 + nemogcmcoup_wam_get.F90 | 30 ++ nemogcmcoup_wam_update.F90 | 34 ++ nemogcmcoup_wam_update_stress.F90 | 32 ++ par_kind.F90 | 8 + parinter.F90 | 762 ++++++++++++++++++++++++++++++ scripgrid.F90 | 278 +++++++++++ scrippar.F90 | 5 + scripremap.F90 | 734 ++++++++++++++++++++++++++++ 27 files changed, 3543 insertions(+) create mode 100644 Makefile create mode 100644 interinfo.F90 create mode 100644 nctools.F90 create mode 100644 nemogcmcoup_coupinit.F90 create mode 100644 nemogcmcoup_exflds_get.F90 create mode 100644 nemogcmcoup_final.F90 create mode 100644 nemogcmcoup_get.F90 create mode 100644 nemogcmcoup_get_1way.F90 create mode 100644 nemogcmcoup_init.F90 create mode 100644 nemogcmcoup_init_ioserver.F90 create mode 100644 nemogcmcoup_init_ioserver_2.F90 create mode 100644 nemogcmcoup_lim2_get.F90 create mode 100644 nemogcmcoup_lim2_update.F90 create mode 100644 nemogcmcoup_mlflds_get.F90 create mode 100644 nemogcmcoup_mlinit.F90 create mode 100644 nemogcmcoup_step.F90 create mode 100644 nemogcmcoup_update.F90 create mode 100644 nemogcmcoup_update_add.F90 create mode 100644 nemogcmcoup_wam_coupinit.F90 create mode 100644 nemogcmcoup_wam_get.F90 create mode 100644 nemogcmcoup_wam_update.F90 create mode 100644 nemogcmcoup_wam_update_stress.F90 create mode 100644 par_kind.F90 create mode 100644 parinter.F90 create mode 100644 scripgrid.F90 create mode 100644 scrippar.F90 create mode 100644 scripremap.F90 diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..bb0c3db0f --- /dev/null +++ b/Makefile @@ -0,0 +1,53 @@ +.SUFFIXES: +.SUFFIXES: .F90 .F .f .o + +CPP=cpp +FC=mpif90 +LD=$(FC) +FCFLAGSFIXED=-g -c -O3 -fdefault-real-8 -fdefault-double-8 -fcray-pointer -fconvert=swap -fopenmp $(NETCDF_INCLUDE) $(GRIB_API_INCLUDE) +FCFLAGSFREE=$(FCFLAGSFIXED) +CPPFLAGS=-traditional -P -Dkey_mpp_mpi +LDFLAGS=-g -O3 -fdefault-real-8 -fdefault-double-8 -fcray-pointer -fconvert=swap -fopenmp $(MAGPLUSLIB_SHARED) $(NETCDF_LIB) $(GRIB_API_LIB) +AR=ar +ARFLAGS=-rv + +OBJ=scrippar.o scripremap.o scripgrid.o parinter.o interinfo.o nemogcmcoup_mlflds_get.o par_kind.o nemogcmcoup_init_ioserver.o nemogcmcoup_init_ioserver_2.o nemogcmcoup_final.o nemogcmcoup_init.o nemogcmcoup_wam_coupinit.o nctools.o nemogcmcoup_step.o nemogcmcoup_exflds_get.o nemogcmcoup_wam_update.o nemogcmcoup_wam_update_stress.o nemogcmcoup_wam_get.o nemogcmcoup_coupinit.o nemogcmcoup_get_1way.o nemogcmcoup_mlinit.o nemogcmcoup_update_add.o nemogcmcoup_update.o nemogcmcoup_lim2_update.o nemogcmcoup_get.o nemogcmcoup_lim2_get.o + +all: libfesom.a + +.F90.o: + $(CPP) $(CPPFLAGS) $< > $*.pp.f90 + $(FC) $(FCFLAGSFREE) $*.pp.f90 -o $*.o + +.F.o: + $(CPP) $(CPPFLAGS) $< > $*.pp.f + $(FC) $(FCFLAGSFIXED) $*.pp.f -o $*.o + +.f.o: + $(CPP) $(CPPFLAGS) $< > $*.pp.f + $(FC) $(FCFLAGSFIXED) $*.pp.f -o $*.o + +libfesom.a: $(OBJ) + $(AR) $(ARFLAGS) $@ $(OBJ) + +clean: + rm -f *.o *.mod *~ *.x *.pp.f90 *.pp.f *.lst *.in *.nc *.grb *.a + rm -rf *.dSYM + +scripremap.o: nctools.o scrippar.o scripgrid.o +scripgrid.o: nctools.o scrippar.o +parinter.o: scripremap.o scrippar.o +interinfo.o: parinter.o +nemogcmcoup_mlflds_get.o: par_kind.o +nemogcmcoup_exflds_get.o: par_kind.o +nemogcmcoup_coup.o: par_kind.o +nemogcmcoup_coupinit.o: scripremap.o parinter.o interinfo.o +nemogcmcoup_wam_update.o: par_kind.o +nemogcmcoup_wam_get.o: par_kind.o +nemogcmcoup_wam_update_stress.o: par_kind.o +nemogcmcoup_mlinit.o: par_kind.o +nemogcmcoup_update_add.o: par_kind.o +nemogcmcoup_update.o: par_kind.o +nemogcmcoup_lim2_update.o: par_kind.o +nemogcmcoup_get.o: par_kind.o +nemogcmcoup_lim2_get.o: par_kind.o diff --git a/interinfo.F90 b/interinfo.F90 new file mode 100644 index 000000000..bb4fc1804 --- /dev/null +++ b/interinfo.F90 @@ -0,0 +1,23 @@ +MODULE interinfo + + ! Parallel regridding information + + USE parinter + + IMPLICIT NONE + + SAVE + + ! IFS to NEMO + + TYPE(parinterinfo) :: gausstoT,gausstoUV + + ! NEMO to IFS + + TYPE(parinterinfo) :: Ttogauss, UVtogauss + + ! Read parinterinfo on task 0 only and broadcast. + + LOGICAL :: lparbcast = .FALSE. + +END MODULE interinfo diff --git a/nctools.F90 b/nctools.F90 new file mode 100644 index 000000000..d0f1e99c2 --- /dev/null +++ b/nctools.F90 @@ -0,0 +1,40 @@ +#define __MYFILE__ 'nctools.F90' +MODULE nctools + + ! Utility subroutines for netCDF access + ! Modified : MAB (nf90, handle_error, LINE&FILE) + ! Modifled : KSM (new shorter name) + + USE netcdf + + PUBLIC ldebug_netcdf, nchdlerr + LOGICAL :: ldebug_netcdf = .FALSE. ! Debug switch for netcdf + +CONTAINS + + SUBROUTINE nchdlerr(status,lineno,filename) + + ! Error handler for netCDF access + IMPLICIT NONE + + + INTEGER :: status ! netCDF return status + INTEGER :: lineno ! Line number (usually obtained from + ! preprocessing __LINE__,__MYFILE__) + CHARACTER(len=*),OPTIONAL :: filename + + IF (status/=nf90_noerr) THEN + WRITE(*,*)'Netcdf error, code ',status + IF (PRESENT(filename)) THEN + WRITE(*,*)'In file ',filename,' in line ',lineno + ELSE + WRITE(*,*)'In line ',lineno + END IF + WRITE(*,'(2A)')' Error message : ',nf90_strerror(status) + CALL abort + ENDIF + + END SUBROUTINE nchdlerr + +!---------------------------------------------------------------------- +END MODULE nctools diff --git a/nemogcmcoup_coupinit.F90 b/nemogcmcoup_coupinit.F90 new file mode 100644 index 000000000..7abc87428 --- /dev/null +++ b/nemogcmcoup_coupinit.F90 @@ -0,0 +1,224 @@ +SUBROUTINE nemogcmcoup_coupinit( mype, npes, icomm, & + & npoints, nlocmsk, ngloind ) + + ! Initialize single executable coupling + USE parinter + USE scripremap + USE interinfo + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Gaussian grid information + ! Number of points + INTEGER, INTENT(IN) :: npoints + ! Integer mask and global indices + INTEGER, DIMENSION(npoints), INTENT(IN) :: nlocmsk, ngloind + INTEGER :: iunit = 0 + + ! Local variables + + ! Namelist containing the file names of the weights + CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & + & cdfile_T_to_gauss, cdfile_UV_to_gauss + CHARACTER(len=256) :: cdpathdist + LOGICAL :: lwritedist, lreaddist + LOGICAL :: lcommout + CHARACTER(len=128) :: commoutprefix + NAMELIST/namnemocoup/cdfile_gauss_to_T,& + & cdfile_gauss_to_UV,& + & cdfile_T_to_gauss,& + & cdfile_UV_to_gauss,& + & cdpathdist, & + & lreaddist, & + & lwritedist, & + & lcommout, & + & commoutprefix,& + & lparbcast + + ! Global number of gaussian gridpoints + INTEGER :: nglopoints + ! Ocean grids accessed with NEMO modules + INTEGER :: noglopoints,nopoints + INTEGER, ALLOCATABLE, DIMENSION(:) :: omask,ogloind + ! SCRIP remapping data structures. + TYPE(scripremaptype) :: remap_gauss_to_T, remap_T_to_gauss, & + & remap_gauss_to_UV, remap_UV_to_gauss + ! Misc variables + INTEGER :: i,j,k,ierr + LOGICAL :: lexists + + ! Read namelists + + cdfile_gauss_to_T = 'gausstoT.nc' + cdfile_gauss_to_UV = 'gausstoUV.nc' + cdfile_T_to_gauss = 'Ttogauss.nc' + cdfile_UV_to_gauss = 'UVtogauss.nc' + lcommout = .FALSE. + commoutprefix = 'parinter_comm' + cdpathdist = './' + lreaddist = .FALSE. + lwritedist = .FALSE. + + OPEN(9,file='namnemocoup.in') + READ(9,namnemocoup) + CLOSE(9) + + ! Global number of Gaussian gridpoints + +#if defined key_mpp_mpi + CALL mpi_allreduce( npoints, nglopoints, 1, & + & mpi_integer, mpi_sum, icomm, ierr) +#else + nglopoints=npoints +#endif + + WRITE(0,*)'Update FESOM global scalar points' + noglopoints=126858 + IF (mype==0) THEN + nopoints=126858 + ELSE + nopoints=0 + ENDIF + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + + omask(:) = 1 + IF (mype==0) THEN + DO i=1,nopoints + ogloind(i)=i + ENDDO + ENDIF + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean T-grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_T,remap_gauss_to_T,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_T,remap_gauss_to_T) + ENDIF + CALL parinter_init( mype, npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_T, gausstoT, lcommout, TRIM(commoutprefix)//'_gtoT', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_T) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT') + ENDIF + ENDIF + + ! From ocean T-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_T_to_gauss,remap_T_to_gauss,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_T_to_gauss,remap_T_to_gauss) + ENDIF + + CALL parinter_init( mype, npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_T_to_gauss, Ttogauss, lcommout, TRIM(commoutprefix)//'_Ttog', & + & iunit ) + CALL scripremap_dealloc(remap_T_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + + WRITE(0,*)'Update FESOM global vector points' + noglopoints=244659 + IF (mype==0) THEN + nopoints=244659 + ELSE + nopoints=0 + ENDIF + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + + omask(:) = 1 + IF (mype==0) THEN + DO i=1,nopoints + ogloind(i)=i + ENDDO + ENDIF + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean UV-grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_UV,remap_gauss_to_UV,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_UV,remap_gauss_to_UV) + ENDIF + CALL parinter_init( mype, npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_UV, gausstoUV, lcommout, TRIM(commoutprefix)//'_gtoUV', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_UV) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV') + ENDIF + ENDIF + + ! From ocean UV-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_UV_to_gauss,remap_UV_to_gauss,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_UV_to_gauss,remap_UV_to_gauss) + ENDIF + + CALL parinter_init( mype, npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_UV_to_gauss, UVtogauss, lcommout, TRIM(commoutprefix)//'_UVtog', & + & iunit ) + CALL scripremap_dealloc(remap_UV_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + +END SUBROUTINE nemogcmcoup_coupinit diff --git a/nemogcmcoup_exflds_get.F90 b/nemogcmcoup_exflds_get.F90 new file mode 100644 index 000000000..e31aa6fa5 --- /dev/null +++ b/nemogcmcoup_exflds_get.F90 @@ -0,0 +1,28 @@ +SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & + & nopoints, pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wp), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + + WRITE(0,*)'nemogcmcoup_exflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_exflds_get + diff --git a/nemogcmcoup_final.F90 b/nemogcmcoup_final.F90 new file mode 100644 index 000000000..140dd16ab --- /dev/null +++ b/nemogcmcoup_final.F90 @@ -0,0 +1,11 @@ +SUBROUTINE nemogcmcoup_final + + ! Finalize the NEMO model + + IMPLICIT NONE + + WRITE(*,*)'Insert call to finalization of FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_final + diff --git a/nemogcmcoup_get.F90 b/nemogcmcoup_get.F90 new file mode 100644 index 000000000..a651299c8 --- /dev/null +++ b/nemogcmcoup_get.F90 @@ -0,0 +1,30 @@ +SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & + & nopoints, pgsst, pgice, pgucur, pgvcur ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice and currents + REAL(wp), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get should not be called with FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_get + diff --git a/nemogcmcoup_get_1way.F90 b/nemogcmcoup_get_1way.F90 new file mode 100644 index 000000000..d3dbb0458 --- /dev/null +++ b/nemogcmcoup_get_1way.F90 @@ -0,0 +1,23 @@ +SUBROUTINE nemogcmcoup_get_1way( mype, npes, icomm ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get_1way should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_get_1way + diff --git a/nemogcmcoup_init.F90 b/nemogcmcoup_init.F90 new file mode 100644 index 000000000..1d10b012e --- /dev/null +++ b/nemogcmcoup_init.F90 @@ -0,0 +1,38 @@ +SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & + & lwaveonly, iatmunit, lwrite ) + + ! Initialize the NEMO model for single executable coupling + + USE par_kind + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: icomm + ! Initial date, time, initial timestep and final time step + INTEGER, INTENT(OUT) :: inidate, initime, itini, itend + ! Length of the time step + REAL(wp), INTENT(OUT) :: zstp + ! Coupling to waves only + LOGICAL, INTENT(IN) :: lwaveonly + ! Logfile unit (used if >=0) + INTEGER :: iatmunit + ! Write to this unit + LOGICAL :: lwrite + + WRITE(0,*)'Insert FESOM init here.' + CALL abort + + ! Set information for the caller + +#ifdef FESOM_TODO + inidate = nn_date0 + initime = nn_time0*3600 + itini = nit000 + itend = nn_itend + zstp = rdttra(1) +#endif + +END SUBROUTINE nemogcmcoup_init diff --git a/nemogcmcoup_init_ioserver.F90 b/nemogcmcoup_init_ioserver.F90 new file mode 100644 index 000000000..0ef2c9e21 --- /dev/null +++ b/nemogcmcoup_init_ioserver.F90 @@ -0,0 +1,12 @@ +SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + LOGICAL :: lnemoioserver + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver diff --git a/nemogcmcoup_init_ioserver_2.F90 b/nemogcmcoup_init_ioserver_2.F90 new file mode 100644 index 000000000..4b069eea2 --- /dev/null +++ b/nemogcmcoup_init_ioserver_2.F90 @@ -0,0 +1,11 @@ +SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver_2 diff --git a/nemogcmcoup_lim2_get.F90 b/nemogcmcoup_lim2_get.F90 new file mode 100644 index 000000000..2176782f1 --- /dev/null +++ b/nemogcmcoup_lim2_get.F90 @@ -0,0 +1,324 @@ +SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & + & nopoints, pgsst, pgist, pgalb, & + & pgifr, pghic, pghsn, pgucur, pgvcur, & + & pgistl, licelvls ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + + IMPLICIT NONE + + ! Arguments + REAL(wp), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur + REAL(wp), DIMENSION(nopoints,3) :: pgistl + LOGICAL :: licelvls + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + +#ifdef FESOM_TODO + + ! Temporary array for packing of input data without halos. + REAL(wp), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zsend + ! Arrays for rotation of current vectors from ij to ne. + REAL(wp), DIMENSION(jpi,jpj) :: zotx1, zoty1, ztmpx, ztmpy + ! Array for fraction of leads (i.e. ocean) + REAL(wp), DIMENSION(jpi,jpj) :: zfr_l + REAL(wp) :: zt + ! Loop variables + INTEGER :: ji, jj, jk, jl + REAL(wp) :: zhook_handle ! Dr Hook handle + + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',0,zhook_handle) + IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_get') + + zfr_l(:,:) = 1.- fr_i(:,:) + + IF (.NOT.ALLOCATED(zscplsst)) THEN + ALLOCATE(zscplsst(jpi,jpj)) + ENDIF + + ! Pack SST data and convert to K. + + IF ( nsstlvl(1) == nsstlvl(2) ) THEN + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tsn(ji,jj,nsstlvl(1),jp_tem) + rt0 + zscplsst(ji,jj) = zsend(jk) - rt0 + ENDDO + ENDDO + ELSE + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = SUM(& + & tsn(ji,jj,nsstlvl(1):nsstlvl(2),jp_tem) * & + & tmask(ji,jj,nsstlvl(1):nsstlvl(2)) * & + & fse3t(ji,jj,nsstlvl(1):nsstlvl(2)) ) / & + & MAX( SUM( & + & tmask(ji,jj,nsstlvl(1):nsstlvl(2)) * & + & fse3t(ji,jj,nsstlvl(1):nsstlvl(2))) , 1.0 ) + rt0 + zscplsst(ji,jj) = zsend(jk) - rt0 + ENDDO + ENDDO + ENDIF + CALL lbc_lnk( zscplsst, 'T', 1. ) + + ! Interpolate SST + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgsst ) + + ! Pack ice temperature data (already in K) + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tn_ice(ji,jj,1) + ENDDO + ENDDO +#else + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + zt=0.0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) + zt = zt + a_i(ji,jj,jl) + ENDDO + IF ( zt > 0.0 ) THEN + zsend(jk) = zsend(jk) / zt + ELSE + zsend(jk) = rt0 + ENDIF + ENDDO + ENDDO +#endif + + ! Interpolate ice temperature + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgist ) + + ! Ice level temperatures + + IF (licelvls) THEN + +#if defined key_lim2 + + DO jl = 1, 3 + + ! Pack ice temperatures data at level jl(already in K) + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tbif (ji,jj,jl) + ENDDO + ENDDO + + ! Interpolate ice temperature at level jl + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgistl(:,jl) ) + + ENDDO + +#else + WRITE(0,*)'licelvls needs to be sorted for LIM3' + CALL abort +#endif + + ENDIF + + ! Pack ice albedo data + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = alb_ice(ji,jj,1) + ENDDO + ENDDO +#else + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + zt=0.0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + alb_ice(ji,jj,jl) * a_i(ji,jj,jl) + zt = zt + a_i(ji,jj,jl) + ENDDO + IF ( zt > 0.0_wp ) THEN + zsend(jk) = zsend(jk) / zt + ELSE + zsend(jk) = albedo_oce_mix(ji,jj) + ENDIF + ENDDO + ENDDO +#endif + + ! Interpolate ice albedo + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgalb ) + + ! Pack ice fraction data + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = fr_i(ji,jj) + ENDDO + ENDDO + + ! Interpolation of ice fraction. + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgifr ) + + ! Pack ice thickness data + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = hicif(ji,jj) + ENDDO + ENDDO +#else + ! LIM3 + ! Average over categories (to be revised). + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + ht_i(ji,jj,jl) * a_i(ji,jj,jl) + ENDDO + ENDDO + ENDDO +#endif + + ! Interpolation of ice thickness + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pghic ) + + ! Pack snow thickness data + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = hsnif(ji,jj) + ENDDO + ENDDO +#else + ! LIM3 + ! Average over categories (to be revised). + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + ht_s(ji,jj,jl) * a_i(ji,jj,jl) + ENDDO + ENDDO + ENDDO +#endif + + ! Interpolation of snow thickness + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pghsn ) + + ! Currents needs to be rotated from ij to ne first + + DO jj = 2, jpjm1 + DO ji = 2, jpim1 + zotx1(ji,jj) = 0.5 * ( un(ji,jj,1) + un(ji-1,jj ,1) ) + zoty1(ji,jj) = 0.5 * ( vn(ji,jj,1) + vn(ji ,jj-1,1) ) + END DO + END DO + CALL lbc_lnk( zotx1, 'T', -1. ) + CALL lbc_lnk( zoty1, 'T', -1. ) + CALL rot_rep( zotx1, zoty1, 'T', 'ij->e', ztmpx ) + CALL rot_rep( zotx1, zoty1, 'T', 'ij->n', ztmpy ) + + ! Pack U current + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = ztmpx(ji,jj) + ENDDO + ENDDO + + ! Interpolate U current + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgucur ) + + ! Pack V current + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = ztmpy(ji,jj) + ENDDO + ENDDO + + ! Interpolate V current + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgvcur ) + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) + +#else + + WRITE(0,*)'nemogcmcoup_lim2_get not done for FESOM yet' + CALL abort + +#endif + +END SUBROUTINE nemogcmcoup_lim2_get + diff --git a/nemogcmcoup_lim2_update.F90 b/nemogcmcoup_lim2_update.F90 new file mode 100644 index 000000000..748f87b7b --- /dev/null +++ b/nemogcmcoup_lim2_update.F90 @@ -0,0 +1,669 @@ +SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & + & npoints, & + & taux_oce, tauy_oce, taux_ice, tauy_ice, & + & qs___oce, qs___ice, qns__oce, qns__ice, dqdt_ice, & + & evap_tot, evap_ice, prcp_liq, prcp_sol, & + & runoff, ocerunoff, tcc, lcc, tice_atm, & + & kt, ldebug, loceicemix, lqnsicefilt ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + & taux_oce, tauy_oce, taux_ice, tauy_ice, & + & qs___oce, qs___ice, qns__oce, qns__ice, & + & dqdt_ice, evap_tot, evap_ice, prcp_liq, prcp_sol, & + & runoff, ocerunoff, tcc, lcc, tice_atm + + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + ! QS/QNS mixed switch + LOGICAL, INTENT(IN) :: loceicemix + ! QNS ice filter switch (requires tice_atm to be sent) + LOGICAL, INTENT(IN) :: lqnsicefilt + + ! Local variables + +#ifdef FESOM_TODO + + ! Packed receive buffer + REAL(wp), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zrecv + ! Unpacked fields on ORCA grids + REAL(wp), DIMENSION(jpi,jpj) :: zqs___oce, zqs___ice, zqns__oce, zqns__ice + REAL(wp), DIMENSION(jpi,jpj) :: zdqdt_ice, zevap_tot, zevap_ice, zprcp_liq, zprcp_sol + REAL(wp), DIMENSION(jpi,jpj) :: zrunoff, zocerunoff + REAL(wp), DIMENSION(jpi,jpj) :: ztmp, zicefr + ! Arrays for rotation + REAL(wp), DIMENSION(jpi,jpj) :: zuu,zvu,zuv,zvv,zutau,zvtau + ! Lead fraction for both LIM2/LIM3 + REAL(wp), DIMENSION(jpi,jpj) :: zfrld + ! Mask for masking for I grid + REAL(wp) :: zmsksum + ! For summing up LIM3 contributions to ice temperature + REAL(wp) :: zval,zweig + + ! Loop variables + INTEGER :: ji,jj,jk,jl + ! netCDF debugging output variables + CHARACTER(len=128) :: cdoutfile + INTEGER :: inum + REAL(wp) :: zhook_handle ! Dr Hook handle + + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',0,zhook_handle) + IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_update') + + ! Allocate the storage data + + IF (.NOT.lallociceflx) THEN + ALLOCATE( & + & zsqns_tot(jpi,jpj), & + & zsqns_ice(jpi,jpj), & + & zsqsr_tot(jpi,jpj), & + & zsqsr_ice(jpi,jpj), & + & zsemp_tot(jpi,jpj), & + & zsemp_ice(jpi,jpj), & + & zsevap_ice(jpi,jpj), & + & zsdqdns_ice(jpi,jpj), & + & zssprecip(jpi,jpj), & + & zstprecip(jpi,jpj), & + & zstcc(jpi,jpj), & + & zslcc(jpi,jpj), & + & zsatmist(jpi,jpj), & + & zsqns_ice_add(jpi,jpj)& + & ) + lallociceflx = .TRUE. + ENDIF + IF (.NOT.lallocstress) THEN + ALLOCATE( & + & zsutau(jpi,jpj), & + & zsvtau(jpi,jpj), & + & zsutau_ice(jpi,jpj), & + & zsvtau_ice(jpi,jpj) & + & ) + lallocstress = .TRUE. + ENDIF + + ! Sort out incoming arrays from the IFS and put them on the ocean grid + + !1. Interpolate ocean solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean solar radiation + + zqs___oce(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqs___oce(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !2. Interpolate ice solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice solar radiation + + zqs___ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqs___ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !3. Interpolate ocean non-solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean non-solar radiation + + zqns__oce(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqns__oce(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !4. Interpolate ice non-solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice non-solar radiation + + zqns__ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqns__ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !5. Interpolate D(q)/dT to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, dqdt_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack D(q)/D(T) + + zdqdt_ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zdqdt_ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !6. Interpolate total evaporation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack total evaporation + + zevap_tot(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zevap_tot(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !7. Interpolate evaporation over ice to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack evaporation over ice + + zevap_ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zevap_ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !8. Interpolate liquid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack liquid precipitation + + zprcp_liq(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zprcp_liq(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !9. Interpolate solid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack precipitation over ice + + zprcp_sol(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zprcp_sol(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !10. Interpolate runoff to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack runoff + + zrunoff(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zrunoff(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !11. Interpolate ocean runoff to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zocerunoff(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zocerunoff(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !12. Interpolate total cloud fractions to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tcc, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zstcc(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zstcc(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !13. Interpolate low cloud fractions to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, lcc, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zslcc(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zslcc(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! get sea ice fraction and lead fraction + +#if defined key_lim2 + zfrld(:,:) = frld(:,:) + zicefr(:,:) = 1 - zfrld(:,:) +#else + zicefr(:,:) = 0.0_wp + DO jl = 1, jpl + zicefr(:,:) = zicefr(:,:) + a_i(:,:,jl) + ENDDO + zfrld(:,:) = 1 - zicefr(:,:) +#endif + + zsemp_tot(:,:) = zevap_tot(:,:) - zprcp_liq(:,:) - zprcp_sol(:,:) + zstprecip(:,:) = zprcp_liq(:,:) + zprcp_sol(:,:) + ! More consistent with NEMO, but does changes the results, so + ! we don't do it for now. + ! zsemp_tot(:,:) = zevap_tot(:,:) - zstprecip(:,:) + zsemp_ice(:,:) = zevap_ice(:,:) - zprcp_sol(:,:) + zssprecip(:,:) = - zsemp_ice(:,:) + zsemp_tot(:,:) = zsemp_tot(:,:) - zrunoff(:,:) + zsemp_tot(:,:) = zsemp_tot(:,:) - zocerunoff(:,:) + zsevap_ice(:,:) = zevap_ice(:,:) + + ! non solar heat fluxes ! (qns) + IF (loceicemix) THEN + zsqns_tot(:,:) = zqns__oce(:,:) + ELSE + zsqns_tot(:,:) = zfrld(:,:) * zqns__oce(:,:) + zicefr(:,:) * zqns__ice(:,:) + ENDIF + zsqns_ice(:,:) = zqns__ice(:,:) + ztmp(:,:) = zfrld(:,:) * zprcp_sol(:,:) * lfus ! add the latent heat of solid precip. melting + + zsqns_tot(:,:) = zsqns_tot(:,:) - ztmp(:,:) ! over free ocean + ! solar heat fluxes ! (qsr) + + IF (loceicemix) THEN + zsqsr_tot(:,:) = zqs___oce(:,:) + ELSE + zsqsr_tot(:,:) = zfrld(:,:) * zqs___oce(:,:) + zicefr(:,:) * zqs___ice(:,:) + ENDIF + zsqsr_ice(:,:) = zqs___ice(:,:) + + IF( ln_dm2dc ) THEN ! modify qsr to include the diurnal cycle + zsqsr_tot(:,:) = sbc_dcy( zsqsr_tot(:,:) ) + zsqsr_ice(:,:) = sbc_dcy( zsqsr_ice(:,:) ) + ENDIF + + zsdqdns_ice(:,:) = zdqdt_ice(:,:) + + ! Apply lateral boundary condition + + CALL lbc_lnk(zsqns_tot, 'T', 1.0) + CALL lbc_lnk(zsqns_ice, 'T', 1.0) + CALL lbc_lnk(zsqsr_tot, 'T', 1.0) + CALL lbc_lnk(zsqsr_ice, 'T', 1.0) + CALL lbc_lnk(zsemp_tot, 'T', 1.0) + CALL lbc_lnk(zsemp_ice, 'T', 1.0) + CALL lbc_lnk(zsdqdns_ice, 'T', 1.0) + CALL lbc_lnk(zssprecip, 'T', 1.0) + CALL lbc_lnk(zstprecip, 'T', 1.0) + CALL lbc_lnk(zstcc, 'T', 1.0) + CALL lbc_lnk(zslcc, 'T', 1.0) + + ! Interpolate atmospheric ice temperature to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tice_atm, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack atmospheric ice temperature + + zsatmist(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zsatmist(ji,jj) = zrecv(jk) + ENDDO + ENDDO + CALL lbc_lnk(zsatmist, 'T', 1.0) + + zsqns_ice_add(:,:) = 0.0_wp + + ! Use the dqns_ice filter + + IF (lqnsicefilt) THEN + + ! Add filtr to qns_ice + +#if defined key_lim2 + ztmp(:,:) = tn_ice(:,:,1) +#else + DO jj = nldj, nlej + DO ji = nldi, nlei + zval=0.0 + zweig=0.0 + DO jl = 1, jpl + zval = zval + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) + zweig = zweig + a_i(ji,jj,jl) + ENDDO + IF ( zweig > 0.0 ) THEN + ztmp(ji,jj) = zval /zweig + ELSE + ztmp(ji,jj) = rt0 + ENDIF + ENDDO + ENDDO + CALL lbc_lnk(ztmp, 'T', 1.0) +#endif + + WHERE ( zicefr(:,:) > .001_wp ) + zsqns_ice_add(:,:) = zsdqdns_ice(:,:) * ( ztmp(:,:) - zsatmist(:,:) ) + END WHERE + + zsqns_ice(:,:) = zsqns_ice(:,:) + zsqns_ice_add(:,:) + + ENDIF + + ! Interpolate u-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack u stress on U grid + + zuu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate v-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack v stress on U grid + + zvu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate u-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack u stress on V grid + + zuv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate v-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack v stress on V grid + + zvv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Rotate stresses from en to ij and put u,v stresses on U,V grids + + CALL repcmo( zuu, zvu, zuv, zvv, zsutau, zsvtau ) + + ! Apply lateral boundary condition on u,v stresses on the U,V grids + + CALL lbc_lnk( zsutau, 'U', -1.0 ) + CALL lbc_lnk( zsvtau, 'V', -1.0 ) + + ! Interpolate ice u-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice u stress on U grid + + zuu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice v-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice v stress on U grid + + zvu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice u-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice u stress on V grid + + zuv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice v-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice v stress on V grid + + zvv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Rotate stresses from en to ij and put u,v stresses on U,V grids + + CALL repcmo( zuu, zvu, zuv, zvv, zutau, zvtau ) + + ! Apply lateral boundary condition on u,v stresses on the U,V grids + + CALL lbc_lnk( zutau, 'U', -1.0 ) + CALL lbc_lnk( zvtau, 'V', -1.0 ) + +#if defined key_lim2_vp + + ! Convert to I grid for LIM2 for key_lim_vp + DO jj = 2, jpjm1 ! (U,V) ==> I + DO ji = 2, jpim1 ! NO vector opt. + zmsksum = umask(ji-1,jj,1) + umask(ji-1,jj-1,1) + zsutau_ice(ji,jj) = ( umask(ji-1,jj,1) * zutau(ji-1,jj) + & + & umask(ji-1,jj-1,1) * zutau(ji-1,jj-1) ) + IF ( zmsksum > 0.0 ) THEN + zsutau_ice(ji,jj) = zsutau_ice(ji,jj) / zmsksum + ENDIF + zmsksum = vmask(ji,jj-1,1) + vmask(ji-1,jj-1,1) + zsvtau_ice(ji,jj) = ( vmask(ji,jj-1,1) * zvtau(ji,jj-1) + & + & vmask(ji-1,jj-1,1) * zvtau(ji-1,jj-1) ) + IF ( zmsksum > 0.0 ) THEN + zsvtau_ice(ji,jj) = zsvtau_ice(ji,jj) / zmsksum + ENDIF + END DO + END DO + +#else + + zsutau_ice(:,:) = zutau(:,:) + zsvtau_ice(:,:) = zvtau(:,:) + +#endif + + CALL lbc_lnk( zsutau_ice, 'I', -1.0 ) + CALL lbc_lnk( zsvtau_ice, 'I', -1.0 ) + + ! Optionally write files write the data on the ORCA grid via IOM. + + IF (ldebug) THEN + WRITE(cdoutfile,'(A,I8.8)') 'zsutau_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsutau' , zsutau ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsvtau' , zsvtau ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsutau_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsutau_ice' , zsutau_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsvtau_ice' , zsvtau_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_tot' , zsqns_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_ice' , zsqns_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqsr_tot' , zsqsr_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqsr_ice' , zsqsr_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsemp_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsemp_tot' , zsemp_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsemp_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsemp_ice' , zsemp_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsdqdns_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsdqdns_ice' , zsdqdns_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zssprecip_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zssprecip' , zssprecip ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zstprecip_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zstprecip' , zstprecip ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsevap_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsevap_ice' , zsevap_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zstcc_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zstcc' , zstcc ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zslcc_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zslcc' , zslcc ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsatmist_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsatmist' , zsatmist ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_add_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_ice_add' , zsqns_ice_add ) + CALL iom_close( inum ) + ENDIF + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_update') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',1,zhook_handle) + +#else + + WRITE(0,*)'nemogcmcoup_lim2_update not done for FESOM yet' + CALL abort + +#endif + +END SUBROUTINE nemogcmcoup_lim2_update + + diff --git a/nemogcmcoup_mlflds_get.F90 b/nemogcmcoup_mlflds_get.F90 new file mode 100644 index 000000000..f03bae3a4 --- /dev/null +++ b/nemogcmcoup_mlflds_get.F90 @@ -0,0 +1,26 @@ +SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & + & nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wp), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints,nlev + + ! Local variables + + WRITE(0,*)'nemogcmcoup_mlflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlflds_get + diff --git a/nemogcmcoup_mlinit.F90 b/nemogcmcoup_mlinit.F90 new file mode 100644 index 000000000..f44b27f06 --- /dev/null +++ b/nemogcmcoup_mlinit.F90 @@ -0,0 +1,26 @@ +SUBROUTINE nemogcmcoup_mlinit( mype, npes, icomm, & + & nlev, nopoints, pdep, pmask ) + + ! Get information about the vertical discretization of the ocean model + + ! nlevs are maximum levels on input and actual number levels on output + + USE par_kind + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Grid information + INTEGER, INTENT(INOUT) :: nlev, nopoints + REAL(wp), INTENT(OUT), DIMENSION(nlev) :: pdep + REAL(wp), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask + + ! Local variables + + WRITE(0,*)'nemogcmcoup_mlinit should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlinit diff --git a/nemogcmcoup_step.F90 b/nemogcmcoup_step.F90 new file mode 100644 index 000000000..a88cf9867 --- /dev/null +++ b/nemogcmcoup_step.F90 @@ -0,0 +1,33 @@ +SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) + + IMPLICIT NONE + + ! Arguments + + ! Time step + INTEGER, INTENT(IN) :: istp + + ! Data and time from NEMO + INTEGER, INTENT(OUT) :: icdate, ictime + + ! Local variables + + ! Advance the FESOM model 1 time step + + WRITE(0,*)'Insert FESOM step here.' + + ! Compute date and time at the end of the time step. + +#ifdef FESOM_TODO + iye = ndastp / 10000 + imo = ndastp / 100 - iye * 100 + ida = MOD( ndastp, 100 ) + CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) + zjul = zjul + ( nsec_day + 0.5_wp * rdttra(1) ) / 86400.0_wp + CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) + icdate = iye * 10000 + imo * 100 + ida + ictime = ihh * 10000 + imm * 100 + iss +#endif + +END SUBROUTINE nemogcmcoup_step + diff --git a/nemogcmcoup_update.F90 b/nemogcmcoup_update.F90 new file mode 100644 index 000000000..d712c2098 --- /dev/null +++ b/nemogcmcoup_update.F90 @@ -0,0 +1,32 @@ +SUBROUTINE nemogcmcoup_update( mype, npes, icomm, & + & npoints, pgutau, pgvtau, & + & pgqsr, pgqns, pgemp, kt, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), intent(IN) :: & + & pgutau, pgvtau, pgqsr, pgqns, pgemp + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update should be called with with.' + CALL abort + +END SUBROUTINE nemogcmcoup_update + + diff --git a/nemogcmcoup_update_add.F90 b/nemogcmcoup_update_add.F90 new file mode 100644 index 000000000..c6813bf02 --- /dev/null +++ b/nemogcmcoup_update_add.F90 @@ -0,0 +1,32 @@ +SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & + & npoints, pgsst, pgtsk, kt, ldebug ) + + ! Update addetiona in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Input on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), intent(IN) :: & + & pgsst, pgtsk + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom.' + CALL abort + + +END SUBROUTINE nemogcmcoup_update_add + + diff --git a/nemogcmcoup_wam_coupinit.F90 b/nemogcmcoup_wam_coupinit.F90 new file mode 100644 index 000000000..428cbbe06 --- /dev/null +++ b/nemogcmcoup_wam_coupinit.F90 @@ -0,0 +1,25 @@ +SUBROUTINE nemogcmcoup_wam_coupinit( mype, npes, icomm, & + & nlocpoints, nglopoints, & + & nlocmsk, ngloind, iunit ) + + ! Initialize single executable coupling between WAM and NEMO + ! This is called from WAM. + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! WAM grid information + ! Number of local and global points + INTEGER, INTENT(IN) :: nlocpoints, nglopoints + ! Integer mask and global indices + INTEGER, DIMENSION(nlocpoints), INTENT(IN) :: nlocmsk, ngloind + ! Unit for output in parinter_init + INTEGER :: iunit + + WRITE(0,*)'Wam couplind not implemented for FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_coupinit diff --git a/nemogcmcoup_wam_get.F90 b/nemogcmcoup_wam_get.F90 new file mode 100644 index 000000000..2975cfd6f --- /dev/null +++ b/nemogcmcoup_wam_get.F90 @@ -0,0 +1,30 @@ +SUBROUTINE nemogcmcoup_wam_get( mype, npes, icomm, & + & nopoints, pwsst, pwicecov, pwicethk, & + & pwucur, pwvcur, licethk ) + + ! Interpolate from the ORCA grid + ! to the WAM grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number WAM grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice cover, ice thickness and currents + REAL(wp), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur + LOGICAL :: licethk + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_get + diff --git a/nemogcmcoup_wam_update.F90 b/nemogcmcoup_wam_update.F90 new file mode 100644 index 000000000..abadc4855 --- /dev/null +++ b/nemogcmcoup_wam_update.F90 @@ -0,0 +1,34 @@ +SUBROUTINE nemogcmcoup_wam_update( mype, npes, icomm, & + & npoints, pwswh, pwmwp, & + & pwphioc, pwtauoc, pwstrn, & + & pwustokes, pwvstokes, & + & cdtpro, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + & pwswh, pwmwp, pwphioc, pwtauoc, pwstrn, pwustokes, pwvstokes + ! Current time + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update + + diff --git a/nemogcmcoup_wam_update_stress.F90 b/nemogcmcoup_wam_update_stress.F90 new file mode 100644 index 000000000..5777d46c2 --- /dev/null +++ b/nemogcmcoup_wam_update_stress.F90 @@ -0,0 +1,32 @@ +SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & + & pwutau, pwvtau, pwuv10n, pwphif,& + & cdtpro, ldebug ) + + ! Update stresses in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + & pwutau, pwvtau, pwuv10n, pwphif + ! Current time step + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update_stress should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update_stress + + diff --git a/par_kind.F90 b/par_kind.F90 new file mode 100644 index 000000000..781434883 --- /dev/null +++ b/par_kind.F90 @@ -0,0 +1,8 @@ +MODULE par_kind + IMPLICIT NONE + INTEGER, PUBLIC, PARAMETER :: & !: Floating point section + sp = SELECTED_REAL_KIND( 6, 37), & !: single precision (real 4) + dp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + wp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + ik = SELECTED_INT_KIND(6) !: integer precision +END MODULE par_kind diff --git a/parinter.F90 b/parinter.F90 new file mode 100644 index 000000000..1dfc6ca8f --- /dev/null +++ b/parinter.F90 @@ -0,0 +1,762 @@ +#define __MYFILE__ 'parinter.F90' +MODULE parinter + +#if defined key_mpp_mpi + USE mpi +#endif + USE scripremap + USE scrippar + USE nctools + + IMPLICIT NONE + + ! Type to contains interpolation information + ! (like what is in scripremaptype) and message + ! passing information + + TYPE parinterinfo + ! Number of local links + INTEGER :: num_links + ! Destination side + INTEGER, POINTER, DIMENSION(:) :: dst_address + ! Source addresses and work array + INTEGER, POINTER, DIMENSION(:) :: src_address + ! Local remap matrix + REAL(scripdp), POINTER, DIMENSION(:,:) :: remap_matrix + ! Message passing information + ! Array of local addresses for send buffer + ! packing + INTEGER, POINTER, DIMENSION(:) :: send_address + ! Sending bookkeeping + INTEGER :: nsendtot + INTEGER, POINTER, DIMENSION(:) :: nsend,nsdisp + ! Receiving bookkeeping + INTEGER :: nrecvtot + INTEGER, POINTER, DIMENSION(:) :: nrecv,nrdisp + END TYPE parinterinfo + +CONTAINS + + SUBROUTINE parinter_init( mype, nproc, mpi_comm, & + & nsrclocpoints, nsrcglopoints, srcmask, srcgloind, & + & ndstlocpoints, ndstglopoints, dstmask, dstgloind, & + & remap, pinfo, lcommout, commoutprefix, iunit ) + + ! Setup interpolation based on SCRIP format weights in + ! remap and the source/destination grids information. + + ! Procedure: + + ! 1) A global SCRIP remapping file is read on all processors. + ! 2) Find local destination points in the global grid. + ! 3) Find which processor needs source data and setup buffer + ! information for sending data. + ! 4) Construct new src remapping for buffer received + + ! All information is stored in the TYPE(parinterinfo) output + ! data type + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc, mpi_comm + ! Source grid local and global number of grid points + INTEGER, INTENT(IN) :: nsrclocpoints, nsrcglopoints + ! Source integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcmask + ! Source global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcgloind + ! Destination grid local and global number of grid points + INTEGER, INTENT(IN) :: ndstlocpoints, ndstglopoints + ! Destination integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstmask + ! Destination global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstgloind + ! SCRIP remapping data + TYPE(scripremaptype) :: remap + ! Switch for output communication patterns + LOGICAL :: lcommout + CHARACTER(len=*) :: commoutprefix + ! Unit to use for output + INTEGER :: iunit + + ! Output arguments + + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + + ! Local variable + + ! Variable for glocal <-> local address/pe information + INTEGER, DIMENSION(nsrcglopoints) :: ilsrcmppmap, ilsrclocind + INTEGER, DIMENSION(nsrcglopoints) :: igsrcmppmap, igsrclocind + INTEGER, DIMENSION(ndstglopoints) :: ildstmppmap, ildstlocind + INTEGER, DIMENSION(ndstglopoints) :: igdstmppmap, igdstlocind + INTEGER, DIMENSION(nsrcglopoints) :: isrcpe,isrcpetmp + INTEGER, DIMENSION(nsrcglopoints) :: isrcaddtmp + INTEGER, DIMENSION(0:nproc-1) :: isrcoffset + INTEGER, DIMENSION(nproc) :: isrcno, isrcoff, isrccur + INTEGER, DIMENSION(nproc) :: ircvoff, ircvcur + INTEGER, DIMENSION(:), ALLOCATABLE :: isrctot, ircvtot + + ! Misc variable + INTEGER :: i,n,pe + INTEGER :: istatus + CHARACTER(len=256) :: cdfile + + ! Check that masks are consistent. + + ! Remark: More consistency tests between remapping information + ! and input argument could be code, but for now we settle + ! for checking the masks. + + ! Source grid + + DO i=1,nsrclocpoints + IF (srcmask(i)/=remap%src%grid_imask(srcgloind(i))) THEN + WRITE(iunit,*)'Source imask is inconsistent at ' + WRITE(iunit,*)'global index = ',srcgloind(i) + WRITE(iunit,*)'Source mask = ',srcmask(i) + WRITE(iunit,*)'Remap mask = ',remap%src%grid_imask(srcgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%src%grid_center_lat(srcgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%src%grid_center_lon(srcgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Destination grid + + DO i=1,ndstlocpoints + IF (dstmask(i)/=remap%dst%grid_imask(dstgloind(i))) THEN + WRITE(iunit,*)'Destination imask is inconsistent at ' + WRITE(iunit,*)'global index = ',dstgloind(i) + WRITE(iunit,*)'Destin mask = ',dstmask(i) + WRITE(iunit,*)'Remap mask = ',remap%dst%grid_imask(dstgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%dst%grid_center_lat(dstgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%dst%grid_center_lon(dstgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Setup global to local and vice versa mappings. + + ilsrcmppmap(:)=-1 + ilsrclocind(:)=0 + ildstmppmap(:)=-1 + ildstlocind(:)=0 + + DO i=1,nsrclocpoints + ilsrcmppmap(srcgloind(i))=mype + ilsrclocind(srcgloind(i))=i + ENDDO + + DO i=1,ndstlocpoints + ildstmppmap(dstgloind(i))=mype + ildstlocind(dstgloind(i))=i + ENDDO + +#if defined key_mpp_mpi + CALL mpi_allreduce(ilsrcmppmap,igsrcmppmap,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ilsrclocind,igsrclocind,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstmppmap,igdstmppmap,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstlocind,igdstlocind,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) +#else + igsrcmppmap(:)=ilsrcmppmap(:) + igsrclocind(:)=ilsrclocind(:) + igdstmppmap(:)=ildstmppmap(:) + igdstlocind(:)=ildstlocind(:) +#endif + + ! Optionally construct an ascii file listing what src and + ! dest points belongs to which task + + ! Since igsrcmppmap and igdstmppmap are global data only do + ! this for mype==0. + + IF (lcommout.AND.(mype==0)) THEN + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_srcmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,nsrcglopoints + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & igsrcmppmap(i)+1,remap%src%grid_imask(i) + ENDDO + CLOSE(9) + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dstmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,ndstglopoints + WRITE(9,*)remap%dst%grid_center_lat(i),& + & remap%dst%grid_center_lon(i), & + & igdstmppmap(i)+1,remap%dst%grid_imask(i) + ENDDO + CLOSE(9) + ENDIF + + ! + ! Standard interpolation in serial case is + ! + ! DO n=1,remap%num_links + ! zdst(remap%dst_address(n)) = zdst(remap%dst_address(n)) + & + ! & remap%remap_matrix(1,n)*zsrc(remap%src_address(n)) + ! END DO + ! + + ! In parallel we need to first find local number of links + + pinfo%num_links=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) & + & pinfo%num_links=pinfo%num_links+1 + ENDDO + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(1,pinfo%num_links)) + + ! Get local destination addresses + + n=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + n=n+1 + pinfo%dst_address(n)=& + & igdstlocind(remap%dst_address(i)) + pinfo%remap_matrix(:,n)=& + & remap%remap_matrix(:,i) + ENDIF + ENDDO + + ! Get sending processors maps. + + ! The same data point might need to be sent to many processors + ! so first construct a map for processors needing the data + + isrcpe(:)=-1 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + isrcpe(remap%src_address(i))=& + & igsrcmppmap(remap%src_address(i)) + ENDIF + ENDDO + + ! Optionally write a set if ascii file listing which tasks + ! mype needs to send to communicate with + + IF (lcommout) THEN + ! Destination processors + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dsts_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO pe=0,nproc-1 + IF (pe==mype) THEN + isrcpetmp(:)=isrcpe(:) + ENDIF +#if defined key_mpp_mpi + CALL mpi_bcast(isrcpetmp,nsrcglopoints,mpi_integer,pe,mpi_comm,istatus) +#endif + DO i=1,nsrcglopoints + IF (isrcpetmp(i)==mype) THEN + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & pe+1,mype+1 + ENDIF + ENDDO + ENDDO + CLOSE(9) + ENDIF + + ! Get number of points to send to each processor + + ALLOCATE(pinfo%nsend(0:nproc-1)) + isrcno(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrcno(isrcpe(i)+1)=isrcno(isrcpe(i)+1)+1 + ENDIF + ENDDO +#if defined key_mpp_mpi + CALL mpi_alltoall(isrcno,1,mpi_integer, & + & pinfo%nsend(0:nproc-1),1,mpi_integer, & + & mpi_comm,istatus) +#else + pinfo%nsend(0:nproc-1) = isrcno(1:nproc) +#endif + pinfo%nsendtot=SUM(pinfo%nsend(0:nproc-1)) + + ! Construct sending buffer mapping. Data is mapping in + ! processor order. + + ALLOCATE(pinfo%send_address(pinfo%nsendtot)) + + ! Temporary arrays for mpi all to all. + + ALLOCATE(isrctot(SUM(isrcno(1:nproc)))) + ALLOCATE(ircvtot(SUM(pinfo%nsend(0:nproc-1)))) + + ! Offset for message parsing + + isrcoff(1)=0 + ircvoff(1)=0 + DO i=1,nproc-1 + isrcoff(i+1) = isrcoff(i) + isrcno(i) + ircvoff(i+1) = pinfo%nsend(i-1) + ircvoff(i) + ENDDO + + ! Pack indices i into a buffer + + isrccur(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrccur(isrcpe(i)+1)=isrccur(isrcpe(i)+1)+1 + isrctot(isrccur(isrcpe(i)+1)+isrcoff(isrcpe(i)+1)) = i + ENDIF + ENDDO + + ! Send the data + +#if defined key_mpp_mpi + CALL mpi_alltoallv(& + & isrctot,isrccur,isrcoff,mpi_integer, & + & ircvtot,pinfo%nsend(0:nproc-1),ircvoff,mpi_integer, & + & mpi_comm,istatus) +#else + ircvtot(:)=isrctot(:) +#endif + + ! Get the send address. ircvtot will at this point contain the + ! addresses in the global index needed for message passing + + DO i=1,pinfo%nsendtot + pinfo%send_address(i)=igsrclocind(ircvtot(i)) + ENDDO + + ! Deallocate the mpi all to all arrays + + DEALLOCATE(ircvtot,isrctot) + + ! Get number of points to receive to each processor + + ALLOCATE(pinfo%nrecv(0:nproc-1)) + pinfo%nrecv(0:nproc-1)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0 .AND. isrcpe(i)=0 .AND. isrcpe(i)0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & pinfo%num_links,dimnl),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & 1,dimnw),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nsendtot',& + & pinfo%nsendtot,dimnst),& + & __LINE__,__MYFILE__) + ENDIF + + IF (pinfo%nrecvtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nrecvtot',& + & pinfo%nrecvtot,dimnrt),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'nproc',& + & nproc,dimnpr),& + & __LINE__,__MYFILE__) + + IF (pinfo%num_links>0) THEN + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa),& + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsend',& + & nf90_int,dims1,idns),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + dims1(1)=dimnst + CALL nchdlerr(nf90_def_var(ncid,'send_address',& + & nf90_int,dims1,idsaa),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrecv',& + & nf90_int,dims1,idnr),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsdisp',& + & nf90_int,dims1,idnsp),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrdisp',& + & nf90_int,dims1,idnrp),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + + IF (pinfo%num_links>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idda,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsa,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idrm,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idns,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idsaa,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idnr,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnsp,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnrp,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE parinter_write + + SUBROUTINE parinter_read( mype, nproc, & + & nsrcglopoints, ndstglopoints, & + & pinfo, cdpath, cdprefix, lexists ) + + ! Write pinfo information in a netCDF file in order to + ! be able to read it rather than calling parinter_init + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc + ! Source grid local global number of grid points + INTEGER, INTENT(IN) :: nsrcglopoints + ! Destination grid global number of grid points + INTEGER, INTENT(IN) :: ndstglopoints + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + ! Does the information exists + LOGICAL :: lexists + ! Path and file prefix + CHARACTER(len=*) :: cdpath, cdprefix + + ! Local variable + + ! Misc variable + CHARACTER(len=1024) :: cdfile + INTEGER :: ncid, dimid, varid, num_wgts + + WRITE(cdfile,'(A,2(I8.8,A),2(I4.4,A),A)') & + & TRIM(cdpath)//'/'//TRIM(cdprefix)//'_', & + & nsrcglopoints,'_',ndstglopoints,'_',mype,'_',nproc,'.nc' + + + lexists=nf90_open(TRIM(cdfile),nf90_nowrite,ncid)==nf90_noerr + + IF (lexists) THEN + + ! If num_links is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'num_links',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%num_links),& + & __LINE__,__MYFILE__) + ELSE + pinfo%num_links=0 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=num_wgts),& + & __LINE__,__MYFILE__) + IF (num_wgts/=1) THEN + WRITE(0,*)'parinter_read: num_wgts has to be 1 for now' + CALL abort + ENDIF + + ! If nsendtot is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'nsendtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nsendtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nsendtot=0 + ENDIF + + IF(nf90_inq_dimid(ncid,'nrecvtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nrecvtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nrecvtot=0 + ENDIF + + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(num_wgts,pinfo%num_links),& + & pinfo%nsend(0:nproc-1),& + & pinfo%send_address(pinfo%nsendtot),& + & pinfo%nrecv(0:nproc-1),& + & pinfo%nsdisp(0:nproc-1),& + & pinfo%nrdisp(0:nproc-1)) + + IF (pinfo%num_links>0) THEN + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nsend',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'send_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nrecv',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nsdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nrdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + ENDIF + + END SUBROUTINE parinter_read + +END MODULE parinter diff --git a/scripgrid.F90 b/scripgrid.F90 new file mode 100644 index 000000000..e2f74d368 --- /dev/null +++ b/scripgrid.F90 @@ -0,0 +1,278 @@ +#define __MYFILE__ 'scripgrid.F90' +MODULE scripgrid + + USE nctools + USE scrippar + + IMPLICIT NONE + + TYPE scripgridtype + INTEGER :: grid_size + INTEGER :: grid_corners + INTEGER :: grid_rank + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_dims + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lon + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_imask + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lon + CHARACTER(len=scriplen) :: grid_center_lat_units + CHARACTER(len=scriplen) :: grid_center_lon_units + CHARACTER(len=scriplen) :: grid_imask_units + CHARACTER(len=scriplen) :: grid_corner_lat_units + CHARACTER(len=scriplen) :: grid_corner_lon_units + CHARACTER(len=scriplen) :: title + END TYPE scripgridtype + +CONTAINS + + SUBROUTINE scripgrid_read( cdfilename, grid ) + + CHARACTER(len=*) :: cdfilename + TYPE(scripgridtype) :: grid + + INTEGER :: ncid, dimid, varid + + CALL scripgrid_init(grid) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_corners),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_rank),& + & __LINE__,__MYFILE__) + + CALL scripgrid_alloc(grid) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',grid%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_read + + SUBROUTINE scripgrid_write( cdgridfile, grid ) + + CHARACTER(len=*) :: cdgridfile + TYPE(scripgridtype) :: grid + + INTEGER :: ncid + INTEGER :: ioldfill + INTEGER :: idimsize,idimxsize,idimysize,idimcorners,idimrank + INTEGER :: idims1rank(1),idims1size(1),idims2(2) + INTEGER :: iddims,idcentlat,idcentlon,idimask,idcornlat,idcornlon + INTEGER :: igriddims(2) + + ! Setup netcdf file + + CALL nchdlerr(nf90_create(TRIM(cdgridfile),nf90_clobber,ncid),& + & __LINE__,__MYFILE__) + + ! Define dimensions + + CALL nchdlerr(nf90_def_dim(ncid,'grid_size',& + & grid%grid_size,idimsize),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_corners',& + & grid%grid_corners,idimcorners),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_rank',& + & grid%grid_rank,idimrank),& + & __LINE__,__MYFILE__) + + idims1rank(1) = idimrank + + idims1size(1) = idimsize + + idims2(1) = idimcorners + idims2(2) = idimsize + + ! Define variables + + CALL nchdlerr(nf90_def_var(ncid,'grid_dims',& + & nf90_int,idims1rank,iddims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lat',& + & nf90_double,idims1size,idcentlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlat,'units',& + & grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lon',& + & nf90_double,idims1size,idcentlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlon,'units',& + & grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_imask',& + & nf90_int,idims1size,idimask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idimask,'units',& + & grid%grid_imask_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lat',& + & nf90_double,idims2,idcornlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlat,'units',& + & grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lon',& + & nf90_double,idims2,idcornlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlon,'units',& + & grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & TRIM(grid%title)),& + & __LINE__,__MYFILE__) + + ! End of netCDF definition phase + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + ! Write variables + + + CALL nchdlerr(nf90_put_var(ncid,iddims,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlat,& + & grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlon,& + & grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idimask,& + & grid%grid_imask), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlat,& + & grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlon,& + & grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ! Close file + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_write + + SUBROUTINE scripgrid_init( grid ) + + TYPE(scripgridtype) :: grid + + grid%grid_size=0 + grid%grid_corners=0 + grid%grid_rank=0 + grid%grid_center_lat_units='' + grid%grid_center_lon_units='' + grid%grid_imask_units='' + grid%grid_corner_lat_units='' + grid%grid_corner_lon_units='' + grid%title='' + + END SUBROUTINE scripgrid_init + + SUBROUTINE scripgrid_alloc( grid ) + + TYPE(scripgridtype) :: grid + + IF ( (grid%grid_size == 0) .OR. & + & (grid%grid_corners == 0) .OR. & + & (grid%grid_rank == 0) ) THEN + WRITE(*,*)'scripgridtype not initialized' + CALL abort + ENDIF + + ALLOCATE( & + & grid%grid_dims(grid%grid_rank), & + & grid%grid_center_lat(grid%grid_size), & + & grid%grid_center_lon(grid%grid_size), & + & grid%grid_corner_lat(grid%grid_corners, grid%grid_size), & + & grid%grid_corner_lon(grid%grid_corners, grid%grid_size), & + & grid%grid_imask(grid%grid_size) & + & ) + + END SUBROUTINE scripgrid_alloc + + SUBROUTINE scripgrid_dealloc( grid ) + + TYPE(scripgridtype) :: grid + + DEALLOCATE( & + & grid%grid_dims, & + & grid%grid_center_lat, & + & grid%grid_center_lon, & + & grid%grid_corner_lat, & + & grid%grid_corner_lon, & + & grid%grid_imask & + & ) + + END SUBROUTINE scripgrid_dealloc + +END MODULE scripgrid diff --git a/scrippar.F90 b/scrippar.F90 new file mode 100644 index 000000000..41e034979 --- /dev/null +++ b/scrippar.F90 @@ -0,0 +1,5 @@ +MODULE scrippar + INTEGER, PARAMETER :: scripdp = SELECTED_REAL_KIND(12,307) + INTEGER, PARAMETER :: scriplen = 80 +END MODULE scrippar + diff --git a/scripremap.F90 b/scripremap.F90 new file mode 100644 index 000000000..1e2f23c27 --- /dev/null +++ b/scripremap.F90 @@ -0,0 +1,734 @@ +#define __MYFILE__ 'scripremap.F90' +MODULE scripremap + +#if defined key_mpp_mpi + USE mpi +#endif + USE nctools + USE scrippar + USE scripgrid + + IMPLICIT NONE + + TYPE scripremaptype + INTEGER :: num_links + INTEGER :: num_wgts + TYPE(scripgridtype) :: src + TYPE(scripgridtype) :: dst + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_frac + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_frac + INTEGER, ALLOCATABLE, DIMENSION(:) :: src_address + INTEGER, ALLOCATABLE, DIMENSION(:) :: dst_address + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: remap_matrix + CHARACTER(len=scriplen) :: src_grid_area_units + CHARACTER(len=scriplen) :: dst_grid_area_units + CHARACTER(len=scriplen) :: src_grid_frac_units + CHARACTER(len=scriplen) :: dst_grid_frac_units + CHARACTER(len=scriplen) :: title + CHARACTER(len=scriplen) :: normalization + CHARACTER(len=scriplen) :: map_method + CHARACTER(len=scriplen) :: history + CHARACTER(len=scriplen) :: conventions + END TYPE scripremaptype + +CONTAINS + + SUBROUTINE scripremap_read_work(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid, dimid, varid + LOGICAL :: lcorners + + lcorners=.TRUE. + + CALL scripremap_init(remap) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_size),& + & __LINE__,__MYFILE__) + + + IF (nf90_inq_dimid(ncid,'src_grid_corners',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + lcorners=.FALSE. + remap%src%grid_corners=1 + ENDIF + + IF (lcorners) THEN + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + remap%dst%grid_corners=1 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_links',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_links),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_wgts),& + & __LINE__,__MYFILE__) + + CALL scripremap_alloc(remap) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + IF (lcorners) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ELSE + + remap%src%grid_corner_lat(:,:) = 0.0 + remap%src%grid_corner_lon(:,:) = 0.0 + remap%dst%grid_corner_lat(:,:) = 0.0 + remap%dst%grid_corner_lon(:,:) = 0.0 + remap%src%grid_corner_lat_units = '' + remap%src%grid_corner_lon_units = '' + remap%dst%grid_corner_lat_units = '' + remap%dst%grid_corner_lon_units = '' + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'normalization',remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'map_method',remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'history',remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'conventions',remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'dest_grid',remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'source_grid',remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripremap_read_work + + SUBROUTINE scripremap_read(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + CALL scripremap_read_work(cdfilename,remap) + + END SUBROUTINE scripremap_read + + + SUBROUTINE scripremap_read_sgl(cdfilename,remap,& + & mype,nproc,mycomm,linteronly) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + INTEGER :: mype,nproc,mycomm + LOGICAL :: linteronly + + INTEGER, DIMENSION(8) :: isizes + INTEGER :: ierr, ip + + IF (mype==0) THEN + CALL scripremap_read_work(cdfilename,remap) +#if defined key_mpp_mpi + isizes(1)=remap%src%grid_size + isizes(2)=remap%dst%grid_size + isizes(3)=remap%src%grid_corners + isizes(4)=remap%dst%grid_corners + isizes(5)=remap%src%grid_rank + isizes(6)=remap%dst%grid_rank + isizes(7)=remap%num_links + isizes(8)=remap%num_wgts + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + ELSE + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + CALL scripremap_init(remap) + remap%src%grid_size=isizes(1) + remap%dst%grid_size=isizes(2) + remap%src%grid_corners=isizes(3) + remap%dst%grid_corners=isizes(4) + remap%src%grid_rank=isizes(5) + remap%dst%grid_rank=isizes(6) + remap%num_links=isizes(7) + remap%num_wgts=isizes(8) + CALL scripremap_alloc(remap) +#endif + ENDIF + +#if defined key_mpp_mpi + + IF (.NOT.linteronly) THEN + + CALL mpi_bcast( remap%src%grid_dims, remap%src%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lat, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%dst%grid_dims, remap%dst%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src_grid_area, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%title, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%normalization, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%map_method, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%history, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%conventions, scriplen, & + & mpi_character, 0, mycomm, ierr ) + ENDIF + + CALL mpi_bcast( remap%src_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%remap_matrix, remap%num_wgts*remap%num_links, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask, remap%src%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask, remap%dst%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + +#endif + END SUBROUTINE scripremap_read_sgl + + SUBROUTINE scripremap_write(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid + INTEGER :: dimsgs,dimdgs,dimsgc,dimdgc,dimsgr,dimdgr,dimnl,dimnw + INTEGER :: dims1(1),dims2(2) + INTEGER :: idsgd,iddgd,idsgea,iddgea,idsgeo,iddgeo + INTEGER :: idsgoa,idsgoo,iddgoa,iddgoo,idsgim,iddgim,idsgar,iddgar + INTEGER :: idsgf,iddgf,idsga,iddga,idsa,idda,idrm + + CALL nchdlerr(nf90_create(TRIM(cdfilename),nf90_clobber,ncid), & + & __LINE__, __MYFILE__ ) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_size',& + & remap%src%grid_size,dimsgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_size',& + & remap%dst%grid_size,dimdgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_corners',& + & remap%src%grid_corners,dimsgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_corners',& + & remap%dst%grid_corners,dimdgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_rank',& + & remap%src%grid_rank,dimsgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_rank',& + & remap%dst%grid_rank,dimdgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & remap%num_links,dimnl),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & remap%num_wgts,dimnw),& + & __LINE__,__MYFILE__) + + dims1(1)=dimsgr + CALL nchdlerr(nf90_def_var(ncid,'src_grid_dims',& + & nf90_int,dims1,idsgd),& + & __LINE__,__MYFILE__) + + dims1(1)=dimdgr + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_dims',& + & nf90_int,dims1,iddgd), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lat',& + & nf90_double,dims1,idsgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lat',& + & nf90_double,dims1,iddgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lon',& + & nf90_double,dims1,idsgeo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lon',& + & nf90_double,dims1,iddgeo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lat',& + & nf90_double,dims2,idsgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lon',& + & nf90_double,dims2,idsgoo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lat',& + & nf90_double,dims2,iddgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lon',& + & nf90_double,dims2,iddgoo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_imask',& + & nf90_int,dims1,idsgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_imask',& + & nf90_int,dims1,iddgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_area',& + & nf90_double,dims1,idsga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_area',& + & nf90_double,dims1,iddga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_frac',& + & nf90_double,dims1,idsgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_frac',& + & nf90_double,dims1,iddgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda), & + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,idsgea,'units',& + & remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgea,'units',& + & remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgeo,'units',& + & remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgeo,'units',& + & remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoa,'units',& + & remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoo,'units',& + & remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoa,'units',& + & remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoo,'units',& + & remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgim,'units',& + & remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgim,'units',& + & remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsga,'units',& + & remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddga,'units',& + & remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgf,'units',& + & remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgf,'units',& + & remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'normalization',& + & remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'map_method',& + & remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'history',& + & remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'conventions',& + & remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'dest_grid',& + & remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'source_grid',& + & remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgd,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgd,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgea,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgea,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgeo,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgeo,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgoa,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgoo,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoa,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoo,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgim,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgim,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsga,remap%src_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddga,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgf,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgf,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsa,remap%src_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idda,remap%dst_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idrm,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE scripremap_write + + SUBROUTINE scripremap_init(remap) + + TYPE(scripremaptype) :: remap + + CALL scripgrid_init(remap%src) + CALL scripgrid_init(remap%dst) + remap%num_links = 0 + remap%num_wgts = 0 + remap%title='' + remap%normalization='' + remap%map_method='' + remap%history='' + remap%conventions='' + remap%src_grid_area_units='' + remap%dst_grid_area_units='' + remap%src_grid_frac_units='' + remap%dst_grid_frac_units='' + + END SUBROUTINE scripremap_init + + SUBROUTINE scripremap_alloc(remap) + + TYPE(scripremaptype) :: remap + + IF ( (remap%num_links == 0) .OR. & + & (remap%num_wgts == 0) ) THEN + WRITE(*,*)'scripremaptype not initialized' + CALL abort + ENDIF + + CALL scripgrid_alloc(remap%src) + CALL scripgrid_alloc(remap%dst) + + ALLOCATE( & + & remap%src_grid_area(remap%src%grid_size), & + & remap%dst_grid_area(remap%dst%grid_size), & + & remap%src_grid_frac(remap%src%grid_size), & + & remap%dst_grid_frac(remap%dst%grid_size), & + & remap%src_address(remap%num_links), & + & remap%dst_address(remap%num_links), & + & remap%remap_matrix(remap%num_wgts, remap%num_links) & + & ) + + END SUBROUTINE scripremap_alloc + + SUBROUTINE scripremap_dealloc(remap) + + TYPE(scripremaptype) :: remap + + DEALLOCATE( & + & remap%src_grid_area, & + & remap%dst_grid_area, & + & remap%src_grid_frac, & + & remap%dst_grid_frac, & + & remap%src_address, & + & remap%dst_address, & + & remap%remap_matrix & + & ) + + CALL scripgrid_dealloc(remap%src) + CALL scripgrid_dealloc(remap%dst) + + CALL scripremap_init(remap) + + END SUBROUTINE scripremap_dealloc + +END MODULE scripremap From c13fcf1772642d8df7a2df068d757f2e5d883f2b Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 7 Jun 2018 16:50:29 +0100 Subject: [PATCH 010/909] Moved nemogcmcoup* subroutines into ifs_interface/notused.F90. Grouped modules into ifs_modules.F90. --- Makefile | 2 +- ...mcoup_lim2_update.F90 => ifs_interface.F90 | 643 ++++++++++++++++++ ifs_modules.F90 | 38 ++ ifs_notused.F90 | 356 ++++++++++ interinfo.F90 | 23 - nemogcmcoup_coupinit.F90 | 224 ------ nemogcmcoup_exflds_get.F90 | 28 - nemogcmcoup_final.F90 | 11 - nemogcmcoup_get.F90 | 30 - nemogcmcoup_get_1way.F90 | 23 - nemogcmcoup_init.F90 | 38 -- nemogcmcoup_init_ioserver.F90 | 12 - nemogcmcoup_init_ioserver_2.F90 | 11 - nemogcmcoup_lim2_get.F90 | 324 --------- nemogcmcoup_mlflds_get.F90 | 26 - nemogcmcoup_mlinit.F90 | 26 - nemogcmcoup_step.F90 | 33 - nemogcmcoup_update.F90 | 32 - nemogcmcoup_update_add.F90 | 32 - nemogcmcoup_wam_coupinit.F90 | 25 - nemogcmcoup_wam_get.F90 | 30 - nemogcmcoup_wam_update.F90 | 34 - nemogcmcoup_wam_update_stress.F90 | 32 - par_kind.F90 | 8 - 24 files changed, 1038 insertions(+), 1003 deletions(-) rename nemogcmcoup_lim2_update.F90 => ifs_interface.F90 (55%) create mode 100644 ifs_modules.F90 create mode 100644 ifs_notused.F90 delete mode 100644 interinfo.F90 delete mode 100644 nemogcmcoup_coupinit.F90 delete mode 100644 nemogcmcoup_exflds_get.F90 delete mode 100644 nemogcmcoup_final.F90 delete mode 100644 nemogcmcoup_get.F90 delete mode 100644 nemogcmcoup_get_1way.F90 delete mode 100644 nemogcmcoup_init.F90 delete mode 100644 nemogcmcoup_init_ioserver.F90 delete mode 100644 nemogcmcoup_init_ioserver_2.F90 delete mode 100644 nemogcmcoup_lim2_get.F90 delete mode 100644 nemogcmcoup_mlflds_get.F90 delete mode 100644 nemogcmcoup_mlinit.F90 delete mode 100644 nemogcmcoup_step.F90 delete mode 100644 nemogcmcoup_update.F90 delete mode 100644 nemogcmcoup_update_add.F90 delete mode 100644 nemogcmcoup_wam_coupinit.F90 delete mode 100644 nemogcmcoup_wam_get.F90 delete mode 100644 nemogcmcoup_wam_update.F90 delete mode 100644 nemogcmcoup_wam_update_stress.F90 delete mode 100644 par_kind.F90 diff --git a/Makefile b/Makefile index bb0c3db0f..b969cf2ae 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ LDFLAGS=-g -O3 -fdefault-real-8 -fdefault-double-8 -fcray-pointer -fconvert=swap AR=ar ARFLAGS=-rv -OBJ=scrippar.o scripremap.o scripgrid.o parinter.o interinfo.o nemogcmcoup_mlflds_get.o par_kind.o nemogcmcoup_init_ioserver.o nemogcmcoup_init_ioserver_2.o nemogcmcoup_final.o nemogcmcoup_init.o nemogcmcoup_wam_coupinit.o nctools.o nemogcmcoup_step.o nemogcmcoup_exflds_get.o nemogcmcoup_wam_update.o nemogcmcoup_wam_update_stress.o nemogcmcoup_wam_get.o nemogcmcoup_coupinit.o nemogcmcoup_get_1way.o nemogcmcoup_mlinit.o nemogcmcoup_update_add.o nemogcmcoup_update.o nemogcmcoup_lim2_update.o nemogcmcoup_get.o nemogcmcoup_lim2_get.o +OBJ=scripremap.o scripgrid.o parinter.o nctools.o ifs_modules.o ifs_interface.o ifs_notused.o all: libfesom.a diff --git a/nemogcmcoup_lim2_update.F90 b/ifs_interface.F90 similarity index 55% rename from nemogcmcoup_lim2_update.F90 rename to ifs_interface.F90 index 748f87b7b..5ad07fc11 100644 --- a/nemogcmcoup_lim2_update.F90 +++ b/ifs_interface.F90 @@ -1,3 +1,600 @@ +!===================================================== +! IFS interface for calling FESOM2 as a subroutine. +! +! -Original code for NEMO by Kristian Mogensen, ECMWF. +!----------------------------------------------------- + +SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & + & lwaveonly, iatmunit, lwrite ) + + ! Initialize the NEMO model for single executable coupling + + USE par_kind + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: icomm + ! Initial date, time, initial timestep and final time step + INTEGER, INTENT(OUT) :: inidate, initime, itini, itend + ! Length of the time step + REAL(wp), INTENT(OUT) :: zstp + ! Coupling to waves only + LOGICAL, INTENT(IN) :: lwaveonly + ! Logfile unit (used if >=0) + INTEGER :: iatmunit + ! Write to this unit + LOGICAL :: lwrite + + WRITE(0,*)'Insert FESOM init here.' + CALL abort + + ! Set information for the caller + +#ifdef FESOM_TODO + inidate = nn_date0 + initime = nn_time0*3600 + itini = nit000 + itend = nn_itend + zstp = rdttra(1) +#endif + +END SUBROUTINE nemogcmcoup_init + + +SUBROUTINE nemogcmcoup_coupinit( mype, npes, icomm, & + & npoints, nlocmsk, ngloind ) + + ! Initialize single executable coupling + USE parinter + USE scripremap + USE interinfo + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Gaussian grid information + ! Number of points + INTEGER, INTENT(IN) :: npoints + ! Integer mask and global indices + INTEGER, DIMENSION(npoints), INTENT(IN) :: nlocmsk, ngloind + INTEGER :: iunit = 0 + + ! Local variables + + ! Namelist containing the file names of the weights + CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & + & cdfile_T_to_gauss, cdfile_UV_to_gauss + CHARACTER(len=256) :: cdpathdist + LOGICAL :: lwritedist, lreaddist + LOGICAL :: lcommout + CHARACTER(len=128) :: commoutprefix + NAMELIST/namnemocoup/cdfile_gauss_to_T,& + & cdfile_gauss_to_UV,& + & cdfile_T_to_gauss,& + & cdfile_UV_to_gauss,& + & cdpathdist, & + & lreaddist, & + & lwritedist, & + & lcommout, & + & commoutprefix,& + & lparbcast + + ! Global number of gaussian gridpoints + INTEGER :: nglopoints + ! Ocean grids accessed with NEMO modules + INTEGER :: noglopoints,nopoints + INTEGER, ALLOCATABLE, DIMENSION(:) :: omask,ogloind + ! SCRIP remapping data structures. + TYPE(scripremaptype) :: remap_gauss_to_T, remap_T_to_gauss, & + & remap_gauss_to_UV, remap_UV_to_gauss + ! Misc variables + INTEGER :: i,j,k,ierr + LOGICAL :: lexists + + ! Read namelists + + cdfile_gauss_to_T = 'gausstoT.nc' + cdfile_gauss_to_UV = 'gausstoUV.nc' + cdfile_T_to_gauss = 'Ttogauss.nc' + cdfile_UV_to_gauss = 'UVtogauss.nc' + lcommout = .FALSE. + commoutprefix = 'parinter_comm' + cdpathdist = './' + lreaddist = .FALSE. + lwritedist = .FALSE. + + OPEN(9,file='namnemocoup.in') + READ(9,namnemocoup) + CLOSE(9) + + ! Global number of Gaussian gridpoints + +#if defined key_mpp_mpi + CALL mpi_allreduce( npoints, nglopoints, 1, & + & mpi_integer, mpi_sum, icomm, ierr) +#else + nglopoints=npoints +#endif + + WRITE(0,*)'Update FESOM global scalar points' + noglopoints=126858 + IF (mype==0) THEN + nopoints=126858 + ELSE + nopoints=0 + ENDIF + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + + omask(:) = 1 + IF (mype==0) THEN + DO i=1,nopoints + ogloind(i)=i + ENDDO + ENDIF + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean T-grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_T,remap_gauss_to_T,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_T,remap_gauss_to_T) + ENDIF + CALL parinter_init( mype, npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_T, gausstoT, lcommout, TRIM(commoutprefix)//'_gtoT', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_T) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT') + ENDIF + ENDIF + + ! From ocean T-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_T_to_gauss,remap_T_to_gauss,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_T_to_gauss,remap_T_to_gauss) + ENDIF + + CALL parinter_init( mype, npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_T_to_gauss, Ttogauss, lcommout, TRIM(commoutprefix)//'_Ttog', & + & iunit ) + CALL scripremap_dealloc(remap_T_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + + WRITE(0,*)'Update FESOM global vector points' + noglopoints=244659 + IF (mype==0) THEN + nopoints=244659 + ELSE + nopoints=0 + ENDIF + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + + omask(:) = 1 + IF (mype==0) THEN + DO i=1,nopoints + ogloind(i)=i + ENDDO + ENDIF + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean UV-grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_UV,remap_gauss_to_UV,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_UV,remap_gauss_to_UV) + ENDIF + CALL parinter_init( mype, npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_UV, gausstoUV, lcommout, TRIM(commoutprefix)//'_gtoUV', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_UV) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV') + ENDIF + ENDIF + + ! From ocean UV-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_UV_to_gauss,remap_UV_to_gauss,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_UV_to_gauss,remap_UV_to_gauss) + ENDIF + + CALL parinter_init( mype, npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_UV_to_gauss, UVtogauss, lcommout, TRIM(commoutprefix)//'_UVtog', & + & iunit ) + CALL scripremap_dealloc(remap_UV_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + +END SUBROUTINE nemogcmcoup_coupinit + + +SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & + & nopoints, pgsst, pgist, pgalb, & + & pgifr, pghic, pghsn, pgucur, pgvcur, & + & pgistl, licelvls ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + + IMPLICIT NONE + + ! Arguments + REAL(wp), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur + REAL(wp), DIMENSION(nopoints,3) :: pgistl + LOGICAL :: licelvls + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + +#ifdef FESOM_TODO + + ! Temporary array for packing of input data without halos. + REAL(wp), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zsend + ! Arrays for rotation of current vectors from ij to ne. + REAL(wp), DIMENSION(jpi,jpj) :: zotx1, zoty1, ztmpx, ztmpy + ! Array for fraction of leads (i.e. ocean) + REAL(wp), DIMENSION(jpi,jpj) :: zfr_l + REAL(wp) :: zt + ! Loop variables + INTEGER :: ji, jj, jk, jl + REAL(wp) :: zhook_handle ! Dr Hook handle + + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',0,zhook_handle) + IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_get') + + zfr_l(:,:) = 1.- fr_i(:,:) + + IF (.NOT.ALLOCATED(zscplsst)) THEN + ALLOCATE(zscplsst(jpi,jpj)) + ENDIF + + ! Pack SST data and convert to K. + + IF ( nsstlvl(1) == nsstlvl(2) ) THEN + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tsn(ji,jj,nsstlvl(1),jp_tem) + rt0 + zscplsst(ji,jj) = zsend(jk) - rt0 + ENDDO + ENDDO + ELSE + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = SUM(& + & tsn(ji,jj,nsstlvl(1):nsstlvl(2),jp_tem) * & + & tmask(ji,jj,nsstlvl(1):nsstlvl(2)) * & + & fse3t(ji,jj,nsstlvl(1):nsstlvl(2)) ) / & + & MAX( SUM( & + & tmask(ji,jj,nsstlvl(1):nsstlvl(2)) * & + & fse3t(ji,jj,nsstlvl(1):nsstlvl(2))) , 1.0 ) + rt0 + zscplsst(ji,jj) = zsend(jk) - rt0 + ENDDO + ENDDO + ENDIF + CALL lbc_lnk( zscplsst, 'T', 1. ) + + ! Interpolate SST + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgsst ) + + ! Pack ice temperature data (already in K) + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tn_ice(ji,jj,1) + ENDDO + ENDDO +#else + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + zt=0.0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) + zt = zt + a_i(ji,jj,jl) + ENDDO + IF ( zt > 0.0 ) THEN + zsend(jk) = zsend(jk) / zt + ELSE + zsend(jk) = rt0 + ENDIF + ENDDO + ENDDO +#endif + + ! Interpolate ice temperature + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgist ) + + ! Ice level temperatures + + IF (licelvls) THEN + +#if defined key_lim2 + + DO jl = 1, 3 + + ! Pack ice temperatures data at level jl(already in K) + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tbif (ji,jj,jl) + ENDDO + ENDDO + + ! Interpolate ice temperature at level jl + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgistl(:,jl) ) + + ENDDO + +#else + WRITE(0,*)'licelvls needs to be sorted for LIM3' + CALL abort +#endif + + ENDIF + + ! Pack ice albedo data + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = alb_ice(ji,jj,1) + ENDDO + ENDDO +#else + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + zt=0.0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + alb_ice(ji,jj,jl) * a_i(ji,jj,jl) + zt = zt + a_i(ji,jj,jl) + ENDDO + IF ( zt > 0.0_wp ) THEN + zsend(jk) = zsend(jk) / zt + ELSE + zsend(jk) = albedo_oce_mix(ji,jj) + ENDIF + ENDDO + ENDDO +#endif + + ! Interpolate ice albedo + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgalb ) + + ! Pack ice fraction data + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = fr_i(ji,jj) + ENDDO + ENDDO + + ! Interpolation of ice fraction. + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgifr ) + + ! Pack ice thickness data + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = hicif(ji,jj) + ENDDO + ENDDO +#else + ! LIM3 + ! Average over categories (to be revised). + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + ht_i(ji,jj,jl) * a_i(ji,jj,jl) + ENDDO + ENDDO + ENDDO +#endif + + ! Interpolation of ice thickness + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pghic ) + + ! Pack snow thickness data + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = hsnif(ji,jj) + ENDDO + ENDDO +#else + ! LIM3 + ! Average over categories (to be revised). + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + ht_s(ji,jj,jl) * a_i(ji,jj,jl) + ENDDO + ENDDO + ENDDO +#endif + + ! Interpolation of snow thickness + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pghsn ) + + ! Currents needs to be rotated from ij to ne first + + DO jj = 2, jpjm1 + DO ji = 2, jpim1 + zotx1(ji,jj) = 0.5 * ( un(ji,jj,1) + un(ji-1,jj ,1) ) + zoty1(ji,jj) = 0.5 * ( vn(ji,jj,1) + vn(ji ,jj-1,1) ) + END DO + END DO + CALL lbc_lnk( zotx1, 'T', -1. ) + CALL lbc_lnk( zoty1, 'T', -1. ) + CALL rot_rep( zotx1, zoty1, 'T', 'ij->e', ztmpx ) + CALL rot_rep( zotx1, zoty1, 'T', 'ij->n', ztmpy ) + + ! Pack U current + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = ztmpx(ji,jj) + ENDDO + ENDDO + + ! Interpolate U current + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgucur ) + + ! Pack V current + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = ztmpy(ji,jj) + ENDDO + ENDDO + + ! Interpolate V current + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgvcur ) + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) + +#else + + WRITE(0,*)'nemogcmcoup_lim2_get not done for FESOM yet' + CALL abort + +#endif + +END SUBROUTINE nemogcmcoup_lim2_get + + SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & & npoints, & & taux_oce, tauy_oce, taux_ice, tauy_ice, & @@ -666,4 +1263,50 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & END SUBROUTINE nemogcmcoup_lim2_update + +SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) + + IMPLICIT NONE + + ! Arguments + + ! Time step + INTEGER, INTENT(IN) :: istp + + ! Data and time from NEMO + INTEGER, INTENT(OUT) :: icdate, ictime + + ! Local variables + + ! Advance the FESOM model 1 time step + + WRITE(0,*)'Insert FESOM step here.' + + ! Compute date and time at the end of the time step. + +#ifdef FESOM_TODO + iye = ndastp / 10000 + imo = ndastp / 100 - iye * 100 + ida = MOD( ndastp, 100 ) + CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) + zjul = zjul + ( nsec_day + 0.5_wp * rdttra(1) ) / 86400.0_wp + CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) + icdate = iye * 10000 + imo * 100 + ida + ictime = ihh * 10000 + imm * 100 + iss +#endif + +END SUBROUTINE nemogcmcoup_step + + +SUBROUTINE nemogcmcoup_final + + ! Finalize the NEMO model + + IMPLICIT NONE + + WRITE(*,*)'Insert call to finalization of FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_final + diff --git a/ifs_modules.F90 b/ifs_modules.F90 new file mode 100644 index 000000000..20e9dac57 --- /dev/null +++ b/ifs_modules.F90 @@ -0,0 +1,38 @@ +! Set of modules needed by the interface to IFS. +! +! -Original code by Kristian Mogensen, ECMWF. + +MODULE par_kind + IMPLICIT NONE + INTEGER, PUBLIC, PARAMETER :: & !: Floating point section + sp = SELECTED_REAL_KIND( 6, 37), & !: single precision (real 4) + dp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + wp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + ik = SELECTED_INT_KIND(6) !: integer precision +END MODULE par_kind + +MODULE interinfo + + ! Parallel regridding information + + USE parinter + + IMPLICIT NONE + + SAVE + + ! IFS to NEMO + + TYPE(parinterinfo) :: gausstoT,gausstoUV + + ! NEMO to IFS + + TYPE(parinterinfo) :: Ttogauss, UVtogauss + + ! Read parinterinfo on task 0 only and broadcast. + + LOGICAL :: lparbcast = .FALSE. + +END MODULE interinfo + + diff --git a/ifs_notused.F90 b/ifs_notused.F90 new file mode 100644 index 000000000..bbeb66423 --- /dev/null +++ b/ifs_notused.F90 @@ -0,0 +1,356 @@ +! Routines usually provided by the library that are currently +! not implemented for FESOM2. +! +! -Original code by Kristian Mogensen, ECMWF. + +SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + LOGICAL :: lnemoioserver + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver + + +SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver_2 + + +SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & + & nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wp), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints,nlev + + ! Local variables + + WRITE(0,*)'nemogcmcoup_mlflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlflds_get + + +SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & + & nopoints, pgsst, pgice, pgucur, pgvcur ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice and currents + REAL(wp), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get should not be called with FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_get + + +SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & + & nopoints, pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wp), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + + WRITE(0,*)'nemogcmcoup_exflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_exflds_get + + +SUBROUTINE nemogcmcoup_get_1way( mype, npes, icomm ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get_1way should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_get_1way + + +SUBROUTINE nemogcmcoup_mlinit( mype, npes, icomm, & + & nlev, nopoints, pdep, pmask ) + + ! Get information about the vertical discretization of the ocean model + + ! nlevs are maximum levels on input and actual number levels on output + + USE par_kind + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Grid information + INTEGER, INTENT(INOUT) :: nlev, nopoints + REAL(wp), INTENT(OUT), DIMENSION(nlev) :: pdep + REAL(wp), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask + + ! Local variables + + WRITE(0,*)'nemogcmcoup_mlinit should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlinit + + +SUBROUTINE nemogcmcoup_update( mype, npes, icomm, & + & npoints, pgutau, pgvtau, & + & pgqsr, pgqns, pgemp, kt, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), intent(IN) :: & + & pgutau, pgvtau, pgqsr, pgqns, pgemp + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update should be called with with.' + CALL abort + +END SUBROUTINE nemogcmcoup_update + +SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & + & npoints, pgsst, pgtsk, kt, ldebug ) + + ! Update addetiona in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Input on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), intent(IN) :: & + & pgsst, pgtsk + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom.' + CALL abort + + +END SUBROUTINE nemogcmcoup_update_add + + +SUBROUTINE nemogcmcoup_wam_coupinit( mype, npes, icomm, & + & nlocpoints, nglopoints, & + & nlocmsk, ngloind, iunit ) + + ! Initialize single executable coupling between WAM and NEMO + ! This is called from WAM. + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! WAM grid information + ! Number of local and global points + INTEGER, INTENT(IN) :: nlocpoints, nglopoints + ! Integer mask and global indices + INTEGER, DIMENSION(nlocpoints), INTENT(IN) :: nlocmsk, ngloind + ! Unit for output in parinter_init + INTEGER :: iunit + + WRITE(0,*)'Wam coupling not implemented for FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_coupinit + + +SUBROUTINE nemogcmcoup_wam_get( mype, npes, icomm, & + & nopoints, pwsst, pwicecov, pwicethk, & + & pwucur, pwvcur, licethk ) + + ! Interpolate from the ORCA grid + ! to the WAM grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number WAM grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice cover, ice thickness and currents + REAL(wp), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur + LOGICAL :: licethk + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_get + + +SUBROUTINE nemogcmcoup_wam_update( mype, npes, icomm, & + & npoints, pwswh, pwmwp, & + & pwphioc, pwtauoc, pwstrn, & + & pwustokes, pwvstokes, & + & cdtpro, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + & pwswh, pwmwp, pwphioc, pwtauoc, pwstrn, pwustokes, pwvstokes + ! Current time + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update + + +SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & + & pwutau, pwvtau, pwuv10n, pwphif,& + & cdtpro, ldebug ) + + ! Update stresses in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + & pwutau, pwvtau, pwuv10n, pwphif + ! Current time step + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update_stress should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update_stress diff --git a/interinfo.F90 b/interinfo.F90 deleted file mode 100644 index bb4fc1804..000000000 --- a/interinfo.F90 +++ /dev/null @@ -1,23 +0,0 @@ -MODULE interinfo - - ! Parallel regridding information - - USE parinter - - IMPLICIT NONE - - SAVE - - ! IFS to NEMO - - TYPE(parinterinfo) :: gausstoT,gausstoUV - - ! NEMO to IFS - - TYPE(parinterinfo) :: Ttogauss, UVtogauss - - ! Read parinterinfo on task 0 only and broadcast. - - LOGICAL :: lparbcast = .FALSE. - -END MODULE interinfo diff --git a/nemogcmcoup_coupinit.F90 b/nemogcmcoup_coupinit.F90 deleted file mode 100644 index 7abc87428..000000000 --- a/nemogcmcoup_coupinit.F90 +++ /dev/null @@ -1,224 +0,0 @@ -SUBROUTINE nemogcmcoup_coupinit( mype, npes, icomm, & - & npoints, nlocmsk, ngloind ) - - ! Initialize single executable coupling - USE parinter - USE scripremap - USE interinfo - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Gaussian grid information - ! Number of points - INTEGER, INTENT(IN) :: npoints - ! Integer mask and global indices - INTEGER, DIMENSION(npoints), INTENT(IN) :: nlocmsk, ngloind - INTEGER :: iunit = 0 - - ! Local variables - - ! Namelist containing the file names of the weights - CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & - & cdfile_T_to_gauss, cdfile_UV_to_gauss - CHARACTER(len=256) :: cdpathdist - LOGICAL :: lwritedist, lreaddist - LOGICAL :: lcommout - CHARACTER(len=128) :: commoutprefix - NAMELIST/namnemocoup/cdfile_gauss_to_T,& - & cdfile_gauss_to_UV,& - & cdfile_T_to_gauss,& - & cdfile_UV_to_gauss,& - & cdpathdist, & - & lreaddist, & - & lwritedist, & - & lcommout, & - & commoutprefix,& - & lparbcast - - ! Global number of gaussian gridpoints - INTEGER :: nglopoints - ! Ocean grids accessed with NEMO modules - INTEGER :: noglopoints,nopoints - INTEGER, ALLOCATABLE, DIMENSION(:) :: omask,ogloind - ! SCRIP remapping data structures. - TYPE(scripremaptype) :: remap_gauss_to_T, remap_T_to_gauss, & - & remap_gauss_to_UV, remap_UV_to_gauss - ! Misc variables - INTEGER :: i,j,k,ierr - LOGICAL :: lexists - - ! Read namelists - - cdfile_gauss_to_T = 'gausstoT.nc' - cdfile_gauss_to_UV = 'gausstoUV.nc' - cdfile_T_to_gauss = 'Ttogauss.nc' - cdfile_UV_to_gauss = 'UVtogauss.nc' - lcommout = .FALSE. - commoutprefix = 'parinter_comm' - cdpathdist = './' - lreaddist = .FALSE. - lwritedist = .FALSE. - - OPEN(9,file='namnemocoup.in') - READ(9,namnemocoup) - CLOSE(9) - - ! Global number of Gaussian gridpoints - -#if defined key_mpp_mpi - CALL mpi_allreduce( npoints, nglopoints, 1, & - & mpi_integer, mpi_sum, icomm, ierr) -#else - nglopoints=npoints -#endif - - WRITE(0,*)'Update FESOM global scalar points' - noglopoints=126858 - IF (mype==0) THEN - nopoints=126858 - ELSE - nopoints=0 - ENDIF - - ! Ocean mask and global indicies - - ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) - - omask(:) = 1 - IF (mype==0) THEN - DO i=1,nopoints - ogloind(i)=i - ENDDO - ENDIF - - ! Read the interpolation weights and setup the parallel interpolation - ! from atmosphere Gaussian grid to ocean T-grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoT, & - & cdpathdist,'ifs_to_fesom_gridT',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_gauss_to_T,remap_gauss_to_T,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_gauss_to_T,remap_gauss_to_T) - ENDIF - CALL parinter_init( mype, npes, icomm, & - & npoints, nglopoints, nlocmsk, ngloind, & - & nopoints, noglopoints, omask, ogloind, & - & remap_gauss_to_T, gausstoT, lcommout, TRIM(commoutprefix)//'_gtoT', & - & iunit ) - CALL scripremap_dealloc(remap_gauss_to_T) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoT, & - & cdpathdist,'ifs_to_fesom_gridT') - ENDIF - ENDIF - - ! From ocean T-grid to atmosphere Gaussian grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, noglopoints, nglopoints, Ttogauss, & - & cdpathdist,'fesom_gridT_to_ifs',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_T_to_gauss,remap_T_to_gauss,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_T_to_gauss,remap_T_to_gauss) - ENDIF - - CALL parinter_init( mype, npes, icomm, & - & nopoints, noglopoints, omask, ogloind, & - & npoints, nglopoints, nlocmsk, ngloind, & - & remap_T_to_gauss, Ttogauss, lcommout, TRIM(commoutprefix)//'_Ttog', & - & iunit ) - CALL scripremap_dealloc(remap_T_to_gauss) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, noglopoints, nglopoints, Ttogauss, & - & cdpathdist,'fesom_gridT_to_ifs') - ENDIF - ENDIF - - DEALLOCATE(omask,ogloind) - - WRITE(0,*)'Update FESOM global vector points' - noglopoints=244659 - IF (mype==0) THEN - nopoints=244659 - ELSE - nopoints=0 - ENDIF - - ! Ocean mask and global indicies - - ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) - - omask(:) = 1 - IF (mype==0) THEN - DO i=1,nopoints - ogloind(i)=i - ENDDO - ENDIF - - ! Read the interpolation weights and setup the parallel interpolation - ! from atmosphere Gaussian grid to ocean UV-grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoUV, & - & cdpathdist,'ifs_to_fesom_gridUV',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_gauss_to_UV,remap_gauss_to_UV,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_gauss_to_UV,remap_gauss_to_UV) - ENDIF - CALL parinter_init( mype, npes, icomm, & - & npoints, nglopoints, nlocmsk, ngloind, & - & nopoints, noglopoints, omask, ogloind, & - & remap_gauss_to_UV, gausstoUV, lcommout, TRIM(commoutprefix)//'_gtoUV', & - & iunit ) - CALL scripremap_dealloc(remap_gauss_to_UV) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoUV, & - & cdpathdist,'ifs_to_fesom_gridUV') - ENDIF - ENDIF - - ! From ocean UV-grid to atmosphere Gaussian grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, noglopoints, nglopoints, UVtogauss, & - & cdpathdist,'fesom_gridUV_to_ifs',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_UV_to_gauss,remap_UV_to_gauss,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_UV_to_gauss,remap_UV_to_gauss) - ENDIF - - CALL parinter_init( mype, npes, icomm, & - & nopoints, noglopoints, omask, ogloind, & - & npoints, nglopoints, nlocmsk, ngloind, & - & remap_UV_to_gauss, UVtogauss, lcommout, TRIM(commoutprefix)//'_UVtog', & - & iunit ) - CALL scripremap_dealloc(remap_UV_to_gauss) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, noglopoints, nglopoints, UVtogauss, & - & cdpathdist,'fesom_gridUV_to_ifs') - ENDIF - ENDIF - - DEALLOCATE(omask,ogloind) - -END SUBROUTINE nemogcmcoup_coupinit diff --git a/nemogcmcoup_exflds_get.F90 b/nemogcmcoup_exflds_get.F90 deleted file mode 100644 index e31aa6fa5..000000000 --- a/nemogcmcoup_exflds_get.F90 +++ /dev/null @@ -1,28 +0,0 @@ -SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & - & nopoints, pgssh, pgmld, pg20d, pgsss, & - & pgtem300, pgsal300 ) - - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the ORCA grid to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - IMPLICIT NONE - - ! Arguments - REAL(wp), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & - & pgtem300, pgsal300 - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints - - ! Local variables - - WRITE(0,*)'nemogcmcoup_exflds_get should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_exflds_get - diff --git a/nemogcmcoup_final.F90 b/nemogcmcoup_final.F90 deleted file mode 100644 index 140dd16ab..000000000 --- a/nemogcmcoup_final.F90 +++ /dev/null @@ -1,11 +0,0 @@ -SUBROUTINE nemogcmcoup_final - - ! Finalize the NEMO model - - IMPLICIT NONE - - WRITE(*,*)'Insert call to finalization of FESOM' - CALL abort - -END SUBROUTINE nemogcmcoup_final - diff --git a/nemogcmcoup_get.F90 b/nemogcmcoup_get.F90 deleted file mode 100644 index a651299c8..000000000 --- a/nemogcmcoup_get.F90 +++ /dev/null @@ -1,30 +0,0 @@ -SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & - & nopoints, pgsst, pgice, pgucur, pgvcur ) - - ! Interpolate sst, ice and currents from the ORCA grid - ! to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - - IMPLICIT NONE - - - ! Arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints - ! Local arrays of sst, ice and currents - REAL(wp), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur - - ! Local variables - - WRITE(0,*)'nemogcmcoup_get should not be called with FESOM' - CALL abort - -END SUBROUTINE nemogcmcoup_get - diff --git a/nemogcmcoup_get_1way.F90 b/nemogcmcoup_get_1way.F90 deleted file mode 100644 index d3dbb0458..000000000 --- a/nemogcmcoup_get_1way.F90 +++ /dev/null @@ -1,23 +0,0 @@ -SUBROUTINE nemogcmcoup_get_1way( mype, npes, icomm ) - - ! Interpolate sst, ice and currents from the ORCA grid - ! to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - IMPLICIT NONE - - - ! Arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - - ! Local variables - - WRITE(0,*)'nemogcmcoup_get_1way should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_get_1way - diff --git a/nemogcmcoup_init.F90 b/nemogcmcoup_init.F90 deleted file mode 100644 index 1d10b012e..000000000 --- a/nemogcmcoup_init.F90 +++ /dev/null @@ -1,38 +0,0 @@ -SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & - & lwaveonly, iatmunit, lwrite ) - - ! Initialize the NEMO model for single executable coupling - - USE par_kind - - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: icomm - ! Initial date, time, initial timestep and final time step - INTEGER, INTENT(OUT) :: inidate, initime, itini, itend - ! Length of the time step - REAL(wp), INTENT(OUT) :: zstp - ! Coupling to waves only - LOGICAL, INTENT(IN) :: lwaveonly - ! Logfile unit (used if >=0) - INTEGER :: iatmunit - ! Write to this unit - LOGICAL :: lwrite - - WRITE(0,*)'Insert FESOM init here.' - CALL abort - - ! Set information for the caller - -#ifdef FESOM_TODO - inidate = nn_date0 - initime = nn_time0*3600 - itini = nit000 - itend = nn_itend - zstp = rdttra(1) -#endif - -END SUBROUTINE nemogcmcoup_init diff --git a/nemogcmcoup_init_ioserver.F90 b/nemogcmcoup_init_ioserver.F90 deleted file mode 100644 index 0ef2c9e21..000000000 --- a/nemogcmcoup_init_ioserver.F90 +++ /dev/null @@ -1,12 +0,0 @@ -SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) - - ! Initialize the NEMO mppio server - - IMPLICIT NONE - INTEGER :: icomm - LOGICAL :: lnemoioserver - - WRITE(*,*)'No mpp_ioserver' - CALL abort - -END SUBROUTINE nemogcmcoup_init_ioserver diff --git a/nemogcmcoup_init_ioserver_2.F90 b/nemogcmcoup_init_ioserver_2.F90 deleted file mode 100644 index 4b069eea2..000000000 --- a/nemogcmcoup_init_ioserver_2.F90 +++ /dev/null @@ -1,11 +0,0 @@ -SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) - - ! Initialize the NEMO mppio server - - IMPLICIT NONE - INTEGER :: icomm - - WRITE(*,*)'No mpp_ioserver' - CALL abort - -END SUBROUTINE nemogcmcoup_init_ioserver_2 diff --git a/nemogcmcoup_lim2_get.F90 b/nemogcmcoup_lim2_get.F90 deleted file mode 100644 index 2176782f1..000000000 --- a/nemogcmcoup_lim2_get.F90 +++ /dev/null @@ -1,324 +0,0 @@ -SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & - & nopoints, pgsst, pgist, pgalb, & - & pgifr, pghic, pghsn, pgucur, pgvcur, & - & pgistl, licelvls ) - - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the ORCA grid to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - - IMPLICIT NONE - - ! Arguments - REAL(wp), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur - REAL(wp), DIMENSION(nopoints,3) :: pgistl - LOGICAL :: licelvls - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints - - ! Local variables - -#ifdef FESOM_TODO - - ! Temporary array for packing of input data without halos. - REAL(wp), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zsend - ! Arrays for rotation of current vectors from ij to ne. - REAL(wp), DIMENSION(jpi,jpj) :: zotx1, zoty1, ztmpx, ztmpy - ! Array for fraction of leads (i.e. ocean) - REAL(wp), DIMENSION(jpi,jpj) :: zfr_l - REAL(wp) :: zt - ! Loop variables - INTEGER :: ji, jj, jk, jl - REAL(wp) :: zhook_handle ! Dr Hook handle - - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',0,zhook_handle) - IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_get') - - zfr_l(:,:) = 1.- fr_i(:,:) - - IF (.NOT.ALLOCATED(zscplsst)) THEN - ALLOCATE(zscplsst(jpi,jpj)) - ENDIF - - ! Pack SST data and convert to K. - - IF ( nsstlvl(1) == nsstlvl(2) ) THEN - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = tsn(ji,jj,nsstlvl(1),jp_tem) + rt0 - zscplsst(ji,jj) = zsend(jk) - rt0 - ENDDO - ENDDO - ELSE - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = SUM(& - & tsn(ji,jj,nsstlvl(1):nsstlvl(2),jp_tem) * & - & tmask(ji,jj,nsstlvl(1):nsstlvl(2)) * & - & fse3t(ji,jj,nsstlvl(1):nsstlvl(2)) ) / & - & MAX( SUM( & - & tmask(ji,jj,nsstlvl(1):nsstlvl(2)) * & - & fse3t(ji,jj,nsstlvl(1):nsstlvl(2))) , 1.0 ) + rt0 - zscplsst(ji,jj) = zsend(jk) - rt0 - ENDDO - ENDDO - ENDIF - CALL lbc_lnk( zscplsst, 'T', 1. ) - - ! Interpolate SST - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgsst ) - - ! Pack ice temperature data (already in K) - -#if defined key_lim2 - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = tn_ice(ji,jj,1) - ENDDO - ENDDO -#else - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = 0 - zt=0.0 - DO jl = 1, jpl - zsend(jk) = zsend(jk) + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) - zt = zt + a_i(ji,jj,jl) - ENDDO - IF ( zt > 0.0 ) THEN - zsend(jk) = zsend(jk) / zt - ELSE - zsend(jk) = rt0 - ENDIF - ENDDO - ENDDO -#endif - - ! Interpolate ice temperature - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgist ) - - ! Ice level temperatures - - IF (licelvls) THEN - -#if defined key_lim2 - - DO jl = 1, 3 - - ! Pack ice temperatures data at level jl(already in K) - - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = tbif (ji,jj,jl) - ENDDO - ENDDO - - ! Interpolate ice temperature at level jl - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgistl(:,jl) ) - - ENDDO - -#else - WRITE(0,*)'licelvls needs to be sorted for LIM3' - CALL abort -#endif - - ENDIF - - ! Pack ice albedo data - -#if defined key_lim2 - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = alb_ice(ji,jj,1) - ENDDO - ENDDO -#else - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = 0 - zt=0.0 - DO jl = 1, jpl - zsend(jk) = zsend(jk) + alb_ice(ji,jj,jl) * a_i(ji,jj,jl) - zt = zt + a_i(ji,jj,jl) - ENDDO - IF ( zt > 0.0_wp ) THEN - zsend(jk) = zsend(jk) / zt - ELSE - zsend(jk) = albedo_oce_mix(ji,jj) - ENDIF - ENDDO - ENDDO -#endif - - ! Interpolate ice albedo - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgalb ) - - ! Pack ice fraction data - - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = fr_i(ji,jj) - ENDDO - ENDDO - - ! Interpolation of ice fraction. - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgifr ) - - ! Pack ice thickness data - -#if defined key_lim2 - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = hicif(ji,jj) - ENDDO - ENDDO -#else - ! LIM3 - ! Average over categories (to be revised). - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = 0 - DO jl = 1, jpl - zsend(jk) = zsend(jk) + ht_i(ji,jj,jl) * a_i(ji,jj,jl) - ENDDO - ENDDO - ENDDO -#endif - - ! Interpolation of ice thickness - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pghic ) - - ! Pack snow thickness data - -#if defined key_lim2 - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = hsnif(ji,jj) - ENDDO - ENDDO -#else - ! LIM3 - ! Average over categories (to be revised). - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = 0 - DO jl = 1, jpl - zsend(jk) = zsend(jk) + ht_s(ji,jj,jl) * a_i(ji,jj,jl) - ENDDO - ENDDO - ENDDO -#endif - - ! Interpolation of snow thickness - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pghsn ) - - ! Currents needs to be rotated from ij to ne first - - DO jj = 2, jpjm1 - DO ji = 2, jpim1 - zotx1(ji,jj) = 0.5 * ( un(ji,jj,1) + un(ji-1,jj ,1) ) - zoty1(ji,jj) = 0.5 * ( vn(ji,jj,1) + vn(ji ,jj-1,1) ) - END DO - END DO - CALL lbc_lnk( zotx1, 'T', -1. ) - CALL lbc_lnk( zoty1, 'T', -1. ) - CALL rot_rep( zotx1, zoty1, 'T', 'ij->e', ztmpx ) - CALL rot_rep( zotx1, zoty1, 'T', 'ij->n', ztmpy ) - - ! Pack U current - - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = ztmpx(ji,jj) - ENDDO - ENDDO - - ! Interpolate U current - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgucur ) - - ! Pack V current - - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = ztmpy(ji,jj) - ENDDO - ENDDO - - ! Interpolate V current - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgvcur ) - - IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) - -#else - - WRITE(0,*)'nemogcmcoup_lim2_get not done for FESOM yet' - CALL abort - -#endif - -END SUBROUTINE nemogcmcoup_lim2_get - diff --git a/nemogcmcoup_mlflds_get.F90 b/nemogcmcoup_mlflds_get.F90 deleted file mode 100644 index f03bae3a4..000000000 --- a/nemogcmcoup_mlflds_get.F90 +++ /dev/null @@ -1,26 +0,0 @@ -SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & - & nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d ) - - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the ORCA grid to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - IMPLICIT NONE - - ! Arguments - REAL(wp), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints,nlev - - ! Local variables - - WRITE(0,*)'nemogcmcoup_mlflds_get should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_mlflds_get - diff --git a/nemogcmcoup_mlinit.F90 b/nemogcmcoup_mlinit.F90 deleted file mode 100644 index f44b27f06..000000000 --- a/nemogcmcoup_mlinit.F90 +++ /dev/null @@ -1,26 +0,0 @@ -SUBROUTINE nemogcmcoup_mlinit( mype, npes, icomm, & - & nlev, nopoints, pdep, pmask ) - - ! Get information about the vertical discretization of the ocean model - - ! nlevs are maximum levels on input and actual number levels on output - - USE par_kind - - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Grid information - INTEGER, INTENT(INOUT) :: nlev, nopoints - REAL(wp), INTENT(OUT), DIMENSION(nlev) :: pdep - REAL(wp), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask - - ! Local variables - - WRITE(0,*)'nemogcmcoup_mlinit should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_mlinit diff --git a/nemogcmcoup_step.F90 b/nemogcmcoup_step.F90 deleted file mode 100644 index a88cf9867..000000000 --- a/nemogcmcoup_step.F90 +++ /dev/null @@ -1,33 +0,0 @@ -SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) - - IMPLICIT NONE - - ! Arguments - - ! Time step - INTEGER, INTENT(IN) :: istp - - ! Data and time from NEMO - INTEGER, INTENT(OUT) :: icdate, ictime - - ! Local variables - - ! Advance the FESOM model 1 time step - - WRITE(0,*)'Insert FESOM step here.' - - ! Compute date and time at the end of the time step. - -#ifdef FESOM_TODO - iye = ndastp / 10000 - imo = ndastp / 100 - iye * 100 - ida = MOD( ndastp, 100 ) - CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) - zjul = zjul + ( nsec_day + 0.5_wp * rdttra(1) ) / 86400.0_wp - CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) - icdate = iye * 10000 + imo * 100 + ida - ictime = ihh * 10000 + imm * 100 + iss -#endif - -END SUBROUTINE nemogcmcoup_step - diff --git a/nemogcmcoup_update.F90 b/nemogcmcoup_update.F90 deleted file mode 100644 index d712c2098..000000000 --- a/nemogcmcoup_update.F90 +++ /dev/null @@ -1,32 +0,0 @@ -SUBROUTINE nemogcmcoup_update( mype, npes, icomm, & - & npoints, pgutau, pgvtau, & - & pgqsr, pgqns, pgemp, kt, ldebug ) - - ! Update fluxes in nemogcmcoup_data by parallel - ! interpolation of the input gaussian grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Fluxes on the Gaussian grid. - INTEGER, INTENT(IN) :: npoints - REAL(wp), DIMENSION(npoints), intent(IN) :: & - & pgutau, pgvtau, pgqsr, pgqns, pgemp - ! Current time step - INTEGER, INTENT(in) :: kt - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_update should be called with with.' - CALL abort - -END SUBROUTINE nemogcmcoup_update - - diff --git a/nemogcmcoup_update_add.F90 b/nemogcmcoup_update_add.F90 deleted file mode 100644 index c6813bf02..000000000 --- a/nemogcmcoup_update_add.F90 +++ /dev/null @@ -1,32 +0,0 @@ -SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & - & npoints, pgsst, pgtsk, kt, ldebug ) - - ! Update addetiona in nemogcmcoup_data by parallel - ! interpolation of the input gaussian grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Input on the Gaussian grid. - INTEGER, INTENT(IN) :: npoints - REAL(wp), DIMENSION(npoints), intent(IN) :: & - & pgsst, pgtsk - ! Current time step - INTEGER, INTENT(in) :: kt - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom.' - CALL abort - - -END SUBROUTINE nemogcmcoup_update_add - - diff --git a/nemogcmcoup_wam_coupinit.F90 b/nemogcmcoup_wam_coupinit.F90 deleted file mode 100644 index 428cbbe06..000000000 --- a/nemogcmcoup_wam_coupinit.F90 +++ /dev/null @@ -1,25 +0,0 @@ -SUBROUTINE nemogcmcoup_wam_coupinit( mype, npes, icomm, & - & nlocpoints, nglopoints, & - & nlocmsk, ngloind, iunit ) - - ! Initialize single executable coupling between WAM and NEMO - ! This is called from WAM. - - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype,npes,icomm - ! WAM grid information - ! Number of local and global points - INTEGER, INTENT(IN) :: nlocpoints, nglopoints - ! Integer mask and global indices - INTEGER, DIMENSION(nlocpoints), INTENT(IN) :: nlocmsk, ngloind - ! Unit for output in parinter_init - INTEGER :: iunit - - WRITE(0,*)'Wam couplind not implemented for FESOM' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_coupinit diff --git a/nemogcmcoup_wam_get.F90 b/nemogcmcoup_wam_get.F90 deleted file mode 100644 index 2975cfd6f..000000000 --- a/nemogcmcoup_wam_get.F90 +++ /dev/null @@ -1,30 +0,0 @@ -SUBROUTINE nemogcmcoup_wam_get( mype, npes, icomm, & - & nopoints, pwsst, pwicecov, pwicethk, & - & pwucur, pwvcur, licethk ) - - ! Interpolate from the ORCA grid - ! to the WAM grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - IMPLICIT NONE - - ! Arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number WAM grid points - INTEGER, INTENT(IN) :: nopoints - ! Local arrays of sst, ice cover, ice thickness and currents - REAL(wp), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur - LOGICAL :: licethk - - ! Local variables - - WRITE(0,*)'nemogcmcoup_wam_get should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_get - diff --git a/nemogcmcoup_wam_update.F90 b/nemogcmcoup_wam_update.F90 deleted file mode 100644 index abadc4855..000000000 --- a/nemogcmcoup_wam_update.F90 +++ /dev/null @@ -1,34 +0,0 @@ -SUBROUTINE nemogcmcoup_wam_update( mype, npes, icomm, & - & npoints, pwswh, pwmwp, & - & pwphioc, pwtauoc, pwstrn, & - & pwustokes, pwvstokes, & - & cdtpro, ldebug ) - - ! Update fluxes in nemogcmcoup_data by parallel - ! interpolation of the input WAM grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Data on the WAM grid. - INTEGER, INTENT(IN) :: npoints - REAL(wp), DIMENSION(npoints), INTENT(IN) :: & - & pwswh, pwmwp, pwphioc, pwtauoc, pwstrn, pwustokes, pwvstokes - ! Current time - CHARACTER(len=14), INTENT(IN) :: cdtpro - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_wam_update should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_update - - diff --git a/nemogcmcoup_wam_update_stress.F90 b/nemogcmcoup_wam_update_stress.F90 deleted file mode 100644 index 5777d46c2..000000000 --- a/nemogcmcoup_wam_update_stress.F90 +++ /dev/null @@ -1,32 +0,0 @@ -SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & - & pwutau, pwvtau, pwuv10n, pwphif,& - & cdtpro, ldebug ) - - ! Update stresses in nemogcmcoup_data by parallel - ! interpolation of the input WAM grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Data on the WAM grid. - INTEGER, INTENT(IN) :: npoints - REAL(wp), DIMENSION(npoints), INTENT(IN) :: & - & pwutau, pwvtau, pwuv10n, pwphif - ! Current time step - CHARACTER(len=14), INTENT(IN) :: cdtpro - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_wam_update_stress should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_update_stress - - diff --git a/par_kind.F90 b/par_kind.F90 deleted file mode 100644 index 781434883..000000000 --- a/par_kind.F90 +++ /dev/null @@ -1,8 +0,0 @@ -MODULE par_kind - IMPLICIT NONE - INTEGER, PUBLIC, PARAMETER :: & !: Floating point section - sp = SELECTED_REAL_KIND( 6, 37), & !: single precision (real 4) - dp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) - wp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) - ik = SELECTED_INT_KIND(6) !: integer precision -END MODULE par_kind From ea1e771a43b13d66f56aa0e12f9510f5d9ca64b3 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Thu, 7 Jun 2018 17:16:19 +0100 Subject: [PATCH 011/909] All module are now in ifs_modules.F90 --- nctools.F90 | 40 --- parinter.F90 | 762 ------------------------------------------------- scripgrid.F90 | 278 ------------------ scrippar.F90 | 5 - scripremap.F90 | 734 ----------------------------------------------- 5 files changed, 1819 deletions(-) delete mode 100644 nctools.F90 delete mode 100644 parinter.F90 delete mode 100644 scripgrid.F90 delete mode 100644 scrippar.F90 delete mode 100644 scripremap.F90 diff --git a/nctools.F90 b/nctools.F90 deleted file mode 100644 index d0f1e99c2..000000000 --- a/nctools.F90 +++ /dev/null @@ -1,40 +0,0 @@ -#define __MYFILE__ 'nctools.F90' -MODULE nctools - - ! Utility subroutines for netCDF access - ! Modified : MAB (nf90, handle_error, LINE&FILE) - ! Modifled : KSM (new shorter name) - - USE netcdf - - PUBLIC ldebug_netcdf, nchdlerr - LOGICAL :: ldebug_netcdf = .FALSE. ! Debug switch for netcdf - -CONTAINS - - SUBROUTINE nchdlerr(status,lineno,filename) - - ! Error handler for netCDF access - IMPLICIT NONE - - - INTEGER :: status ! netCDF return status - INTEGER :: lineno ! Line number (usually obtained from - ! preprocessing __LINE__,__MYFILE__) - CHARACTER(len=*),OPTIONAL :: filename - - IF (status/=nf90_noerr) THEN - WRITE(*,*)'Netcdf error, code ',status - IF (PRESENT(filename)) THEN - WRITE(*,*)'In file ',filename,' in line ',lineno - ELSE - WRITE(*,*)'In line ',lineno - END IF - WRITE(*,'(2A)')' Error message : ',nf90_strerror(status) - CALL abort - ENDIF - - END SUBROUTINE nchdlerr - -!---------------------------------------------------------------------- -END MODULE nctools diff --git a/parinter.F90 b/parinter.F90 deleted file mode 100644 index 1dfc6ca8f..000000000 --- a/parinter.F90 +++ /dev/null @@ -1,762 +0,0 @@ -#define __MYFILE__ 'parinter.F90' -MODULE parinter - -#if defined key_mpp_mpi - USE mpi -#endif - USE scripremap - USE scrippar - USE nctools - - IMPLICIT NONE - - ! Type to contains interpolation information - ! (like what is in scripremaptype) and message - ! passing information - - TYPE parinterinfo - ! Number of local links - INTEGER :: num_links - ! Destination side - INTEGER, POINTER, DIMENSION(:) :: dst_address - ! Source addresses and work array - INTEGER, POINTER, DIMENSION(:) :: src_address - ! Local remap matrix - REAL(scripdp), POINTER, DIMENSION(:,:) :: remap_matrix - ! Message passing information - ! Array of local addresses for send buffer - ! packing - INTEGER, POINTER, DIMENSION(:) :: send_address - ! Sending bookkeeping - INTEGER :: nsendtot - INTEGER, POINTER, DIMENSION(:) :: nsend,nsdisp - ! Receiving bookkeeping - INTEGER :: nrecvtot - INTEGER, POINTER, DIMENSION(:) :: nrecv,nrdisp - END TYPE parinterinfo - -CONTAINS - - SUBROUTINE parinter_init( mype, nproc, mpi_comm, & - & nsrclocpoints, nsrcglopoints, srcmask, srcgloind, & - & ndstlocpoints, ndstglopoints, dstmask, dstgloind, & - & remap, pinfo, lcommout, commoutprefix, iunit ) - - ! Setup interpolation based on SCRIP format weights in - ! remap and the source/destination grids information. - - ! Procedure: - - ! 1) A global SCRIP remapping file is read on all processors. - ! 2) Find local destination points in the global grid. - ! 3) Find which processor needs source data and setup buffer - ! information for sending data. - ! 4) Construct new src remapping for buffer received - - ! All information is stored in the TYPE(parinterinfo) output - ! data type - - ! Input arguments. - - ! Message passing information - INTEGER, INTENT(IN) :: mype, nproc, mpi_comm - ! Source grid local and global number of grid points - INTEGER, INTENT(IN) :: nsrclocpoints, nsrcglopoints - ! Source integer mask (0/1) for SCRIP compliance - INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcmask - ! Source global addresses of each local grid point - INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcgloind - ! Destination grid local and global number of grid points - INTEGER, INTENT(IN) :: ndstlocpoints, ndstglopoints - ! Destination integer mask (0/1) for SCRIP compliance - INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstmask - ! Destination global addresses of each local grid point - INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstgloind - ! SCRIP remapping data - TYPE(scripremaptype) :: remap - ! Switch for output communication patterns - LOGICAL :: lcommout - CHARACTER(len=*) :: commoutprefix - ! Unit to use for output - INTEGER :: iunit - - ! Output arguments - - ! Interpolation and message passing information - TYPE(parinterinfo), INTENT(OUT) :: pinfo - - ! Local variable - - ! Variable for glocal <-> local address/pe information - INTEGER, DIMENSION(nsrcglopoints) :: ilsrcmppmap, ilsrclocind - INTEGER, DIMENSION(nsrcglopoints) :: igsrcmppmap, igsrclocind - INTEGER, DIMENSION(ndstglopoints) :: ildstmppmap, ildstlocind - INTEGER, DIMENSION(ndstglopoints) :: igdstmppmap, igdstlocind - INTEGER, DIMENSION(nsrcglopoints) :: isrcpe,isrcpetmp - INTEGER, DIMENSION(nsrcglopoints) :: isrcaddtmp - INTEGER, DIMENSION(0:nproc-1) :: isrcoffset - INTEGER, DIMENSION(nproc) :: isrcno, isrcoff, isrccur - INTEGER, DIMENSION(nproc) :: ircvoff, ircvcur - INTEGER, DIMENSION(:), ALLOCATABLE :: isrctot, ircvtot - - ! Misc variable - INTEGER :: i,n,pe - INTEGER :: istatus - CHARACTER(len=256) :: cdfile - - ! Check that masks are consistent. - - ! Remark: More consistency tests between remapping information - ! and input argument could be code, but for now we settle - ! for checking the masks. - - ! Source grid - - DO i=1,nsrclocpoints - IF (srcmask(i)/=remap%src%grid_imask(srcgloind(i))) THEN - WRITE(iunit,*)'Source imask is inconsistent at ' - WRITE(iunit,*)'global index = ',srcgloind(i) - WRITE(iunit,*)'Source mask = ',srcmask(i) - WRITE(iunit,*)'Remap mask = ',remap%src%grid_imask(srcgloind(i)) - WRITE(iunit,*)'Latitude = ',remap%src%grid_center_lat(srcgloind(i)) - WRITE(iunit,*)'Longitude = ',remap%src%grid_center_lon(srcgloind(i)) - CALL flush(iunit) - CALL abort - ENDIF - ENDDO - - ! Destination grid - - DO i=1,ndstlocpoints - IF (dstmask(i)/=remap%dst%grid_imask(dstgloind(i))) THEN - WRITE(iunit,*)'Destination imask is inconsistent at ' - WRITE(iunit,*)'global index = ',dstgloind(i) - WRITE(iunit,*)'Destin mask = ',dstmask(i) - WRITE(iunit,*)'Remap mask = ',remap%dst%grid_imask(dstgloind(i)) - WRITE(iunit,*)'Latitude = ',remap%dst%grid_center_lat(dstgloind(i)) - WRITE(iunit,*)'Longitude = ',remap%dst%grid_center_lon(dstgloind(i)) - CALL flush(iunit) - CALL abort - ENDIF - ENDDO - - ! Setup global to local and vice versa mappings. - - ilsrcmppmap(:)=-1 - ilsrclocind(:)=0 - ildstmppmap(:)=-1 - ildstlocind(:)=0 - - DO i=1,nsrclocpoints - ilsrcmppmap(srcgloind(i))=mype - ilsrclocind(srcgloind(i))=i - ENDDO - - DO i=1,ndstlocpoints - ildstmppmap(dstgloind(i))=mype - ildstlocind(dstgloind(i))=i - ENDDO - -#if defined key_mpp_mpi - CALL mpi_allreduce(ilsrcmppmap,igsrcmppmap,nsrcglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) - CALL mpi_allreduce(ilsrclocind,igsrclocind,nsrcglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) - CALL mpi_allreduce(ildstmppmap,igdstmppmap,ndstglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) - CALL mpi_allreduce(ildstlocind,igdstlocind,ndstglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) -#else - igsrcmppmap(:)=ilsrcmppmap(:) - igsrclocind(:)=ilsrclocind(:) - igdstmppmap(:)=ildstmppmap(:) - igdstlocind(:)=ildstlocind(:) -#endif - - ! Optionally construct an ascii file listing what src and - ! dest points belongs to which task - - ! Since igsrcmppmap and igdstmppmap are global data only do - ! this for mype==0. - - IF (lcommout.AND.(mype==0)) THEN - WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_srcmppmap_',mype+1,'.dat' - OPEN(9,file=cdfile) - DO i=1,nsrcglopoints - WRITE(9,*)remap%src%grid_center_lat(i),& - & remap%src%grid_center_lon(i), & - & igsrcmppmap(i)+1,remap%src%grid_imask(i) - ENDDO - CLOSE(9) - WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dstmppmap_',mype+1,'.dat' - OPEN(9,file=cdfile) - DO i=1,ndstglopoints - WRITE(9,*)remap%dst%grid_center_lat(i),& - & remap%dst%grid_center_lon(i), & - & igdstmppmap(i)+1,remap%dst%grid_imask(i) - ENDDO - CLOSE(9) - ENDIF - - ! - ! Standard interpolation in serial case is - ! - ! DO n=1,remap%num_links - ! zdst(remap%dst_address(n)) = zdst(remap%dst_address(n)) + & - ! & remap%remap_matrix(1,n)*zsrc(remap%src_address(n)) - ! END DO - ! - - ! In parallel we need to first find local number of links - - pinfo%num_links=0 - DO i=1,remap%num_links - IF (igdstmppmap(remap%dst_address(i))==mype) & - & pinfo%num_links=pinfo%num_links+1 - ENDDO - ALLOCATE(pinfo%dst_address(pinfo%num_links),& - & pinfo%src_address(pinfo%num_links),& - & pinfo%remap_matrix(1,pinfo%num_links)) - - ! Get local destination addresses - - n=0 - DO i=1,remap%num_links - IF (igdstmppmap(remap%dst_address(i))==mype) THEN - n=n+1 - pinfo%dst_address(n)=& - & igdstlocind(remap%dst_address(i)) - pinfo%remap_matrix(:,n)=& - & remap%remap_matrix(:,i) - ENDIF - ENDDO - - ! Get sending processors maps. - - ! The same data point might need to be sent to many processors - ! so first construct a map for processors needing the data - - isrcpe(:)=-1 - DO i=1,remap%num_links - IF (igdstmppmap(remap%dst_address(i))==mype) THEN - isrcpe(remap%src_address(i))=& - & igsrcmppmap(remap%src_address(i)) - ENDIF - ENDDO - - ! Optionally write a set if ascii file listing which tasks - ! mype needs to send to communicate with - - IF (lcommout) THEN - ! Destination processors - WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dsts_',mype+1,'.dat' - OPEN(9,file=cdfile) - DO pe=0,nproc-1 - IF (pe==mype) THEN - isrcpetmp(:)=isrcpe(:) - ENDIF -#if defined key_mpp_mpi - CALL mpi_bcast(isrcpetmp,nsrcglopoints,mpi_integer,pe,mpi_comm,istatus) -#endif - DO i=1,nsrcglopoints - IF (isrcpetmp(i)==mype) THEN - WRITE(9,*)remap%src%grid_center_lat(i),& - & remap%src%grid_center_lon(i), & - & pe+1,mype+1 - ENDIF - ENDDO - ENDDO - CLOSE(9) - ENDIF - - ! Get number of points to send to each processor - - ALLOCATE(pinfo%nsend(0:nproc-1)) - isrcno(:)=0 - DO i=1,nsrcglopoints - IF (isrcpe(i)>=0) THEN - isrcno(isrcpe(i)+1)=isrcno(isrcpe(i)+1)+1 - ENDIF - ENDDO -#if defined key_mpp_mpi - CALL mpi_alltoall(isrcno,1,mpi_integer, & - & pinfo%nsend(0:nproc-1),1,mpi_integer, & - & mpi_comm,istatus) -#else - pinfo%nsend(0:nproc-1) = isrcno(1:nproc) -#endif - pinfo%nsendtot=SUM(pinfo%nsend(0:nproc-1)) - - ! Construct sending buffer mapping. Data is mapping in - ! processor order. - - ALLOCATE(pinfo%send_address(pinfo%nsendtot)) - - ! Temporary arrays for mpi all to all. - - ALLOCATE(isrctot(SUM(isrcno(1:nproc)))) - ALLOCATE(ircvtot(SUM(pinfo%nsend(0:nproc-1)))) - - ! Offset for message parsing - - isrcoff(1)=0 - ircvoff(1)=0 - DO i=1,nproc-1 - isrcoff(i+1) = isrcoff(i) + isrcno(i) - ircvoff(i+1) = pinfo%nsend(i-1) + ircvoff(i) - ENDDO - - ! Pack indices i into a buffer - - isrccur(:)=0 - DO i=1,nsrcglopoints - IF (isrcpe(i)>=0) THEN - isrccur(isrcpe(i)+1)=isrccur(isrcpe(i)+1)+1 - isrctot(isrccur(isrcpe(i)+1)+isrcoff(isrcpe(i)+1)) = i - ENDIF - ENDDO - - ! Send the data - -#if defined key_mpp_mpi - CALL mpi_alltoallv(& - & isrctot,isrccur,isrcoff,mpi_integer, & - & ircvtot,pinfo%nsend(0:nproc-1),ircvoff,mpi_integer, & - & mpi_comm,istatus) -#else - ircvtot(:)=isrctot(:) -#endif - - ! Get the send address. ircvtot will at this point contain the - ! addresses in the global index needed for message passing - - DO i=1,pinfo%nsendtot - pinfo%send_address(i)=igsrclocind(ircvtot(i)) - ENDDO - - ! Deallocate the mpi all to all arrays - - DEALLOCATE(ircvtot,isrctot) - - ! Get number of points to receive to each processor - - ALLOCATE(pinfo%nrecv(0:nproc-1)) - pinfo%nrecv(0:nproc-1)=0 - DO i=1,nsrcglopoints - IF (isrcpe(i)>=0 .AND. isrcpe(i)=0 .AND. isrcpe(i)0) THEN - CALL nchdlerr(nf90_def_dim(ncid,'num_links',& - & pinfo%num_links,dimnl),& - & __LINE__,__MYFILE__) - ENDIF - - CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& - & 1,dimnw),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - CALL nchdlerr(nf90_def_dim(ncid,'nsendtot',& - & pinfo%nsendtot,dimnst),& - & __LINE__,__MYFILE__) - ENDIF - - IF (pinfo%nrecvtot>0) THEN - CALL nchdlerr(nf90_def_dim(ncid,'nrecvtot',& - & pinfo%nrecvtot,dimnrt),& - & __LINE__,__MYFILE__) - ENDIF - - CALL nchdlerr(nf90_def_dim(ncid,'nproc',& - & nproc,dimnpr),& - & __LINE__,__MYFILE__) - - IF (pinfo%num_links>0) THEN - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'dst_address',& - & nf90_int,dims1,idda),& - & __LINE__,__MYFILE__) - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'src_address',& - & nf90_int,dims1,idsa),& - & __LINE__,__MYFILE__) - - dims2(1)=dimnw - dims2(2)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& - & nf90_double,dims2,idrm),& - & __LINE__,__MYFILE__) - - ENDIF - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nsend',& - & nf90_int,dims1,idns),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - - dims1(1)=dimnst - CALL nchdlerr(nf90_def_var(ncid,'send_address',& - & nf90_int,dims1,idsaa),& - & __LINE__,__MYFILE__) - - ENDIF - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nrecv',& - & nf90_int,dims1,idnr),& - & __LINE__,__MYFILE__) - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nsdisp',& - & nf90_int,dims1,idnsp),& - & __LINE__,__MYFILE__) - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nrdisp',& - & nf90_int,dims1,idnrp),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) - - - IF (pinfo%num_links>0) THEN - - CALL nchdlerr(nf90_put_var(ncid,idda,pinfo%dst_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsa,pinfo%src_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idrm,pinfo%remap_matrix),& - & __LINE__,__MYFILE__) - - ENDIF - - CALL nchdlerr(nf90_put_var(ncid,idns,pinfo%nsend(0:nproc-1)),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - - CALL nchdlerr(nf90_put_var(ncid,idsaa,pinfo%send_address),& - & __LINE__,__MYFILE__) - - ENDIF - - CALL nchdlerr(nf90_put_var(ncid,idnr,pinfo%nrecv(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idnsp,pinfo%nsdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idnrp,pinfo%nrdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) - - END SUBROUTINE parinter_write - - SUBROUTINE parinter_read( mype, nproc, & - & nsrcglopoints, ndstglopoints, & - & pinfo, cdpath, cdprefix, lexists ) - - ! Write pinfo information in a netCDF file in order to - ! be able to read it rather than calling parinter_init - - ! Input arguments. - - ! Message passing information - INTEGER, INTENT(IN) :: mype, nproc - ! Source grid local global number of grid points - INTEGER, INTENT(IN) :: nsrcglopoints - ! Destination grid global number of grid points - INTEGER, INTENT(IN) :: ndstglopoints - ! Interpolation and message passing information - TYPE(parinterinfo), INTENT(OUT) :: pinfo - ! Does the information exists - LOGICAL :: lexists - ! Path and file prefix - CHARACTER(len=*) :: cdpath, cdprefix - - ! Local variable - - ! Misc variable - CHARACTER(len=1024) :: cdfile - INTEGER :: ncid, dimid, varid, num_wgts - - WRITE(cdfile,'(A,2(I8.8,A),2(I4.4,A),A)') & - & TRIM(cdpath)//'/'//TRIM(cdprefix)//'_', & - & nsrcglopoints,'_',ndstglopoints,'_',mype,'_',nproc,'.nc' - - - lexists=nf90_open(TRIM(cdfile),nf90_nowrite,ncid)==nf90_noerr - - IF (lexists) THEN - - ! If num_links is not present we assume it to be zero. - - IF (nf90_inq_dimid(ncid,'num_links',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=pinfo%num_links),& - & __LINE__,__MYFILE__) - ELSE - pinfo%num_links=0 - ENDIF - - CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=num_wgts),& - & __LINE__,__MYFILE__) - IF (num_wgts/=1) THEN - WRITE(0,*)'parinter_read: num_wgts has to be 1 for now' - CALL abort - ENDIF - - ! If nsendtot is not present we assume it to be zero. - - IF (nf90_inq_dimid(ncid,'nsendtot',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=pinfo%nsendtot),& - & __LINE__,__MYFILE__) - ELSE - pinfo%nsendtot=0 - ENDIF - - IF(nf90_inq_dimid(ncid,'nrecvtot',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=pinfo%nrecvtot),& - & __LINE__,__MYFILE__) - ELSE - pinfo%nrecvtot=0 - ENDIF - - ALLOCATE(pinfo%dst_address(pinfo%num_links),& - & pinfo%src_address(pinfo%num_links),& - & pinfo%remap_matrix(num_wgts,pinfo%num_links),& - & pinfo%nsend(0:nproc-1),& - & pinfo%send_address(pinfo%nsendtot),& - & pinfo%nrecv(0:nproc-1),& - & pinfo%nsdisp(0:nproc-1),& - & pinfo%nrdisp(0:nproc-1)) - - IF (pinfo%num_links>0) THEN - CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%dst_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%src_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%remap_matrix),& - & __LINE__,__MYFILE__) - ENDIF - - CALL nchdlerr(nf90_inq_varid(ncid,'nsend',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsend(0:nproc-1)),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - - CALL nchdlerr(nf90_inq_varid(ncid,'send_address',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%send_address),& - & __LINE__,__MYFILE__) - - ENDIF - - CALL nchdlerr(nf90_inq_varid(ncid,'nrecv',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrecv(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'nsdisp',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'nrdisp',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) - - ENDIF - - END SUBROUTINE parinter_read - -END MODULE parinter diff --git a/scripgrid.F90 b/scripgrid.F90 deleted file mode 100644 index e2f74d368..000000000 --- a/scripgrid.F90 +++ /dev/null @@ -1,278 +0,0 @@ -#define __MYFILE__ 'scripgrid.F90' -MODULE scripgrid - - USE nctools - USE scrippar - - IMPLICIT NONE - - TYPE scripgridtype - INTEGER :: grid_size - INTEGER :: grid_corners - INTEGER :: grid_rank - INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_dims - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lat - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lon - INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_imask - REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lat - REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lon - CHARACTER(len=scriplen) :: grid_center_lat_units - CHARACTER(len=scriplen) :: grid_center_lon_units - CHARACTER(len=scriplen) :: grid_imask_units - CHARACTER(len=scriplen) :: grid_corner_lat_units - CHARACTER(len=scriplen) :: grid_corner_lon_units - CHARACTER(len=scriplen) :: title - END TYPE scripgridtype - -CONTAINS - - SUBROUTINE scripgrid_read( cdfilename, grid ) - - CHARACTER(len=*) :: cdfilename - TYPE(scripgridtype) :: grid - - INTEGER :: ncid, dimid, varid - - CALL scripgrid_init(grid) - - CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'grid_size',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=grid%grid_size),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'grid_corners',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=grid%grid_corners),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'grid_rank',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=grid%grid_rank),& - & __LINE__,__MYFILE__) - - CALL scripgrid_alloc(grid) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_dims',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_imask',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_imask),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',grid%title),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) - - END SUBROUTINE scripgrid_read - - SUBROUTINE scripgrid_write( cdgridfile, grid ) - - CHARACTER(len=*) :: cdgridfile - TYPE(scripgridtype) :: grid - - INTEGER :: ncid - INTEGER :: ioldfill - INTEGER :: idimsize,idimxsize,idimysize,idimcorners,idimrank - INTEGER :: idims1rank(1),idims1size(1),idims2(2) - INTEGER :: iddims,idcentlat,idcentlon,idimask,idcornlat,idcornlon - INTEGER :: igriddims(2) - - ! Setup netcdf file - - CALL nchdlerr(nf90_create(TRIM(cdgridfile),nf90_clobber,ncid),& - & __LINE__,__MYFILE__) - - ! Define dimensions - - CALL nchdlerr(nf90_def_dim(ncid,'grid_size',& - & grid%grid_size,idimsize),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_def_dim(ncid,'grid_corners',& - & grid%grid_corners,idimcorners),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_def_dim(ncid,'grid_rank',& - & grid%grid_rank,idimrank),& - & __LINE__,__MYFILE__) - - idims1rank(1) = idimrank - - idims1size(1) = idimsize - - idims2(1) = idimcorners - idims2(2) = idimsize - - ! Define variables - - CALL nchdlerr(nf90_def_var(ncid,'grid_dims',& - & nf90_int,idims1rank,iddims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_center_lat',& - & nf90_double,idims1size,idcentlat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcentlat,'units',& - & grid%grid_center_lat_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_center_lon',& - & nf90_double,idims1size,idcentlon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcentlon,'units',& - & grid%grid_center_lon_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_imask',& - & nf90_int,idims1size,idimask),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idimask,'units',& - & grid%grid_imask_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lat',& - & nf90_double,idims2,idcornlat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcornlat,'units',& - & grid%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lon',& - & nf90_double,idims2,idcornlon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcornlon,'units',& - & grid%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& - & TRIM(grid%title)),& - & __LINE__,__MYFILE__) - - ! End of netCDF definition phase - - CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) - - ! Write variables - - - CALL nchdlerr(nf90_put_var(ncid,iddims,grid%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcentlat,& - & grid%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcentlon,& - & grid%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idimask,& - & grid%grid_imask), & - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcornlat,& - & grid%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcornlon,& - & grid%grid_corner_lon),& - & __LINE__,__MYFILE__) - - ! Close file - - CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) - - END SUBROUTINE scripgrid_write - - SUBROUTINE scripgrid_init( grid ) - - TYPE(scripgridtype) :: grid - - grid%grid_size=0 - grid%grid_corners=0 - grid%grid_rank=0 - grid%grid_center_lat_units='' - grid%grid_center_lon_units='' - grid%grid_imask_units='' - grid%grid_corner_lat_units='' - grid%grid_corner_lon_units='' - grid%title='' - - END SUBROUTINE scripgrid_init - - SUBROUTINE scripgrid_alloc( grid ) - - TYPE(scripgridtype) :: grid - - IF ( (grid%grid_size == 0) .OR. & - & (grid%grid_corners == 0) .OR. & - & (grid%grid_rank == 0) ) THEN - WRITE(*,*)'scripgridtype not initialized' - CALL abort - ENDIF - - ALLOCATE( & - & grid%grid_dims(grid%grid_rank), & - & grid%grid_center_lat(grid%grid_size), & - & grid%grid_center_lon(grid%grid_size), & - & grid%grid_corner_lat(grid%grid_corners, grid%grid_size), & - & grid%grid_corner_lon(grid%grid_corners, grid%grid_size), & - & grid%grid_imask(grid%grid_size) & - & ) - - END SUBROUTINE scripgrid_alloc - - SUBROUTINE scripgrid_dealloc( grid ) - - TYPE(scripgridtype) :: grid - - DEALLOCATE( & - & grid%grid_dims, & - & grid%grid_center_lat, & - & grid%grid_center_lon, & - & grid%grid_corner_lat, & - & grid%grid_corner_lon, & - & grid%grid_imask & - & ) - - END SUBROUTINE scripgrid_dealloc - -END MODULE scripgrid diff --git a/scrippar.F90 b/scrippar.F90 deleted file mode 100644 index 41e034979..000000000 --- a/scrippar.F90 +++ /dev/null @@ -1,5 +0,0 @@ -MODULE scrippar - INTEGER, PARAMETER :: scripdp = SELECTED_REAL_KIND(12,307) - INTEGER, PARAMETER :: scriplen = 80 -END MODULE scrippar - diff --git a/scripremap.F90 b/scripremap.F90 deleted file mode 100644 index 1e2f23c27..000000000 --- a/scripremap.F90 +++ /dev/null @@ -1,734 +0,0 @@ -#define __MYFILE__ 'scripremap.F90' -MODULE scripremap - -#if defined key_mpp_mpi - USE mpi -#endif - USE nctools - USE scrippar - USE scripgrid - - IMPLICIT NONE - - TYPE scripremaptype - INTEGER :: num_links - INTEGER :: num_wgts - TYPE(scripgridtype) :: src - TYPE(scripgridtype) :: dst - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_area - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_area - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_frac - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_frac - INTEGER, ALLOCATABLE, DIMENSION(:) :: src_address - INTEGER, ALLOCATABLE, DIMENSION(:) :: dst_address - REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: remap_matrix - CHARACTER(len=scriplen) :: src_grid_area_units - CHARACTER(len=scriplen) :: dst_grid_area_units - CHARACTER(len=scriplen) :: src_grid_frac_units - CHARACTER(len=scriplen) :: dst_grid_frac_units - CHARACTER(len=scriplen) :: title - CHARACTER(len=scriplen) :: normalization - CHARACTER(len=scriplen) :: map_method - CHARACTER(len=scriplen) :: history - CHARACTER(len=scriplen) :: conventions - END TYPE scripremaptype - -CONTAINS - - SUBROUTINE scripremap_read_work(cdfilename,remap) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - - INTEGER :: ncid, dimid, varid - LOGICAL :: lcorners - - lcorners=.TRUE. - - CALL scripremap_init(remap) - - CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_size',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%src%grid_size),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_size',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%dst%grid_size),& - & __LINE__,__MYFILE__) - - - IF (nf90_inq_dimid(ncid,'src_grid_corners',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%src%grid_corners),& - & __LINE__,__MYFILE__) - ELSE - lcorners=.FALSE. - remap%src%grid_corners=1 - ENDIF - - IF (lcorners) THEN - CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_corners',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%dst%grid_corners),& - & __LINE__,__MYFILE__) - ELSE - remap%dst%grid_corners=1 - ENDIF - - CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_rank',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%src%grid_rank),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_rank',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%dst%grid_rank),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'num_links',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%num_links),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%num_wgts),& - & __LINE__,__MYFILE__) - - CALL scripremap_alloc(remap) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_dims',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_dims',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lon),& - & __LINE__,__MYFILE__) - - IF (lcorners) THEN - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lon),& - & __LINE__,__MYFILE__) - - ELSE - - remap%src%grid_corner_lat(:,:) = 0.0 - remap%src%grid_corner_lon(:,:) = 0.0 - remap%dst%grid_corner_lat(:,:) = 0.0 - remap%dst%grid_corner_lon(:,:) = 0.0 - remap%src%grid_corner_lat_units = '' - remap%src%grid_corner_lon_units = '' - remap%dst%grid_corner_lat_units = '' - remap%dst%grid_corner_lon_units = '' - - ENDIF - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_imask',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_imask),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_imask',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_imask),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_area',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_area),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_area',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_area),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_frac',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_frac_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_frac),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_frac',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_frac_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_frac),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%remap_matrix),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',remap%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'normalization',remap%normalization),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'map_method',remap%map_method),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'history',remap%history),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'conventions',remap%conventions),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'dest_grid',remap%dst%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'source_grid',remap%src%title),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) - - END SUBROUTINE scripremap_read_work - - SUBROUTINE scripremap_read(cdfilename,remap) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - - CALL scripremap_read_work(cdfilename,remap) - - END SUBROUTINE scripremap_read - - - SUBROUTINE scripremap_read_sgl(cdfilename,remap,& - & mype,nproc,mycomm,linteronly) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - INTEGER :: mype,nproc,mycomm - LOGICAL :: linteronly - - INTEGER, DIMENSION(8) :: isizes - INTEGER :: ierr, ip - - IF (mype==0) THEN - CALL scripremap_read_work(cdfilename,remap) -#if defined key_mpp_mpi - isizes(1)=remap%src%grid_size - isizes(2)=remap%dst%grid_size - isizes(3)=remap%src%grid_corners - isizes(4)=remap%dst%grid_corners - isizes(5)=remap%src%grid_rank - isizes(6)=remap%dst%grid_rank - isizes(7)=remap%num_links - isizes(8)=remap%num_wgts - CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) - ELSE - CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) - CALL scripremap_init(remap) - remap%src%grid_size=isizes(1) - remap%dst%grid_size=isizes(2) - remap%src%grid_corners=isizes(3) - remap%dst%grid_corners=isizes(4) - remap%src%grid_rank=isizes(5) - remap%dst%grid_rank=isizes(6) - remap%num_links=isizes(7) - remap%num_wgts=isizes(8) - CALL scripremap_alloc(remap) -#endif - ENDIF - -#if defined key_mpp_mpi - - IF (.NOT.linteronly) THEN - - CALL mpi_bcast( remap%src%grid_dims, remap%src%grid_rank, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_center_lat, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_center_lon, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lat, remap%src%grid_corners*remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lon, remap%src%grid_corners*remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - - CALL mpi_bcast( remap%dst%grid_dims, remap%dst%grid_rank, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lat, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lon, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lat, remap%dst%grid_corners*remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lon, remap%dst%grid_corners*remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - - CALL mpi_bcast( remap%src_grid_area, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_area, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src_grid_frac, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_frac, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - - CALL mpi_bcast( remap%src%grid_center_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_center_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_imask_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_imask_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src_grid_area_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_area_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src_grid_frac_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_frac_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%title, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%normalization, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%map_method, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%history, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%conventions, scriplen, & - & mpi_character, 0, mycomm, ierr ) - ENDIF - - CALL mpi_bcast( remap%src_address, remap%num_links, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_address, remap%num_links, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%remap_matrix, remap%num_wgts*remap%num_links, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_imask, remap%src%grid_size, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_imask, remap%dst%grid_size, & - & mpi_integer, 0, mycomm, ierr ) - -#endif - END SUBROUTINE scripremap_read_sgl - - SUBROUTINE scripremap_write(cdfilename,remap) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - - INTEGER :: ncid - INTEGER :: dimsgs,dimdgs,dimsgc,dimdgc,dimsgr,dimdgr,dimnl,dimnw - INTEGER :: dims1(1),dims2(2) - INTEGER :: idsgd,iddgd,idsgea,iddgea,idsgeo,iddgeo - INTEGER :: idsgoa,idsgoo,iddgoa,iddgoo,idsgim,iddgim,idsgar,iddgar - INTEGER :: idsgf,iddgf,idsga,iddga,idsa,idda,idrm - - CALL nchdlerr(nf90_create(TRIM(cdfilename),nf90_clobber,ncid), & - & __LINE__, __MYFILE__ ) - - CALL nchdlerr(nf90_def_dim(ncid,'src_grid_size',& - & remap%src%grid_size,dimsgs),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_size',& - & remap%dst%grid_size,dimdgs),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'src_grid_corners',& - & remap%src%grid_corners,dimsgc),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_corners',& - & remap%dst%grid_corners,dimdgc),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'src_grid_rank',& - & remap%src%grid_rank,dimsgr),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_rank',& - & remap%dst%grid_rank,dimdgr),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'num_links',& - & remap%num_links,dimnl),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& - & remap%num_wgts,dimnw),& - & __LINE__,__MYFILE__) - - dims1(1)=dimsgr - CALL nchdlerr(nf90_def_var(ncid,'src_grid_dims',& - & nf90_int,dims1,idsgd),& - & __LINE__,__MYFILE__) - - dims1(1)=dimdgr - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_dims',& - & nf90_int,dims1,iddgd), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lat',& - & nf90_double,dims1,idsgea), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lat',& - & nf90_double,dims1,iddgea), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lon',& - & nf90_double,dims1,idsgeo), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lon',& - & nf90_double,dims1,iddgeo), & - & __LINE__,__MYFILE__) - - dims2(1)=dimsgc - dims2(2)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lat',& - & nf90_double,dims2,idsgoa), & - & __LINE__,__MYFILE__) - - dims2(1)=dimsgc - dims2(2)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lon',& - & nf90_double,dims2,idsgoo), & - & __LINE__,__MYFILE__) - - dims2(1)=dimdgc - dims2(2)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lat',& - & nf90_double,dims2,iddgoa), & - & __LINE__,__MYFILE__) - - dims2(1)=dimdgc - dims2(2)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lon',& - & nf90_double,dims2,iddgoo), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_imask',& - & nf90_int,dims1,idsgim), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_imask',& - & nf90_int,dims1,iddgim), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_area',& - & nf90_double,dims1,idsga), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_area',& - & nf90_double,dims1,iddga), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_frac',& - & nf90_double,dims1,idsgf), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_frac',& - & nf90_double,dims1,iddgf), & - & __LINE__,__MYFILE__) - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'src_address',& - & nf90_int,dims1,idsa), & - & __LINE__,__MYFILE__) - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'dst_address',& - & nf90_int,dims1,idda), & - & __LINE__,__MYFILE__) - - dims2(1)=dimnw - dims2(2)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& - & nf90_double,dims2,idrm), & - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_att(ncid,idsgea,'units',& - & remap%src%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgea,'units',& - & remap%dst%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgeo,'units',& - & remap%src%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgeo,'units',& - & remap%dst%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgoa,'units',& - & remap%src%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgoo,'units',& - & remap%src%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgoa,'units',& - & remap%dst%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgoo,'units',& - & remap%dst%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgim,'units',& - & remap%src%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgim,'units',& - & remap%dst%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsga,'units',& - & remap%src_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddga,'units',& - & remap%dst_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgf,'units',& - & remap%src_grid_frac_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgf,'units',& - & remap%dst_grid_frac_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& - & remap%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'normalization',& - & remap%normalization),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'map_method',& - & remap%map_method),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'history',& - & remap%history),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'conventions',& - & remap%conventions),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'dest_grid',& - & remap%dst%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'source_grid',& - & remap%src%title),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgd,remap%src%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,iddgd,remap%dst%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgea,remap%src%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,iddgea,remap%dst%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgeo,remap%src%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,iddgeo,remap%dst%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgoa,remap%src%grid_corner_lat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsgoo,remap%src%grid_corner_lon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgoa,remap%dst%grid_corner_lat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgoo,remap%dst%grid_corner_lon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsgim,remap%src%grid_imask),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgim,remap%dst%grid_imask),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsga,remap%src_grid_area),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddga,remap%dst_grid_area),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsgf,remap%src_grid_frac),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgf,remap%dst_grid_frac),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsa,remap%src_address),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idda,remap%dst_address),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idrm,remap%remap_matrix),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) - - END SUBROUTINE scripremap_write - - SUBROUTINE scripremap_init(remap) - - TYPE(scripremaptype) :: remap - - CALL scripgrid_init(remap%src) - CALL scripgrid_init(remap%dst) - remap%num_links = 0 - remap%num_wgts = 0 - remap%title='' - remap%normalization='' - remap%map_method='' - remap%history='' - remap%conventions='' - remap%src_grid_area_units='' - remap%dst_grid_area_units='' - remap%src_grid_frac_units='' - remap%dst_grid_frac_units='' - - END SUBROUTINE scripremap_init - - SUBROUTINE scripremap_alloc(remap) - - TYPE(scripremaptype) :: remap - - IF ( (remap%num_links == 0) .OR. & - & (remap%num_wgts == 0) ) THEN - WRITE(*,*)'scripremaptype not initialized' - CALL abort - ENDIF - - CALL scripgrid_alloc(remap%src) - CALL scripgrid_alloc(remap%dst) - - ALLOCATE( & - & remap%src_grid_area(remap%src%grid_size), & - & remap%dst_grid_area(remap%dst%grid_size), & - & remap%src_grid_frac(remap%src%grid_size), & - & remap%dst_grid_frac(remap%dst%grid_size), & - & remap%src_address(remap%num_links), & - & remap%dst_address(remap%num_links), & - & remap%remap_matrix(remap%num_wgts, remap%num_links) & - & ) - - END SUBROUTINE scripremap_alloc - - SUBROUTINE scripremap_dealloc(remap) - - TYPE(scripremaptype) :: remap - - DEALLOCATE( & - & remap%src_grid_area, & - & remap%dst_grid_area, & - & remap%src_grid_frac, & - & remap%dst_grid_frac, & - & remap%src_address, & - & remap%dst_address, & - & remap%remap_matrix & - & ) - - CALL scripgrid_dealloc(remap%src) - CALL scripgrid_dealloc(remap%dst) - - CALL scripremap_init(remap) - - END SUBROUTINE scripremap_dealloc - -END MODULE scripremap From 84f77da1297078d68437afbff2737c29fdcd0267 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Thu, 7 Jun 2018 17:27:40 +0100 Subject: [PATCH 012/909] Remove key_mpp_mpi from Makefile and added it to the source. --- Makefile | 4 +- ifs_interface.F90 | 4 - ifs_modules.F90 | 1823 ++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 1823 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index b969cf2ae..8bd75b807 100644 --- a/Makefile +++ b/Makefile @@ -6,12 +6,12 @@ FC=mpif90 LD=$(FC) FCFLAGSFIXED=-g -c -O3 -fdefault-real-8 -fdefault-double-8 -fcray-pointer -fconvert=swap -fopenmp $(NETCDF_INCLUDE) $(GRIB_API_INCLUDE) FCFLAGSFREE=$(FCFLAGSFIXED) -CPPFLAGS=-traditional -P -Dkey_mpp_mpi +CPPFLAGS=-traditional -P LDFLAGS=-g -O3 -fdefault-real-8 -fdefault-double-8 -fcray-pointer -fconvert=swap -fopenmp $(MAGPLUSLIB_SHARED) $(NETCDF_LIB) $(GRIB_API_LIB) AR=ar ARFLAGS=-rv -OBJ=scripremap.o scripgrid.o parinter.o nctools.o ifs_modules.o ifs_interface.o ifs_notused.o +OBJ=ifs_modules.o ifs_interface.o ifs_notused.o all: libfesom.a diff --git a/ifs_interface.F90 b/ifs_interface.F90 index 5ad07fc11..82537dcca 100644 --- a/ifs_interface.F90 +++ b/ifs_interface.F90 @@ -114,12 +114,8 @@ SUBROUTINE nemogcmcoup_coupinit( mype, npes, icomm, & ! Global number of Gaussian gridpoints -#if defined key_mpp_mpi CALL mpi_allreduce( npoints, nglopoints, 1, & & mpi_integer, mpi_sum, icomm, ierr) -#else - nglopoints=npoints -#endif WRITE(0,*)'Update FESOM global scalar points' noglopoints=126858 diff --git a/ifs_modules.F90 b/ifs_modules.F90 index 20e9dac57..a6f07acaa 100644 --- a/ifs_modules.F90 +++ b/ifs_modules.F90 @@ -1,3 +1,5 @@ +#define __MYFILE__ 'ifs_modules.F90' +#define key_mpp_mpi ! Set of modules needed by the interface to IFS. ! ! -Original code by Kristian Mogensen, ECMWF. @@ -11,6 +13,1825 @@ MODULE par_kind ik = SELECTED_INT_KIND(6) !: integer precision END MODULE par_kind +MODULE nctools + + ! Utility subroutines for netCDF access + ! Modified : MAB (nf90, handle_error, LINE&FILE) + ! Modifled : KSM (new shorter name) + + USE netcdf + + PUBLIC ldebug_netcdf, nchdlerr + LOGICAL :: ldebug_netcdf = .FALSE. ! Debug switch for netcdf + +CONTAINS + + SUBROUTINE nchdlerr(status,lineno,filename) + + ! Error handler for netCDF access + IMPLICIT NONE + + + INTEGER :: status ! netCDF return status + INTEGER :: lineno ! Line number (usually obtained from + ! preprocessing __LINE__,__MYFILE__) + CHARACTER(len=*),OPTIONAL :: filename + + IF (status/=nf90_noerr) THEN + WRITE(*,*)'Netcdf error, code ',status + IF (PRESENT(filename)) THEN + WRITE(*,*)'In file ',filename,' in line ',lineno + ELSE + WRITE(*,*)'In line ',lineno + END IF + WRITE(*,'(2A)')' Error message : ',nf90_strerror(status) + CALL abort + ENDIF + + END SUBROUTINE nchdlerr + +!---------------------------------------------------------------------- +END MODULE nctools + +MODULE scrippar + INTEGER, PARAMETER :: scripdp = SELECTED_REAL_KIND(12,307) + INTEGER, PARAMETER :: scriplen = 80 +END MODULE scrippar + +MODULE scripgrid + + USE nctools + USE scrippar + + IMPLICIT NONE + + TYPE scripgridtype + INTEGER :: grid_size + INTEGER :: grid_corners + INTEGER :: grid_rank + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_dims + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lon + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_imask + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lon + CHARACTER(len=scriplen) :: grid_center_lat_units + CHARACTER(len=scriplen) :: grid_center_lon_units + CHARACTER(len=scriplen) :: grid_imask_units + CHARACTER(len=scriplen) :: grid_corner_lat_units + CHARACTER(len=scriplen) :: grid_corner_lon_units + CHARACTER(len=scriplen) :: title + END TYPE scripgridtype + +CONTAINS + + SUBROUTINE scripgrid_read( cdfilename, grid ) + + CHARACTER(len=*) :: cdfilename + TYPE(scripgridtype) :: grid + + INTEGER :: ncid, dimid, varid + + CALL scripgrid_init(grid) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_corners),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_rank),& + & __LINE__,__MYFILE__) + + CALL scripgrid_alloc(grid) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',grid%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_read + + SUBROUTINE scripgrid_write( cdgridfile, grid ) + + CHARACTER(len=*) :: cdgridfile + TYPE(scripgridtype) :: grid + + INTEGER :: ncid + INTEGER :: ioldfill + INTEGER :: idimsize,idimxsize,idimysize,idimcorners,idimrank + INTEGER :: idims1rank(1),idims1size(1),idims2(2) + INTEGER :: iddims,idcentlat,idcentlon,idimask,idcornlat,idcornlon + INTEGER :: igriddims(2) + + ! Setup netcdf file + + CALL nchdlerr(nf90_create(TRIM(cdgridfile),nf90_clobber,ncid),& + & __LINE__,__MYFILE__) + + ! Define dimensions + + CALL nchdlerr(nf90_def_dim(ncid,'grid_size',& + & grid%grid_size,idimsize),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_corners',& + & grid%grid_corners,idimcorners),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_rank',& + & grid%grid_rank,idimrank),& + & __LINE__,__MYFILE__) + + idims1rank(1) = idimrank + + idims1size(1) = idimsize + + idims2(1) = idimcorners + idims2(2) = idimsize + + ! Define variables + + CALL nchdlerr(nf90_def_var(ncid,'grid_dims',& + & nf90_int,idims1rank,iddims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lat',& + & nf90_double,idims1size,idcentlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlat,'units',& + & grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lon',& + & nf90_double,idims1size,idcentlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlon,'units',& + & grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_imask',& + & nf90_int,idims1size,idimask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idimask,'units',& + & grid%grid_imask_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lat',& + & nf90_double,idims2,idcornlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlat,'units',& + & grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lon',& + & nf90_double,idims2,idcornlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlon,'units',& + & grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & TRIM(grid%title)),& + & __LINE__,__MYFILE__) + + ! End of netCDF definition phase + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + ! Write variables + + + CALL nchdlerr(nf90_put_var(ncid,iddims,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlat,& + & grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlon,& + & grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idimask,& + & grid%grid_imask), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlat,& + & grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlon,& + & grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ! Close file + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_write + + SUBROUTINE scripgrid_init( grid ) + + TYPE(scripgridtype) :: grid + + grid%grid_size=0 + grid%grid_corners=0 + grid%grid_rank=0 + grid%grid_center_lat_units='' + grid%grid_center_lon_units='' + grid%grid_imask_units='' + grid%grid_corner_lat_units='' + grid%grid_corner_lon_units='' + grid%title='' + + END SUBROUTINE scripgrid_init + + SUBROUTINE scripgrid_alloc( grid ) + + TYPE(scripgridtype) :: grid + + IF ( (grid%grid_size == 0) .OR. & + & (grid%grid_corners == 0) .OR. & + & (grid%grid_rank == 0) ) THEN + WRITE(*,*)'scripgridtype not initialized' + CALL abort + ENDIF + + ALLOCATE( & + & grid%grid_dims(grid%grid_rank), & + & grid%grid_center_lat(grid%grid_size), & + & grid%grid_center_lon(grid%grid_size), & + & grid%grid_corner_lat(grid%grid_corners, grid%grid_size), & + & grid%grid_corner_lon(grid%grid_corners, grid%grid_size), & + & grid%grid_imask(grid%grid_size) & + & ) + + END SUBROUTINE scripgrid_alloc + + SUBROUTINE scripgrid_dealloc( grid ) + + TYPE(scripgridtype) :: grid + + DEALLOCATE( & + & grid%grid_dims, & + & grid%grid_center_lat, & + & grid%grid_center_lon, & + & grid%grid_corner_lat, & + & grid%grid_corner_lon, & + & grid%grid_imask & + & ) + + END SUBROUTINE scripgrid_dealloc + +END MODULE scripgrid + +MODULE scripremap + +#if defined key_mpp_mpi + USE mpi +#endif + USE nctools + USE scrippar + USE scripgrid + + IMPLICIT NONE + + TYPE scripremaptype + INTEGER :: num_links + INTEGER :: num_wgts + TYPE(scripgridtype) :: src + TYPE(scripgridtype) :: dst + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_frac + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_frac + INTEGER, ALLOCATABLE, DIMENSION(:) :: src_address + INTEGER, ALLOCATABLE, DIMENSION(:) :: dst_address + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: remap_matrix + CHARACTER(len=scriplen) :: src_grid_area_units + CHARACTER(len=scriplen) :: dst_grid_area_units + CHARACTER(len=scriplen) :: src_grid_frac_units + CHARACTER(len=scriplen) :: dst_grid_frac_units + CHARACTER(len=scriplen) :: title + CHARACTER(len=scriplen) :: normalization + CHARACTER(len=scriplen) :: map_method + CHARACTER(len=scriplen) :: history + CHARACTER(len=scriplen) :: conventions + END TYPE scripremaptype + +CONTAINS + + SUBROUTINE scripremap_read_work(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid, dimid, varid + LOGICAL :: lcorners + + lcorners=.TRUE. + + CALL scripremap_init(remap) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_size),& + & __LINE__,__MYFILE__) + + + IF (nf90_inq_dimid(ncid,'src_grid_corners',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + lcorners=.FALSE. + remap%src%grid_corners=1 + ENDIF + + IF (lcorners) THEN + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + remap%dst%grid_corners=1 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_links',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_links),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_wgts),& + & __LINE__,__MYFILE__) + + CALL scripremap_alloc(remap) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + IF (lcorners) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ELSE + + remap%src%grid_corner_lat(:,:) = 0.0 + remap%src%grid_corner_lon(:,:) = 0.0 + remap%dst%grid_corner_lat(:,:) = 0.0 + remap%dst%grid_corner_lon(:,:) = 0.0 + remap%src%grid_corner_lat_units = '' + remap%src%grid_corner_lon_units = '' + remap%dst%grid_corner_lat_units = '' + remap%dst%grid_corner_lon_units = '' + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'normalization',remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'map_method',remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'history',remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'conventions',remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'dest_grid',remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'source_grid',remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripremap_read_work + + SUBROUTINE scripremap_read(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + CALL scripremap_read_work(cdfilename,remap) + + END SUBROUTINE scripremap_read + + + SUBROUTINE scripremap_read_sgl(cdfilename,remap,& + & mype,nproc,mycomm,linteronly) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + INTEGER :: mype,nproc,mycomm + LOGICAL :: linteronly + + INTEGER, DIMENSION(8) :: isizes + INTEGER :: ierr, ip + + IF (mype==0) THEN + CALL scripremap_read_work(cdfilename,remap) +#if defined key_mpp_mpi + isizes(1)=remap%src%grid_size + isizes(2)=remap%dst%grid_size + isizes(3)=remap%src%grid_corners + isizes(4)=remap%dst%grid_corners + isizes(5)=remap%src%grid_rank + isizes(6)=remap%dst%grid_rank + isizes(7)=remap%num_links + isizes(8)=remap%num_wgts + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + ELSE + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + CALL scripremap_init(remap) + remap%src%grid_size=isizes(1) + remap%dst%grid_size=isizes(2) + remap%src%grid_corners=isizes(3) + remap%dst%grid_corners=isizes(4) + remap%src%grid_rank=isizes(5) + remap%dst%grid_rank=isizes(6) + remap%num_links=isizes(7) + remap%num_wgts=isizes(8) + CALL scripremap_alloc(remap) +#endif + ENDIF + +#if defined key_mpp_mpi + + IF (.NOT.linteronly) THEN + + CALL mpi_bcast( remap%src%grid_dims, remap%src%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lat, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%dst%grid_dims, remap%dst%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src_grid_area, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%title, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%normalization, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%map_method, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%history, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%conventions, scriplen, & + & mpi_character, 0, mycomm, ierr ) + ENDIF + + CALL mpi_bcast( remap%src_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%remap_matrix, remap%num_wgts*remap%num_links, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask, remap%src%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask, remap%dst%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + +#endif + END SUBROUTINE scripremap_read_sgl + + SUBROUTINE scripremap_write(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid + INTEGER :: dimsgs,dimdgs,dimsgc,dimdgc,dimsgr,dimdgr,dimnl,dimnw + INTEGER :: dims1(1),dims2(2) + INTEGER :: idsgd,iddgd,idsgea,iddgea,idsgeo,iddgeo + INTEGER :: idsgoa,idsgoo,iddgoa,iddgoo,idsgim,iddgim,idsgar,iddgar + INTEGER :: idsgf,iddgf,idsga,iddga,idsa,idda,idrm + + CALL nchdlerr(nf90_create(TRIM(cdfilename),nf90_clobber,ncid), & + & __LINE__, __MYFILE__ ) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_size',& + & remap%src%grid_size,dimsgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_size',& + & remap%dst%grid_size,dimdgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_corners',& + & remap%src%grid_corners,dimsgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_corners',& + & remap%dst%grid_corners,dimdgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_rank',& + & remap%src%grid_rank,dimsgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_rank',& + & remap%dst%grid_rank,dimdgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & remap%num_links,dimnl),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & remap%num_wgts,dimnw),& + & __LINE__,__MYFILE__) + + dims1(1)=dimsgr + CALL nchdlerr(nf90_def_var(ncid,'src_grid_dims',& + & nf90_int,dims1,idsgd),& + & __LINE__,__MYFILE__) + + dims1(1)=dimdgr + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_dims',& + & nf90_int,dims1,iddgd), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lat',& + & nf90_double,dims1,idsgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lat',& + & nf90_double,dims1,iddgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lon',& + & nf90_double,dims1,idsgeo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lon',& + & nf90_double,dims1,iddgeo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lat',& + & nf90_double,dims2,idsgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lon',& + & nf90_double,dims2,idsgoo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lat',& + & nf90_double,dims2,iddgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lon',& + & nf90_double,dims2,iddgoo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_imask',& + & nf90_int,dims1,idsgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_imask',& + & nf90_int,dims1,iddgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_area',& + & nf90_double,dims1,idsga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_area',& + & nf90_double,dims1,iddga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_frac',& + & nf90_double,dims1,idsgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_frac',& + & nf90_double,dims1,iddgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda), & + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,idsgea,'units',& + & remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgea,'units',& + & remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgeo,'units',& + & remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgeo,'units',& + & remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoa,'units',& + & remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoo,'units',& + & remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoa,'units',& + & remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoo,'units',& + & remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgim,'units',& + & remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgim,'units',& + & remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsga,'units',& + & remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddga,'units',& + & remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgf,'units',& + & remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgf,'units',& + & remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'normalization',& + & remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'map_method',& + & remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'history',& + & remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'conventions',& + & remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'dest_grid',& + & remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'source_grid',& + & remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgd,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgd,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgea,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgea,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgeo,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgeo,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgoa,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgoo,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoa,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoo,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgim,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgim,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsga,remap%src_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddga,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgf,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgf,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsa,remap%src_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idda,remap%dst_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idrm,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE scripremap_write + + SUBROUTINE scripremap_init(remap) + + TYPE(scripremaptype) :: remap + + CALL scripgrid_init(remap%src) + CALL scripgrid_init(remap%dst) + remap%num_links = 0 + remap%num_wgts = 0 + remap%title='' + remap%normalization='' + remap%map_method='' + remap%history='' + remap%conventions='' + remap%src_grid_area_units='' + remap%dst_grid_area_units='' + remap%src_grid_frac_units='' + remap%dst_grid_frac_units='' + + END SUBROUTINE scripremap_init + + SUBROUTINE scripremap_alloc(remap) + + TYPE(scripremaptype) :: remap + + IF ( (remap%num_links == 0) .OR. & + & (remap%num_wgts == 0) ) THEN + WRITE(*,*)'scripremaptype not initialized' + CALL abort + ENDIF + + CALL scripgrid_alloc(remap%src) + CALL scripgrid_alloc(remap%dst) + + ALLOCATE( & + & remap%src_grid_area(remap%src%grid_size), & + & remap%dst_grid_area(remap%dst%grid_size), & + & remap%src_grid_frac(remap%src%grid_size), & + & remap%dst_grid_frac(remap%dst%grid_size), & + & remap%src_address(remap%num_links), & + & remap%dst_address(remap%num_links), & + & remap%remap_matrix(remap%num_wgts, remap%num_links) & + & ) + + END SUBROUTINE scripremap_alloc + + SUBROUTINE scripremap_dealloc(remap) + + TYPE(scripremaptype) :: remap + + DEALLOCATE( & + & remap%src_grid_area, & + & remap%dst_grid_area, & + & remap%src_grid_frac, & + & remap%dst_grid_frac, & + & remap%src_address, & + & remap%dst_address, & + & remap%remap_matrix & + & ) + + CALL scripgrid_dealloc(remap%src) + CALL scripgrid_dealloc(remap%dst) + + CALL scripremap_init(remap) + + END SUBROUTINE scripremap_dealloc + +END MODULE scripremap + +MODULE parinter + +#if defined key_mpp_mpi + USE mpi +#endif + USE scripremap + USE scrippar + USE nctools + + IMPLICIT NONE + + ! Type to contains interpolation information + ! (like what is in scripremaptype) and message + ! passing information + + TYPE parinterinfo + ! Number of local links + INTEGER :: num_links + ! Destination side + INTEGER, POINTER, DIMENSION(:) :: dst_address + ! Source addresses and work array + INTEGER, POINTER, DIMENSION(:) :: src_address + ! Local remap matrix + REAL(scripdp), POINTER, DIMENSION(:,:) :: remap_matrix + ! Message passing information + ! Array of local addresses for send buffer + ! packing + INTEGER, POINTER, DIMENSION(:) :: send_address + ! Sending bookkeeping + INTEGER :: nsendtot + INTEGER, POINTER, DIMENSION(:) :: nsend,nsdisp + ! Receiving bookkeeping + INTEGER :: nrecvtot + INTEGER, POINTER, DIMENSION(:) :: nrecv,nrdisp + END TYPE parinterinfo + +CONTAINS + + SUBROUTINE parinter_init( mype, nproc, mpi_comm, & + & nsrclocpoints, nsrcglopoints, srcmask, srcgloind, & + & ndstlocpoints, ndstglopoints, dstmask, dstgloind, & + & remap, pinfo, lcommout, commoutprefix, iunit ) + + ! Setup interpolation based on SCRIP format weights in + ! remap and the source/destination grids information. + + ! Procedure: + + ! 1) A global SCRIP remapping file is read on all processors. + ! 2) Find local destination points in the global grid. + ! 3) Find which processor needs source data and setup buffer + ! information for sending data. + ! 4) Construct new src remapping for buffer received + + ! All information is stored in the TYPE(parinterinfo) output + ! data type + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc, mpi_comm + ! Source grid local and global number of grid points + INTEGER, INTENT(IN) :: nsrclocpoints, nsrcglopoints + ! Source integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcmask + ! Source global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcgloind + ! Destination grid local and global number of grid points + INTEGER, INTENT(IN) :: ndstlocpoints, ndstglopoints + ! Destination integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstmask + ! Destination global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstgloind + ! SCRIP remapping data + TYPE(scripremaptype) :: remap + ! Switch for output communication patterns + LOGICAL :: lcommout + CHARACTER(len=*) :: commoutprefix + ! Unit to use for output + INTEGER :: iunit + + ! Output arguments + + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + + ! Local variable + + ! Variable for glocal <-> local address/pe information + INTEGER, DIMENSION(nsrcglopoints) :: ilsrcmppmap, ilsrclocind + INTEGER, DIMENSION(nsrcglopoints) :: igsrcmppmap, igsrclocind + INTEGER, DIMENSION(ndstglopoints) :: ildstmppmap, ildstlocind + INTEGER, DIMENSION(ndstglopoints) :: igdstmppmap, igdstlocind + INTEGER, DIMENSION(nsrcglopoints) :: isrcpe,isrcpetmp + INTEGER, DIMENSION(nsrcglopoints) :: isrcaddtmp + INTEGER, DIMENSION(0:nproc-1) :: isrcoffset + INTEGER, DIMENSION(nproc) :: isrcno, isrcoff, isrccur + INTEGER, DIMENSION(nproc) :: ircvoff, ircvcur + INTEGER, DIMENSION(:), ALLOCATABLE :: isrctot, ircvtot + + ! Misc variable + INTEGER :: i,n,pe + INTEGER :: istatus + CHARACTER(len=256) :: cdfile + + ! Check that masks are consistent. + + ! Remark: More consistency tests between remapping information + ! and input argument could be code, but for now we settle + ! for checking the masks. + + ! Source grid + + DO i=1,nsrclocpoints + IF (srcmask(i)/=remap%src%grid_imask(srcgloind(i))) THEN + WRITE(iunit,*)'Source imask is inconsistent at ' + WRITE(iunit,*)'global index = ',srcgloind(i) + WRITE(iunit,*)'Source mask = ',srcmask(i) + WRITE(iunit,*)'Remap mask = ',remap%src%grid_imask(srcgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%src%grid_center_lat(srcgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%src%grid_center_lon(srcgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Destination grid + + DO i=1,ndstlocpoints + IF (dstmask(i)/=remap%dst%grid_imask(dstgloind(i))) THEN + WRITE(iunit,*)'Destination imask is inconsistent at ' + WRITE(iunit,*)'global index = ',dstgloind(i) + WRITE(iunit,*)'Destin mask = ',dstmask(i) + WRITE(iunit,*)'Remap mask = ',remap%dst%grid_imask(dstgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%dst%grid_center_lat(dstgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%dst%grid_center_lon(dstgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Setup global to local and vice versa mappings. + + ilsrcmppmap(:)=-1 + ilsrclocind(:)=0 + ildstmppmap(:)=-1 + ildstlocind(:)=0 + + DO i=1,nsrclocpoints + ilsrcmppmap(srcgloind(i))=mype + ilsrclocind(srcgloind(i))=i + ENDDO + + DO i=1,ndstlocpoints + ildstmppmap(dstgloind(i))=mype + ildstlocind(dstgloind(i))=i + ENDDO + +#if defined key_mpp_mpi + CALL mpi_allreduce(ilsrcmppmap,igsrcmppmap,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ilsrclocind,igsrclocind,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstmppmap,igdstmppmap,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstlocind,igdstlocind,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) +#else + igsrcmppmap(:)=ilsrcmppmap(:) + igsrclocind(:)=ilsrclocind(:) + igdstmppmap(:)=ildstmppmap(:) + igdstlocind(:)=ildstlocind(:) +#endif + + ! Optionally construct an ascii file listing what src and + ! dest points belongs to which task + + ! Since igsrcmppmap and igdstmppmap are global data only do + ! this for mype==0. + + IF (lcommout.AND.(mype==0)) THEN + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_srcmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,nsrcglopoints + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & igsrcmppmap(i)+1,remap%src%grid_imask(i) + ENDDO + CLOSE(9) + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dstmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,ndstglopoints + WRITE(9,*)remap%dst%grid_center_lat(i),& + & remap%dst%grid_center_lon(i), & + & igdstmppmap(i)+1,remap%dst%grid_imask(i) + ENDDO + CLOSE(9) + ENDIF + + ! + ! Standard interpolation in serial case is + ! + ! DO n=1,remap%num_links + ! zdst(remap%dst_address(n)) = zdst(remap%dst_address(n)) + & + ! & remap%remap_matrix(1,n)*zsrc(remap%src_address(n)) + ! END DO + ! + + ! In parallel we need to first find local number of links + + pinfo%num_links=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) & + & pinfo%num_links=pinfo%num_links+1 + ENDDO + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(1,pinfo%num_links)) + + ! Get local destination addresses + + n=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + n=n+1 + pinfo%dst_address(n)=& + & igdstlocind(remap%dst_address(i)) + pinfo%remap_matrix(:,n)=& + & remap%remap_matrix(:,i) + ENDIF + ENDDO + + ! Get sending processors maps. + + ! The same data point might need to be sent to many processors + ! so first construct a map for processors needing the data + + isrcpe(:)=-1 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + isrcpe(remap%src_address(i))=& + & igsrcmppmap(remap%src_address(i)) + ENDIF + ENDDO + + ! Optionally write a set if ascii file listing which tasks + ! mype needs to send to communicate with + + IF (lcommout) THEN + ! Destination processors + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dsts_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO pe=0,nproc-1 + IF (pe==mype) THEN + isrcpetmp(:)=isrcpe(:) + ENDIF +#if defined key_mpp_mpi + CALL mpi_bcast(isrcpetmp,nsrcglopoints,mpi_integer,pe,mpi_comm,istatus) +#endif + DO i=1,nsrcglopoints + IF (isrcpetmp(i)==mype) THEN + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & pe+1,mype+1 + ENDIF + ENDDO + ENDDO + CLOSE(9) + ENDIF + + ! Get number of points to send to each processor + + ALLOCATE(pinfo%nsend(0:nproc-1)) + isrcno(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrcno(isrcpe(i)+1)=isrcno(isrcpe(i)+1)+1 + ENDIF + ENDDO +#if defined key_mpp_mpi + CALL mpi_alltoall(isrcno,1,mpi_integer, & + & pinfo%nsend(0:nproc-1),1,mpi_integer, & + & mpi_comm,istatus) +#else + pinfo%nsend(0:nproc-1) = isrcno(1:nproc) +#endif + pinfo%nsendtot=SUM(pinfo%nsend(0:nproc-1)) + + ! Construct sending buffer mapping. Data is mapping in + ! processor order. + + ALLOCATE(pinfo%send_address(pinfo%nsendtot)) + + ! Temporary arrays for mpi all to all. + + ALLOCATE(isrctot(SUM(isrcno(1:nproc)))) + ALLOCATE(ircvtot(SUM(pinfo%nsend(0:nproc-1)))) + + ! Offset for message parsing + + isrcoff(1)=0 + ircvoff(1)=0 + DO i=1,nproc-1 + isrcoff(i+1) = isrcoff(i) + isrcno(i) + ircvoff(i+1) = pinfo%nsend(i-1) + ircvoff(i) + ENDDO + + ! Pack indices i into a buffer + + isrccur(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrccur(isrcpe(i)+1)=isrccur(isrcpe(i)+1)+1 + isrctot(isrccur(isrcpe(i)+1)+isrcoff(isrcpe(i)+1)) = i + ENDIF + ENDDO + + ! Send the data + +#if defined key_mpp_mpi + CALL mpi_alltoallv(& + & isrctot,isrccur,isrcoff,mpi_integer, & + & ircvtot,pinfo%nsend(0:nproc-1),ircvoff,mpi_integer, & + & mpi_comm,istatus) +#else + ircvtot(:)=isrctot(:) +#endif + + ! Get the send address. ircvtot will at this point contain the + ! addresses in the global index needed for message passing + + DO i=1,pinfo%nsendtot + pinfo%send_address(i)=igsrclocind(ircvtot(i)) + ENDDO + + ! Deallocate the mpi all to all arrays + + DEALLOCATE(ircvtot,isrctot) + + ! Get number of points to receive to each processor + + ALLOCATE(pinfo%nrecv(0:nproc-1)) + pinfo%nrecv(0:nproc-1)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0 .AND. isrcpe(i)=0 .AND. isrcpe(i)0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & pinfo%num_links,dimnl),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & 1,dimnw),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nsendtot',& + & pinfo%nsendtot,dimnst),& + & __LINE__,__MYFILE__) + ENDIF + + IF (pinfo%nrecvtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nrecvtot',& + & pinfo%nrecvtot,dimnrt),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'nproc',& + & nproc,dimnpr),& + & __LINE__,__MYFILE__) + + IF (pinfo%num_links>0) THEN + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa),& + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsend',& + & nf90_int,dims1,idns),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + dims1(1)=dimnst + CALL nchdlerr(nf90_def_var(ncid,'send_address',& + & nf90_int,dims1,idsaa),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrecv',& + & nf90_int,dims1,idnr),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsdisp',& + & nf90_int,dims1,idnsp),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrdisp',& + & nf90_int,dims1,idnrp),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + + IF (pinfo%num_links>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idda,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsa,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idrm,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idns,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idsaa,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idnr,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnsp,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnrp,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE parinter_write + + SUBROUTINE parinter_read( mype, nproc, & + & nsrcglopoints, ndstglopoints, & + & pinfo, cdpath, cdprefix, lexists ) + + ! Write pinfo information in a netCDF file in order to + ! be able to read it rather than calling parinter_init + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc + ! Source grid local global number of grid points + INTEGER, INTENT(IN) :: nsrcglopoints + ! Destination grid global number of grid points + INTEGER, INTENT(IN) :: ndstglopoints + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + ! Does the information exists + LOGICAL :: lexists + ! Path and file prefix + CHARACTER(len=*) :: cdpath, cdprefix + + ! Local variable + + ! Misc variable + CHARACTER(len=1024) :: cdfile + INTEGER :: ncid, dimid, varid, num_wgts + + WRITE(cdfile,'(A,2(I8.8,A),2(I4.4,A),A)') & + & TRIM(cdpath)//'/'//TRIM(cdprefix)//'_', & + & nsrcglopoints,'_',ndstglopoints,'_',mype,'_',nproc,'.nc' + + + lexists=nf90_open(TRIM(cdfile),nf90_nowrite,ncid)==nf90_noerr + + IF (lexists) THEN + + ! If num_links is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'num_links',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%num_links),& + & __LINE__,__MYFILE__) + ELSE + pinfo%num_links=0 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=num_wgts),& + & __LINE__,__MYFILE__) + IF (num_wgts/=1) THEN + WRITE(0,*)'parinter_read: num_wgts has to be 1 for now' + CALL abort + ENDIF + + ! If nsendtot is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'nsendtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nsendtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nsendtot=0 + ENDIF + + IF(nf90_inq_dimid(ncid,'nrecvtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nrecvtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nrecvtot=0 + ENDIF + + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(num_wgts,pinfo%num_links),& + & pinfo%nsend(0:nproc-1),& + & pinfo%send_address(pinfo%nsendtot),& + & pinfo%nrecv(0:nproc-1),& + & pinfo%nsdisp(0:nproc-1),& + & pinfo%nrdisp(0:nproc-1)) + + IF (pinfo%num_links>0) THEN + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nsend',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'send_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nrecv',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nsdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nrdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + ENDIF + + END SUBROUTINE parinter_read + +END MODULE parinter + MODULE interinfo ! Parallel regridding information @@ -34,5 +1855,3 @@ MODULE interinfo LOGICAL :: lparbcast = .FALSE. END MODULE interinfo - - From 71ff648656de81309bb6ca8a6f5f64f7cc2b2840 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Thu, 7 Jun 2018 17:29:42 +0100 Subject: [PATCH 013/909] Fix dependencies. --- Makefile | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/Makefile b/Makefile index 8bd75b807..84067b7ca 100644 --- a/Makefile +++ b/Makefile @@ -34,20 +34,7 @@ clean: rm -f *.o *.mod *~ *.x *.pp.f90 *.pp.f *.lst *.in *.nc *.grb *.a rm -rf *.dSYM -scripremap.o: nctools.o scrippar.o scripgrid.o -scripgrid.o: nctools.o scrippar.o -parinter.o: scripremap.o scrippar.o -interinfo.o: parinter.o -nemogcmcoup_mlflds_get.o: par_kind.o -nemogcmcoup_exflds_get.o: par_kind.o -nemogcmcoup_coup.o: par_kind.o -nemogcmcoup_coupinit.o: scripremap.o parinter.o interinfo.o -nemogcmcoup_wam_update.o: par_kind.o -nemogcmcoup_wam_get.o: par_kind.o -nemogcmcoup_wam_update_stress.o: par_kind.o -nemogcmcoup_mlinit.o: par_kind.o -nemogcmcoup_update_add.o: par_kind.o -nemogcmcoup_update.o: par_kind.o -nemogcmcoup_lim2_update.o: par_kind.o -nemogcmcoup_get.o: par_kind.o -nemogcmcoup_lim2_get.o: par_kind.o +ifs_interface.o: ifs_modules.o +ifs_notused.o: ifs_modules.o + + From 24c5a3c466f773bbcd8ac0ac5afcab2b30866177 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Thu, 7 Jun 2018 17:58:31 +0100 Subject: [PATCH 014/909] Test initialization of coupling. --- .gitignore | 4 ++++ ifs_interface.F90 | 14 ++++++++++---- 2 files changed, 14 insertions(+), 4 deletions(-) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..c15ee62d1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +*.o +*.f90 +*.mod +*.a diff --git a/ifs_interface.F90 b/ifs_interface.F90 index 82537dcca..59fff624c 100644 --- a/ifs_interface.F90 +++ b/ifs_interface.F90 @@ -29,7 +29,7 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & LOGICAL :: lwrite WRITE(0,*)'Insert FESOM init here.' - CALL abort +! CALL abort ! Set information for the caller @@ -39,6 +39,12 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & itini = nit000 itend = nn_itend zstp = rdttra(1) +#else + inidate = 20170906 + initime = 0 + itini = 1 + itend = 24 + zstp = 3600.0 #endif END SUBROUTINE nemogcmcoup_init @@ -73,7 +79,7 @@ SUBROUTINE nemogcmcoup_coupinit( mype, npes, icomm, & LOGICAL :: lwritedist, lreaddist LOGICAL :: lcommout CHARACTER(len=128) :: commoutprefix - NAMELIST/namnemocoup/cdfile_gauss_to_T,& + NAMELIST/namfesomcoup/cdfile_gauss_to_T,& & cdfile_gauss_to_UV,& & cdfile_T_to_gauss,& & cdfile_UV_to_gauss,& @@ -108,8 +114,8 @@ SUBROUTINE nemogcmcoup_coupinit( mype, npes, icomm, & lreaddist = .FALSE. lwritedist = .FALSE. - OPEN(9,file='namnemocoup.in') - READ(9,namnemocoup) + OPEN(9,file='namfesomcoup.in') + READ(9,namfesomcoup) CLOSE(9) ! Global number of Gaussian gridpoints From 604547850a96ee018bd50a33932427f9d0a72404 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 8 Jun 2018 10:43:51 +0100 Subject: [PATCH 015/909] Restructuring of program main into 3 major parts works. --- src/fvom_main.F90 | 136 ++++++++++++++++++++++++++++++++++++---------- 1 file changed, 107 insertions(+), 29 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index a8569c986..c5cefa35e 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -7,30 +7,50 @@ !=============================================================================! program main -USE o_MESH -USE o_ARRAYS -USE o_PARAM -USE g_PARSUP -USE i_PARAM -use i_ARRAYS -use g_clock -use g_config -use g_forcing_index -use g_comm_auto -use g_forcing_arrays -use io_RESTART -use io_MEANDATA -use io_mesh_info -use diagnostics + + use g_PARSUP, only: mype + integer :: nsteps + + call main_initialize(nsteps) + if (mype==0) write(*,*) 'Initialization complete.' + + call main_timestepping(nsteps) + if (mype==0) write(*,*) 'Timestepping complete...' + + call main_finalize + if (mype==0) write(*,*) 'Finalization complete...' + +end program main + +subroutine main_initialize(nsteps) + ! Split main into three major parts + ! Coded by Thomas Rackow, 2018 + !---------------------------------- + USE o_MESH + USE o_ARRAYS + USE o_PARAM + USE g_PARSUP + USE i_PARAM + use i_ARRAYS + use g_clock + use g_config + use g_forcing_index + use g_comm_auto + use g_forcing_arrays + use io_RESTART + use io_MEANDATA + use io_mesh_info + use diagnostics #if defined (__oasis) -use cpl_driver + use cpl_driver #endif -IMPLICIT NONE + IMPLICIT NONE -integer :: n, nsteps, offset, row, i + integer :: n, offset, row, i + integer, INTENT(OUT) :: nsteps #ifndef __oifs - !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) + !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular !dependencies between modules (cpl_driver and g_PARSUP) !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS call MPI_INIT(i) #endif @@ -91,14 +111,43 @@ program main if (r_restart .and. use_ALE) then call restart_thickness_ale end if - - + +end subroutine main_initialize + + +subroutine main_timestepping(nsteps) + ! Split main into three major parts + ! Coded by Thomas Rackow, 2018 + !---------------------------------- + USE o_MESH + USE o_ARRAYS + USE o_PARAM + USE g_PARSUP + USE i_PARAM + use i_ARRAYS + use g_clock + use g_config + use g_forcing_index + use g_comm_auto + use g_forcing_arrays + use io_RESTART + use io_MEANDATA + use io_mesh_info + use diagnostics +#if defined (__oasis) + use cpl_driver +#endif + IMPLICIT NONE + + integer :: n, offset, row, i + integer, INTENT(IN) :: nsteps + !===================== ! Time stepping !===================== - if (mype==0) write(*,*) 'FESOM start interation before the barrier...' + if (mype==0) write(*,*) 'FESOM start integration before the barrier...' call MPI_Barrier(MPI_COMM_FESOM, MPIERR) - if (mype==0) write(*,*) 'FESOM start interation after the barrier...' + if (mype==0) write(*,*) 'FESOM start integration after the barrier...' !___MODEL TIME STEPPING LOOP________________________________________________ @@ -148,9 +197,38 @@ program main call output (n) call restart(n, .false., .false.) end do - - !___FINISH MODEL RUN________________________________________________________ - if (mype==0) write(*,*) 'FESOM Run is finished, updating clock' - call clock_finish - call par_ex -end program main +end subroutine main_timestepping + + +subroutine main_finalize + ! Split main into three major parts + ! Coded by Thomas Rackow, 2018 + !---------------------------------- + USE o_MESH + USE o_ARRAYS + USE o_PARAM + USE g_PARSUP + USE i_PARAM + use i_ARRAYS + use g_clock + use g_config + use g_forcing_index + use g_comm_auto + use g_forcing_arrays + use io_RESTART + use io_MEANDATA + use io_mesh_info + use diagnostics +#if defined (__oasis) + use cpl_driver +#endif + IMPLICIT NONE + + integer :: n, nsteps, offset, row, i + + !___FINISH MODEL RUN________________________________________________________ + if (mype==0) write(*,*) 'FESOM Run is finished, updating clock' + call clock_finish + call par_ex + +end subroutine main_finalize From d9d384460d9827b703757ef1eceaa1103c5e8db9 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 8 Jun 2018 13:28:15 +0100 Subject: [PATCH 016/909] Clean up of finalize routine. --- src/fvom_main.F90 | 32 ++++++++------------------------ 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index c5cefa35e..2079f4fa6 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -46,13 +46,13 @@ subroutine main_initialize(nsteps) #endif IMPLICIT NONE - integer :: n, offset, row, i + integer :: n, offset, ierr integer, INTENT(OUT) :: nsteps #ifndef __oifs !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular !dependencies between modules (cpl_driver and g_PARSUP) !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS - call MPI_INIT(i) + call MPI_INIT(ierr) #endif @@ -139,7 +139,7 @@ subroutine main_timestepping(nsteps) #endif IMPLICIT NONE - integer :: n, offset, row, i + integer :: n integer, INTENT(IN) :: nsteps !===================== @@ -204,30 +204,14 @@ subroutine main_finalize ! Split main into three major parts ! Coded by Thomas Rackow, 2018 !---------------------------------- - USE o_MESH - USE o_ARRAYS - USE o_PARAM - USE g_PARSUP - USE i_PARAM - use i_ARRAYS - use g_clock - use g_config - use g_forcing_index - use g_comm_auto - use g_forcing_arrays - use io_RESTART - use io_MEANDATA - use io_mesh_info - use diagnostics -#if defined (__oasis) - use cpl_driver -#endif - IMPLICIT NONE - integer :: n, nsteps, offset, row, i + USE g_PARSUP, only: mype, par_ex + use g_clock, only: clock_finish + + IMPLICIT NONE !___FINISH MODEL RUN________________________________________________________ - if (mype==0) write(*,*) 'FESOM Run is finished, updating clock' + if (mype==0) write(*,*) 'FESOM run is finished, updating clock' call clock_finish call par_ex From 68424cb6cb71471618c9596c4662840f15e227d9 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 8 Jun 2018 15:57:35 +0100 Subject: [PATCH 017/909] Cleanup of initialization routine, using only-statements and deleting unused modules. Other used routines are not in modules. --- src/fvom_main.F90 | 49 +++++++++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 2079f4fa6..474dbab0c 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -22,31 +22,30 @@ program main end program main + + +!=============================================================================! +! Initialization +!=============================================================================! + subroutine main_initialize(nsteps) ! Split main into three major parts ! Coded by Thomas Rackow, 2018 !---------------------------------- - USE o_MESH - USE o_ARRAYS - USE o_PARAM - USE g_PARSUP - USE i_PARAM - use i_ARRAYS - use g_clock - use g_config - use g_forcing_index - use g_comm_auto - use g_forcing_arrays - use io_RESTART - use io_MEANDATA - use io_mesh_info - use diagnostics + USE g_PARSUP, only: mype, par_init + USE i_PARAM, only: ice_ave_steps, whichEVP + use i_ARRAYS, only: ice_steps_since_upd, ice_update + use g_clock, only: clock_init, clock_newyear + use g_config, only: use_ice, r_restart, use_ALE + use io_RESTART, only: restart + use io_mesh_info, only: write_mesh_info + use diagnostics, only: compute_diagnostics #if defined (__oasis) use cpl_driver #endif IMPLICIT NONE - integer :: n, offset, ierr + integer :: ierr integer, INTENT(OUT) :: nsteps #ifndef __oifs @@ -60,7 +59,13 @@ subroutine main_initialize(nsteps) call cpl_oasis3mct_init(MPI_COMM_FESOM) #endif + ! sets npes and mype call par_init + if (mype==0) write(*,*) '!=============================================================================!' + if (mype==0) write(*,*) '! Welcome to the ' + if (mype==0) write(*,*) '! Finite Volume Sea-ice Ocean Model (FESOM2) ' + if (mype==0) write(*,*) '!=============================================================================!' + !===================== ! Read configuration data, ! load the mesh and fill in @@ -107,7 +112,7 @@ subroutine main_initialize(nsteps) if (.not. r_restart) call write_mesh_info !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ - !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ + !___MIDDEPTH LEVELS NEEDS TO BE CALCULATED AT RESTART_______________________ if (r_restart .and. use_ALE) then call restart_thickness_ale end if @@ -115,6 +120,11 @@ subroutine main_initialize(nsteps) end subroutine main_initialize + +!=============================================================================! +! Timestepping +!=============================================================================! + subroutine main_timestepping(nsteps) ! Split main into three major parts ! Coded by Thomas Rackow, 2018 @@ -200,6 +210,11 @@ subroutine main_timestepping(nsteps) end subroutine main_timestepping + +!=============================================================================! +! Finalization +!=============================================================================! + subroutine main_finalize ! Split main into three major parts ! Coded by Thomas Rackow, 2018 From f618afb0d2e17a119dc1f516da359d0eebb15f1d Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 8 Jun 2018 16:09:03 +0100 Subject: [PATCH 018/909] Added environment and jobscript for workstation (at ECMWF). Could also work at AWI? --- env/workstation/shell | 13 +++++++++++++ work/job_workstation | 23 +++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 env/workstation/shell create mode 100755 work/job_workstation diff --git a/env/workstation/shell b/env/workstation/shell new file mode 100644 index 000000000..4c2f2f1ff --- /dev/null +++ b/env/workstation/shell @@ -0,0 +1,13 @@ +# used at ECMWF + +module load netcdf4 +module load hdf5 +#following 4 not needed? +#module load eccodes +#module load emos +#module load fftw +#module load boost +module load openmpi + +export FC=mpif90 CC=mpicc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly +#export FC=mpif90 CC=gcc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly diff --git a/work/job_workstation b/work/job_workstation new file mode 100755 index 000000000..30266352e --- /dev/null +++ b/work/job_workstation @@ -0,0 +1,23 @@ +#!/bin/bash +source ../env.sh # source this from your run script + +path=`pwd` +echo Initial path: $path + +mkdir -p /scratch/rd/natr/run +cd /scratch/rd/natr/run + +# debug +set -x + +cp -n $HOME/fesom2/bin/fesom.x . #../bin/fesom.x . # ln -s ../bin/fesom.x +cp -n $HOME/fesom2/config/namelist.config . #../config/namelist.config . +cp -n $HOME/fesom2/config/namelist.forcing . #../config/namelist.forcing . +cp -n $HOME/fesom2/config/namelist.oce . #../config/namelist.oce . +cp -n $HOME/fesom2/config/namelist.ice . #../config/namelist.ice . + +date +#aprun -N $EC_tasks_per_node -n $EC_total_tasks -j $EC_hyperthreads ./fesom.x > "fesom2.out" +#./fesom.x > "fesom2.out" +mpirun -np 4 ./fesom.x > "fesom2.out" +date From 2dd42013ac108aa9022867f5f0685c75e140387d Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 8 Jun 2018 16:48:17 +0100 Subject: [PATCH 019/909] Move everything to src/ before merge. --- src/Makefile_dummy | 40 + src/ifs_interface.F90 | 1314 +++++++++++++++++++++++++++++ src/ifs_modules.F90 | 1857 +++++++++++++++++++++++++++++++++++++++++ src/ifs_notused.F90 | 356 ++++++++ 4 files changed, 3567 insertions(+) create mode 100644 src/Makefile_dummy create mode 100644 src/ifs_interface.F90 create mode 100644 src/ifs_modules.F90 create mode 100644 src/ifs_notused.F90 diff --git a/src/Makefile_dummy b/src/Makefile_dummy new file mode 100644 index 000000000..84067b7ca --- /dev/null +++ b/src/Makefile_dummy @@ -0,0 +1,40 @@ +.SUFFIXES: +.SUFFIXES: .F90 .F .f .o + +CPP=cpp +FC=mpif90 +LD=$(FC) +FCFLAGSFIXED=-g -c -O3 -fdefault-real-8 -fdefault-double-8 -fcray-pointer -fconvert=swap -fopenmp $(NETCDF_INCLUDE) $(GRIB_API_INCLUDE) +FCFLAGSFREE=$(FCFLAGSFIXED) +CPPFLAGS=-traditional -P +LDFLAGS=-g -O3 -fdefault-real-8 -fdefault-double-8 -fcray-pointer -fconvert=swap -fopenmp $(MAGPLUSLIB_SHARED) $(NETCDF_LIB) $(GRIB_API_LIB) +AR=ar +ARFLAGS=-rv + +OBJ=ifs_modules.o ifs_interface.o ifs_notused.o + +all: libfesom.a + +.F90.o: + $(CPP) $(CPPFLAGS) $< > $*.pp.f90 + $(FC) $(FCFLAGSFREE) $*.pp.f90 -o $*.o + +.F.o: + $(CPP) $(CPPFLAGS) $< > $*.pp.f + $(FC) $(FCFLAGSFIXED) $*.pp.f -o $*.o + +.f.o: + $(CPP) $(CPPFLAGS) $< > $*.pp.f + $(FC) $(FCFLAGSFIXED) $*.pp.f -o $*.o + +libfesom.a: $(OBJ) + $(AR) $(ARFLAGS) $@ $(OBJ) + +clean: + rm -f *.o *.mod *~ *.x *.pp.f90 *.pp.f *.lst *.in *.nc *.grb *.a + rm -rf *.dSYM + +ifs_interface.o: ifs_modules.o +ifs_notused.o: ifs_modules.o + + diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 new file mode 100644 index 000000000..59fff624c --- /dev/null +++ b/src/ifs_interface.F90 @@ -0,0 +1,1314 @@ +!===================================================== +! IFS interface for calling FESOM2 as a subroutine. +! +! -Original code for NEMO by Kristian Mogensen, ECMWF. +!----------------------------------------------------- + +SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & + & lwaveonly, iatmunit, lwrite ) + + ! Initialize the NEMO model for single executable coupling + + USE par_kind + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: icomm + ! Initial date, time, initial timestep and final time step + INTEGER, INTENT(OUT) :: inidate, initime, itini, itend + ! Length of the time step + REAL(wp), INTENT(OUT) :: zstp + ! Coupling to waves only + LOGICAL, INTENT(IN) :: lwaveonly + ! Logfile unit (used if >=0) + INTEGER :: iatmunit + ! Write to this unit + LOGICAL :: lwrite + + WRITE(0,*)'Insert FESOM init here.' +! CALL abort + + ! Set information for the caller + +#ifdef FESOM_TODO + inidate = nn_date0 + initime = nn_time0*3600 + itini = nit000 + itend = nn_itend + zstp = rdttra(1) +#else + inidate = 20170906 + initime = 0 + itini = 1 + itend = 24 + zstp = 3600.0 +#endif + +END SUBROUTINE nemogcmcoup_init + + +SUBROUTINE nemogcmcoup_coupinit( mype, npes, icomm, & + & npoints, nlocmsk, ngloind ) + + ! Initialize single executable coupling + USE parinter + USE scripremap + USE interinfo + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Gaussian grid information + ! Number of points + INTEGER, INTENT(IN) :: npoints + ! Integer mask and global indices + INTEGER, DIMENSION(npoints), INTENT(IN) :: nlocmsk, ngloind + INTEGER :: iunit = 0 + + ! Local variables + + ! Namelist containing the file names of the weights + CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & + & cdfile_T_to_gauss, cdfile_UV_to_gauss + CHARACTER(len=256) :: cdpathdist + LOGICAL :: lwritedist, lreaddist + LOGICAL :: lcommout + CHARACTER(len=128) :: commoutprefix + NAMELIST/namfesomcoup/cdfile_gauss_to_T,& + & cdfile_gauss_to_UV,& + & cdfile_T_to_gauss,& + & cdfile_UV_to_gauss,& + & cdpathdist, & + & lreaddist, & + & lwritedist, & + & lcommout, & + & commoutprefix,& + & lparbcast + + ! Global number of gaussian gridpoints + INTEGER :: nglopoints + ! Ocean grids accessed with NEMO modules + INTEGER :: noglopoints,nopoints + INTEGER, ALLOCATABLE, DIMENSION(:) :: omask,ogloind + ! SCRIP remapping data structures. + TYPE(scripremaptype) :: remap_gauss_to_T, remap_T_to_gauss, & + & remap_gauss_to_UV, remap_UV_to_gauss + ! Misc variables + INTEGER :: i,j,k,ierr + LOGICAL :: lexists + + ! Read namelists + + cdfile_gauss_to_T = 'gausstoT.nc' + cdfile_gauss_to_UV = 'gausstoUV.nc' + cdfile_T_to_gauss = 'Ttogauss.nc' + cdfile_UV_to_gauss = 'UVtogauss.nc' + lcommout = .FALSE. + commoutprefix = 'parinter_comm' + cdpathdist = './' + lreaddist = .FALSE. + lwritedist = .FALSE. + + OPEN(9,file='namfesomcoup.in') + READ(9,namfesomcoup) + CLOSE(9) + + ! Global number of Gaussian gridpoints + + CALL mpi_allreduce( npoints, nglopoints, 1, & + & mpi_integer, mpi_sum, icomm, ierr) + + WRITE(0,*)'Update FESOM global scalar points' + noglopoints=126858 + IF (mype==0) THEN + nopoints=126858 + ELSE + nopoints=0 + ENDIF + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + + omask(:) = 1 + IF (mype==0) THEN + DO i=1,nopoints + ogloind(i)=i + ENDDO + ENDIF + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean T-grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_T,remap_gauss_to_T,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_T,remap_gauss_to_T) + ENDIF + CALL parinter_init( mype, npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_T, gausstoT, lcommout, TRIM(commoutprefix)//'_gtoT', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_T) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT') + ENDIF + ENDIF + + ! From ocean T-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_T_to_gauss,remap_T_to_gauss,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_T_to_gauss,remap_T_to_gauss) + ENDIF + + CALL parinter_init( mype, npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_T_to_gauss, Ttogauss, lcommout, TRIM(commoutprefix)//'_Ttog', & + & iunit ) + CALL scripremap_dealloc(remap_T_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + + WRITE(0,*)'Update FESOM global vector points' + noglopoints=244659 + IF (mype==0) THEN + nopoints=244659 + ELSE + nopoints=0 + ENDIF + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + + omask(:) = 1 + IF (mype==0) THEN + DO i=1,nopoints + ogloind(i)=i + ENDDO + ENDIF + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean UV-grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_UV,remap_gauss_to_UV,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_UV,remap_gauss_to_UV) + ENDIF + CALL parinter_init( mype, npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_UV, gausstoUV, lcommout, TRIM(commoutprefix)//'_gtoUV', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_UV) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV') + ENDIF + ENDIF + + ! From ocean UV-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_UV_to_gauss,remap_UV_to_gauss,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_UV_to_gauss,remap_UV_to_gauss) + ENDIF + + CALL parinter_init( mype, npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_UV_to_gauss, UVtogauss, lcommout, TRIM(commoutprefix)//'_UVtog', & + & iunit ) + CALL scripremap_dealloc(remap_UV_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + +END SUBROUTINE nemogcmcoup_coupinit + + +SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & + & nopoints, pgsst, pgist, pgalb, & + & pgifr, pghic, pghsn, pgucur, pgvcur, & + & pgistl, licelvls ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + + IMPLICIT NONE + + ! Arguments + REAL(wp), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur + REAL(wp), DIMENSION(nopoints,3) :: pgistl + LOGICAL :: licelvls + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + +#ifdef FESOM_TODO + + ! Temporary array for packing of input data without halos. + REAL(wp), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zsend + ! Arrays for rotation of current vectors from ij to ne. + REAL(wp), DIMENSION(jpi,jpj) :: zotx1, zoty1, ztmpx, ztmpy + ! Array for fraction of leads (i.e. ocean) + REAL(wp), DIMENSION(jpi,jpj) :: zfr_l + REAL(wp) :: zt + ! Loop variables + INTEGER :: ji, jj, jk, jl + REAL(wp) :: zhook_handle ! Dr Hook handle + + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',0,zhook_handle) + IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_get') + + zfr_l(:,:) = 1.- fr_i(:,:) + + IF (.NOT.ALLOCATED(zscplsst)) THEN + ALLOCATE(zscplsst(jpi,jpj)) + ENDIF + + ! Pack SST data and convert to K. + + IF ( nsstlvl(1) == nsstlvl(2) ) THEN + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tsn(ji,jj,nsstlvl(1),jp_tem) + rt0 + zscplsst(ji,jj) = zsend(jk) - rt0 + ENDDO + ENDDO + ELSE + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = SUM(& + & tsn(ji,jj,nsstlvl(1):nsstlvl(2),jp_tem) * & + & tmask(ji,jj,nsstlvl(1):nsstlvl(2)) * & + & fse3t(ji,jj,nsstlvl(1):nsstlvl(2)) ) / & + & MAX( SUM( & + & tmask(ji,jj,nsstlvl(1):nsstlvl(2)) * & + & fse3t(ji,jj,nsstlvl(1):nsstlvl(2))) , 1.0 ) + rt0 + zscplsst(ji,jj) = zsend(jk) - rt0 + ENDDO + ENDDO + ENDIF + CALL lbc_lnk( zscplsst, 'T', 1. ) + + ! Interpolate SST + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgsst ) + + ! Pack ice temperature data (already in K) + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tn_ice(ji,jj,1) + ENDDO + ENDDO +#else + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + zt=0.0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) + zt = zt + a_i(ji,jj,jl) + ENDDO + IF ( zt > 0.0 ) THEN + zsend(jk) = zsend(jk) / zt + ELSE + zsend(jk) = rt0 + ENDIF + ENDDO + ENDDO +#endif + + ! Interpolate ice temperature + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgist ) + + ! Ice level temperatures + + IF (licelvls) THEN + +#if defined key_lim2 + + DO jl = 1, 3 + + ! Pack ice temperatures data at level jl(already in K) + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tbif (ji,jj,jl) + ENDDO + ENDDO + + ! Interpolate ice temperature at level jl + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgistl(:,jl) ) + + ENDDO + +#else + WRITE(0,*)'licelvls needs to be sorted for LIM3' + CALL abort +#endif + + ENDIF + + ! Pack ice albedo data + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = alb_ice(ji,jj,1) + ENDDO + ENDDO +#else + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + zt=0.0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + alb_ice(ji,jj,jl) * a_i(ji,jj,jl) + zt = zt + a_i(ji,jj,jl) + ENDDO + IF ( zt > 0.0_wp ) THEN + zsend(jk) = zsend(jk) / zt + ELSE + zsend(jk) = albedo_oce_mix(ji,jj) + ENDIF + ENDDO + ENDDO +#endif + + ! Interpolate ice albedo + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgalb ) + + ! Pack ice fraction data + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = fr_i(ji,jj) + ENDDO + ENDDO + + ! Interpolation of ice fraction. + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgifr ) + + ! Pack ice thickness data + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = hicif(ji,jj) + ENDDO + ENDDO +#else + ! LIM3 + ! Average over categories (to be revised). + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + ht_i(ji,jj,jl) * a_i(ji,jj,jl) + ENDDO + ENDDO + ENDDO +#endif + + ! Interpolation of ice thickness + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pghic ) + + ! Pack snow thickness data + +#if defined key_lim2 + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = hsnif(ji,jj) + ENDDO + ENDDO +#else + ! LIM3 + ! Average over categories (to be revised). + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = 0 + DO jl = 1, jpl + zsend(jk) = zsend(jk) + ht_s(ji,jj,jl) * a_i(ji,jj,jl) + ENDDO + ENDDO + ENDDO +#endif + + ! Interpolation of snow thickness + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pghsn ) + + ! Currents needs to be rotated from ij to ne first + + DO jj = 2, jpjm1 + DO ji = 2, jpim1 + zotx1(ji,jj) = 0.5 * ( un(ji,jj,1) + un(ji-1,jj ,1) ) + zoty1(ji,jj) = 0.5 * ( vn(ji,jj,1) + vn(ji ,jj-1,1) ) + END DO + END DO + CALL lbc_lnk( zotx1, 'T', -1. ) + CALL lbc_lnk( zoty1, 'T', -1. ) + CALL rot_rep( zotx1, zoty1, 'T', 'ij->e', ztmpx ) + CALL rot_rep( zotx1, zoty1, 'T', 'ij->n', ztmpy ) + + ! Pack U current + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = ztmpx(ji,jj) + ENDDO + ENDDO + + ! Interpolate U current + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgucur ) + + ! Pack V current + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = ztmpy(ji,jj) + ENDDO + ENDDO + + ! Interpolate V current + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgvcur ) + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) + +#else + + WRITE(0,*)'nemogcmcoup_lim2_get not done for FESOM yet' + CALL abort + +#endif + +END SUBROUTINE nemogcmcoup_lim2_get + + +SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & + & npoints, & + & taux_oce, tauy_oce, taux_ice, tauy_ice, & + & qs___oce, qs___ice, qns__oce, qns__ice, dqdt_ice, & + & evap_tot, evap_ice, prcp_liq, prcp_sol, & + & runoff, ocerunoff, tcc, lcc, tice_atm, & + & kt, ldebug, loceicemix, lqnsicefilt ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + & taux_oce, tauy_oce, taux_ice, tauy_ice, & + & qs___oce, qs___ice, qns__oce, qns__ice, & + & dqdt_ice, evap_tot, evap_ice, prcp_liq, prcp_sol, & + & runoff, ocerunoff, tcc, lcc, tice_atm + + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + ! QS/QNS mixed switch + LOGICAL, INTENT(IN) :: loceicemix + ! QNS ice filter switch (requires tice_atm to be sent) + LOGICAL, INTENT(IN) :: lqnsicefilt + + ! Local variables + +#ifdef FESOM_TODO + + ! Packed receive buffer + REAL(wp), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zrecv + ! Unpacked fields on ORCA grids + REAL(wp), DIMENSION(jpi,jpj) :: zqs___oce, zqs___ice, zqns__oce, zqns__ice + REAL(wp), DIMENSION(jpi,jpj) :: zdqdt_ice, zevap_tot, zevap_ice, zprcp_liq, zprcp_sol + REAL(wp), DIMENSION(jpi,jpj) :: zrunoff, zocerunoff + REAL(wp), DIMENSION(jpi,jpj) :: ztmp, zicefr + ! Arrays for rotation + REAL(wp), DIMENSION(jpi,jpj) :: zuu,zvu,zuv,zvv,zutau,zvtau + ! Lead fraction for both LIM2/LIM3 + REAL(wp), DIMENSION(jpi,jpj) :: zfrld + ! Mask for masking for I grid + REAL(wp) :: zmsksum + ! For summing up LIM3 contributions to ice temperature + REAL(wp) :: zval,zweig + + ! Loop variables + INTEGER :: ji,jj,jk,jl + ! netCDF debugging output variables + CHARACTER(len=128) :: cdoutfile + INTEGER :: inum + REAL(wp) :: zhook_handle ! Dr Hook handle + + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',0,zhook_handle) + IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_update') + + ! Allocate the storage data + + IF (.NOT.lallociceflx) THEN + ALLOCATE( & + & zsqns_tot(jpi,jpj), & + & zsqns_ice(jpi,jpj), & + & zsqsr_tot(jpi,jpj), & + & zsqsr_ice(jpi,jpj), & + & zsemp_tot(jpi,jpj), & + & zsemp_ice(jpi,jpj), & + & zsevap_ice(jpi,jpj), & + & zsdqdns_ice(jpi,jpj), & + & zssprecip(jpi,jpj), & + & zstprecip(jpi,jpj), & + & zstcc(jpi,jpj), & + & zslcc(jpi,jpj), & + & zsatmist(jpi,jpj), & + & zsqns_ice_add(jpi,jpj)& + & ) + lallociceflx = .TRUE. + ENDIF + IF (.NOT.lallocstress) THEN + ALLOCATE( & + & zsutau(jpi,jpj), & + & zsvtau(jpi,jpj), & + & zsutau_ice(jpi,jpj), & + & zsvtau_ice(jpi,jpj) & + & ) + lallocstress = .TRUE. + ENDIF + + ! Sort out incoming arrays from the IFS and put them on the ocean grid + + !1. Interpolate ocean solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean solar radiation + + zqs___oce(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqs___oce(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !2. Interpolate ice solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice solar radiation + + zqs___ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqs___ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !3. Interpolate ocean non-solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean non-solar radiation + + zqns__oce(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqns__oce(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !4. Interpolate ice non-solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice non-solar radiation + + zqns__ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqns__ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !5. Interpolate D(q)/dT to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, dqdt_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack D(q)/D(T) + + zdqdt_ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zdqdt_ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !6. Interpolate total evaporation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack total evaporation + + zevap_tot(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zevap_tot(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !7. Interpolate evaporation over ice to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack evaporation over ice + + zevap_ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zevap_ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !8. Interpolate liquid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack liquid precipitation + + zprcp_liq(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zprcp_liq(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !9. Interpolate solid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack precipitation over ice + + zprcp_sol(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zprcp_sol(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !10. Interpolate runoff to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack runoff + + zrunoff(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zrunoff(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !11. Interpolate ocean runoff to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zocerunoff(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zocerunoff(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !12. Interpolate total cloud fractions to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tcc, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zstcc(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zstcc(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !13. Interpolate low cloud fractions to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, lcc, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zslcc(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zslcc(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! get sea ice fraction and lead fraction + +#if defined key_lim2 + zfrld(:,:) = frld(:,:) + zicefr(:,:) = 1 - zfrld(:,:) +#else + zicefr(:,:) = 0.0_wp + DO jl = 1, jpl + zicefr(:,:) = zicefr(:,:) + a_i(:,:,jl) + ENDDO + zfrld(:,:) = 1 - zicefr(:,:) +#endif + + zsemp_tot(:,:) = zevap_tot(:,:) - zprcp_liq(:,:) - zprcp_sol(:,:) + zstprecip(:,:) = zprcp_liq(:,:) + zprcp_sol(:,:) + ! More consistent with NEMO, but does changes the results, so + ! we don't do it for now. + ! zsemp_tot(:,:) = zevap_tot(:,:) - zstprecip(:,:) + zsemp_ice(:,:) = zevap_ice(:,:) - zprcp_sol(:,:) + zssprecip(:,:) = - zsemp_ice(:,:) + zsemp_tot(:,:) = zsemp_tot(:,:) - zrunoff(:,:) + zsemp_tot(:,:) = zsemp_tot(:,:) - zocerunoff(:,:) + zsevap_ice(:,:) = zevap_ice(:,:) + + ! non solar heat fluxes ! (qns) + IF (loceicemix) THEN + zsqns_tot(:,:) = zqns__oce(:,:) + ELSE + zsqns_tot(:,:) = zfrld(:,:) * zqns__oce(:,:) + zicefr(:,:) * zqns__ice(:,:) + ENDIF + zsqns_ice(:,:) = zqns__ice(:,:) + ztmp(:,:) = zfrld(:,:) * zprcp_sol(:,:) * lfus ! add the latent heat of solid precip. melting + + zsqns_tot(:,:) = zsqns_tot(:,:) - ztmp(:,:) ! over free ocean + ! solar heat fluxes ! (qsr) + + IF (loceicemix) THEN + zsqsr_tot(:,:) = zqs___oce(:,:) + ELSE + zsqsr_tot(:,:) = zfrld(:,:) * zqs___oce(:,:) + zicefr(:,:) * zqs___ice(:,:) + ENDIF + zsqsr_ice(:,:) = zqs___ice(:,:) + + IF( ln_dm2dc ) THEN ! modify qsr to include the diurnal cycle + zsqsr_tot(:,:) = sbc_dcy( zsqsr_tot(:,:) ) + zsqsr_ice(:,:) = sbc_dcy( zsqsr_ice(:,:) ) + ENDIF + + zsdqdns_ice(:,:) = zdqdt_ice(:,:) + + ! Apply lateral boundary condition + + CALL lbc_lnk(zsqns_tot, 'T', 1.0) + CALL lbc_lnk(zsqns_ice, 'T', 1.0) + CALL lbc_lnk(zsqsr_tot, 'T', 1.0) + CALL lbc_lnk(zsqsr_ice, 'T', 1.0) + CALL lbc_lnk(zsemp_tot, 'T', 1.0) + CALL lbc_lnk(zsemp_ice, 'T', 1.0) + CALL lbc_lnk(zsdqdns_ice, 'T', 1.0) + CALL lbc_lnk(zssprecip, 'T', 1.0) + CALL lbc_lnk(zstprecip, 'T', 1.0) + CALL lbc_lnk(zstcc, 'T', 1.0) + CALL lbc_lnk(zslcc, 'T', 1.0) + + ! Interpolate atmospheric ice temperature to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tice_atm, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack atmospheric ice temperature + + zsatmist(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zsatmist(ji,jj) = zrecv(jk) + ENDDO + ENDDO + CALL lbc_lnk(zsatmist, 'T', 1.0) + + zsqns_ice_add(:,:) = 0.0_wp + + ! Use the dqns_ice filter + + IF (lqnsicefilt) THEN + + ! Add filtr to qns_ice + +#if defined key_lim2 + ztmp(:,:) = tn_ice(:,:,1) +#else + DO jj = nldj, nlej + DO ji = nldi, nlei + zval=0.0 + zweig=0.0 + DO jl = 1, jpl + zval = zval + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) + zweig = zweig + a_i(ji,jj,jl) + ENDDO + IF ( zweig > 0.0 ) THEN + ztmp(ji,jj) = zval /zweig + ELSE + ztmp(ji,jj) = rt0 + ENDIF + ENDDO + ENDDO + CALL lbc_lnk(ztmp, 'T', 1.0) +#endif + + WHERE ( zicefr(:,:) > .001_wp ) + zsqns_ice_add(:,:) = zsdqdns_ice(:,:) * ( ztmp(:,:) - zsatmist(:,:) ) + END WHERE + + zsqns_ice(:,:) = zsqns_ice(:,:) + zsqns_ice_add(:,:) + + ENDIF + + ! Interpolate u-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack u stress on U grid + + zuu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate v-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack v stress on U grid + + zvu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate u-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack u stress on V grid + + zuv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate v-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack v stress on V grid + + zvv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Rotate stresses from en to ij and put u,v stresses on U,V grids + + CALL repcmo( zuu, zvu, zuv, zvv, zsutau, zsvtau ) + + ! Apply lateral boundary condition on u,v stresses on the U,V grids + + CALL lbc_lnk( zsutau, 'U', -1.0 ) + CALL lbc_lnk( zsvtau, 'V', -1.0 ) + + ! Interpolate ice u-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice u stress on U grid + + zuu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice v-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice v stress on U grid + + zvu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice u-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice u stress on V grid + + zuv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice v-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice v stress on V grid + + zvv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Rotate stresses from en to ij and put u,v stresses on U,V grids + + CALL repcmo( zuu, zvu, zuv, zvv, zutau, zvtau ) + + ! Apply lateral boundary condition on u,v stresses on the U,V grids + + CALL lbc_lnk( zutau, 'U', -1.0 ) + CALL lbc_lnk( zvtau, 'V', -1.0 ) + +#if defined key_lim2_vp + + ! Convert to I grid for LIM2 for key_lim_vp + DO jj = 2, jpjm1 ! (U,V) ==> I + DO ji = 2, jpim1 ! NO vector opt. + zmsksum = umask(ji-1,jj,1) + umask(ji-1,jj-1,1) + zsutau_ice(ji,jj) = ( umask(ji-1,jj,1) * zutau(ji-1,jj) + & + & umask(ji-1,jj-1,1) * zutau(ji-1,jj-1) ) + IF ( zmsksum > 0.0 ) THEN + zsutau_ice(ji,jj) = zsutau_ice(ji,jj) / zmsksum + ENDIF + zmsksum = vmask(ji,jj-1,1) + vmask(ji-1,jj-1,1) + zsvtau_ice(ji,jj) = ( vmask(ji,jj-1,1) * zvtau(ji,jj-1) + & + & vmask(ji-1,jj-1,1) * zvtau(ji-1,jj-1) ) + IF ( zmsksum > 0.0 ) THEN + zsvtau_ice(ji,jj) = zsvtau_ice(ji,jj) / zmsksum + ENDIF + END DO + END DO + +#else + + zsutau_ice(:,:) = zutau(:,:) + zsvtau_ice(:,:) = zvtau(:,:) + +#endif + + CALL lbc_lnk( zsutau_ice, 'I', -1.0 ) + CALL lbc_lnk( zsvtau_ice, 'I', -1.0 ) + + ! Optionally write files write the data on the ORCA grid via IOM. + + IF (ldebug) THEN + WRITE(cdoutfile,'(A,I8.8)') 'zsutau_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsutau' , zsutau ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsvtau' , zsvtau ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsutau_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsutau_ice' , zsutau_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsvtau_ice' , zsvtau_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_tot' , zsqns_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_ice' , zsqns_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqsr_tot' , zsqsr_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqsr_ice' , zsqsr_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsemp_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsemp_tot' , zsemp_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsemp_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsemp_ice' , zsemp_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsdqdns_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsdqdns_ice' , zsdqdns_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zssprecip_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zssprecip' , zssprecip ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zstprecip_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zstprecip' , zstprecip ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsevap_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsevap_ice' , zsevap_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zstcc_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zstcc' , zstcc ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zslcc_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zslcc' , zslcc ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsatmist_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsatmist' , zsatmist ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_add_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_ice_add' , zsqns_ice_add ) + CALL iom_close( inum ) + ENDIF + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_update') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',1,zhook_handle) + +#else + + WRITE(0,*)'nemogcmcoup_lim2_update not done for FESOM yet' + CALL abort + +#endif + +END SUBROUTINE nemogcmcoup_lim2_update + + +SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) + + IMPLICIT NONE + + ! Arguments + + ! Time step + INTEGER, INTENT(IN) :: istp + + ! Data and time from NEMO + INTEGER, INTENT(OUT) :: icdate, ictime + + ! Local variables + + ! Advance the FESOM model 1 time step + + WRITE(0,*)'Insert FESOM step here.' + + ! Compute date and time at the end of the time step. + +#ifdef FESOM_TODO + iye = ndastp / 10000 + imo = ndastp / 100 - iye * 100 + ida = MOD( ndastp, 100 ) + CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) + zjul = zjul + ( nsec_day + 0.5_wp * rdttra(1) ) / 86400.0_wp + CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) + icdate = iye * 10000 + imo * 100 + ida + ictime = ihh * 10000 + imm * 100 + iss +#endif + +END SUBROUTINE nemogcmcoup_step + + +SUBROUTINE nemogcmcoup_final + + ! Finalize the NEMO model + + IMPLICIT NONE + + WRITE(*,*)'Insert call to finalization of FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_final + + diff --git a/src/ifs_modules.F90 b/src/ifs_modules.F90 new file mode 100644 index 000000000..a6f07acaa --- /dev/null +++ b/src/ifs_modules.F90 @@ -0,0 +1,1857 @@ +#define __MYFILE__ 'ifs_modules.F90' +#define key_mpp_mpi +! Set of modules needed by the interface to IFS. +! +! -Original code by Kristian Mogensen, ECMWF. + +MODULE par_kind + IMPLICIT NONE + INTEGER, PUBLIC, PARAMETER :: & !: Floating point section + sp = SELECTED_REAL_KIND( 6, 37), & !: single precision (real 4) + dp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + wp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + ik = SELECTED_INT_KIND(6) !: integer precision +END MODULE par_kind + +MODULE nctools + + ! Utility subroutines for netCDF access + ! Modified : MAB (nf90, handle_error, LINE&FILE) + ! Modifled : KSM (new shorter name) + + USE netcdf + + PUBLIC ldebug_netcdf, nchdlerr + LOGICAL :: ldebug_netcdf = .FALSE. ! Debug switch for netcdf + +CONTAINS + + SUBROUTINE nchdlerr(status,lineno,filename) + + ! Error handler for netCDF access + IMPLICIT NONE + + + INTEGER :: status ! netCDF return status + INTEGER :: lineno ! Line number (usually obtained from + ! preprocessing __LINE__,__MYFILE__) + CHARACTER(len=*),OPTIONAL :: filename + + IF (status/=nf90_noerr) THEN + WRITE(*,*)'Netcdf error, code ',status + IF (PRESENT(filename)) THEN + WRITE(*,*)'In file ',filename,' in line ',lineno + ELSE + WRITE(*,*)'In line ',lineno + END IF + WRITE(*,'(2A)')' Error message : ',nf90_strerror(status) + CALL abort + ENDIF + + END SUBROUTINE nchdlerr + +!---------------------------------------------------------------------- +END MODULE nctools + +MODULE scrippar + INTEGER, PARAMETER :: scripdp = SELECTED_REAL_KIND(12,307) + INTEGER, PARAMETER :: scriplen = 80 +END MODULE scrippar + +MODULE scripgrid + + USE nctools + USE scrippar + + IMPLICIT NONE + + TYPE scripgridtype + INTEGER :: grid_size + INTEGER :: grid_corners + INTEGER :: grid_rank + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_dims + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lon + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_imask + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lon + CHARACTER(len=scriplen) :: grid_center_lat_units + CHARACTER(len=scriplen) :: grid_center_lon_units + CHARACTER(len=scriplen) :: grid_imask_units + CHARACTER(len=scriplen) :: grid_corner_lat_units + CHARACTER(len=scriplen) :: grid_corner_lon_units + CHARACTER(len=scriplen) :: title + END TYPE scripgridtype + +CONTAINS + + SUBROUTINE scripgrid_read( cdfilename, grid ) + + CHARACTER(len=*) :: cdfilename + TYPE(scripgridtype) :: grid + + INTEGER :: ncid, dimid, varid + + CALL scripgrid_init(grid) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_corners),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_rank),& + & __LINE__,__MYFILE__) + + CALL scripgrid_alloc(grid) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',grid%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_read + + SUBROUTINE scripgrid_write( cdgridfile, grid ) + + CHARACTER(len=*) :: cdgridfile + TYPE(scripgridtype) :: grid + + INTEGER :: ncid + INTEGER :: ioldfill + INTEGER :: idimsize,idimxsize,idimysize,idimcorners,idimrank + INTEGER :: idims1rank(1),idims1size(1),idims2(2) + INTEGER :: iddims,idcentlat,idcentlon,idimask,idcornlat,idcornlon + INTEGER :: igriddims(2) + + ! Setup netcdf file + + CALL nchdlerr(nf90_create(TRIM(cdgridfile),nf90_clobber,ncid),& + & __LINE__,__MYFILE__) + + ! Define dimensions + + CALL nchdlerr(nf90_def_dim(ncid,'grid_size',& + & grid%grid_size,idimsize),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_corners',& + & grid%grid_corners,idimcorners),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_rank',& + & grid%grid_rank,idimrank),& + & __LINE__,__MYFILE__) + + idims1rank(1) = idimrank + + idims1size(1) = idimsize + + idims2(1) = idimcorners + idims2(2) = idimsize + + ! Define variables + + CALL nchdlerr(nf90_def_var(ncid,'grid_dims',& + & nf90_int,idims1rank,iddims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lat',& + & nf90_double,idims1size,idcentlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlat,'units',& + & grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lon',& + & nf90_double,idims1size,idcentlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlon,'units',& + & grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_imask',& + & nf90_int,idims1size,idimask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idimask,'units',& + & grid%grid_imask_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lat',& + & nf90_double,idims2,idcornlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlat,'units',& + & grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lon',& + & nf90_double,idims2,idcornlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlon,'units',& + & grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & TRIM(grid%title)),& + & __LINE__,__MYFILE__) + + ! End of netCDF definition phase + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + ! Write variables + + + CALL nchdlerr(nf90_put_var(ncid,iddims,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlat,& + & grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlon,& + & grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idimask,& + & grid%grid_imask), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlat,& + & grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlon,& + & grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ! Close file + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_write + + SUBROUTINE scripgrid_init( grid ) + + TYPE(scripgridtype) :: grid + + grid%grid_size=0 + grid%grid_corners=0 + grid%grid_rank=0 + grid%grid_center_lat_units='' + grid%grid_center_lon_units='' + grid%grid_imask_units='' + grid%grid_corner_lat_units='' + grid%grid_corner_lon_units='' + grid%title='' + + END SUBROUTINE scripgrid_init + + SUBROUTINE scripgrid_alloc( grid ) + + TYPE(scripgridtype) :: grid + + IF ( (grid%grid_size == 0) .OR. & + & (grid%grid_corners == 0) .OR. & + & (grid%grid_rank == 0) ) THEN + WRITE(*,*)'scripgridtype not initialized' + CALL abort + ENDIF + + ALLOCATE( & + & grid%grid_dims(grid%grid_rank), & + & grid%grid_center_lat(grid%grid_size), & + & grid%grid_center_lon(grid%grid_size), & + & grid%grid_corner_lat(grid%grid_corners, grid%grid_size), & + & grid%grid_corner_lon(grid%grid_corners, grid%grid_size), & + & grid%grid_imask(grid%grid_size) & + & ) + + END SUBROUTINE scripgrid_alloc + + SUBROUTINE scripgrid_dealloc( grid ) + + TYPE(scripgridtype) :: grid + + DEALLOCATE( & + & grid%grid_dims, & + & grid%grid_center_lat, & + & grid%grid_center_lon, & + & grid%grid_corner_lat, & + & grid%grid_corner_lon, & + & grid%grid_imask & + & ) + + END SUBROUTINE scripgrid_dealloc + +END MODULE scripgrid + +MODULE scripremap + +#if defined key_mpp_mpi + USE mpi +#endif + USE nctools + USE scrippar + USE scripgrid + + IMPLICIT NONE + + TYPE scripremaptype + INTEGER :: num_links + INTEGER :: num_wgts + TYPE(scripgridtype) :: src + TYPE(scripgridtype) :: dst + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_frac + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_frac + INTEGER, ALLOCATABLE, DIMENSION(:) :: src_address + INTEGER, ALLOCATABLE, DIMENSION(:) :: dst_address + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: remap_matrix + CHARACTER(len=scriplen) :: src_grid_area_units + CHARACTER(len=scriplen) :: dst_grid_area_units + CHARACTER(len=scriplen) :: src_grid_frac_units + CHARACTER(len=scriplen) :: dst_grid_frac_units + CHARACTER(len=scriplen) :: title + CHARACTER(len=scriplen) :: normalization + CHARACTER(len=scriplen) :: map_method + CHARACTER(len=scriplen) :: history + CHARACTER(len=scriplen) :: conventions + END TYPE scripremaptype + +CONTAINS + + SUBROUTINE scripremap_read_work(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid, dimid, varid + LOGICAL :: lcorners + + lcorners=.TRUE. + + CALL scripremap_init(remap) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_size),& + & __LINE__,__MYFILE__) + + + IF (nf90_inq_dimid(ncid,'src_grid_corners',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + lcorners=.FALSE. + remap%src%grid_corners=1 + ENDIF + + IF (lcorners) THEN + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + remap%dst%grid_corners=1 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_links',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_links),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_wgts),& + & __LINE__,__MYFILE__) + + CALL scripremap_alloc(remap) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + IF (lcorners) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ELSE + + remap%src%grid_corner_lat(:,:) = 0.0 + remap%src%grid_corner_lon(:,:) = 0.0 + remap%dst%grid_corner_lat(:,:) = 0.0 + remap%dst%grid_corner_lon(:,:) = 0.0 + remap%src%grid_corner_lat_units = '' + remap%src%grid_corner_lon_units = '' + remap%dst%grid_corner_lat_units = '' + remap%dst%grid_corner_lon_units = '' + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'normalization',remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'map_method',remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'history',remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'conventions',remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'dest_grid',remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'source_grid',remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripremap_read_work + + SUBROUTINE scripremap_read(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + CALL scripremap_read_work(cdfilename,remap) + + END SUBROUTINE scripremap_read + + + SUBROUTINE scripremap_read_sgl(cdfilename,remap,& + & mype,nproc,mycomm,linteronly) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + INTEGER :: mype,nproc,mycomm + LOGICAL :: linteronly + + INTEGER, DIMENSION(8) :: isizes + INTEGER :: ierr, ip + + IF (mype==0) THEN + CALL scripremap_read_work(cdfilename,remap) +#if defined key_mpp_mpi + isizes(1)=remap%src%grid_size + isizes(2)=remap%dst%grid_size + isizes(3)=remap%src%grid_corners + isizes(4)=remap%dst%grid_corners + isizes(5)=remap%src%grid_rank + isizes(6)=remap%dst%grid_rank + isizes(7)=remap%num_links + isizes(8)=remap%num_wgts + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + ELSE + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + CALL scripremap_init(remap) + remap%src%grid_size=isizes(1) + remap%dst%grid_size=isizes(2) + remap%src%grid_corners=isizes(3) + remap%dst%grid_corners=isizes(4) + remap%src%grid_rank=isizes(5) + remap%dst%grid_rank=isizes(6) + remap%num_links=isizes(7) + remap%num_wgts=isizes(8) + CALL scripremap_alloc(remap) +#endif + ENDIF + +#if defined key_mpp_mpi + + IF (.NOT.linteronly) THEN + + CALL mpi_bcast( remap%src%grid_dims, remap%src%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lat, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%dst%grid_dims, remap%dst%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src_grid_area, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%title, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%normalization, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%map_method, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%history, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%conventions, scriplen, & + & mpi_character, 0, mycomm, ierr ) + ENDIF + + CALL mpi_bcast( remap%src_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%remap_matrix, remap%num_wgts*remap%num_links, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask, remap%src%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask, remap%dst%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + +#endif + END SUBROUTINE scripremap_read_sgl + + SUBROUTINE scripremap_write(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid + INTEGER :: dimsgs,dimdgs,dimsgc,dimdgc,dimsgr,dimdgr,dimnl,dimnw + INTEGER :: dims1(1),dims2(2) + INTEGER :: idsgd,iddgd,idsgea,iddgea,idsgeo,iddgeo + INTEGER :: idsgoa,idsgoo,iddgoa,iddgoo,idsgim,iddgim,idsgar,iddgar + INTEGER :: idsgf,iddgf,idsga,iddga,idsa,idda,idrm + + CALL nchdlerr(nf90_create(TRIM(cdfilename),nf90_clobber,ncid), & + & __LINE__, __MYFILE__ ) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_size',& + & remap%src%grid_size,dimsgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_size',& + & remap%dst%grid_size,dimdgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_corners',& + & remap%src%grid_corners,dimsgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_corners',& + & remap%dst%grid_corners,dimdgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_rank',& + & remap%src%grid_rank,dimsgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_rank',& + & remap%dst%grid_rank,dimdgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & remap%num_links,dimnl),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & remap%num_wgts,dimnw),& + & __LINE__,__MYFILE__) + + dims1(1)=dimsgr + CALL nchdlerr(nf90_def_var(ncid,'src_grid_dims',& + & nf90_int,dims1,idsgd),& + & __LINE__,__MYFILE__) + + dims1(1)=dimdgr + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_dims',& + & nf90_int,dims1,iddgd), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lat',& + & nf90_double,dims1,idsgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lat',& + & nf90_double,dims1,iddgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lon',& + & nf90_double,dims1,idsgeo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lon',& + & nf90_double,dims1,iddgeo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lat',& + & nf90_double,dims2,idsgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lon',& + & nf90_double,dims2,idsgoo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lat',& + & nf90_double,dims2,iddgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lon',& + & nf90_double,dims2,iddgoo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_imask',& + & nf90_int,dims1,idsgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_imask',& + & nf90_int,dims1,iddgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_area',& + & nf90_double,dims1,idsga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_area',& + & nf90_double,dims1,iddga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_frac',& + & nf90_double,dims1,idsgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_frac',& + & nf90_double,dims1,iddgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda), & + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,idsgea,'units',& + & remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgea,'units',& + & remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgeo,'units',& + & remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgeo,'units',& + & remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoa,'units',& + & remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoo,'units',& + & remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoa,'units',& + & remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoo,'units',& + & remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgim,'units',& + & remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgim,'units',& + & remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsga,'units',& + & remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddga,'units',& + & remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgf,'units',& + & remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgf,'units',& + & remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'normalization',& + & remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'map_method',& + & remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'history',& + & remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'conventions',& + & remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'dest_grid',& + & remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'source_grid',& + & remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgd,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgd,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgea,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgea,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgeo,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgeo,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgoa,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgoo,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoa,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoo,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgim,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgim,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsga,remap%src_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddga,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgf,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgf,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsa,remap%src_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idda,remap%dst_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idrm,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE scripremap_write + + SUBROUTINE scripremap_init(remap) + + TYPE(scripremaptype) :: remap + + CALL scripgrid_init(remap%src) + CALL scripgrid_init(remap%dst) + remap%num_links = 0 + remap%num_wgts = 0 + remap%title='' + remap%normalization='' + remap%map_method='' + remap%history='' + remap%conventions='' + remap%src_grid_area_units='' + remap%dst_grid_area_units='' + remap%src_grid_frac_units='' + remap%dst_grid_frac_units='' + + END SUBROUTINE scripremap_init + + SUBROUTINE scripremap_alloc(remap) + + TYPE(scripremaptype) :: remap + + IF ( (remap%num_links == 0) .OR. & + & (remap%num_wgts == 0) ) THEN + WRITE(*,*)'scripremaptype not initialized' + CALL abort + ENDIF + + CALL scripgrid_alloc(remap%src) + CALL scripgrid_alloc(remap%dst) + + ALLOCATE( & + & remap%src_grid_area(remap%src%grid_size), & + & remap%dst_grid_area(remap%dst%grid_size), & + & remap%src_grid_frac(remap%src%grid_size), & + & remap%dst_grid_frac(remap%dst%grid_size), & + & remap%src_address(remap%num_links), & + & remap%dst_address(remap%num_links), & + & remap%remap_matrix(remap%num_wgts, remap%num_links) & + & ) + + END SUBROUTINE scripremap_alloc + + SUBROUTINE scripremap_dealloc(remap) + + TYPE(scripremaptype) :: remap + + DEALLOCATE( & + & remap%src_grid_area, & + & remap%dst_grid_area, & + & remap%src_grid_frac, & + & remap%dst_grid_frac, & + & remap%src_address, & + & remap%dst_address, & + & remap%remap_matrix & + & ) + + CALL scripgrid_dealloc(remap%src) + CALL scripgrid_dealloc(remap%dst) + + CALL scripremap_init(remap) + + END SUBROUTINE scripremap_dealloc + +END MODULE scripremap + +MODULE parinter + +#if defined key_mpp_mpi + USE mpi +#endif + USE scripremap + USE scrippar + USE nctools + + IMPLICIT NONE + + ! Type to contains interpolation information + ! (like what is in scripremaptype) and message + ! passing information + + TYPE parinterinfo + ! Number of local links + INTEGER :: num_links + ! Destination side + INTEGER, POINTER, DIMENSION(:) :: dst_address + ! Source addresses and work array + INTEGER, POINTER, DIMENSION(:) :: src_address + ! Local remap matrix + REAL(scripdp), POINTER, DIMENSION(:,:) :: remap_matrix + ! Message passing information + ! Array of local addresses for send buffer + ! packing + INTEGER, POINTER, DIMENSION(:) :: send_address + ! Sending bookkeeping + INTEGER :: nsendtot + INTEGER, POINTER, DIMENSION(:) :: nsend,nsdisp + ! Receiving bookkeeping + INTEGER :: nrecvtot + INTEGER, POINTER, DIMENSION(:) :: nrecv,nrdisp + END TYPE parinterinfo + +CONTAINS + + SUBROUTINE parinter_init( mype, nproc, mpi_comm, & + & nsrclocpoints, nsrcglopoints, srcmask, srcgloind, & + & ndstlocpoints, ndstglopoints, dstmask, dstgloind, & + & remap, pinfo, lcommout, commoutprefix, iunit ) + + ! Setup interpolation based on SCRIP format weights in + ! remap and the source/destination grids information. + + ! Procedure: + + ! 1) A global SCRIP remapping file is read on all processors. + ! 2) Find local destination points in the global grid. + ! 3) Find which processor needs source data and setup buffer + ! information for sending data. + ! 4) Construct new src remapping for buffer received + + ! All information is stored in the TYPE(parinterinfo) output + ! data type + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc, mpi_comm + ! Source grid local and global number of grid points + INTEGER, INTENT(IN) :: nsrclocpoints, nsrcglopoints + ! Source integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcmask + ! Source global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcgloind + ! Destination grid local and global number of grid points + INTEGER, INTENT(IN) :: ndstlocpoints, ndstglopoints + ! Destination integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstmask + ! Destination global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstgloind + ! SCRIP remapping data + TYPE(scripremaptype) :: remap + ! Switch for output communication patterns + LOGICAL :: lcommout + CHARACTER(len=*) :: commoutprefix + ! Unit to use for output + INTEGER :: iunit + + ! Output arguments + + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + + ! Local variable + + ! Variable for glocal <-> local address/pe information + INTEGER, DIMENSION(nsrcglopoints) :: ilsrcmppmap, ilsrclocind + INTEGER, DIMENSION(nsrcglopoints) :: igsrcmppmap, igsrclocind + INTEGER, DIMENSION(ndstglopoints) :: ildstmppmap, ildstlocind + INTEGER, DIMENSION(ndstglopoints) :: igdstmppmap, igdstlocind + INTEGER, DIMENSION(nsrcglopoints) :: isrcpe,isrcpetmp + INTEGER, DIMENSION(nsrcglopoints) :: isrcaddtmp + INTEGER, DIMENSION(0:nproc-1) :: isrcoffset + INTEGER, DIMENSION(nproc) :: isrcno, isrcoff, isrccur + INTEGER, DIMENSION(nproc) :: ircvoff, ircvcur + INTEGER, DIMENSION(:), ALLOCATABLE :: isrctot, ircvtot + + ! Misc variable + INTEGER :: i,n,pe + INTEGER :: istatus + CHARACTER(len=256) :: cdfile + + ! Check that masks are consistent. + + ! Remark: More consistency tests between remapping information + ! and input argument could be code, but for now we settle + ! for checking the masks. + + ! Source grid + + DO i=1,nsrclocpoints + IF (srcmask(i)/=remap%src%grid_imask(srcgloind(i))) THEN + WRITE(iunit,*)'Source imask is inconsistent at ' + WRITE(iunit,*)'global index = ',srcgloind(i) + WRITE(iunit,*)'Source mask = ',srcmask(i) + WRITE(iunit,*)'Remap mask = ',remap%src%grid_imask(srcgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%src%grid_center_lat(srcgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%src%grid_center_lon(srcgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Destination grid + + DO i=1,ndstlocpoints + IF (dstmask(i)/=remap%dst%grid_imask(dstgloind(i))) THEN + WRITE(iunit,*)'Destination imask is inconsistent at ' + WRITE(iunit,*)'global index = ',dstgloind(i) + WRITE(iunit,*)'Destin mask = ',dstmask(i) + WRITE(iunit,*)'Remap mask = ',remap%dst%grid_imask(dstgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%dst%grid_center_lat(dstgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%dst%grid_center_lon(dstgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Setup global to local and vice versa mappings. + + ilsrcmppmap(:)=-1 + ilsrclocind(:)=0 + ildstmppmap(:)=-1 + ildstlocind(:)=0 + + DO i=1,nsrclocpoints + ilsrcmppmap(srcgloind(i))=mype + ilsrclocind(srcgloind(i))=i + ENDDO + + DO i=1,ndstlocpoints + ildstmppmap(dstgloind(i))=mype + ildstlocind(dstgloind(i))=i + ENDDO + +#if defined key_mpp_mpi + CALL mpi_allreduce(ilsrcmppmap,igsrcmppmap,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ilsrclocind,igsrclocind,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstmppmap,igdstmppmap,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstlocind,igdstlocind,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) +#else + igsrcmppmap(:)=ilsrcmppmap(:) + igsrclocind(:)=ilsrclocind(:) + igdstmppmap(:)=ildstmppmap(:) + igdstlocind(:)=ildstlocind(:) +#endif + + ! Optionally construct an ascii file listing what src and + ! dest points belongs to which task + + ! Since igsrcmppmap and igdstmppmap are global data only do + ! this for mype==0. + + IF (lcommout.AND.(mype==0)) THEN + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_srcmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,nsrcglopoints + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & igsrcmppmap(i)+1,remap%src%grid_imask(i) + ENDDO + CLOSE(9) + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dstmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,ndstglopoints + WRITE(9,*)remap%dst%grid_center_lat(i),& + & remap%dst%grid_center_lon(i), & + & igdstmppmap(i)+1,remap%dst%grid_imask(i) + ENDDO + CLOSE(9) + ENDIF + + ! + ! Standard interpolation in serial case is + ! + ! DO n=1,remap%num_links + ! zdst(remap%dst_address(n)) = zdst(remap%dst_address(n)) + & + ! & remap%remap_matrix(1,n)*zsrc(remap%src_address(n)) + ! END DO + ! + + ! In parallel we need to first find local number of links + + pinfo%num_links=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) & + & pinfo%num_links=pinfo%num_links+1 + ENDDO + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(1,pinfo%num_links)) + + ! Get local destination addresses + + n=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + n=n+1 + pinfo%dst_address(n)=& + & igdstlocind(remap%dst_address(i)) + pinfo%remap_matrix(:,n)=& + & remap%remap_matrix(:,i) + ENDIF + ENDDO + + ! Get sending processors maps. + + ! The same data point might need to be sent to many processors + ! so first construct a map for processors needing the data + + isrcpe(:)=-1 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + isrcpe(remap%src_address(i))=& + & igsrcmppmap(remap%src_address(i)) + ENDIF + ENDDO + + ! Optionally write a set if ascii file listing which tasks + ! mype needs to send to communicate with + + IF (lcommout) THEN + ! Destination processors + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dsts_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO pe=0,nproc-1 + IF (pe==mype) THEN + isrcpetmp(:)=isrcpe(:) + ENDIF +#if defined key_mpp_mpi + CALL mpi_bcast(isrcpetmp,nsrcglopoints,mpi_integer,pe,mpi_comm,istatus) +#endif + DO i=1,nsrcglopoints + IF (isrcpetmp(i)==mype) THEN + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & pe+1,mype+1 + ENDIF + ENDDO + ENDDO + CLOSE(9) + ENDIF + + ! Get number of points to send to each processor + + ALLOCATE(pinfo%nsend(0:nproc-1)) + isrcno(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrcno(isrcpe(i)+1)=isrcno(isrcpe(i)+1)+1 + ENDIF + ENDDO +#if defined key_mpp_mpi + CALL mpi_alltoall(isrcno,1,mpi_integer, & + & pinfo%nsend(0:nproc-1),1,mpi_integer, & + & mpi_comm,istatus) +#else + pinfo%nsend(0:nproc-1) = isrcno(1:nproc) +#endif + pinfo%nsendtot=SUM(pinfo%nsend(0:nproc-1)) + + ! Construct sending buffer mapping. Data is mapping in + ! processor order. + + ALLOCATE(pinfo%send_address(pinfo%nsendtot)) + + ! Temporary arrays for mpi all to all. + + ALLOCATE(isrctot(SUM(isrcno(1:nproc)))) + ALLOCATE(ircvtot(SUM(pinfo%nsend(0:nproc-1)))) + + ! Offset for message parsing + + isrcoff(1)=0 + ircvoff(1)=0 + DO i=1,nproc-1 + isrcoff(i+1) = isrcoff(i) + isrcno(i) + ircvoff(i+1) = pinfo%nsend(i-1) + ircvoff(i) + ENDDO + + ! Pack indices i into a buffer + + isrccur(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrccur(isrcpe(i)+1)=isrccur(isrcpe(i)+1)+1 + isrctot(isrccur(isrcpe(i)+1)+isrcoff(isrcpe(i)+1)) = i + ENDIF + ENDDO + + ! Send the data + +#if defined key_mpp_mpi + CALL mpi_alltoallv(& + & isrctot,isrccur,isrcoff,mpi_integer, & + & ircvtot,pinfo%nsend(0:nproc-1),ircvoff,mpi_integer, & + & mpi_comm,istatus) +#else + ircvtot(:)=isrctot(:) +#endif + + ! Get the send address. ircvtot will at this point contain the + ! addresses in the global index needed for message passing + + DO i=1,pinfo%nsendtot + pinfo%send_address(i)=igsrclocind(ircvtot(i)) + ENDDO + + ! Deallocate the mpi all to all arrays + + DEALLOCATE(ircvtot,isrctot) + + ! Get number of points to receive to each processor + + ALLOCATE(pinfo%nrecv(0:nproc-1)) + pinfo%nrecv(0:nproc-1)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0 .AND. isrcpe(i)=0 .AND. isrcpe(i)0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & pinfo%num_links,dimnl),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & 1,dimnw),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nsendtot',& + & pinfo%nsendtot,dimnst),& + & __LINE__,__MYFILE__) + ENDIF + + IF (pinfo%nrecvtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nrecvtot',& + & pinfo%nrecvtot,dimnrt),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'nproc',& + & nproc,dimnpr),& + & __LINE__,__MYFILE__) + + IF (pinfo%num_links>0) THEN + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa),& + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsend',& + & nf90_int,dims1,idns),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + dims1(1)=dimnst + CALL nchdlerr(nf90_def_var(ncid,'send_address',& + & nf90_int,dims1,idsaa),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrecv',& + & nf90_int,dims1,idnr),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsdisp',& + & nf90_int,dims1,idnsp),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrdisp',& + & nf90_int,dims1,idnrp),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + + IF (pinfo%num_links>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idda,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsa,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idrm,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idns,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idsaa,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idnr,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnsp,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnrp,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE parinter_write + + SUBROUTINE parinter_read( mype, nproc, & + & nsrcglopoints, ndstglopoints, & + & pinfo, cdpath, cdprefix, lexists ) + + ! Write pinfo information in a netCDF file in order to + ! be able to read it rather than calling parinter_init + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc + ! Source grid local global number of grid points + INTEGER, INTENT(IN) :: nsrcglopoints + ! Destination grid global number of grid points + INTEGER, INTENT(IN) :: ndstglopoints + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + ! Does the information exists + LOGICAL :: lexists + ! Path and file prefix + CHARACTER(len=*) :: cdpath, cdprefix + + ! Local variable + + ! Misc variable + CHARACTER(len=1024) :: cdfile + INTEGER :: ncid, dimid, varid, num_wgts + + WRITE(cdfile,'(A,2(I8.8,A),2(I4.4,A),A)') & + & TRIM(cdpath)//'/'//TRIM(cdprefix)//'_', & + & nsrcglopoints,'_',ndstglopoints,'_',mype,'_',nproc,'.nc' + + + lexists=nf90_open(TRIM(cdfile),nf90_nowrite,ncid)==nf90_noerr + + IF (lexists) THEN + + ! If num_links is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'num_links',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%num_links),& + & __LINE__,__MYFILE__) + ELSE + pinfo%num_links=0 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=num_wgts),& + & __LINE__,__MYFILE__) + IF (num_wgts/=1) THEN + WRITE(0,*)'parinter_read: num_wgts has to be 1 for now' + CALL abort + ENDIF + + ! If nsendtot is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'nsendtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nsendtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nsendtot=0 + ENDIF + + IF(nf90_inq_dimid(ncid,'nrecvtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nrecvtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nrecvtot=0 + ENDIF + + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(num_wgts,pinfo%num_links),& + & pinfo%nsend(0:nproc-1),& + & pinfo%send_address(pinfo%nsendtot),& + & pinfo%nrecv(0:nproc-1),& + & pinfo%nsdisp(0:nproc-1),& + & pinfo%nrdisp(0:nproc-1)) + + IF (pinfo%num_links>0) THEN + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nsend',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'send_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nrecv',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nsdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nrdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + ENDIF + + END SUBROUTINE parinter_read + +END MODULE parinter + +MODULE interinfo + + ! Parallel regridding information + + USE parinter + + IMPLICIT NONE + + SAVE + + ! IFS to NEMO + + TYPE(parinterinfo) :: gausstoT,gausstoUV + + ! NEMO to IFS + + TYPE(parinterinfo) :: Ttogauss, UVtogauss + + ! Read parinterinfo on task 0 only and broadcast. + + LOGICAL :: lparbcast = .FALSE. + +END MODULE interinfo diff --git a/src/ifs_notused.F90 b/src/ifs_notused.F90 new file mode 100644 index 000000000..bbeb66423 --- /dev/null +++ b/src/ifs_notused.F90 @@ -0,0 +1,356 @@ +! Routines usually provided by the library that are currently +! not implemented for FESOM2. +! +! -Original code by Kristian Mogensen, ECMWF. + +SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + LOGICAL :: lnemoioserver + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver + + +SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver_2 + + +SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & + & nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wp), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints,nlev + + ! Local variables + + WRITE(0,*)'nemogcmcoup_mlflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlflds_get + + +SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & + & nopoints, pgsst, pgice, pgucur, pgvcur ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice and currents + REAL(wp), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get should not be called with FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_get + + +SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & + & nopoints, pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wp), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + + WRITE(0,*)'nemogcmcoup_exflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_exflds_get + + +SUBROUTINE nemogcmcoup_get_1way( mype, npes, icomm ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get_1way should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_get_1way + + +SUBROUTINE nemogcmcoup_mlinit( mype, npes, icomm, & + & nlev, nopoints, pdep, pmask ) + + ! Get information about the vertical discretization of the ocean model + + ! nlevs are maximum levels on input and actual number levels on output + + USE par_kind + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Grid information + INTEGER, INTENT(INOUT) :: nlev, nopoints + REAL(wp), INTENT(OUT), DIMENSION(nlev) :: pdep + REAL(wp), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask + + ! Local variables + + WRITE(0,*)'nemogcmcoup_mlinit should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlinit + + +SUBROUTINE nemogcmcoup_update( mype, npes, icomm, & + & npoints, pgutau, pgvtau, & + & pgqsr, pgqns, pgemp, kt, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), intent(IN) :: & + & pgutau, pgvtau, pgqsr, pgqns, pgemp + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update should be called with with.' + CALL abort + +END SUBROUTINE nemogcmcoup_update + +SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & + & npoints, pgsst, pgtsk, kt, ldebug ) + + ! Update addetiona in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Input on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), intent(IN) :: & + & pgsst, pgtsk + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom.' + CALL abort + + +END SUBROUTINE nemogcmcoup_update_add + + +SUBROUTINE nemogcmcoup_wam_coupinit( mype, npes, icomm, & + & nlocpoints, nglopoints, & + & nlocmsk, ngloind, iunit ) + + ! Initialize single executable coupling between WAM and NEMO + ! This is called from WAM. + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! WAM grid information + ! Number of local and global points + INTEGER, INTENT(IN) :: nlocpoints, nglopoints + ! Integer mask and global indices + INTEGER, DIMENSION(nlocpoints), INTENT(IN) :: nlocmsk, ngloind + ! Unit for output in parinter_init + INTEGER :: iunit + + WRITE(0,*)'Wam coupling not implemented for FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_coupinit + + +SUBROUTINE nemogcmcoup_wam_get( mype, npes, icomm, & + & nopoints, pwsst, pwicecov, pwicethk, & + & pwucur, pwvcur, licethk ) + + ! Interpolate from the ORCA grid + ! to the WAM grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number WAM grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice cover, ice thickness and currents + REAL(wp), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur + LOGICAL :: licethk + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_get + + +SUBROUTINE nemogcmcoup_wam_update( mype, npes, icomm, & + & npoints, pwswh, pwmwp, & + & pwphioc, pwtauoc, pwstrn, & + & pwustokes, pwvstokes, & + & cdtpro, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + & pwswh, pwmwp, pwphioc, pwtauoc, pwstrn, pwustokes, pwvstokes + ! Current time + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update + + +SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & + & pwutau, pwvtau, pwuv10n, pwphif,& + & cdtpro, ldebug ) + + ! Update stresses in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + & pwutau, pwvtau, pwuv10n, pwphif + ! Current time step + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update_stress should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update_stress From ef5ccfb6beddbaa01deddfcc934072b2faf870a6 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 8 Jun 2018 20:04:27 +0100 Subject: [PATCH 020/909] fesom code + interface routines compiles as library as well as executable (FESOM_LIBRARY switch in CMakeLists.txt) --- CMakeLists.txt | 1 + configure.sh | 2 ++ src/CMakeLists.txt | 33 +++++++++++++++++++++++++++++++++ src/fvom_main.F90 | 3 ++- src/ifs_interface.F90 | 4 ++-- src/ifs_modules.F90 | 2 ++ src/ifs_notused.F90 | 2 ++ 7 files changed, 44 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 74ad4c769..446b457ba 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,6 +11,7 @@ set(TOPLEVEL_DIR ${CMAKE_CURRENT_LIST_DIR}) set(FESOM_COUPLED OFF CACHE BOOL "compile fesom standalone or with oasis support (i.e. coupled)") #set(FESOM_COUPLED ON CACHE BOOL "compile fesom standalone or with oasis support (i.e. coupled)") #set(OIFS_COUPLED ON CACHE BOOL "compile fesom coupled to OpenIFS. Also needs FESOM_COUPLED to work)") +set(FESOM_LIBRARY ON CACHE BOOL "compile fesom as a library (to be called as subroutine), with interface to IFS") #set(VERBOSE OFF CACHE BOOL "toggle debug output") #add_subdirectory(oasis3-mct/lib/psmile) add_subdirectory(src) diff --git a/configure.sh b/configure.sh index b4b6e27b8..f19bcfaab 100755 --- a/configure.sh +++ b/configure.sh @@ -6,4 +6,6 @@ source env.sh # source this from your run script too mkdir build || true # make sure not to commit this to svn or git cd build cmake .. # not required when re-compiling +#sed -i -e 's/-lFALSE//g' src/CMakeFiles/fesom.dir/link.txt # workaround +#make VERBOSE=1 install -j`nproc --all` make install -j`nproc --all` diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 663dbe70c..0a07c08b7 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -11,8 +11,14 @@ file(GLOB sources_C ${src_home}/*.c) #if(${FESOM_STANDALONE}) # list(REMOVE_ITEM sources_Fortran ${src_home}/cpl_driver.F90) #endif() +if(NOT ${FESOM_LIBRARY}) +list(REMOVE_ITEM sources_Fortran ${src_home}/ifs_interface.F90) +list(REMOVE_ITEM sources_Fortran ${src_home}/ifs_modules.F90) +list(REMOVE_ITEM sources_Fortran ${src_home}/ifs_notused.F90) +endif() list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90) + # depends on the metis library add_subdirectory(../lib/metis-5.1.0 ${PROJECT_BINARY_DIR}/metis) include_directories(../lib/metis-5.1.0/include) @@ -26,7 +32,11 @@ target_compile_definitions(${PROJECT_NAME}_C PRIVATE PARMS USE_MPI REAL=double D target_link_libraries(${PROJECT_NAME}_C metis parms) # create our binary (set its name to name of this project) +if(${FESOM_LIBRARY}) +add_library(${PROJECT_NAME} ${sources_Fortran}) +else() add_executable(${PROJECT_NAME} ${sources_Fortran}) +endif() target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS -DMETIS_VERSION=5 -DPART_WEIGHTED -DMETISRANDOMSEED=35243) if(${FESOM_COUPLED}) include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindOASIS.cmake) @@ -35,6 +45,9 @@ endif() if(${OIFS_COUPLED}) target_compile_definitions(${PROJECT_NAME} PRIVATE __oifs) endif() +if(${FESOM_LIBRARY}) + target_compile_definitions(${PROJECT_NAME} PRIVATE __ifsinterface) +endif() if(${VERBOSE}) target_compile_definitions(${PROJECT_NAME} PRIVATE VERBOSE) endif() @@ -44,6 +57,7 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) # target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip -g -traceback -check all,noarg_temp_created,bounds) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -fdefault-real-8 -ffree-line-length-none) +# target_compile_options(${PROJECT_NAME} PRIVATE -fdefault-real-8 -ffree-line-length-none -mtune=native -march=native -g -Wall -c -O3 -fdefault-double-8 -fcray-pointer -fbacktrace -fconvert=big-endian -fno-range-check) endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) @@ -52,7 +66,26 @@ target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARI target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) + +#set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") +#get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) +#get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) +#install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) + +if(${FESOM_LIBRARY}) +set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../lib/libfesom.a" CACHE FILEPATH "file path where the FESOM library should be put") +get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) +get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) +install(PROGRAMS ${PROJECT_BINARY_DIR}/lib${PROJECT_NAME}.a DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) +else() set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) +endif() + + +#get_cmake_property(_variableNames VARIABLES) +#foreach (_variableName ${_variableNames}) +# message(STATUS "${_variableName}=${${_variableName}}") +#endforeach() diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 474dbab0c..bab0f0bf8 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -6,6 +6,7 @@ ! The main driving routine !=============================================================================! +#ifndef __ifsinterface program main use g_PARSUP, only: mype @@ -21,7 +22,7 @@ program main if (mype==0) write(*,*) 'Finalization complete...' end program main - +#endif !=============================================================================! diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index 59fff624c..e71bb0918 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -1,3 +1,4 @@ +#if defined (__ifsinterface) !===================================================== ! IFS interface for calling FESOM2 as a subroutine. ! @@ -1310,5 +1311,4 @@ SUBROUTINE nemogcmcoup_final CALL abort END SUBROUTINE nemogcmcoup_final - - +#endif diff --git a/src/ifs_modules.F90 b/src/ifs_modules.F90 index a6f07acaa..9d6b867fa 100644 --- a/src/ifs_modules.F90 +++ b/src/ifs_modules.F90 @@ -1,3 +1,4 @@ +#if defined (__ifsinterface) #define __MYFILE__ 'ifs_modules.F90' #define key_mpp_mpi ! Set of modules needed by the interface to IFS. @@ -1855,3 +1856,4 @@ MODULE interinfo LOGICAL :: lparbcast = .FALSE. END MODULE interinfo +#endif diff --git a/src/ifs_notused.F90 b/src/ifs_notused.F90 index bbeb66423..ea1779327 100644 --- a/src/ifs_notused.F90 +++ b/src/ifs_notused.F90 @@ -1,3 +1,4 @@ +#if defined (__ifsinterface) ! Routines usually provided by the library that are currently ! not implemented for FESOM2. ! @@ -354,3 +355,4 @@ SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & CALL abort END SUBROUTINE nemogcmcoup_wam_update_stress +#endif From a07633be8d671a940c15472154b3a42183f71481 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 8 Jun 2018 20:06:48 +0100 Subject: [PATCH 021/909] Add workstation case to env.sh file --- env.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/env.sh b/env.sh index d16f51a0b..be8347489 100755 --- a/env.sh +++ b/env.sh @@ -37,6 +37,8 @@ elif [[ $LOGINHOST =~ \.hww\.de$ ]] || [[ $LOGINHOST =~ ^nid[0-9]{5}$ ]]; then STRATEGY="hazelhen.hww.de" elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then STRATEGY="ecaccess.ecmwf.int" +elif [[ $LOGINHOST =~ ^pecora+\.ecmwf\.int$ ]]; then + STRATEGY="workstation" else echo "can not determine environment for host: "$LOGINHOST [ $BEING_EXECUTED = true ] && exit 1 From 21884e97bbbda652244a9c26858d01fdcdb036f0 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 8 Jun 2018 20:09:16 +0100 Subject: [PATCH 022/909] Add environment file for compilation on a workstation. --- env/workstation/shell | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 env/workstation/shell diff --git a/env/workstation/shell b/env/workstation/shell new file mode 100644 index 000000000..4c2f2f1ff --- /dev/null +++ b/env/workstation/shell @@ -0,0 +1,13 @@ +# used at ECMWF + +module load netcdf4 +module load hdf5 +#following 4 not needed? +#module load eccodes +#module load emos +#module load fftw +#module load boost +module load openmpi + +export FC=mpif90 CC=mpicc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly +#export FC=mpif90 CC=gcc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly From 89b9cf309ed5847ed1ea6242af25c1533d0f98e3 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 8 Jun 2018 20:11:02 +0100 Subject: [PATCH 023/909] Add job script for running jobs on the workstation (ECMWF). --- work/job_workstation | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100755 work/job_workstation diff --git a/work/job_workstation b/work/job_workstation new file mode 100755 index 000000000..30266352e --- /dev/null +++ b/work/job_workstation @@ -0,0 +1,23 @@ +#!/bin/bash +source ../env.sh # source this from your run script + +path=`pwd` +echo Initial path: $path + +mkdir -p /scratch/rd/natr/run +cd /scratch/rd/natr/run + +# debug +set -x + +cp -n $HOME/fesom2/bin/fesom.x . #../bin/fesom.x . # ln -s ../bin/fesom.x +cp -n $HOME/fesom2/config/namelist.config . #../config/namelist.config . +cp -n $HOME/fesom2/config/namelist.forcing . #../config/namelist.forcing . +cp -n $HOME/fesom2/config/namelist.oce . #../config/namelist.oce . +cp -n $HOME/fesom2/config/namelist.ice . #../config/namelist.ice . + +date +#aprun -N $EC_tasks_per_node -n $EC_total_tasks -j $EC_hyperthreads ./fesom.x > "fesom2.out" +#./fesom.x > "fesom2.out" +mpirun -np 4 ./fesom.x > "fesom2.out" +date From 3db1bc25d31aa9f60c4897733436d60d454a769b Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 11 Jun 2018 12:50:12 +0100 Subject: [PATCH 024/909] fesom2 can be linked to IFS! --- src/fvom_main.F90 | 7 +++- src/ifs_interface.F90 | 75 +++++++++++++++++++++++++++++++------------ 2 files changed, 60 insertions(+), 22 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index bab0f0bf8..839ee960c 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -49,8 +49,11 @@ subroutine main_initialize(nsteps) integer :: ierr integer, INTENT(OUT) :: nsteps +#ifndef __ifsinterface + !MPI_INIT is done by IFS when ifsinterface is used (fesom is called as library) #ifndef __oifs - !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular !dependencies between modules (cpl_driver and g_PARSUP) + !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular + !dependencies between modules (cpl_driver and g_PARSUP) !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS call MPI_INIT(ierr) #endif @@ -62,6 +65,8 @@ subroutine main_initialize(nsteps) ! sets npes and mype call par_init +#endif + if (mype==0) write(*,*) '!=============================================================================!' if (mype==0) write(*,*) '! Welcome to the ' if (mype==0) write(*,*) '! Finite Volume Sea-ice Ocean Model (FESOM2) ' diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index e71bb0918..c1876230e 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -3,25 +3,30 @@ ! IFS interface for calling FESOM2 as a subroutine. ! ! -Original code for NEMO by Kristian Mogensen, ECMWF. +! -Adapted to FESOM2 by Thomas Rackow, AWI. !----------------------------------------------------- SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & & lwaveonly, iatmunit, lwrite ) - ! Initialize the NEMO model for single executable coupling - - USE par_kind + ! Initialize the FESOM model for single executable coupling + USE par_kind !in ifs_modules.F90 + USE g_PARSUP, only: MPI_COMM_FESOM + USE g_config, only: dt + USE g_clock, only: timenew, daynew, yearnew IMPLICIT NONE ! Input arguments ! Message passing information INTEGER, INTENT(IN) :: icomm - ! Initial date, time, initial timestep and final time step + ! Initial date (e.g. 20170906), time, initial timestep and final time step INTEGER, INTENT(OUT) :: inidate, initime, itini, itend ! Length of the time step REAL(wp), INTENT(OUT) :: zstp + + ! inherited from interface to NEMO, not used here: ! Coupling to waves only LOGICAL, INTENT(IN) :: lwaveonly ! Logfile unit (used if >=0) @@ -29,41 +34,61 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & ! Write to this unit LOGICAL :: lwrite - WRITE(0,*)'Insert FESOM init here.' -! CALL abort + WRITE(0,*)'!===================================' + WRITE(0,*)'! FESOM initialization from IFS.' - ! Set information for the caller + WRITE(0,*)'! get MPI_COMM_FESOM. ==============' + MPI_COMM_FESOM=icomm -#ifdef FESOM_TODO - inidate = nn_date0 - initime = nn_time0*3600 - itini = nit000 - itend = nn_itend - zstp = rdttra(1) -#else + itini = 1 + CALL main_initialize(itend) + WRITE(0,*)'! main_initialize done. ============' + + ! Set more information for the caller + write(0,*)'! clock initialized at time ', timenew, daynew, yearnew*1000 + 9*10 + 6 inidate = 20170906 - initime = 0 - itini = 1 - itend = 24 - zstp = 3600.0 -#endif + initime = 0 !hours? + WRITE(0,*)'! currently start only from 00:00 possible.' + + zstp = dt + WRITE(0,*)'! FESOM timestep is ', real(dt,4), 'sec' + + WRITE(0,*)'! no coupling to waves implemented. ' + WRITE(0,*)'!===================================' + +!#ifdef FESOM_TODO +! inidate = nn_date0 +! initime = nn_time0*3600 +! itini = nit000 +! itend = nn_itend +! zstp = rdttra(1) +!#else +! inidate = 20170906 +! initime = 0 +! itini = 1 +! itend = 24 +! zstp = 3600.0 +!#endif END SUBROUTINE nemogcmcoup_init -SUBROUTINE nemogcmcoup_coupinit( mype, npes, icomm, & +SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & & npoints, nlocmsk, ngloind ) ! Initialize single executable coupling USE parinter USE scripremap USE interinfo + + ! FESOM modules + USE g_PARSUP, only: mype, npes IMPLICIT NONE ! Input arguments ! Message passing information - INTEGER, INTENT(IN) :: mype,npes,icomm + INTEGER, INTENT(IN) :: mypeIN,npesIN,icomm ! Gaussian grid information ! Number of points INTEGER, INTENT(IN) :: npoints @@ -103,6 +128,14 @@ SUBROUTINE nemogcmcoup_coupinit( mype, npes, icomm, & INTEGER :: i,j,k,ierr LOGICAL :: lexists + ! now FESOM knows about the (total number of) MPI tasks + mype=mypeIN + npes=npesIN + if(mype==0) then + write(*,*) 'MPI has been initialized in the atmospheric model' + write(*, *) 'Running on ', npes, ' PEs' + end if + ! Read namelists cdfile_gauss_to_T = 'gausstoT.nc' From 8b016282737cbdd342e743c7fceae1b0ee233b39 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 11 Jun 2018 13:01:31 +0100 Subject: [PATCH 025/909] renaming of subroutine read_namelist needed in fesom if ifsinterface is used. --- src/gen_model_setup.F90 | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index d8a283aa4..d10f8d642 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -1,12 +1,20 @@ ! ============================================================== subroutine setup_model implicit none +#ifdef __ifsinterface + call read_namelist_ifs ! should be before clock_init +#else call read_namelist ! should be before clock_init +#endif call define_prog_tracer end subroutine setup_model ! ============================================================== +#ifdef __ifsinterface +subroutine read_namelist_ifs +#else subroutine read_namelist +#endif ! Reads namelist files and overwrites default parameters. ! ! Coded by Lars Nerger @@ -78,7 +86,11 @@ subroutine read_namelist if(mype==0) write(*,*) 'Namelist files are read in' ! if ((output_length_unit=='s').or.(int(real(step_per_day)/24.0)<=1)) use_means=.false. +#ifdef __ifsinterface +end subroutine read_namelist_ifs +#else end subroutine read_namelist +#endif ! ================================================================= subroutine define_prog_tracer ! Coded by Qiang Wang From c7ea5ed0d143e312f80c865bd86a190519599120 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 11 Jun 2018 16:24:49 +0100 Subject: [PATCH 026/909] Should run until get is needed. Added flag for ignoring time consistency checks. --- src/fvom_main.F90 | 3 +- src/gen_modules_config.F90 | 3 +- src/gen_modules_partitioning.F90 | 10 +-- src/ifs_interface.F90 | 101 +++++++++++++------------------ src/io_restart.F90 | 2 +- 5 files changed, 53 insertions(+), 66 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 839ee960c..6ebdf5f08 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -63,9 +63,10 @@ subroutine main_initialize(nsteps) call cpl_oasis3mct_init(MPI_COMM_FESOM) #endif +#endif ! sets npes and mype call par_init -#endif + if (mype==0) write(*,*) '!=============================================================================!' if (mype==0) write(*,*) '! Welcome to the ' diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index 1e160138f..9a66d46a1 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -37,6 +37,7 @@ module g_config ! *** in out *** character*4 :: restartflag='last' !restart from record,'#','last' + logical :: ignore_timecheck=.false. !ignore time consistency check? (restart&clock) integer :: output_length=1 !valid for d,h,s character :: output_length_unit='m' !output period: y, m, d, h, s integer :: logfile_outfreq=1 ! logfile info. outp. freq., # steps @@ -45,7 +46,7 @@ module g_config character :: restart_length_unit='m' integer :: output_offset=32, restart_offset=32 - namelist /inout/ restartflag, output_length, output_length_unit, restart_length, restart_length_unit, & + namelist /inout/ restartflag, ignore_timecheck, output_length, output_length_unit, restart_length, restart_length_unit, & logfile_outfreq, use_means, output_offset, restart_offset ! *** mesh *** diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 0c016c111..ed9f9064b 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -11,7 +11,7 @@ module g_PARSUP include 'mpif.h' #endif - integer :: MPI_COMM_FESOM + integer :: MPI_COMM_FESOM integer, parameter :: MAX_LAENDERECK=8 type com_struct integer :: rPEnum ! the number of PE I receive info from @@ -84,13 +84,13 @@ subroutine par_init ! initializes MPI integer :: i -#ifndef __oasis +#if defined __oasis || defined __ifsinterface + call MPI_Comm_Size(MPI_COMM_FESOM,npes,i) + call MPI_Comm_Rank(MPI_COMM_FESOM,mype,i) +#else call MPI_Comm_Size(MPI_COMM_WORLD,npes,i) call MPI_Comm_Rank(MPI_COMM_WORLD,mype,i) MPI_COMM_FESOM=MPI_COMM_WORLD -#else - call MPI_Comm_Size(MPI_COMM_FESOM,npes,i) - call MPI_Comm_Rank(MPI_COMM_FESOM,mype,i) #endif if(mype==0) then diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index c1876230e..56f5a7d71 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -14,7 +14,7 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & USE par_kind !in ifs_modules.F90 USE g_PARSUP, only: MPI_COMM_FESOM USE g_config, only: dt - USE g_clock, only: timenew, daynew, yearnew + USE g_clock, only: timenew, daynew, yearnew, month, day_in_month IMPLICIT NONE ! Input arguments @@ -34,41 +34,28 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & ! Write to this unit LOGICAL :: lwrite - WRITE(0,*)'!===================================' - WRITE(0,*)'! FESOM initialization from IFS.' - WRITE(0,*)'! get MPI_COMM_FESOM. ==============' + WRITE(0,*)'!======================================' + WRITE(0,*)'! FESOM is initialized from within IFS.' + + WRITE(0,*)'! get MPI_COMM_FESOM. =================' MPI_COMM_FESOM=icomm itini = 1 - CALL main_initialize(itend) - WRITE(0,*)'! main_initialize done. ============' + CALL main_initialize(itend) !also sets mype and npes + WRITE(0,*)'! main_initialize done. ===============' ! Set more information for the caller - write(0,*)'! clock initialized at time ', timenew, daynew, yearnew*1000 + 9*10 + 6 - inidate = 20170906 - initime = 0 !hours? - WRITE(0,*)'! currently start only from 00:00 possible.' + ! initial date and time (time is not used) + inidate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 + initime = 0 + WRITE(0,*)'! FESOM initial date is ', inidate ,' ======' + + ! fesom timestep zstp = dt - WRITE(0,*)'! FESOM timestep is ', real(dt,4), 'sec' - - WRITE(0,*)'! no coupling to waves implemented. ' - WRITE(0,*)'!===================================' - -!#ifdef FESOM_TODO -! inidate = nn_date0 -! initime = nn_time0*3600 -! itini = nit000 -! itend = nn_itend -! zstp = rdttra(1) -!#else -! inidate = 20170906 -! initime = 0 -! itini = 1 -! itend = 24 -! zstp = 3600.0 -!#endif + WRITE(0,*)'! FESOM timestep is ', real(zstp,4), 'sec' + WRITE(0,*)'!======================================' END SUBROUTINE nemogcmcoup_init @@ -76,13 +63,14 @@ END SUBROUTINE nemogcmcoup_init SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & & npoints, nlocmsk, ngloind ) + ! FESOM modules + USE g_PARSUP, only: mype, npes, myDim_nod2D, myDim_elem2D, myList_nod2D, myList_elem2D + USE o_MESH, only: nod2D, elem2D + ! Initialize single executable coupling USE parinter USE scripremap USE interinfo - - ! FESOM modules - USE g_PARSUP, only: mype, npes IMPLICIT NONE ! Input arguments @@ -128,9 +116,9 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & INTEGER :: i,j,k,ierr LOGICAL :: lexists - ! now FESOM knows about the (total number of) MPI tasks - mype=mypeIN - npes=npesIN + + ! here FESOM knows about the (total number of) MPI tasks + if(mype==0) then write(*,*) 'MPI has been initialized in the atmospheric model' write(*, *) 'Running on ', npes, ' PEs' @@ -157,24 +145,25 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & CALL mpi_allreduce( npoints, nglopoints, 1, & & mpi_integer, mpi_sum, icomm, ierr) + + WRITE(0,*)'!======================================' + WRITE(0,*)'! SCALARS =============================' + WRITE(0,*)'Update FESOM global scalar points' - noglopoints=126858 - IF (mype==0) THEN - nopoints=126858 - ELSE - nopoints=0 - ENDIF + noglopoints=nod2D + nopoints=myDim_nod2d ! Ocean mask and global indicies ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + omask(:)= 1 ! all points are ocean points + ogloind = myList_nod2D ! global index for local point number - omask(:) = 1 - IF (mype==0) THEN - DO i=1,nopoints - ogloind(i)=i - ENDDO - ENDIF + ! Could be helpful later: + ! Replace global numbering with a local one + ! tmp(1:nod2d)=0 + ! DO n=1, myDim_nod2D+eDim_nod2D + ! tmp(myList_nod2D(n))=n ! Read the interpolation weights and setup the parallel interpolation ! from atmosphere Gaussian grid to ocean T-grid @@ -230,24 +219,20 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & DEALLOCATE(omask,ogloind) + + WRITE(0,*)'!======================================' + WRITE(0,*)'! VECTORS =============================' + WRITE(0,*)'Update FESOM global vector points' - noglopoints=244659 - IF (mype==0) THEN - nopoints=244659 - ELSE - nopoints=0 - ENDIF + noglopoints=elem2D + nopoints=myDim_elem2D ! Ocean mask and global indicies ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) - omask(:) = 1 - IF (mype==0) THEN - DO i=1,nopoints - ogloind(i)=i - ENDDO - ENDIF + omask(:)= 1 ! all elements are in the ocean + ogloind = myList_elem2D ! global index for local element number ! Read the interpolation weights and setup the parallel interpolation ! from atmosphere Gaussian grid to ocean UV-grid diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 2426e94e3..302e03df4 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -484,7 +484,7 @@ subroutine read_restart(id, arg) end if write(*,*) 'restart from record ', rec2read, ' of ', id%rec_count - if (int(ctime)/=int(rtime)) then + if ((int(ctime)/=int(rtime)) .and. (.not. ignore_timecheck)) then write(*,*) 'Reading restart: timestamps in restart and in clock files do not match' write(*,*) 'restart/ times are:', ctime, rtime write(*,*) 'the model will stop!' From c453a744f075be962dd8b70c3da88f369b2a78ad Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 11 Jun 2018 21:56:04 +0100 Subject: [PATCH 027/909] Add other variables in _get routine. Ice temp. and albedo still set to constant. Currents still need to be rotated. --- src/ifs_interface.F90 | 331 +++++++++++------------------------------- 1 file changed, 86 insertions(+), 245 deletions(-) diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index 56f5a7d71..d20c00a7a 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -297,12 +297,19 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & & pgistl, licelvls ) ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the ORCA grid to the Gaussian grid. + ! snow thickness and currents from the FESOM grid to the Gaussian grid. ! This routine can be called at any point in time since it does ! the necessary message passing in parinter_fld. - USE par_kind + USE par_kind ! in ifs_modules.F90 + USE o_ARRAYS, ONLY : tr_arr, UV + USE i_arrays, ONLY : m_ice, a_ice, m_snow + USE i_therm_param, ONLY : tmelt + USE g_PARSUP, only: myDim_nod2D, myDim_elem2D + USE parinter + USE scripremap + USE interinfo IMPLICIT NONE @@ -317,99 +324,97 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & INTEGER, INTENT(IN) :: nopoints ! Local variables + REAL(wp), DIMENSION(myDim_nod2D) :: zsend + REAL(wp), DIMENSION(myDim_elem2D) :: zsendUV -#ifdef FESOM_TODO - - ! Temporary array for packing of input data without halos. - REAL(wp), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zsend - ! Arrays for rotation of current vectors from ij to ne. - REAL(wp), DIMENSION(jpi,jpj) :: zotx1, zoty1, ztmpx, ztmpy - ! Array for fraction of leads (i.e. ocean) - REAL(wp), DIMENSION(jpi,jpj) :: zfr_l - REAL(wp) :: zt ! Loop variables - INTEGER :: ji, jj, jk, jl - REAL(wp) :: zhook_handle ! Dr Hook handle - - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',0,zhook_handle) - IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_get') - - zfr_l(:,:) = 1.- fr_i(:,:) - - IF (.NOT.ALLOCATED(zscplsst)) THEN - ALLOCATE(zscplsst(jpi,jpj)) - ENDIF + INTEGER :: n - ! Pack SST data and convert to K. - IF ( nsstlvl(1) == nsstlvl(2) ) THEN - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = tsn(ji,jj,nsstlvl(1),jp_tem) + rt0 - zscplsst(ji,jj) = zsend(jk) - rt0 - ENDDO - ENDDO - ELSE - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = SUM(& - & tsn(ji,jj,nsstlvl(1):nsstlvl(2),jp_tem) * & - & tmask(ji,jj,nsstlvl(1):nsstlvl(2)) * & - & fse3t(ji,jj,nsstlvl(1):nsstlvl(2)) ) / & - & MAX( SUM( & - & tmask(ji,jj,nsstlvl(1):nsstlvl(2)) * & - & fse3t(ji,jj,nsstlvl(1):nsstlvl(2))) , 1.0 ) + rt0 - zscplsst(ji,jj) = zsend(jk) - rt0 - ENDDO - ENDDO - ENDIF - CALL lbc_lnk( zscplsst, 'T', 1. ) + ! =================================================================== ! + ! Pack SST data and convert to K. 'pgsst' is on Gauss grid. + do n=1,myDim_nod2D + zsend(n)=tr_arr(1, n, 1)+tmelt ! sea surface temperature [K], + ! (1=surface, n=node, 1/2=T/S) + enddo ! Interpolate SST - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & myDim_nod2D, zsend, & & nopoints, pgsst ) + + ! =================================================================== ! + ! Pack ice fraction data [0..1] and interpolate: 'pgifr' on Gauss. + ! zsend(:)=a_ice(:) + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, a_ice, & + & nopoints, pgifr ) + + + ! =================================================================== ! ! Pack ice temperature data (already in K) + zsend(:)=273.15 -#if defined key_lim2 - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = tn_ice(ji,jj,1) - ENDDO - ENDDO -#else - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = 0 - zt=0.0 - DO jl = 1, jpl - zsend(jk) = zsend(jk) + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) - zt = zt + a_i(ji,jj,jl) - ENDDO - IF ( zt > 0.0 ) THEN - zsend(jk) = zsend(jk) / zt - ELSE - zsend(jk) = rt0 - ENDIF - ENDDO - ENDDO -#endif + ! Interpolate ice surface temperature: 'pgist' on Gaussian grid. + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgist ) + + + ! =================================================================== ! + ! Pack ice albedo data and interpolate: 'pgalb' on Gaussian grid. + zsend(:)=0.7 - ! Interpolate ice temperature + ! Interpolate ice albedo + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgalb ) + + ! =================================================================== ! + ! Pack ice thickness data and interpolate: 'pghic' on Gaussian grid. + zsend(:)=m_ice(:)/max(a_ice(:),0.01) ! ice thickness (mean over ice) + + ! Interpolation of average ice thickness CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgist ) + & myDim_nod2D, zsend, & + & nopoints, pghic ) + + + ! =================================================================== ! + ! Pack snow thickness data and interpolate: 'pghsn' on Gaussian grid. + zsend(:)=m_snow(:)/max(a_ice(:),0.01) ! snow thickness (mean over ice) + + ! Interpolation of snow thickness + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pghsn ) + + + ! =================================================================== ! + ! Surface currents need to be rotated to geographical grid + + ! Pack u surface current and interpolate: 'pgucur' on Gaussian grid. + zsendUV(:)=UV(1,1,1:myDim_elem2D) !UV includes eDim, leave away + + CALL parinter_fld( mype, npes, icomm, UVtogauss, & + & myDim_elem2D, zsendUV, & + & nopoints, pgucur ) + + + ! Pack v surface current and interpolate: 'pgvcur' on Gaussian grid. + zsendUV(:)=UV(2,1,1:myDim_elem2D) !UV includes eDim, leave away + + CALL parinter_fld( mype, npes, icomm, UVtogauss, & + & myDim_elem2D, zsendUV, & + & nopoints, pgvcur ) + +#ifndef FESOM_TODO + + WRITE(0,*)'Everything implemented except ice level temperatures (licelvls).' + +#else ! Ice level temperatures @@ -444,173 +449,9 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & ENDIF - ! Pack ice albedo data - -#if defined key_lim2 - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = alb_ice(ji,jj,1) - ENDDO - ENDDO -#else - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = 0 - zt=0.0 - DO jl = 1, jpl - zsend(jk) = zsend(jk) + alb_ice(ji,jj,jl) * a_i(ji,jj,jl) - zt = zt + a_i(ji,jj,jl) - ENDDO - IF ( zt > 0.0_wp ) THEN - zsend(jk) = zsend(jk) / zt - ELSE - zsend(jk) = albedo_oce_mix(ji,jj) - ENDIF - ENDDO - ENDDO -#endif - - ! Interpolate ice albedo - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgalb ) - - ! Pack ice fraction data - - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = fr_i(ji,jj) - ENDDO - ENDDO - - ! Interpolation of ice fraction. - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgifr ) - - ! Pack ice thickness data - -#if defined key_lim2 - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = hicif(ji,jj) - ENDDO - ENDDO -#else - ! LIM3 - ! Average over categories (to be revised). - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = 0 - DO jl = 1, jpl - zsend(jk) = zsend(jk) + ht_i(ji,jj,jl) * a_i(ji,jj,jl) - ENDDO - ENDDO - ENDDO -#endif - - ! Interpolation of ice thickness - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pghic ) - - ! Pack snow thickness data - -#if defined key_lim2 - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = hsnif(ji,jj) - ENDDO - ENDDO -#else - ! LIM3 - ! Average over categories (to be revised). - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = 0 - DO jl = 1, jpl - zsend(jk) = zsend(jk) + ht_s(ji,jj,jl) * a_i(ji,jj,jl) - ENDDO - ENDDO - ENDDO -#endif - - ! Interpolation of snow thickness - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pghsn ) - - ! Currents needs to be rotated from ij to ne first - - DO jj = 2, jpjm1 - DO ji = 2, jpim1 - zotx1(ji,jj) = 0.5 * ( un(ji,jj,1) + un(ji-1,jj ,1) ) - zoty1(ji,jj) = 0.5 * ( vn(ji,jj,1) + vn(ji ,jj-1,1) ) - END DO - END DO - CALL lbc_lnk( zotx1, 'T', -1. ) - CALL lbc_lnk( zoty1, 'T', -1. ) - CALL rot_rep( zotx1, zoty1, 'T', 'ij->e', ztmpx ) - CALL rot_rep( zotx1, zoty1, 'T', 'ij->n', ztmpy ) - - ! Pack U current - - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = ztmpx(ji,jj) - ENDDO - ENDDO - - ! Interpolate U current - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgucur ) - - ! Pack V current - - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = ztmpy(ji,jj) - ENDDO - ENDDO - - ! Interpolate V current - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgvcur ) - IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) -#else - - WRITE(0,*)'nemogcmcoup_lim2_get not done for FESOM yet' - CALL abort - #endif END SUBROUTINE nemogcmcoup_lim2_get From 26b9107690ceda02873a7df2640985971e8617c9 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Tue, 12 Jun 2018 10:26:31 +0100 Subject: [PATCH 028/909] All workstations at ECMWF. --- .gitignore | 1 + env.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 51a096137..970d81f02 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ *.x *.out *~ +lib/libfesom.a diff --git a/env.sh b/env.sh index be8347489..f70307909 100755 --- a/env.sh +++ b/env.sh @@ -37,7 +37,7 @@ elif [[ $LOGINHOST =~ \.hww\.de$ ]] || [[ $LOGINHOST =~ ^nid[0-9]{5}$ ]]; then STRATEGY="hazelhen.hww.de" elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then STRATEGY="ecaccess.ecmwf.int" -elif [[ $LOGINHOST =~ ^pecora+\.ecmwf\.int$ ]]; then +elif [[ $LOGINHOST =~ ^[A-Za-z0-9]+\.ecmwf\.int$ ]]; then STRATEGY="workstation" else echo "can not determine environment for host: "$LOGINHOST From 577f16b6f1f6a846aa39bac98e357197f9196e07 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Tue, 12 Jun 2018 14:53:40 +0100 Subject: [PATCH 029/909] Added vector_r2g to the code in gen_modules_rotate_grid.F90. Needed to rotate the surface currents before IFS gets them. --- src/gen_modules_rotate_grid.F90 | 92 +++++++++++++++++++++++++++++++-- 1 file changed, 87 insertions(+), 5 deletions(-) diff --git a/src/gen_modules_rotate_grid.F90 b/src/gen_modules_rotate_grid.F90 index 01a028c68..3f4ec01e4 100755 --- a/src/gen_modules_rotate_grid.F90 +++ b/src/gen_modules_rotate_grid.F90 @@ -17,7 +17,7 @@ module g_rotate_grid use g_config implicit none save - real(kind=WP) :: r2g_matrix(3,3) + real(kind=WP) :: r2g_matrix(3,3), r2g_matrix_inv(3,3) contains @@ -31,7 +31,7 @@ subroutine set_mesh_transform_matrix ! x-axis, and the third is by an angle G about the new z-axis. use o_PARAM implicit none - real(kind=WP) :: al, be, ga + real(kind=WP) :: al, be, ga, det al=alphaEuler be=betaEuler @@ -48,7 +48,48 @@ subroutine set_mesh_transform_matrix r2g_matrix(3,2)=-sin(be)*cos(al) r2g_matrix(3,3)=cos(be) + ! inverse for vector_r2g; for rotation matrix det=1 and R^T=R^-1 + call matrix_inverse_3x3(r2g_matrix, r2g_matrix_inv, det) + end subroutine set_mesh_transform_matrix + ! + !---------------------------------------------------------------- + ! +subroutine matrix_inverse_3x3(A, AINV, DET) + ! + ! Coded by Sergey Danilov + ! Reviewed by Qiang Wang and Thomas Rackow + !----------------------------------------- + + implicit none + + real(kind=WP), dimension(3,3), intent(IN) :: A + real(kind=WP), dimension(3,3), intent(OUT) :: AINV + real(kind=WP), intent(OUT) :: DET + + integer :: i,j + + AINV(1,1) = A(2,2)*A(3,3) - A(3,2)*A(2,3) + AINV(2,1) = -A(2,1)*A(3,3) + A(3,1)*A(2,3) + AINV(3,1) = A(2,1)*A(3,2) - A(3,1)*A(2,2) + AINV(1,2) = -A(1,2)*A(3,3) + A(3,2)*A(1,3) + AINV(2,2) = A(1,1)*A(3,3) - A(3,1)*A(1,3) + AINV(3,2) = -A(1,1)*A(3,2) + A(3,1)*A(1,2) + AINV(1,3) = A(1,2)*A(2,3) - A(2,2)*A(1,3) + AINV(2,3) = -A(1,1)*A(2,3) + A(2,1)*A(1,3) + AINV(3,3) = A(1,1)*A(2,2) - A(2,1)*A(1,2) + DET = A(1,1)*AINV(1,1) + A(1,2)*AINV(2,1) + A(1,3)*AINV(3,1) + + if ( DET .eq. 0.0 ) then + do j=1,3 + write(*,*) (A(i,j),i=1,3) + end do + stop 'SINGULAR 3X3 MATRIX in matrix_inverse_3x3' + else + AINV = AINV/DET + endif + +end subroutine matrix_inverse_3x3 ! !---------------------------------------------------------------- ! @@ -156,8 +197,49 @@ subroutine vector_g2r(tlon, tlat, lon, lat, flag_coord) tlon=-sin(rlon)*txr + cos(rlon)*tyr end subroutine vector_g2r -! -!---------------------------------------------------------------------------- -! + ! + !---------------------------------------------------------------------------- + ! + subroutine vector_r2g(tlon, tlat, lon, lat, flag_coord) + ! Transform a 2d vector with components (tlon, tlat) in + ! rotate 2d vector (tlon, tlat) to be in geo. coordinates + ! tlon, tlat (in) :: lon & lat components of a vector in rotated coordinates + ! (out) :: lon & lat components of the vector in geo. coordinates + ! lon, lat :: [radian] coordinates + ! flag_coord :: 1, (lon,lat) is the geographical coord.; else, rotated coord. + ! + implicit none + integer, intent(in) :: flag_coord + real(kind=WP), intent(inout) :: tlon, tlat + real(kind=WP), intent(in) :: lon, lat + real(kind=WP) :: rlon, rlat, glon, glat + real(kind=WP) :: txg, tyg, tzg, txr, tyr, tzr + ! + ! geographical coordinate + if(flag_coord==1) then ! input is in geographical coordinates + glon=lon + glat=lat + call g2r(glon,glat,rlon,rlat) + else ! input is in rotated coordinates + rlon=lon + rlat=lat + call r2g(glon,glat,rlon,rlat) + end if + ! + ! vector rotated Cartesian + txg=-tlat*sin(rlat)*cos(rlon)-tlon*sin(rlon) + tyg=-tlat*sin(rlat)*sin(rlon)+tlon*cos(rlon) + tzg=tlat*cos(rlat) + ! + ! vector in geo Cartesian + txr=r2g_matrix_inv(1,1)*txg + r2g_matrix_inv(1,2)*tyg + r2g_matrix_inv(1,3)*tzg + tyr=r2g_matrix_inv(2,1)*txg + r2g_matrix_inv(2,2)*tyg + r2g_matrix_inv(2,3)*tzg + tzr=r2g_matrix_inv(3,1)*txg + r2g_matrix_inv(3,2)*tyg + r2g_matrix_inv(3,3)*tzg + ! + ! vector in geo coordinate + tlat=-sin(glat)*cos(glon)*txr - sin(glat)*sin(glon)*tyr + cos(glat)*tzr + tlon=-sin(glon)*txr + cos(glon)*tyr + + end subroutine vector_r2g end module g_rotate_grid From b0b06451c428032e5a036a21a1cca3c87904ee33 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Tue, 12 Jun 2018 20:21:00 +0100 Subject: [PATCH 030/909] Surface currents are rotated before being passed to IFS. Magnitude stays identical, but direction is corrected. --- src/ifs_interface.F90 | 51 ++++++++++++++++++++++++++++++++----------- 1 file changed, 38 insertions(+), 13 deletions(-) diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index d20c00a7a..6c62d4e7e 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -3,7 +3,7 @@ ! IFS interface for calling FESOM2 as a subroutine. ! ! -Original code for NEMO by Kristian Mogensen, ECMWF. -! -Adapted to FESOM2 by Thomas Rackow, AWI. +! -Adapted to FESOM2 by Thomas Rackow, AWI, 2018. !----------------------------------------------------- SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & @@ -307,6 +307,8 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & USE i_arrays, ONLY : m_ice, a_ice, m_snow USE i_therm_param, ONLY : tmelt USE g_PARSUP, only: myDim_nod2D, myDim_elem2D + USE o_MESH, only: elem2D_nodes, coord_nod2D + USE g_rotate_grid, only: vector_r2g USE parinter USE scripremap USE interinfo @@ -325,10 +327,12 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & ! Local variables REAL(wp), DIMENSION(myDim_nod2D) :: zsend - REAL(wp), DIMENSION(myDim_elem2D) :: zsendUV + REAL(wp), DIMENSION(myDim_elem2D) :: zsendU, zsendV + INTEGER :: elnodes(3) + REAL(wp) :: rlon, rlat ! Loop variables - INTEGER :: n + INTEGER :: n, elem ! =================================================================== ! @@ -395,19 +399,29 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & ! =================================================================== ! ! Surface currents need to be rotated to geographical grid - ! Pack u surface current and interpolate: 'pgucur' on Gaussian grid. - zsendUV(:)=UV(1,1,1:myDim_elem2D) !UV includes eDim, leave away + ! Pack u(v) surface currents + zsendU(:)=UV(1,1,1:myDim_elem2D) + zsendV(:)=UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here - CALL parinter_fld( mype, npes, icomm, UVtogauss, & - & myDim_elem2D, zsendUV, & - & nopoints, pgucur ) + do elem=1, myDim_elem2D + + ! compute element midpoints + elnodes=elem2D_nodes(:,elem) + rlon=sum(coord_nod2D(1,elnodes))/3.0_WP + rlat=sum(coord_nod2D(2,elnodes))/3.0_WP + + ! Rotate vectors to geographical coordinates (r2g) + call vector_r2g(zsendU(elem), zsendV(elem), rlon, rlat, 0) ! 0-flag for rot. coord + end do - ! Pack v surface current and interpolate: 'pgvcur' on Gaussian grid. - zsendUV(:)=UV(2,1,1:myDim_elem2D) !UV includes eDim, leave away + ! Interpolate: 'pgucur' and 'pgvcur' on Gaussian grid. + CALL parinter_fld( mype, npes, icomm, UVtogauss, & + & myDim_elem2D, zsendU, & + & nopoints, pgucur ) CALL parinter_fld( mype, npes, icomm, UVtogauss, & - & myDim_elem2D, zsendUV, & + & myDim_elem2D, zsendV, & & nopoints, pgvcur ) #ifndef FESOM_TODO @@ -468,11 +482,15 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! Update fluxes in nemogcmcoup_data by parallel ! interpolation of the input gaussian grid data - USE par_kind + USE par_kind !in ifs_modules.F90 + USE g_PARSUP, only: myDim_nod2D, myDim_elem2D + USE o_MESH, only: elem2D_nodes, coord_nod2D + USE g_rotate_grid, only: vector_r2g IMPLICIT NONE - ! Arguments + ! =================================================================== ! + ! Arguments ========================================================= ! ! MPI communications INTEGER, INTENT(IN) :: mype,npes,icomm @@ -495,6 +513,13 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! Local variables + ! Packed receive buffer + REAL(wp), DIMENSION(myDim_nod2D) :: zrecv + REAL(wp), DIMENSION(myDim_elem2D):: zrecvU, zrecvV + + + ! =================================================================== ! + #ifdef FESOM_TODO ! Packed receive buffer From 44ff4f73b01008c5a7011e551e34703032e9e73b Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 13 Jun 2018 16:45:17 +0100 Subject: [PATCH 031/909] Added abs() statement in oce_fer_GM whenever sqrt(max(...,0)) is called. -0 is possible, crashing the sqrt() with GNU compiler. --- src/oce_fer_gm.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index a847f1820..825ac2453 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -158,7 +158,7 @@ subroutine fer_compute_C_K nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(n), n)), 1) reso=mesh_resolution(n) DO nz=1, nzmax-1 - c1=c1+hnode_new(nz,n)*(sqrt(max(bvfreq(nz,n), 0._WP))+sqrt(max(bvfreq(nz+1,n), 0._WP)))/2. + c1=c1+hnode_new(nz,n)*(sqrt(abs(max(bvfreq(nz,n), 0._WP)))+sqrt(abs(max(bvfreq(nz+1,n), 0._WP))))/2. END DO c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min scaling=1._WP From 0cd93a60aae1ff0900f2b995e3f355bbac420b3b Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 13 Jun 2018 16:51:40 +0100 Subject: [PATCH 032/909] Add -O3 option to the compile flags. --- src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0a07c08b7..21b530d32 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -56,7 +56,7 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip) # target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip -g -traceback -check all,noarg_temp_created,bounds) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) - target_compile_options(${PROJECT_NAME} PRIVATE -fdefault-real-8 -ffree-line-length-none) + target_compile_options(${PROJECT_NAME} PRIVATE -fdefault-real-8 -ffree-line-length-none -g -O3) # target_compile_options(${PROJECT_NAME} PRIVATE -fdefault-real-8 -ffree-line-length-none -mtune=native -march=native -g -Wall -c -O3 -fdefault-double-8 -fcray-pointer -fbacktrace -fconvert=big-endian -fno-range-check) endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) From 024d4d83452cfc75a3eb2b5fb37d46bc4a69e04d Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 13 Jun 2018 17:00:48 +0100 Subject: [PATCH 033/909] Added zero fluxes in _update routine, the coupled model runs a day with it. Added hardcoded integer for the number of FESOM substeps, should be in namelist in the future. --- src/ifs_interface.F90 | 93 ++++++++++++++++++++++++++++++++++--------- src/ifs_notused.F90 | 4 +- 2 files changed, 77 insertions(+), 20 deletions(-) diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index 6c62d4e7e..44e1c9b57 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -33,7 +33,12 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & INTEGER :: iatmunit ! Write to this unit LOGICAL :: lwrite + ! FESOM might perform substeps + INTEGER :: itend_fesom + INTEGER :: substeps !per IFS timestep + ! TODO hard-coded here, put in namelist + substeps=2 WRITE(0,*)'!======================================' WRITE(0,*)'! FESOM is initialized from within IFS.' @@ -42,7 +47,8 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & MPI_COMM_FESOM=icomm itini = 1 - CALL main_initialize(itend) !also sets mype and npes + CALL main_initialize(itend_fesom) !also sets mype and npes + itend=itend_fesom/substeps WRITE(0,*)'! main_initialize done. ===============' ! Set more information for the caller @@ -52,9 +58,9 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & initime = 0 WRITE(0,*)'! FESOM initial date is ', inidate ,' ======' - ! fesom timestep - zstp = dt - WRITE(0,*)'! FESOM timestep is ', real(zstp,4), 'sec' + ! fesom timestep (as seen by IFS) + zstp = real(substeps,wp)*dt + WRITE(0,*)'! FESOM timestep as seen by IFS is ', real(zstp,4), 'sec (',substeps,'xdt)' WRITE(0,*)'!======================================' END SUBROUTINE nemogcmcoup_init @@ -476,16 +482,23 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & & taux_oce, tauy_oce, taux_ice, tauy_ice, & & qs___oce, qs___ice, qns__oce, qns__ice, dqdt_ice, & & evap_tot, evap_ice, prcp_liq, prcp_sol, & - & runoff, ocerunoff, tcc, lcc, tice_atm, & + & runoffIN, ocerunoff, tcc, lcc, tice_atm, & & kt, ldebug, loceicemix, lqnsicefilt ) ! Update fluxes in nemogcmcoup_data by parallel ! interpolation of the input gaussian grid data USE par_kind !in ifs_modules.F90 - USE g_PARSUP, only: myDim_nod2D, myDim_elem2D - USE o_MESH, only: elem2D_nodes, coord_nod2D - USE g_rotate_grid, only: vector_r2g + USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex + USE o_MESH, only: elem2D_nodes, coord_nod2D + USE g_rotate_grid, only: vector_r2g + USE g_forcing_arrays, only: shortwave, longwave, prec_rain, prec_snow, runoff, evaporation, evap_no_ifrac, sublimation + USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, stress_atmoce_x, stress_atmoce_y, oce_heat_flux, ice_heat_flux + + ! all needed? + USE parinter + USE scripremap + USE interinfo IMPLICIT NONE @@ -500,7 +513,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & & taux_oce, tauy_oce, taux_ice, tauy_ice, & & qs___oce, qs___ice, qns__oce, qns__ice, & & dqdt_ice, evap_tot, evap_ice, prcp_liq, prcp_sol, & - & runoff, ocerunoff, tcc, lcc, tice_atm + & runoffIN, ocerunoff, tcc, lcc, tice_atm ! Current time step INTEGER, INTENT(in) :: kt @@ -519,6 +532,41 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! =================================================================== ! + ! Sort out incoming arrays from the IFS and put them on the ocean grid + + !1. Interpolate ocean solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & + & myDim_nod2D, zrecv ) + + if (mype==0) write(*,*) 'parinter_fld worked...' + ! Unpack ocean solar radiation + ! shortwave(:)=zrecv eDIM missing + shortwave(:)=0. + + + ! TODO + longwave(:)=0. + prec_rain(:)=0. + prec_snow(:)=0. + runoff(:)=0. + evaporation(:)=0. + if (mype==0) write(*,*) 'First group worked...' + + stress_atmice_x=0. + stress_atmice_y=0. ! push ice to the north + stress_atmoce_x=0. ! push ocean surface waters to the East + stress_atmoce_y=0. + if (mype==0) write(*,*) 'Second group worked...' + + ice_heat_flux=0. + oce_heat_flux=0. + if (mype==0) write(*,*) 'Third group worked...' + + evap_no_ifrac=0. + sublimation=0. + if (mype==0) write(*,*) 'Fourth group worked...' + !thdgr, thdgrsn, flice #ifdef FESOM_TODO @@ -1143,8 +1191,8 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & #else - WRITE(0,*)'nemogcmcoup_lim2_update not done for FESOM yet' - CALL abort + WRITE(0,*)'nemogcmcoup_lim2_update partially implemented. Proceeding...' + !CALL par_ex #endif @@ -1153,6 +1201,7 @@ END SUBROUTINE nemogcmcoup_lim2_update SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) + USE g_clock, only: yearnew, month, day_in_month IMPLICIT NONE ! Arguments @@ -1164,12 +1213,20 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) INTEGER, INTENT(OUT) :: icdate, ictime ! Local variables - - ! Advance the FESOM model 1 time step + INTEGER :: substeps + + ! Advance the FESOM model 2 time steps here, still hard-coded + substeps=2 + + WRITE(0,*)'! IFS at timestep ', istp, '. Do ', substeps , 'FESOM timesteps...' + CALL main_timestepping(substeps) + + ! Compute date and time at the end of the time step - WRITE(0,*)'Insert FESOM step here.' + icdate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 + ictime = 0 ! (time is not used) - ! Compute date and time at the end of the time step. + WRITE(0,*)'! FESOM date at end of timestep is ', icdate ,' ======' #ifdef FESOM_TODO iye = ndastp / 10000 @@ -1187,12 +1244,12 @@ END SUBROUTINE nemogcmcoup_step SUBROUTINE nemogcmcoup_final - ! Finalize the NEMO model + ! Finalize the FESOM model IMPLICIT NONE - WRITE(*,*)'Insert call to finalization of FESOM' - CALL abort + WRITE(*,*)'Finalization of FESOM from IFS.' + CALL main_finalize END SUBROUTINE nemogcmcoup_final #endif diff --git a/src/ifs_notused.F90 b/src/ifs_notused.F90 index ea1779327..617e1fb86 100644 --- a/src/ifs_notused.F90 +++ b/src/ifs_notused.F90 @@ -226,8 +226,8 @@ SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & ! Local variables - WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom.' - CALL abort + WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom. Commented ABORT. Proceeding...' + !CALL abort END SUBROUTINE nemogcmcoup_update_add From 3691e8377068f32d62166812b9bf33759170e600 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 13 Jun 2018 17:06:03 +0100 Subject: [PATCH 034/909] Added __ifsinterface precompiler flags, such that ice_alb, ice_temp, (tmp_)oce/ice_heat_flux are available as well as allocated. --- src/ice_modules.F90 | 4 ++-- src/ice_setup_step.F90 | 8 ++++---- src/ice_thermo_cpl.F90 | 2 +- src/ice_thermo_oce.F90 | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index d0859d29a..562ba2d5d 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -68,13 +68,13 @@ MODULE i_ARRAYS REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) real(kind=8),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling real(kind=8),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux real(kind=8),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux !temporary flux fields !(for flux correction) -#endif /* (__oasis) */ +#endif /* (__oasis) || (__ifsinterface) */ REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: S_oc_array, T_oc_array REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_x diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index e1bcc9947..105ca7d64 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -112,19 +112,19 @@ subroutine ice_array_setup allocate(elevation(n_size)) ! =ssh of ocean allocate(stress_iceoce_x(n_size), stress_iceoce_y(n_size)) allocate(U_w(n_size), V_w(n_size)) ! =uf and vf of ocean at surface nodes -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) allocate(ice_alb(n_size), ice_temp(n_size)) ice_alb=0. ice_temp=0. -#endif /* (__oifs) */ +#endif /* (__oifs) || (__ifsinterface) */ oce_heat_flux=0. ice_heat_flux=0. tmp_oce_heat_flux=0. tmp_ice_heat_flux=0. -#endif /* (__oasis) */ +#endif /* (__oasis) || (__ifsinterface)*/ end subroutine ice_array_setup !========================================================================== subroutine ice_timestep(step) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 32311da75..b465f33a7 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -1,4 +1,4 @@ -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) subroutine thermodynamics !=================================================================== diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 9f10397eb..659a159fe 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -17,7 +17,7 @@ subroutine cut_off() end where end subroutine cut_off -#if !defined (__oasis) +#if !defined (__oasis) && !defined (__ifsinterface) !=================================================================== ! Sea-ice thermodynamics routines ! From a0e1b394bb9144387198719af0ed4ed3bc548fd1 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 13 Jun 2018 17:09:00 +0100 Subject: [PATCH 035/909] Added __ifsinterface precompiler flags, such that sublimation and evap_no_ifrac are available as well as allocated. --- src/gen_forcing_init.F90 | 8 +++++--- src/gen_modules_forcing.F90 | 4 +++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/gen_forcing_init.F90 b/src/gen_forcing_init.F90 index ac45c12a7..dec59aff4 100755 --- a/src/gen_forcing_init.F90 +++ b/src/gen_forcing_init.F90 @@ -45,15 +45,17 @@ subroutine forcing_array_setup allocate(Tair(n2), shum(n2)) allocate(runoff(n2), evaporation(n2)) -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) allocate(sublimation(n2), evap_no_ifrac(n2)) + sublimation=0. + evap_no_ifrac=0. +#endif +#if defined (__oasis) allocate(tmp_sublimation(n2),tmp_evap_no_ifrac(n2), tmp_shortwave(n2)) allocate(atm_net_fluxes_north(nrecv), atm_net_fluxes_south(nrecv)) allocate(oce_net_fluxes_north(nrecv), oce_net_fluxes_south(nrecv)) allocate(flux_correction_north(nrecv), flux_correction_south(nrecv)) allocate(flux_correction_total(nrecv)) - sublimation=0. - evap_no_ifrac=0. tmp_sublimation = 0. tmp_evap_no_ifrac = 0. tmp_shortwave = 0. diff --git a/src/gen_modules_forcing.F90 b/src/gen_modules_forcing.F90 index 459249d8a..7a11f5929 100755 --- a/src/gen_modules_forcing.F90 +++ b/src/gen_modules_forcing.F90 @@ -70,8 +70,10 @@ module g_forcing_arrays real(kind=WP), allocatable, dimension(:) :: runoff, evaporation real(kind=WP), allocatable, dimension(:) :: cloudiness, Pair -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) real(kind=8), target, allocatable, dimension(:) :: sublimation, evap_no_ifrac +#endif +#if defined (__oasis) real(kind=8), target, allocatable, dimension(:) :: tmp_sublimation, tmp_evap_no_ifrac !temporary flux fields real(kind=8), target, allocatable, dimension(:) :: tmp_shortwave !(for flux correction) real(kind=8), allocatable, dimension(:) :: atm_net_fluxes_north, atm_net_fluxes_south From 63e1e233bb1bdc184c3500835720d83ba37d8976 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 13 Jun 2018 17:27:54 +0100 Subject: [PATCH 036/909] Add environment files (shell) for gnu and cray compiler on the ECMWF supercomputer. --- env/ecaccess.ecmwf.int/shell.cray | 9 +++++++++ env/ecaccess.ecmwf.int/shell.gnu | 7 +++++++ 2 files changed, 16 insertions(+) create mode 100644 env/ecaccess.ecmwf.int/shell.cray create mode 100644 env/ecaccess.ecmwf.int/shell.gnu diff --git a/env/ecaccess.ecmwf.int/shell.cray b/env/ecaccess.ecmwf.int/shell.cray new file mode 100644 index 000000000..0ddf47b0c --- /dev/null +++ b/env/ecaccess.ecmwf.int/shell.cray @@ -0,0 +1,9 @@ +export PATH=/home/rd/natr/cmake-3.11.2-Linux-x86_64/bin:$PATH + +module unload cray-hdf5 +module load cray-netcdf +module load cray-hdf5 + +#export CRAYPE_LINK_TYPE=dynamic + +export FC=ftn CC=cc CXX=CC diff --git a/env/ecaccess.ecmwf.int/shell.gnu b/env/ecaccess.ecmwf.int/shell.gnu new file mode 100644 index 000000000..68222570f --- /dev/null +++ b/env/ecaccess.ecmwf.int/shell.gnu @@ -0,0 +1,7 @@ +export PATH=/home/rd/natr/cmake-3.11.2-Linux-x86_64/bin:$PATH + +prgenvswitchto gnu + +module load netcdf + +export FC=ftn CC=cc CXX=CC # ftn is here wrapper for Fortran, cc and CC similarly From e2948476d5b6cc2d882e21ddfd860cfb725a030f Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 13 Jun 2018 22:39:02 +0100 Subject: [PATCH 037/909] Added all fluxes and runsgit staged --- src/ifs_interface.F90 | 242 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 209 insertions(+), 33 deletions(-) diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index 44e1c9b57..a09c5530b 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -489,12 +489,14 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! interpolation of the input gaussian grid data USE par_kind !in ifs_modules.F90 - USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex - USE o_MESH, only: elem2D_nodes, coord_nod2D - USE g_rotate_grid, only: vector_r2g - USE g_forcing_arrays, only: shortwave, longwave, prec_rain, prec_snow, runoff, evaporation, evap_no_ifrac, sublimation + USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex, eDim_nod2D + USE o_MESH, only: coord_nod2D !elem2D_nodes + USE g_rotate_grid, only: vector_r2g, vector_g2r + USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & + & evap_no_ifrac, sublimation !'longwave' only stand-alone, 'evaporation' filled later USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, stress_atmoce_x, stress_atmoce_y, oce_heat_flux, ice_heat_flux - + USE g_comm_auto ! exchange_nod does the halo exchange + ! all needed? USE parinter USE scripremap @@ -525,48 +527,221 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & LOGICAL, INTENT(IN) :: lqnsicefilt ! Local variables + INTEGER :: n + REAL(wp), parameter :: rhofwt = 1000. ! density of freshwater + ! Packed receive buffer REAL(wp), DIMENSION(myDim_nod2D) :: zrecv REAL(wp), DIMENSION(myDim_elem2D):: zrecvU, zrecvV + ! =================================================================== ! ! Sort out incoming arrays from the IFS and put them on the ocean grid - + + ! TODO + shortwave(:)=0. ! Done, updated below. What to do with shortwave over ice?? + !longwave(:)=0. ! Done. Only used in stand-alone mode. + prec_rain(:)=0. ! Done, updated below. + prec_snow(:)=0. ! Done, updated below. + evap_no_ifrac=0. ! Done, updated below. This is evap over ocean, does this correspond to evap_tot? + sublimation=0. ! Done, updated below. + ! + ice_heat_flux=0. ! Done. This is qns__ice currently. Is this the non-solar heat flux? ! non solar heat fluxes below ! (qns) + oce_heat_flux=0. ! Done. This is qns__oce currently. Is this the non-solar heat flux? + ! + runoff(:)=0. ! not used apparently. What is runoffIN, ocerunoff? + !evaporation(:)=0. + !ice_thermo_cpl.F90: !---- total evaporation (needed in oce_salt_balance.F90) + !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice + stress_atmice_x=0. ! Done, taux_ice + stress_atmice_y=0. ! Done, tauy_ice + stress_atmoce_x=0. ! Done, taux_oce + stress_atmoce_y=0. ! Done, tauy_oce + + + ! =================================================================== ! !1. Interpolate ocean solar radiation to T grid CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & & myDim_nod2D, zrecv ) - if (mype==0) write(*,*) 'parinter_fld worked...' - ! Unpack ocean solar radiation - ! shortwave(:)=zrecv eDIM missing - shortwave(:)=0. + ! Unpack ocean solar radiation, without halo + shortwave(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + ! Do the halo exchange + call exchange_nod(shortwave) + + + ! =================================================================== ! + !2. Interpolate ice solar radiation to T grid + ! DO NOTHING + + + ! =================================================================== ! + !3. Interpolate ocean non-solar radiation to T grid (is this non-solar heat flux?) + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & + & myDim_nod2D, zrecv ) + + ! Unpack ocean non-solar, without halo + oce_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(oce_heat_flux) + + + ! =================================================================== ! + !4. Interpolate non-solar radiation over ice to T grid (is this non-solar heat flux?) + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & + & myDim_nod2D, zrecv ) + + ! Unpack ice non-solar + ice_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(ice_heat_flux) + + + ! =================================================================== ! + !5. D(q)/dT to T grid + ! DO NOTHING + + + ! =================================================================== ! + !6. Interpolate total evaporation to T grid + ! =================================================================== ! + !ice_thermo_cpl.F90: total evaporation (needed in oce_salt_balance.F90) + !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice + ! =================================================================== ! + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & + & myDim_nod2D, zrecv ) + + ! Unpack total evaporation, without halo + evap_no_ifrac(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + + ! Do the halo exchange + call exchange_nod(evap_no_ifrac) + + !7. Interpolate sublimation (evaporation over ice) to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack sublimation (evaporation over ice), without halo + sublimation(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + + ! Do the halo exchange + call exchange_nod(sublimation) + ! =================================================================== ! + ! =================================================================== ! + + + ! =================================================================== ! + !8. Interpolate liquid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & + & myDim_nod2D, zrecv ) + + ! Unpack liquid precipitation, without halo + prec_rain(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + + ! Do the halo exchange + call exchange_nod(prec_rain) + + + ! =================================================================== ! + !9. Interpolate solid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & + & myDim_nod2D, zrecv ) + + ! Unpack solid precipitation, without halo + prec_snow(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + + ! Do the halo exchange + call exchange_nod(prec_snow) + + + ! =================================================================== ! + !10. Interpolate runoff to T grid + ! + !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & + ! & myDim_nod2D, zrecv ) + ! + ! Unpack runoff, without halo + !runoff(1:myDim_nod2D)=zrecv(1:myDim_nod2D) !conversion?? + ! + ! Do the halo exchange + !call exchange_nod(runoff) + ! + !11. Interpolate ocean runoff to T grid + ! + !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & + ! & myDim_nod2D, zrecv ) + ! + ! Unpack ocean runoff + ! ?? + + !12. Interpolate total cloud fractions to T grid (tcc) + ! + !13. Interpolate low cloud fractions to T grid (lcc) + + + ! =================================================================== ! + ! STRESSES + + ! OVER OCEAN: + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_oce, & + & myDim_nod2D, zrecv ) + + ! Unpack x stress atm->oce, without halo; then do halo exchange + stress_atmoce_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmoce_x) + + ! + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_oce, & + & myDim_nod2D, zrecv ) + + ! Unpack y stress atm->oce, without halo; then do halo exchange + stress_atmoce_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmoce_y) + + ! =================================================================== ! + ! OVER ICE: + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack x stress atm->ice, without halo; then do halo exchange + stress_atmice_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmice_x) + + ! + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack y stress atm->ice, without halo; then do halo exchange + stress_atmice_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmice_y) + + + ! =================================================================== ! + ! ROTATE VECTORS FROM GEOGRAPHIC TO FESOMS ROTATED GRID + + !if ((do_rotate_oce_wind .AND. do_rotate_ice_wind) .AND. rotated_grid) then + do n=1, myDim_nod2D+eDim_nod2D + call vector_g2r(stress_atmoce_x(n), stress_atmoce_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) !0-flag for rot. coord. + call vector_g2r(stress_atmice_x(n), stress_atmice_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) + end do + !do_rotate_oce_wind=.false. + !do_rotate_ice_wind=.false. + !end if - ! TODO - longwave(:)=0. - prec_rain(:)=0. - prec_snow(:)=0. - runoff(:)=0. - evaporation(:)=0. - if (mype==0) write(*,*) 'First group worked...' - - stress_atmice_x=0. - stress_atmice_y=0. ! push ice to the north - stress_atmoce_x=0. ! push ocean surface waters to the East - stress_atmoce_y=0. - if (mype==0) write(*,*) 'Second group worked...' - - ice_heat_flux=0. - oce_heat_flux=0. - if (mype==0) write(*,*) 'Third group worked...' - - evap_no_ifrac=0. - sublimation=0. - if (mype==0) write(*,*) 'Fourth group worked...' - !thdgr, thdgrsn, flice #ifdef FESOM_TODO @@ -1191,7 +1366,8 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & #else - WRITE(0,*)'nemogcmcoup_lim2_update partially implemented. Proceeding...' + !FESOM part + !WRITE(0,*)'nemogcmcoup_lim2_update partially implemented. Proceeding...' !CALL par_ex #endif From 4bf8bd835476a137b2ff892465ad5f3b6c8e59da Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 14 Jun 2018 12:17:00 +0100 Subject: [PATCH 038/909] We have a running coupled model! No par_ex in FESOM at the end of the run for clean finalization from within IFS. --- src/fvom_main.F90 | 20 +++++++++++++++++--- src/io_meandata.F90 | 38 ++++++++++++++++++++------------------ 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 6ebdf5f08..d0d524054 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -169,7 +169,7 @@ subroutine main_timestepping(nsteps) !___MODEL TIME STEPPING LOOP________________________________________________ do n=1, nsteps - + if (mype==0) write(*,*) 'do n=', n, ' to ', nsteps mstep = n if (mod(n,logfile_outfreq)==0 .and. mype==0) then write(*,*) 'FESOM =======================================================' @@ -181,16 +181,21 @@ subroutine main_timestepping(nsteps) seconds_til_now=INT(dt)*(n-1) #endif call clock + if (mype==0) write(*,*) 'called clock' call forcing_index + if (mype==0) write(*,*) 'called forcing_index' call compute_vel_nodes - + if (mype==0) write(*,*) 'called compute_vel_nodes' ! eta_n=alpha*hbar+(1.0_WP-alpha)*hbar_old !PS !___model sea-ice step__________________________________________________ if(use_ice) then call ocean2ice + if (mype==0) write(*,*) 'called ocean2ice' +#ifndef __ifsinterface call update_atm_forcing(n) - + if (mype==0) write(*,*) 'called update_atm_forcing' +#endif if (ice_steps_since_upd>=ice_ave_steps-1) then ice_update=.true. ice_steps_since_upd = 0 @@ -200,19 +205,26 @@ subroutine main_timestepping(nsteps) endif if (ice_update) call ice_timestep(n) + if (ice_update .and. mype==0) write(*,*) 'called ice_timestep' call oce_fluxes_mom ! momentum only + if (mype==0) write(*,*) 'called oce_fluxes_mom' call oce_fluxes + if (mype==0) write(*,*) 'called oce_fluxes' end if !___model ocean step____________________________________________________ call oce_timestep_ale(n) + if (mype==0) write(*,*) 'called oce_timestep_ale' call compute_diagnostics(1) + if (mype==0) write(*,*) 'called compute_diagnostics' !___prepare output______________________________________________________ call output (n) + if (mype==0) write(*,*) 'called output' call restart(n, .false., .false.) + if (mype==0) write(*,*) 'called restart' end do end subroutine main_timestepping @@ -235,6 +247,8 @@ subroutine main_finalize !___FINISH MODEL RUN________________________________________________________ if (mype==0) write(*,*) 'FESOM run is finished, updating clock' call clock_finish +#if !defined (__ifsinterface) call par_ex +#endif end subroutine main_finalize diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 72871edcb..92275f8f1 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -79,12 +79,12 @@ subroutine ini_mean_io !2D call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, 1, 'd') !DS for frontier run - call def_stream(nod2D, myDim_nod2D, 't100', 'temperature at 100m', 'C', tr_arr(12,1:myDim_nod2D,1), 1, 'd') - call def_stream(elem2D, myDim_elem2D, 'u100', 'horizontal velocity at 100m', 'm/s', uv(1,12,1:myDim_elem2D), 1, 'd') - call def_stream(elem2D, myDim_elem2D, 'v100', 'meridional velocity at 100m', 'm/s', uv(2,12,1:myDim_elem2D), 1, 'd') - call def_stream(nod2D, myDim_nod2D, 't30', 'temperature at 30m', 'C', tr_arr(5,1:myDim_nod2D,1), 1, 'd') - call def_stream(elem2D, myDim_elem2D, 'u30', 'horizontal velocity at 30m', 'm/s', uv(1,5,1:myDim_elem2D), 1, 'd') - call def_stream(elem2D, myDim_elem2D, 'v30', 'meridional velocity at 30m', 'm/s', uv(2,5,1:myDim_elem2D), 1, 'd') + !call def_stream(nod2D, myDim_nod2D, 't100', 'temperature at 100m', 'C', tr_arr(12,1:myDim_nod2D,1), 1, 'd') + !call def_stream(elem2D, myDim_elem2D, 'u100', 'horizontal velocity at 100m', 'm/s', uv(1,12,1:myDim_elem2D), 1, 'd') + !call def_stream(elem2D, myDim_elem2D, 'v100', 'meridional velocity at 100m', 'm/s', uv(2,12,1:myDim_elem2D), 1, 'd') + !call def_stream(nod2D, myDim_nod2D, 't30', 'temperature at 30m', 'C', tr_arr(5,1:myDim_nod2D,1), 1, 'd') + !call def_stream(elem2D, myDim_elem2D, 'u30', 'horizontal velocity at 30m', 'm/s', uv(1,5,1:myDim_elem2D), 1, 'd') + !call def_stream(elem2D, myDim_elem2D, 'v30', 'meridional velocity at 30m', 'm/s', uv(2,5,1:myDim_elem2D), 1, 'd') !DS call def_stream(nod2D, myDim_nod2D, 'sst', 'sea surface temperature', 'C', tr_arr(1,1:myDim_nod2D,1), 1, 'd') @@ -92,24 +92,26 @@ subroutine ini_mean_io call def_stream(nod2D, myDim_nod2D, 'vve', 'vertical velocity', 'm/s', Wvel(5,:), 1, 'm') call def_stream(nod2D, myDim_nod2D, 'uice', 'ice velocity x', 'm/s', u_ice, 1, 'm') call def_stream(nod2D, myDim_nod2D, 'vice', 'ice velocity y', 'm/s', v_ice, 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'a_ice', 'ice concentration', '%', a_ice(1:myDim_nod2D), 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'm_ice', 'ice height', 'm', m_ice(1:myDim_nod2D), 1, 'm') + call def_stream(nod2D, myDim_nod2D, 'a_ice', 'ice concentration', '%', a_ice(1:myDim_nod2D), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'm_ice', 'ice height', 'm', m_ice(1:myDim_nod2D), 1, 'd') call def_stream(nod2D, myDim_nod2D, 'm_snow','snow height', 'm', m_snow(1:myDim_nod2D), 1, 'm') call def_stream(nod2D, myDim_nod2D, 'MLD1', 'Mixed Layer Depth', 'm', MLD1(1:myDim_nod2D), 1, 'm') call def_stream(nod2D, myDim_nod2D, 'MLD2', 'Mixed Layer Depth', 'm', MLD2(1:myDim_nod2D), 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'fh', 'heat flux', 'W', heat_flux(:), 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'fw', 'fresh water flux', 'm/s', water_flux(:), 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'atmice_x', 'stress atmice x', 'N/m2', stress_atmice_x(:), 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'atmice_y', 'stress atmice y', 'N/m2', stress_atmice_y(:), 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'atmoce_x', 'stress atmoce x', 'N/m2', stress_atmoce_x(:), 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'atmoce_y', 'stress atmoce y', 'N/m2', stress_atmoce_y(:), 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'iceoce_x', 'stress iceoce x', 'N/m2', stress_iceoce_x(:), 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'iceoce_y', 'stress iceoce y', 'N/m2', stress_iceoce_y(:), 1, 'm') + call def_stream(nod2D, myDim_nod2D, 'fh', 'heat flux', 'W', heat_flux(:), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'fw', 'fresh water flux', 'm/s', water_flux(:), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'atmice_x', 'stress atmice x', 'N/m2', stress_atmice_x(:), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'atmice_y', 'stress atmice y', 'N/m2', stress_atmice_y(:), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'atmoce_x', 'stress atmoce x', 'N/m2', stress_atmoce_x(:), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'atmoce_y', 'stress atmoce y', 'N/m2', stress_atmoce_y(:), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'iceoce_x', 'stress iceoce x', 'N/m2', stress_iceoce_x(:), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'iceoce_y', 'stress iceoce y', 'N/m2', stress_iceoce_y(:), 1, 'd') call def_stream(nod2D, myDim_nod2D, 'alpha', 'thermal expansion', 'none', sw_alpha(1,:), 1, 'm') call def_stream(nod2D, myDim_nod2D, 'beta', 'saline contraction', 'none', sw_beta (1,:), 1, 'm') call def_stream(nod2D, myDim_nod2D, 'runoff', 'river runoff', 'none', runoff(:), 1, 'y') - call def_stream(nod2D, myDim_nod2D, 'evap', 'evaporation', 'm/s', evaporation(:), 1, 'm') - call def_stream(nod2D, myDim_nod2D, 'prec', 'precicipation rain', 'm/s', prec_rain(:), 1, 'm') + call def_stream(nod2D, myDim_nod2D, 'evap', 'evaporation', 'm/s', evaporation(:), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'prec', 'precicipation rain', 'm/s', prec_rain(:), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'snow', 'precicipation snow', 'm/s', prec_snow(:), 1, 'd') + call def_stream(nod2D, myDim_nod2D, 'shortwave', 'shortwave', 'W/m2', shortwave(:), 1, 'd') !check unit #if defined (__oifs) call def_stream(nod2D, myDim_nod2D, 'alb', 'ice albedo', 'none', ice_alb(:), 1, 'm') #endif From e7a2db0e375d43713b40cddae69b4aa815184b67 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 14 Jun 2018 12:27:30 +0100 Subject: [PATCH 039/909] Cleanup of fvom_main.F90. Remove write statements for debugging. --- src/fvom_main.F90 | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index d0d524054..6531bb49a 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -134,7 +134,6 @@ end subroutine main_initialize subroutine main_timestepping(nsteps) ! Split main into three major parts - ! Coded by Thomas Rackow, 2018 !---------------------------------- USE o_MESH USE o_ARRAYS @@ -181,20 +180,15 @@ subroutine main_timestepping(nsteps) seconds_til_now=INT(dt)*(n-1) #endif call clock - if (mype==0) write(*,*) 'called clock' call forcing_index - if (mype==0) write(*,*) 'called forcing_index' call compute_vel_nodes - if (mype==0) write(*,*) 'called compute_vel_nodes' ! eta_n=alpha*hbar+(1.0_WP-alpha)*hbar_old !PS !___model sea-ice step__________________________________________________ if(use_ice) then call ocean2ice - if (mype==0) write(*,*) 'called ocean2ice' #ifndef __ifsinterface call update_atm_forcing(n) - if (mype==0) write(*,*) 'called update_atm_forcing' #endif if (ice_steps_since_upd>=ice_ave_steps-1) then ice_update=.true. @@ -205,26 +199,19 @@ subroutine main_timestepping(nsteps) endif if (ice_update) call ice_timestep(n) - if (ice_update .and. mype==0) write(*,*) 'called ice_timestep' call oce_fluxes_mom ! momentum only - if (mype==0) write(*,*) 'called oce_fluxes_mom' call oce_fluxes - if (mype==0) write(*,*) 'called oce_fluxes' end if !___model ocean step____________________________________________________ call oce_timestep_ale(n) - if (mype==0) write(*,*) 'called oce_timestep_ale' call compute_diagnostics(1) - if (mype==0) write(*,*) 'called compute_diagnostics' !___prepare output______________________________________________________ call output (n) - if (mype==0) write(*,*) 'called output' call restart(n, .false., .false.) - if (mype==0) write(*,*) 'called restart' end do end subroutine main_timestepping @@ -236,7 +223,6 @@ end subroutine main_timestepping subroutine main_finalize ! Split main into three major parts - ! Coded by Thomas Rackow, 2018 !---------------------------------- USE g_PARSUP, only: mype, par_ex From 996f7bbae527af939596e4efd78d16a10dcfaab4 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Thu, 14 Jun 2018 17:45:25 +0100 Subject: [PATCH 040/909] Put namelist unit in a parameter and change it for __ifsinterface to 21. --- src/gen_model_setup.F90 | 59 ++++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index d10f8d642..b6997c42c 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -30,22 +30,27 @@ subroutine read_namelist implicit none character(len=100) :: nmlfile +#ifdef __ifsinterface + integer, parameter :: iunit = 21 +#else + integer, parameter :: iunit = 20 +#endif namelist /clockinit/ timenew, daynew, yearnew nmlfile ='namelist.config' ! name of general configuration namelist file - open (20,file=nmlfile) - read (20,NML=modelname) - read (20,NML=timestep) - read (20,NML=clockinit) - read (20,NML=paths) - read (20,NML=initialization) - read (20,NML=inout) - read (20,NML=mesh_def) - read (20,NML=geometry) - read (20,NML=calendar) - read (20,NML=run_config) -!!$ read (20,NML=machine) - close (20) + open (iunit,file=nmlfile) + read (iunit,NML=modelname) + read (iunit,NML=timestep) + read (iunit,NML=clockinit) + read (iunit,NML=paths) + read (iunit,NML=initialization) + read (iunit,NML=inout) + read (iunit,NML=mesh_def) + read (iunit,NML=geometry) + read (iunit,NML=calendar) + read (iunit,NML=run_config) +!!$ read (iunit,NML=machine) + close (iunit) ! ========== ! compute dt ! ========== @@ -62,25 +67,25 @@ subroutine read_namelist ! ================================= nmlfile ='namelist.oce' ! name of ocean namelist file - open (20,file=nmlfile) - read (20,NML=oce_dyn) - read (20,NML=oce_tra) - close (20) + open (iunit,file=nmlfile) + read (iunit,NML=oce_dyn) + read (iunit,NML=oce_tra) + close (iunit) nmlfile ='namelist.forcing' ! name of forcing namelist file - open (20,file=nmlfile) - read (20,NML=forcing_exchange_coeff) - read (20,NML=forcing_source) - read (20,NML=forcing_bulk) - read (20,NML=land_ice) - close (20) + open (iunit,file=nmlfile) + read (iunit,NML=forcing_exchange_coeff) + read (iunit,NML=forcing_source) + read (iunit,NML=forcing_bulk) + read (iunit,NML=land_ice) + close (iunit) if(use_ice) then nmlfile ='namelist.ice' ! name of ice namelist file - open (20,file=nmlfile) - read (20,NML=ice_dyn) - read (20,NML=ice_therm) - close (20) + open (iunit,file=nmlfile) + read (iunit,NML=ice_dyn) + read (iunit,NML=ice_therm) + close (iunit) endif if(mype==0) write(*,*) 'Namelist files are read in' From 262b577c100edbdd344ff35dc284bd8492a62bff Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 19 Nov 2018 17:00:35 +0000 Subject: [PATCH 041/909] Cleanup of main. --- src/fvom_main.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 6531bb49a..ec385a95b 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -31,7 +31,6 @@ end program main subroutine main_initialize(nsteps) ! Split main into three major parts - ! Coded by Thomas Rackow, 2018 !---------------------------------- USE g_PARSUP, only: mype, par_init USE i_PARAM, only: ice_ave_steps, whichEVP From 77cdeeb61a5dff407016e5e9784e855f0642f8fe Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Tue, 20 Nov 2018 11:00:32 +0000 Subject: [PATCH 042/909] Added a new case 'Cray' with NEMOs compile options to CMakeLists.txt in src/. The model compiles successfully with these options, some might not be necessary in our case. --- src/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 21b530d32..0962cd9a4 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -58,6 +58,8 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -fdefault-real-8 -ffree-line-length-none -g -O3) # target_compile_options(${PROJECT_NAME} PRIVATE -fdefault-real-8 -ffree-line-length-none -mtune=native -march=native -g -Wall -c -O3 -fdefault-double-8 -fcray-pointer -fbacktrace -fconvert=big-endian -fno-range-check) +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64) endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) From a9a469688955285d449cec8639882bd912beaba4 Mon Sep 17 00:00:00 2001 From: Kristian Mogensen Date: Wed, 21 Nov 2018 14:52:01 +0000 Subject: [PATCH 043/909] Added initialization of d_eta + no currents in coupling. --- src/ifs_interface.F90 | 19 ++++++++++++++++--- src/oce_setup_step.F90 | 1 + 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index a09c5530b..4e66cb2ad 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -36,6 +36,7 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & ! FESOM might perform substeps INTEGER :: itend_fesom INTEGER :: substeps !per IFS timestep + INTEGER :: i ! TODO hard-coded here, put in namelist substeps=2 @@ -163,7 +164,7 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) omask(:)= 1 ! all points are ocean points - ogloind = myList_nod2D ! global index for local point number + ogloind(1:myDim_nod2d)= myList_nod2D(1:myDim_nod2d) ! global index for local point number ! Could be helpful later: ! Replace global numbering with a local one @@ -238,7 +239,7 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) omask(:)= 1 ! all elements are in the ocean - ogloind = myList_elem2D ! global index for local element number + ogloind(1:myDim_elem2D) = myList_elem2D(1:myDim_elem2D) ! global index for local element number ! Read the interpolation weights and setup the parallel interpolation ! from atmosphere Gaussian grid to ocean UV-grid @@ -338,7 +339,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & REAL(wp) :: rlon, rlat ! Loop variables - INTEGER :: n, elem + INTEGER :: n, elem, ierr ! =================================================================== ! @@ -421,6 +422,11 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & end do +#ifdef FESOM_TODO + + ! We need to sort out the non-unique global index before we + ! can couple currents + ! Interpolate: 'pgucur' and 'pgvcur' on Gaussian grid. CALL parinter_fld( mype, npes, icomm, UVtogauss, & & myDim_elem2D, zsendU, & @@ -430,6 +436,13 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & & myDim_elem2D, zsendV, & & nopoints, pgvcur ) +#else + + pgucur(:) = 0.0 + pgvcur(:) = 0.0 + +#endif + #ifndef FESOM_TODO WRITE(0,*)'Everything implemented except ice level temperatures (licelvls).' diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 8b409bbc6..3e964e349 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -120,6 +120,7 @@ SUBROUTINE array_setup ! elevation and its rhs ! ================ allocate(eta_n(node_size), d_eta(node_size)) +d_eta(:)=0.0 allocate(ssh_rhs(node_size)) ! ================ ! Monin-Obukhov From 684cf746abf1676f5472e525acca34043dc23ab8 Mon Sep 17 00:00:00 2001 From: Kristian Mogensen Date: Thu, 22 Nov 2018 13:40:54 +0000 Subject: [PATCH 044/909] Added substep namelist + cleanup of print. --- src/ifs_interface.F90 | 45 +++++++++++++++++++++++++++++++++---------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index 4e66cb2ad..6ecc3f0ef 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -6,15 +6,21 @@ ! -Adapted to FESOM2 by Thomas Rackow, AWI, 2018. !----------------------------------------------------- +MODULE nemogcmcoup_steps + INTEGER :: substeps !per IFS timestep +END MODULE nemogcmcoup_steps + SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & & lwaveonly, iatmunit, lwrite ) ! Initialize the FESOM model for single executable coupling USE par_kind !in ifs_modules.F90 - USE g_PARSUP, only: MPI_COMM_FESOM + USE g_PARSUP, only: MPI_COMM_FESOM, mype USE g_config, only: dt USE g_clock, only: timenew, daynew, yearnew, month, day_in_month + USE nemogcmcoup_steps, ONLY : substeps + IMPLICIT NONE ! Input arguments @@ -35,34 +41,46 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & LOGICAL :: lwrite ! FESOM might perform substeps INTEGER :: itend_fesom - INTEGER :: substeps !per IFS timestep INTEGER :: i + NAMELIST/namfesomstep/substeps ! TODO hard-coded here, put in namelist substeps=2 + OPEN(9,file='namfesomstep.in') + READ(9,namfesomstep) + CLOSE(9) + if(mype==0) then WRITE(0,*)'!======================================' WRITE(0,*)'! FESOM is initialized from within IFS.' WRITE(0,*)'! get MPI_COMM_FESOM. =================' + endif MPI_COMM_FESOM=icomm itini = 1 CALL main_initialize(itend_fesom) !also sets mype and npes itend=itend_fesom/substeps + if(mype==0) then WRITE(0,*)'! main_initialize done. ===============' + endif ! Set more information for the caller ! initial date and time (time is not used) inidate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 initime = 0 + if(mype==0) then WRITE(0,*)'! FESOM initial date is ', inidate ,' ======' - + WRITE(0,*)'! FESOM substeps are ', substeps ,' ======' + endif + ! fesom timestep (as seen by IFS) - zstp = real(substeps,wp)*dt + zstp = REAL(substeps,wp)*dt + if(mype==0) then WRITE(0,*)'! FESOM timestep as seen by IFS is ', real(zstp,4), 'sec (',substeps,'xdt)' WRITE(0,*)'!======================================' + endif END SUBROUTINE nemogcmcoup_init @@ -153,10 +171,13 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & & mpi_integer, mpi_sum, icomm, ierr) + if(mype==0) then WRITE(0,*)'!======================================' WRITE(0,*)'! SCALARS =============================' WRITE(0,*)'Update FESOM global scalar points' + endif + noglopoints=nod2D nopoints=myDim_nod2d @@ -227,10 +248,12 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & DEALLOCATE(omask,ogloind) + if(mype==0) then WRITE(0,*)'!======================================' WRITE(0,*)'! VECTORS =============================' WRITE(0,*)'Update FESOM global vector points' + endif noglopoints=elem2D nopoints=myDim_elem2D @@ -445,7 +468,9 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & #ifndef FESOM_TODO + if(mype==0) then WRITE(0,*)'Everything implemented except ice level temperatures (licelvls).' + endif #else @@ -1391,6 +1416,8 @@ END SUBROUTINE nemogcmcoup_lim2_update SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) USE g_clock, only: yearnew, month, day_in_month + USE g_PARSUP, only: mype + USE nemogcmcoup_steps, ONLY : substeps IMPLICIT NONE ! Arguments @@ -1401,13 +1428,9 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) ! Data and time from NEMO INTEGER, INTENT(OUT) :: icdate, ictime - ! Local variables - INTEGER :: substeps - - ! Advance the FESOM model 2 time steps here, still hard-coded - substeps=2 - + if(mype==0) then WRITE(0,*)'! IFS at timestep ', istp, '. Do ', substeps , 'FESOM timesteps...' + endif CALL main_timestepping(substeps) ! Compute date and time at the end of the time step @@ -1415,7 +1438,9 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) icdate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 ictime = 0 ! (time is not used) + if(mype==0) then WRITE(0,*)'! FESOM date at end of timestep is ', icdate ,' ======' + endif #ifdef FESOM_TODO iye = ndastp / 10000 From 40f32e15f8fcf83eae1d55b6aeb9a575730a3f42 Mon Sep 17 00:00:00 2001 From: Kristian Mogensen Date: Thu, 22 Nov 2018 13:53:42 +0000 Subject: [PATCH 045/909] Remove more printout from mype/=0 --- src/ifs_interface.F90 | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index 6ecc3f0ef..56a63c35e 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -50,18 +50,14 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & READ(9,namfesomstep) CLOSE(9) - if(mype==0) then - WRITE(0,*)'!======================================' - WRITE(0,*)'! FESOM is initialized from within IFS.' - - WRITE(0,*)'! get MPI_COMM_FESOM. =================' - endif MPI_COMM_FESOM=icomm - itini = 1 - CALL main_initialize(itend_fesom) !also sets mype and npes + CALL main_initialize(itend_fesom) !also sets mype and npes itend=itend_fesom/substeps if(mype==0) then + WRITE(0,*)'!======================================' + WRITE(0,*)'! FESOM is initialized from within IFS.' + WRITE(0,*)'! get MPI_COMM_FESOM. =================' WRITE(0,*)'! main_initialize done. ===============' endif @@ -1458,11 +1454,15 @@ END SUBROUTINE nemogcmcoup_step SUBROUTINE nemogcmcoup_final + USE g_PARSUP, only: mype + ! Finalize the FESOM model IMPLICIT NONE + if(mype==0) then WRITE(*,*)'Finalization of FESOM from IFS.' + endif CALL main_finalize END SUBROUTINE nemogcmcoup_final From ebb8f6f8a1a773d39732b9b4bc9cec5609c4021c Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 22 Nov 2018 17:44:56 +0000 Subject: [PATCH 046/909] Change sign of evap_no_ifrac and sublimation in ifs_interface because the total evaporation showed wrong sign when compared to uncoupled runs with CORE2 forcing. --- src/ifs_interface.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index 56a63c35e..d8e600438 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -655,7 +655,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & & myDim_nod2D, zrecv ) ! Unpack total evaporation, without halo - evap_no_ifrac(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + evap_no_ifrac(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign ! Do the halo exchange call exchange_nod(evap_no_ifrac) @@ -666,7 +666,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & & myDim_nod2D, zrecv ) ! Unpack sublimation (evaporation over ice), without halo - sublimation(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + sublimation(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign ! Do the halo exchange call exchange_nod(sublimation) From 76c830da7802ac05cbe25fbc07b68f7177859588 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 4 Jul 2020 18:16:31 +0200 Subject: [PATCH 047/909] compiles on mistral! --- config/namelist.config | 1 + src/fvom_main.F90 | 17 ++++--- src/gen_modules_config.F90 | 3 +- src/ifs_interface.F90 | 95 ++++++++++++++++++++++++-------------- src/ifs_modules.F90 | 2 +- src/ifs_notused.F90 | 24 ++++++---- 6 files changed, 90 insertions(+), 52 deletions(-) diff --git a/config/namelist.config b/config/namelist.config index 5c1d25e0d..3b51529b1 100755 --- a/config/namelist.config +++ b/config/namelist.config @@ -26,6 +26,7 @@ ResultPath='../results/linfs/zmeanval/3/' restart_length=1 !only required for d,h,s cases, y, m take 1 restart_length_unit='y' !output period: y, d, h, s logfile_outfreq=960 !in logfile info. output frequency, # steps +ignore_timecheck=.true. / &ale_def diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 164f86f25..7a60ec082 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -40,7 +40,7 @@ subroutine main_initialize(nsteps) USE i_PARAM, only: ice_ave_steps, whichEVP use i_ARRAYS, only: ice_steps_since_upd, ice_update use g_clock, only: clock_init, clock_newyear -use g_config, only: use_ice, r_restart, use_ALE +use g_config ! , only: use_ice, r_restart, use_ALE use g_comm_auto use g_forcing_arrays use io_RESTART, only: restart @@ -188,7 +188,8 @@ end subroutine main_initialize subroutine main_timestepping(nsteps) ! Split main into three major parts !---------------------------------- - USE o_MESH + !USE o_MESH + USE MOD_MESH USE o_ARRAYS USE o_PARAM USE g_PARSUP @@ -196,7 +197,7 @@ subroutine main_timestepping(nsteps) use i_ARRAYS use g_clock use g_config - use g_forcing_index + !use g_forcing_index use g_comm_auto use g_forcing_arrays use io_RESTART @@ -212,6 +213,7 @@ subroutine main_timestepping(nsteps) integer, INTENT(IN) :: nsteps real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing + type(t_mesh), target, save :: mesh !===================== ! Time stepping @@ -326,17 +328,20 @@ subroutine main_finalize ! Split main into three major parts !---------------------------------- - USE g_PARSUP, only: mype, par_ex + USE g_PARSUP, only: mype, npes, par_ex, MPI_COMM_FESOM, MPIERR, MPI_Wtime use g_clock, only: clock_finish + USE o_PARAM, only: WP IMPLICIT NONE + real(kind=WP) :: t0, t1 + !___FINISH MODEL RUN________________________________________________________ call MPI_Barrier(MPI_COMM_FESOM, MPIERR) if (mype==0) then t1 = MPI_Wtime() - runtime_alltimesteps = real(t1-t0,real32) + !runtime_alltimesteps = real(t1-t0,real32) write(*,*) 'FESOM Run is finished, updating clock' endif @@ -387,7 +392,7 @@ subroutine main_finalize ! write(*,*) ' Runtime for all timesteps : ',runtime_alltimesteps,' sec' write(*,*) '============================================' write(*,*) - end if + !end if ! call clock_finish # why comment in master? call clock_finish #if !defined (__ifsinterface) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index 0df8b4840..73a7b6257 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -32,8 +32,9 @@ module g_config integer :: logfile_outfreq=1 ! logfile info. outp. freq., # steps integer :: restart_length=1 character :: restart_length_unit='m' + logical :: ignore_timecheck=.false. !ignore time consistency check? (restart&clock) - namelist /restart_log/ restart_length, restart_length_unit, logfile_outfreq + namelist /restart_log/ restart_length, restart_length_unit, logfile_outfreq, ignore_timecheck ! *** ale_def *** character(20) :: which_ALE='linfs' ! 'zlevel', 'zstar', 'zstar-weighted', 'ztilde' diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index d8e600438..045dee523 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -30,7 +30,7 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & ! Initial date (e.g. 20170906), time, initial timestep and final time step INTEGER, INTENT(OUT) :: inidate, initime, itini, itend ! Length of the time step - REAL(wp), INTENT(OUT) :: zstp + REAL(wpIFS), INTENT(OUT) :: zstp ! inherited from interface to NEMO, not used here: ! Coupling to waves only @@ -72,7 +72,7 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & endif ! fesom timestep (as seen by IFS) - zstp = REAL(substeps,wp)*dt + zstp = REAL(substeps,wpIFS)*dt if(mype==0) then WRITE(0,*)'! FESOM timestep as seen by IFS is ', real(zstp,4), 'sec (',substeps,'xdt)' WRITE(0,*)'!======================================' @@ -85,8 +85,10 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & & npoints, nlocmsk, ngloind ) ! FESOM modules - USE g_PARSUP, only: mype, npes, myDim_nod2D, myDim_elem2D, myList_nod2D, myList_elem2D - USE o_MESH, only: nod2D, elem2D + USE g_PARSUP, only: mype, npes, myDim_nod2D, eDim_nod2D, myDim_elem2D, eDim_elem2D, eXDim_elem2D, & + myDim_edge2D, eDim_edge2D, myList_nod2D, myList_elem2D + USE MOD_MESH + !USE o_MESH, only: nod2D, elem2D ! Initialize single executable coupling USE parinter @@ -106,6 +108,7 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & INTEGER :: iunit = 0 ! Local variables + type(t_mesh), target, save :: mesh ! Namelist containing the file names of the weights CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & @@ -137,6 +140,9 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & INTEGER :: i,j,k,ierr LOGICAL :: lexists + ! associate the mesh +#include "associate_mesh.h" + ! here FESOM knows about the (total number of) MPI tasks @@ -332,8 +338,11 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & USE o_ARRAYS, ONLY : tr_arr, UV USE i_arrays, ONLY : m_ice, a_ice, m_snow USE i_therm_param, ONLY : tmelt - USE g_PARSUP, only: myDim_nod2D, myDim_elem2D - USE o_MESH, only: elem2D_nodes, coord_nod2D + !USE o_PARAM, ONLY : WP + USE g_PARSUP, only: myDim_nod2D,eDim_nod2D, myDim_elem2D,eDim_elem2D,eXDim_elem2D + !USE o_MESH, only: elem2D_nodes, coord_nod2D + USE MOD_MESH + USE g_rotate_grid, only: vector_r2g USE parinter USE scripremap @@ -342,24 +351,33 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & IMPLICIT NONE ! Arguments - REAL(wp), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur - REAL(wp), DIMENSION(nopoints,3) :: pgistl + REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur + REAL(wpIFS), DIMENSION(nopoints,3) :: pgistl LOGICAL :: licelvls + type(t_mesh), target, save :: mesh + real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D + integer, dimension(:,:) , pointer :: elem2D_nodes + ! Message passing information INTEGER, INTENT(IN) :: mype, npes, icomm ! Number Gaussian grid points INTEGER, INTENT(IN) :: nopoints ! Local variables - REAL(wp), DIMENSION(myDim_nod2D) :: zsend - REAL(wp), DIMENSION(myDim_elem2D) :: zsendU, zsendV + REAL(wpIFS), DIMENSION(myDim_nod2D) :: zsend + REAL(wpIFS), DIMENSION(myDim_elem2D) :: zsendU, zsendV INTEGER :: elnodes(3) - REAL(wp) :: rlon, rlat + REAL(wpIFS) :: rlon, rlat ! Loop variables INTEGER :: n, elem, ierr + !#include "associate_mesh.h" + ! associate what is needed only + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D + elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes + ! =================================================================== ! ! Pack SST data and convert to K. 'pgsst' is on Gauss grid. @@ -433,8 +451,8 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & ! compute element midpoints elnodes=elem2D_nodes(:,elem) - rlon=sum(coord_nod2D(1,elnodes))/3.0_WP - rlat=sum(coord_nod2D(2,elnodes))/3.0_WP + rlon=sum(coord_nod2D(1,elnodes))/3.0_wpIFS + rlat=sum(coord_nod2D(2,elnodes))/3.0_wpIFS ! Rotate vectors to geographical coordinates (r2g) call vector_r2g(zsendU(elem), zsendV(elem), rlon, rlat, 0) ! 0-flag for rot. coord @@ -523,12 +541,15 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! interpolation of the input gaussian grid data USE par_kind !in ifs_modules.F90 - USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex, eDim_nod2D - USE o_MESH, only: coord_nod2D !elem2D_nodes + USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex, eDim_nod2D, eDim_elem2D, eXDim_elem2D, myDim_edge2D, eDim_edge2D + !USE o_MESH, only: coord_nod2D !elem2D_nodes + USE MOD_MESH + !USE o_PARAM, ONLY : WP, use wpIFS from par_kind (IFS) USE g_rotate_grid, only: vector_r2g, vector_g2r USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & & evap_no_ifrac, sublimation !'longwave' only stand-alone, 'evaporation' filled later - USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, stress_atmoce_x, stress_atmoce_y, oce_heat_flux, ice_heat_flux + USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, oce_heat_flux, ice_heat_flux + USE o_ARRAYS, only: stress_atmoce_x, stress_atmoce_y USE g_comm_auto ! exchange_nod does the halo exchange ! all needed? @@ -545,7 +566,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & INTEGER, INTENT(IN) :: mype,npes,icomm ! Fluxes on the Gaussian grid. INTEGER, INTENT(IN) :: npoints - REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & & taux_oce, tauy_oce, taux_ice, tauy_ice, & & qs___oce, qs___ice, qns__oce, qns__ice, & & dqdt_ice, evap_tot, evap_ice, prcp_liq, prcp_sol, & @@ -560,16 +581,22 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! QNS ice filter switch (requires tice_atm to be sent) LOGICAL, INTENT(IN) :: lqnsicefilt + type(t_mesh), target, save :: mesh + ! Local variables INTEGER :: n - REAL(wp), parameter :: rhofwt = 1000. ! density of freshwater + REAL(wpIFS), parameter :: rhofwt = 1000. ! density of freshwater ! Packed receive buffer - REAL(wp), DIMENSION(myDim_nod2D) :: zrecv - REAL(wp), DIMENSION(myDim_elem2D):: zrecvU, zrecvV + REAL(wpIFS), DIMENSION(myDim_nod2D) :: zrecv + REAL(wpIFS), DIMENSION(myDim_elem2D):: zrecvU, zrecvV + !#include "associate_mesh.h" + ! associate only the necessary things + real(kind=WP), dimension(:,:), pointer :: coord_nod2D + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D ! =================================================================== ! ! Sort out incoming arrays from the IFS and put them on the ocean grid @@ -780,27 +807,27 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & #ifdef FESOM_TODO ! Packed receive buffer - REAL(wp), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zrecv + REAL(wpIFS), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zrecv ! Unpacked fields on ORCA grids - REAL(wp), DIMENSION(jpi,jpj) :: zqs___oce, zqs___ice, zqns__oce, zqns__ice - REAL(wp), DIMENSION(jpi,jpj) :: zdqdt_ice, zevap_tot, zevap_ice, zprcp_liq, zprcp_sol - REAL(wp), DIMENSION(jpi,jpj) :: zrunoff, zocerunoff - REAL(wp), DIMENSION(jpi,jpj) :: ztmp, zicefr + REAL(wpIFS), DIMENSION(jpi,jpj) :: zqs___oce, zqs___ice, zqns__oce, zqns__ice + REAL(wpIFS), DIMENSION(jpi,jpj) :: zdqdt_ice, zevap_tot, zevap_ice, zprcp_liq, zprcp_sol + REAL(wpIFS), DIMENSION(jpi,jpj) :: zrunoff, zocerunoff + REAL(wpIFS), DIMENSION(jpi,jpj) :: ztmp, zicefr ! Arrays for rotation - REAL(wp), DIMENSION(jpi,jpj) :: zuu,zvu,zuv,zvv,zutau,zvtau + REAL(wpIFS), DIMENSION(jpi,jpj) :: zuu,zvu,zuv,zvv,zutau,zvtau ! Lead fraction for both LIM2/LIM3 - REAL(wp), DIMENSION(jpi,jpj) :: zfrld + REAL(wpIFS), DIMENSION(jpi,jpj) :: zfrld ! Mask for masking for I grid - REAL(wp) :: zmsksum + REAL(wpIFS) :: zmsksum ! For summing up LIM3 contributions to ice temperature - REAL(wp) :: zval,zweig + REAL(wpIFS) :: zval,zweig ! Loop variables INTEGER :: ji,jj,jk,jl ! netCDF debugging output variables CHARACTER(len=128) :: cdoutfile INTEGER :: inum - REAL(wp) :: zhook_handle ! Dr Hook handle + REAL(wpIFS) :: zhook_handle ! Dr Hook handle IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',0,zhook_handle) IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_update') @@ -1039,7 +1066,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & zfrld(:,:) = frld(:,:) zicefr(:,:) = 1 - zfrld(:,:) #else - zicefr(:,:) = 0.0_wp + zicefr(:,:) = 0.0_wpIFS DO jl = 1, jpl zicefr(:,:) = zicefr(:,:) + a_i(:,:,jl) ENDDO @@ -1113,7 +1140,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ENDDO CALL lbc_lnk(zsatmist, 'T', 1.0) - zsqns_ice_add(:,:) = 0.0_wp + zsqns_ice_add(:,:) = 0.0_wpIFS ! Use the dqns_ice filter @@ -1142,7 +1169,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & CALL lbc_lnk(ztmp, 'T', 1.0) #endif - WHERE ( zicefr(:,:) > .001_wp ) + WHERE ( zicefr(:,:) > .001_wpIFS ) zsqns_ice_add(:,:) = zsdqdns_ice(:,:) * ( ztmp(:,:) - zsatmist(:,:) ) END WHERE @@ -1443,7 +1470,7 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) imo = ndastp / 100 - iye * 100 ida = MOD( ndastp, 100 ) CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) - zjul = zjul + ( nsec_day + 0.5_wp * rdttra(1) ) / 86400.0_wp + zjul = zjul + ( nsec_day + 0.5_wpIFS * rdttra(1) ) / 86400.0_wpIFS CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) icdate = iye * 10000 + imo * 100 + ida ictime = ihh * 10000 + imm * 100 + iss diff --git a/src/ifs_modules.F90 b/src/ifs_modules.F90 index 9d6b867fa..8f52ee153 100644 --- a/src/ifs_modules.F90 +++ b/src/ifs_modules.F90 @@ -10,7 +10,7 @@ MODULE par_kind INTEGER, PUBLIC, PARAMETER :: & !: Floating point section sp = SELECTED_REAL_KIND( 6, 37), & !: single precision (real 4) dp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) - wp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + wpIFS = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) ik = SELECTED_INT_KIND(6) !: integer precision END MODULE par_kind diff --git a/src/ifs_notused.F90 b/src/ifs_notused.F90 index 617e1fb86..b483bf962 100644 --- a/src/ifs_notused.F90 +++ b/src/ifs_notused.F90 @@ -44,7 +44,7 @@ SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & IMPLICIT NONE ! Arguments - REAL(wp), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d + REAL(wpIFS), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d ! Message passing information INTEGER, INTENT(IN) :: mype, npes, icomm ! Number Gaussian grid points @@ -79,7 +79,7 @@ SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & ! Number Gaussian grid points INTEGER, INTENT(IN) :: nopoints ! Local arrays of sst, ice and currents - REAL(wp), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur + REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur ! Local variables @@ -103,7 +103,7 @@ SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & IMPLICIT NONE ! Arguments - REAL(wp), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & + REAL(wpIFS), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & & pgtem300, pgsal300 ! Message passing information INTEGER, INTENT(IN) :: mype, npes, icomm @@ -159,11 +159,15 @@ SUBROUTINE nemogcmcoup_mlinit( mype, npes, icomm, & INTEGER, INTENT(IN) :: mype,npes,icomm ! Grid information INTEGER, INTENT(INOUT) :: nlev, nopoints - REAL(wp), INTENT(OUT), DIMENSION(nlev) :: pdep - REAL(wp), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask + REAL(wpIFS), INTENT(OUT), DIMENSION(nlev) :: pdep + REAL(wpIFS), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask ! Local variables + ! dummy argument with explicit INTENT(OUT) declaration needs an explicit value + pdep=0. + pmask=0. + WRITE(0,*)'nemogcmcoup_mlinit should not be called when coupling to fesom.' CALL abort @@ -187,7 +191,7 @@ SUBROUTINE nemogcmcoup_update( mype, npes, icomm, & INTEGER, INTENT(IN) :: mype,npes,icomm ! Fluxes on the Gaussian grid. INTEGER, INTENT(IN) :: npoints - REAL(wp), DIMENSION(npoints), intent(IN) :: & + REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & & pgutau, pgvtau, pgqsr, pgqns, pgemp ! Current time step INTEGER, INTENT(in) :: kt @@ -217,7 +221,7 @@ SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & INTEGER, INTENT(IN) :: mype,npes,icomm ! Input on the Gaussian grid. INTEGER, INTENT(IN) :: npoints - REAL(wp), DIMENSION(npoints), intent(IN) :: & + REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & & pgsst, pgtsk ! Current time step INTEGER, INTENT(in) :: kt @@ -280,7 +284,7 @@ SUBROUTINE nemogcmcoup_wam_get( mype, npes, icomm, & ! Number WAM grid points INTEGER, INTENT(IN) :: nopoints ! Local arrays of sst, ice cover, ice thickness and currents - REAL(wp), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur + REAL(wpIFS), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur LOGICAL :: licethk ! Local variables @@ -310,7 +314,7 @@ SUBROUTINE nemogcmcoup_wam_update( mype, npes, icomm, & INTEGER, INTENT(IN) :: mype,npes,icomm ! Data on the WAM grid. INTEGER, INTENT(IN) :: npoints - REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & & pwswh, pwmwp, pwphioc, pwtauoc, pwstrn, pwustokes, pwvstokes ! Current time CHARACTER(len=14), INTENT(IN) :: cdtpro @@ -342,7 +346,7 @@ SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & INTEGER, INTENT(IN) :: mype,npes,icomm ! Data on the WAM grid. INTEGER, INTENT(IN) :: npoints - REAL(wp), DIMENSION(npoints), INTENT(IN) :: & + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & & pwutau, pwvtau, pwuv10n, pwphif ! Current time step CHARACTER(len=14), INTENT(IN) :: cdtpro From 141cabd25511c986ed5a19ebf7849329b2bb37f1 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 4 Jul 2020 23:36:27 +0000 Subject: [PATCH 048/909] add interface for subroutine tracer_gradient_z --- src/oce_tracer_mod.F90 | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index 64f6984b6..824126dcd 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -2,6 +2,16 @@ MODULE o_tracers USE MOD_MESH IMPLICIT NONE + +interface + subroutine tracer_gradient_z(ttf, mesh) + use g_PARSUP, only: myDim_nod2D, eDim_nod2D + use mod_mesh + type(t_mesh), intent(in) , target :: mesh + real(kind=WP) :: ttf(mesh%nl-1,myDim_nod2D+eDim_nod2D) + end subroutine +end interface + CONTAINS !======================================================================= SUBROUTINE tracer_gradient_elements(ttf, mesh) From 27685ab039337295ea095c0e227bf43f57e26b01 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sun, 5 Jul 2020 08:58:35 +0000 Subject: [PATCH 049/909] mpi_topology_module OFF --- src/gen_surface_forcing.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index c9cfccf27..812d21a02 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -596,7 +596,7 @@ END SUBROUTINE nc_sbc_ini SUBROUTINE getcoeffld(fld_idx, rdate, mesh) use forcing_provider_async_module - use mpi_topology_module + ! TR use mpi_topology_module !!--------------------------------------------------------------------- !! *** ROUTINE getcoeffld *** !! @@ -657,7 +657,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) sbc_flfi(fld_idx)%sbcdata_a_t_index = -1 allocate(sbc_flfi(fld_idx)%sbcdata_b(nc_Nlon,nc_Nlat)) sbc_flfi(fld_idx)%sbcdata_b_t_index = -1 - sbc_flfi(fld_idx)%read_forcing_rootrank = mpi_topology%next_host_head_rank(MPI_COMM_FESOM) + sbc_flfi(fld_idx)%read_forcing_rootrank = 0 ! TR mpi_topology%next_host_head_rank(MPI_COMM_FESOM) end if rootrank = sbc_flfi(fld_idx)%read_forcing_rootrank From b65124ca128ce032be6ebbe856ad0c1d10b0a708 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sun, 5 Jul 2020 10:11:14 +0000 Subject: [PATCH 050/909] changed compile option -st=c++11 to hstd=c++11 if on Cray --- src/forcing_provider_async/CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/forcing_provider_async/CMakeLists.txt b/src/forcing_provider_async/CMakeLists.txt index 217a22ac9..aea514706 100644 --- a/src/forcing_provider_async/CMakeLists.txt +++ b/src/forcing_provider_async/CMakeLists.txt @@ -14,4 +14,8 @@ target_include_directories(${PROJECT_NAME} INTERFACE ${CMAKE_CURRENT_LIST_DIR} PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ) +if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) +target_compile_options(${PROJECT_NAME} PRIVATE -hstd=c++11) +else() target_compile_options(${PROJECT_NAME} PRIVATE -std=c++11) +endif() From 561c8e68159552277923d53d732bd811ba6e0ab6 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sun, 5 Jul 2020 14:07:58 +0000 Subject: [PATCH 051/909] load mo_tidal in main_timestepping for foreph_ini etc --- src/fvom_main.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 7a60ec082..514a47bac 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -204,6 +204,7 @@ subroutine main_timestepping(nsteps) use io_MEANDATA use io_mesh_info use diagnostics + use mo_tidal #if defined (__oasis) use cpl_driver #endif From 9180ac48e155e7fdfe36bd4a102c3ce0309872cc Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 6 Jul 2020 04:54:41 +0000 Subject: [PATCH 052/909] fix compile warnings for density_linear() by making it consistent with the interface definition (added intent OUT) --- src/oce_ale_pressure_bv.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 1d35c988b..18db515fe 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -1592,7 +1592,7 @@ SUBROUTINE density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, mesh) real(kind=WP), intent(IN) :: t,s real(kind=WP), intent(OUT) :: rho_out real(kind=WP) :: rhopot, bulk - real(kind=WP) :: bulk_0, bulk_pz, bulk_pz2 + real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2 type(t_mesh), intent(in) , target :: mesh #include "associate_mesh.h" !compute secant bulk modulus From 57adeda94680699ca2397642da78984690716f9c Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 6 Jul 2020 05:15:30 +0000 Subject: [PATCH 053/909] change unit number from 20 to 21 when called from within IFS (unit 20 already taken) --- src/gen_modules_cvmix_kpp.F90 | 11 ++++++++--- src/gen_modules_cvmix_pp.F90 | 11 ++++++++--- src/gen_modules_cvmix_tidal.F90 | 11 ++++++++--- src/gen_modules_cvmix_tke.F90 | 11 ++++++++--- 4 files changed, 32 insertions(+), 12 deletions(-) diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index c8d960bb4..479c0b69c 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -221,6 +221,11 @@ module g_cvmix_kpp subroutine init_cvmix_kpp(mesh) implicit none character(len=100) :: nmlfile +#ifdef __ifsinterface + integer, parameter :: iunit = 21 +#else + integer, parameter :: iunit = 20 +#endif logical :: nmlfile_exist=.False. integer :: node_size type(t_mesh), intent(in) , target :: mesh @@ -272,9 +277,9 @@ subroutine init_cvmix_kpp(mesh) ! check if cvmix namelist file exists if not use default values inquire(file=trim(nmlfile),exist=nmlfile_exist) if (nmlfile_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_kpp) - close(20) + open(iunit,file=trim(nmlfile)) + read(iunit,nml=param_kpp) + close(iunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if diff --git a/src/gen_modules_cvmix_pp.F90 b/src/gen_modules_cvmix_pp.F90 index d0c6a1ca5..630ef4237 100644 --- a/src/gen_modules_cvmix_pp.F90 +++ b/src/gen_modules_cvmix_pp.F90 @@ -69,6 +69,11 @@ subroutine init_cvmix_pp(mesh) implicit none type(t_mesh), intent(in), target :: mesh character(len=100) :: nmlfile +#ifdef __ifsinterface + integer, parameter :: iunit = 21 +#else + integer, parameter :: iunit = 20 +#endif logical :: nmlfile_exist=.False. integer :: node_size #include "associate_mesh.h" @@ -97,9 +102,9 @@ subroutine init_cvmix_pp(mesh) ! check if cvmix namelist file exists if not use default values inquire(file=trim(nmlfile),exist=nmlfile_exist) if (nmlfile_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_pp) - close(20) + open(iunit,file=trim(nmlfile)) + read(iunit,nml=param_pp) + close(iunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if diff --git a/src/gen_modules_cvmix_tidal.F90 b/src/gen_modules_cvmix_tidal.F90 index a51b5cabc..77d29eb3c 100644 --- a/src/gen_modules_cvmix_tidal.F90 +++ b/src/gen_modules_cvmix_tidal.F90 @@ -71,6 +71,11 @@ module g_cvmix_tidal subroutine init_cvmix_tidal(mesh) character(len=100) :: nmlfile +#ifdef __ifsinterface + integer, parameter :: iunit = 21 +#else + integer, parameter :: iunit = 20 +#endif logical :: file_exist=.False. integer :: node_size type(t_mesh), intent(in), target :: mesh @@ -100,9 +105,9 @@ subroutine init_cvmix_tidal(mesh) file_exist=.False. inquire(file=trim(nmlfile),exist=file_exist) if (file_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_tidal) - close(20) + open(iunit,file=trim(nmlfile)) + read(iunit,nml=param_tidal) + close(iunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index 8354792fb..e27268f1c 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -119,6 +119,11 @@ module g_cvmix_tke subroutine init_cvmix_tke(mesh) implicit none character(len=100) :: nmlfile +#ifdef __ifsinterface + integer, parameter :: iunit = 21 +#else + integer, parameter :: iunit = 20 +#endif logical :: nmlfile_exist=.False. integer :: node_size type(t_mesh), intent(in), target :: mesh @@ -197,9 +202,9 @@ subroutine init_cvmix_tke(mesh) ! check if cvmix namelist file exists if not use default values inquire(file=trim(nmlfile),exist=nmlfile_exist) if (nmlfile_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_tke) - close(20) + open(iunit,file=trim(nmlfile)) + read(iunit,nml=param_tke) + close(iunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if From 7cbadf84695298be5a20aed10513e08718413f43 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 6 Jul 2020 11:13:30 +0000 Subject: [PATCH 054/909] comment unused module --- src/gen_model_setup.F90 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index 375e6204a..fa1ca8b2e 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -6,8 +6,7 @@ subroutine setup_model #else call read_namelist ! should be before clock_init #endif - call define_prog_tracer - + ! call define_prog_tracer end subroutine setup_model ! ============================================================== #ifdef __ifsinterface From 61654273e990eacb692a120ca67231b352b8088b Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 9 Jul 2020 20:08:32 +0000 Subject: [PATCH 055/909] add associatemesh.h to subroutine --- src/gen_ic3d.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 568d561f8..91b3e2fc0 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -387,6 +387,7 @@ SUBROUTINE do_ic3d(mesh) IMPLICIT NONE integer :: n type(t_mesh), intent(in) , target :: mesh +#include "associate_mesh.h" if (mype==0) write(*,*) "Start: Initial conditions for tracers" From 0314e1bc5f607f6b7ab1415f49ee6bbf260fe5b8 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 9 Jul 2020 23:34:10 +0000 Subject: [PATCH 056/909] put mesh object into module after initialization. It can be USEd in the timestepping subroutine without passing it as as argument (when called from IFS, we should stick to the given IFS interface that does not know about the mesh and thus cannot pass it on) --- src/oce_mesh.F90 | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 90bdaeb3d..c633c3b95 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -74,6 +74,11 @@ subroutine mesh_auxiliary_arrays(mesh) end subroutine end interface end module +! bring mesh into main_timestepping() +module g_init2timestepping + USE MOD_MESH, only: t_mesh + type(t_mesh), target, save :: meshinmod +end module g_init2timestepping ! Driving routine. The distributed mesh information and mesh proper ! are read from files. From 149240a00d774d7cd47314ec00762d765032080b Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 9 Jul 2020 23:36:26 +0000 Subject: [PATCH 057/909] load the new module with the mesh, store it there as meshinmod, and read it in timestepping. the model runs for 4 days with GNU compiler (pi-mesh)! --- src/fvom_main.F90 | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 514a47bac..13b031b7d 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -34,6 +34,7 @@ subroutine main_initialize(nsteps) ! Split main into three major parts !---------------------------------- USE MOD_MESH +USE g_init2timestepping, only: meshinmod USE o_ARRAYS USE o_PARAM USE g_PARSUP, only: mype, par_init @@ -177,8 +178,10 @@ subroutine main_initialize(nsteps) write(*,*) '============================================' endif -end subroutine main_initialize + ! save mesh for timestepping routine + meshinmod=mesh +end subroutine main_initialize !=============================================================================! @@ -190,6 +193,7 @@ subroutine main_timestepping(nsteps) !---------------------------------- !USE o_MESH USE MOD_MESH + USE g_init2timestepping, only: meshinmod USE o_ARRAYS USE o_PARAM USE g_PARSUP @@ -214,8 +218,9 @@ subroutine main_timestepping(nsteps) integer, INTENT(IN) :: nsteps real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing - type(t_mesh), target, save :: mesh + type(t_mesh), target :: mesh + mesh = meshinmod !===================== ! Time stepping !===================== From a758b1418525b668967f668ff3c1a168d72cc7ba Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 11 Jul 2020 11:26:41 +0000 Subject: [PATCH 058/909] use mpi_topology_module as in cray_compatibility branch. Was still commented by accident. --- src/gen_surface_forcing.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index c8874a6a7..49dd14715 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -596,7 +596,7 @@ END SUBROUTINE nc_sbc_ini SUBROUTINE getcoeffld(fld_idx, rdate, mesh) use forcing_provider_async_module - ! TR use mpi_topology_module + use mpi_topology_module !!--------------------------------------------------------------------- !! *** ROUTINE getcoeffld *** !! @@ -657,7 +657,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) sbc_flfi(fld_idx)%sbcdata_a_t_index = -1 allocate(sbc_flfi(fld_idx)%sbcdata_b(nc_Nlon,nc_Nlat)) sbc_flfi(fld_idx)%sbcdata_b_t_index = -1 - sbc_flfi(fld_idx)%read_forcing_rootrank = 0 ! TR mpi_topology%next_host_head_rank(MPI_COMM_FESOM) + sbc_flfi(fld_idx)%read_forcing_rootrank = mpi_topology%next_host_head_rank(MPI_COMM_FESOM) end if rootrank = sbc_flfi(fld_idx)%read_forcing_rootrank From a070f902a4057b2ee1c5978e47b16acfd81d17df Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 11 Jul 2020 11:33:37 +0000 Subject: [PATCH 059/909] reduce differences to cray_compatibility --- src/gen_modules_partitioning.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 772d3f996..e3a48d2cd 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -14,7 +14,6 @@ module g_PARSUP integer :: MPI_COMM_FESOM integer, parameter :: MAX_LAENDERECK=16 integer, parameter :: MAX_NEIGHBOR_PARTITIONS=32 - type com_struct integer :: rPEnum ! the number of PE I receive info from integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: rPE ! their list From 5e50d66fb24bd8e66208c0569bb1d21d776cda9b Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 11 Jul 2020 12:31:32 +0000 Subject: [PATCH 060/909] remove double initilization of d_eta. Is done further down already. --- src/oce_setup_step.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index ef9f93ca0..37a2f292b 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -192,7 +192,6 @@ SUBROUTINE array_setup(mesh) ! elevation and its rhs ! ================ allocate(eta_n(node_size), d_eta(node_size)) -d_eta(:)=0.0 allocate(ssh_rhs(node_size)) ! ================ ! Monin-Obukhov From e428acef11b9be24c6d72059439882fa331c7db2 Mon Sep 17 00:00:00 2001 From: Natalja Rakowsky Date: Fri, 5 Jun 2020 15:38:58 +0200 Subject: [PATCH 061/909] add if allocated before pointer assignement (cherry picked from commit 7beda9db83a54b607416b657c0972b1f056e7bd5) --- src/associate_mesh.h | 79 +++++++++++++++++++++++++++++--------------- 1 file changed, 52 insertions(+), 27 deletions(-) diff --git a/src/associate_mesh.h b/src/associate_mesh.h index 5fa1dafdf..80403ea97 100644 --- a/src/associate_mesh.h +++ b/src/associate_mesh.h @@ -64,35 +64,60 @@ nl => mesh%nl !!$mesh_resolution => mesh%mesh_resolution !!$ssh_stiff => mesh%ssh_stiff - -coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D -geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D -elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes -edges(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edges -edge_tri(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_tri -elem_edges(1:3,1:myDim_elem2D) => mesh%elem_edges -elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_area -edge_dxdy(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_dxdy -edge_cross_dxdy(1:4,1:myDim_edge2D+eDim_edge2D) => mesh%edge_cross_dxdy -elem_cos(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_cos -metric_factor(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%metric_factor -elem_neighbors(1:3,1:myDim_elem2D) => mesh%elem_neighbors +if (allocated(mesh%coord_nod2D )) & + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D +if (allocated(mesh%geo_coord_nod2D)) & + geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D +if (allocated(mesh%elem2D_nodes )) & + elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes +if (allocated(mesh%edges )) & + edges(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edges +if (allocated(mesh%edge_tri )) & + edge_tri(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_tri +if (allocated(mesh%elem_edges )) & + elem_edges(1:3,1:myDim_elem2D) => mesh%elem_edges +if (allocated(mesh%elem_area )) & + elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_area +if (allocated(mesh%edge_dxdy )) & + edge_dxdy(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_dxdy +if (allocated(mesh%edge_cross_dxdy )) & + edge_cross_dxdy(1:4,1:myDim_edge2D+eDim_edge2D) => mesh%edge_cross_dxdy +if (allocated(mesh%elem_cos )) & + elem_cos(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_cos +if (allocated(mesh%metric_factor )) & + metric_factor(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%metric_factor +if (allocated(mesh%elem_neighbors )) & + elem_neighbors(1:3,1:myDim_elem2D) => mesh%elem_neighbors nod_in_elem2D => mesh%nod_in_elem2D ! (maxval(rmax),myDim_nod2D+eDim_nod2D) x_corners => mesh%x_corners ! (myDim_nod2D, maxval(rmax)) y_corners => mesh%y_corners ! (myDim_nod2D, maxval(rmax)) -nod_in_elem2D_num(1:myDim_nod2D+eDim_nod2D) => mesh%nod_in_elem2D_num -depth(1:myDim_nod2D+eDim_nod2D) => mesh%depth -gradient_vec(1:6,1:myDim_elem2D) => mesh%gradient_vec -gradient_sca(1:6,1:myDim_elem2D) => mesh%gradient_sca -bc_index_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%bc_index_nod2D -zbar(1:mesh%nl) => mesh%zbar -Z(1:mesh%nl-1) => mesh%Z +if (allocated(mesh%nod_in_elem2D_num )) & + nod_in_elem2D_num(1:myDim_nod2D+eDim_nod2D) => mesh%nod_in_elem2D_num +if (allocated(mesh%depth )) & + depth(1:myDim_nod2D+eDim_nod2D) => mesh%depth +if (allocated(mesh%gradient_vec )) & + gradient_vec(1:6,1:myDim_elem2D) => mesh%gradient_vec +if (allocated(mesh%gradient_sca )) & + gradient_sca(1:6,1:myDim_elem2D) => mesh%gradient_sca +if (allocated(mesh%bc_index_nod2D )) & + bc_index_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%bc_index_nod2D +if (allocated(mesh%zbar )) & + zbar(1:mesh%nl) => mesh%zbar +if (allocated(mesh%Z )) & + Z(1:mesh%nl-1) => mesh%Z elem_depth => mesh%elem_depth ! never used, not even allocated -nlevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%nlevels -nlevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D -area(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area -area_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area_inv -mesh_resolution(1:myDim_nod2d+eDim_nod2D) => mesh%mesh_resolution +if (allocated(mesh%nlevels )) & + nlevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%nlevels +if (allocated(mesh%nlevels_nod2D )) & + nlevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D +if (allocated(mesh%area )) & + area(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area +if (allocated(mesh%area_inv )) & + area_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area_inv +if (allocated(mesh%mesh_resolution )) & + mesh_resolution(1:myDim_nod2d+eDim_nod2D) => mesh%mesh_resolution ssh_stiff => mesh%ssh_stiff -lump2d_north(1:myDim_nod2d) => mesh%lump2d_north -lump2d_south(1:myDim_nod2d) => mesh%lump2d_south +if (allocated(mesh%lump2d_north )) & + lump2d_north(1:myDim_nod2d) => mesh%lump2d_north +if (allocated(mesh%lump2d_south )) & + lump2d_south(1:myDim_nod2d) => mesh%lump2d_south From 012de8a2421af8f9da9fcf207598273d62f149f6 Mon Sep 17 00:00:00 2001 From: Natalja Rakowsky Date: Fri, 5 Jun 2020 15:39:47 +0200 Subject: [PATCH 062/909] better deal with NaN in netcdf: ieee_is_nan (cherry picked from commit b9f91419f47ffa79fea5c58b9d03393796e97110) --- src/gen_ic3d.F90 | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 91b3e2fc0..0b648e460 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -266,6 +266,8 @@ SUBROUTINE getcoeffld(mesh) !! ** Method : !! ** Action : !!---------------------------------------------------------------------- + + USE ieee_arithmetic IMPLICIT NONE integer :: iost !I/O status @@ -315,9 +317,20 @@ SUBROUTINE getcoeffld(mesh) iost = nf_get_vara_double(ncid, id_data, nf_start, nf_edges, ncdata(2:nc_Nlon-1,:,:)) ncdata(1,:,:) =ncdata(nc_Nlon-1,:,:) ncdata(nc_Nlon,:,:)=ncdata(2,:,:) - where (ncdata < -0.99_WP*dummy ) ! dummy values are only positive - ncdata = dummy - end where + + ! replace nan by dummy value + do k=1,nc_Ndepth + do j=1,nc_Nlat + do i=1,nc_Nlon + if (ieee_is_nan(ncdata(i,j,k))) then + ncdata(i,j,k) = dummy + elseif (ncdata(i,j,k) < -0.99_WP*dummy .or. ncdata(i,j,k) > dummy) then + ! and in case the input data has other conventions on missing values: + ncdata(i,j,k) = dummy + endif + end do + end do + end do end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) call check_nferr(iost,filename) From 6ac59f23b460db5321d618dd86a078503fc9b8b3 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 18 Jul 2020 01:16:36 +0000 Subject: [PATCH 063/909] added reading of _FillValue if present in hydrography files so we can decide in the code how to treat them. Before, the model was hanging on Cray (ECMWF) with many NaNs using normal compile flags. With -G0 debug option it was running since only then the nans in the input file were correctly interpreted as NaNs. --- src/gen_ic3d.F90 | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 0b648e460..6fabb10bc 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -286,6 +286,8 @@ SUBROUTINE getcoeffld(mesh) real(wp), allocatable, dimension(:) :: data1d integer :: elnodes(3) integer :: ierror ! return error code + integer :: NO_FILL ! 0=no fillval, 1=fillval + real(wp) :: FILL_VALUE type(t_mesh), intent(in), target :: mesh #include "associate_mesh.h" @@ -303,6 +305,12 @@ SUBROUTINE getcoeffld(mesh) ! get variable id if (mype==0) then iost = nf_inq_varid(ncid, varname, id_data) + iost = nf_inq_var_fill(ncid, id_data, NO_FILL, FILL_VALUE) ! FillValue defined? + if (NO_FILL==1) then + print *, 'No _FillValue is set in ', filename, ', trying dummy =', dummy, FILL_VALUE + else + print *, 'The FillValue in ', filename, ' is set to ', FILL_VALUE ! should set dummy accordingly + end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) call check_nferr(iost,filename) @@ -318,11 +326,11 @@ SUBROUTINE getcoeffld(mesh) ncdata(1,:,:) =ncdata(nc_Nlon-1,:,:) ncdata(nc_Nlon,:,:)=ncdata(2,:,:) - ! replace nan by dummy value + ! replace nan (or fillvalue) by dummy value do k=1,nc_Ndepth do j=1,nc_Nlat do i=1,nc_Nlon - if (ieee_is_nan(ncdata(i,j,k))) then + if (ieee_is_nan(ncdata(i,j,k)) .or. (ncdata(i,j,k)==FILL_VALUE)) then ncdata(i,j,k) = dummy elseif (ncdata(i,j,k) < -0.99_WP*dummy .or. ncdata(i,j,k) > dummy) then ! and in case the input data has other conventions on missing values: @@ -400,7 +408,6 @@ SUBROUTINE do_ic3d(mesh) IMPLICIT NONE integer :: n type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" if (mype==0) write(*,*) "Start: Initial conditions for tracers" From 3affea92bee56b761a4f7349eeffba3c3fa6d1a3 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 18 Jul 2020 11:11:02 +0000 Subject: [PATCH 064/909] add jobscripts for ecmwf machine --- work/job_ini_ecmwf | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 work/job_ini_ecmwf diff --git a/work/job_ini_ecmwf b/work/job_ini_ecmwf new file mode 100644 index 000000000..9f3d8f6d5 --- /dev/null +++ b/work/job_ini_ecmwf @@ -0,0 +1,45 @@ +#!/bin/bash +#PBS -S /usr/bin/ksh +#PBS -N fesom2-ini +##PBS -q np +#PBS -q dp +#PBS -l EC_total_tasks=1 + +# optionally, specifiy that no OpenMP is used +#PBS -l EC_threads_per_task=1 + +#PBS -l EC_hyperthreading=1 +#PBS -l EC_user_defined_priority=99 +###PBS -l walltime=00:17:00 +#PBS -l walltime=00:20:00 + +##PBS -j oe #join out and err +#PBD -n +#PBS -o /scratch/rd/natr/run_dyamond/pbs.out +#PBS -e /scratch/rd/natr/run_dyamond/pbs.err + +#PBS -m abe +#PBS -M thomas.rackow@awi.de + +#queue suitable for target processors min/max processors per node memory limit wall-clock +#np parallel MOM+CN 1/72 not shared 72 120 GB 48 hours + +path=`pwd` +echo Initial path: $path + +cd /scratch/rd/natr/run_dyamond_ini + +# debug +set -x + +ln -s $HOME/fesom2/bin/fesom_ini.x . #../bin/fesom.x . # cp -n ../bin/fesom.x +cp -n /scratch/rd/natr/run_dyamond/namelist.config . #../config/namelist.config . +cp -n /scratch/rd/natr/run_dyamond/namelist.forcing . #../config/namelist.forcing . +cp -n /scratch/rd/natr/run_dyamond/namelist.oce . #../config/namelist.oce . +cp -n /scratch/rd/natr/run_dyamond/namelist.ice . #../config/namelist.ice . + +date +#echo tasks_per_node, total_tasks, HT: $EC_tasks_per_node $EC_total_tasks $EC_hyperthreads +#aprun -N $EC_tasks_per_node -n $EC_total_tasks -j $EC_hyperthreads ./fvom_ini.x > "fesom2_ini.out" +aprun -N $EC_tasks_per_node -n $EC_total_tasks ./fesom_ini.x > "fesom_ini.out" +date From 6a19082c8484d8cd71cd003af772856de1a8a4d4 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sun, 19 Jul 2020 00:37:45 +0000 Subject: [PATCH 065/909] add surface zonal and meridional current case to io_meandata.F90 --- src/io_meandata.F90 | 6 ++++- work/job_ecmwfDYAMOND | 53 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 work/job_ecmwfDYAMOND diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 0b34fac54..6b025d0c6 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -144,7 +144,11 @@ subroutine ini_mean_io(mesh) call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('vve_5 ') call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', Wvel(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) - +CASE ('szonal_cur') + call def_stream(elem2D, myDim_elem2D,'curr_u', 'surface zonal current', 'm/s', UV(1,1,1:myDim_elem2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) +CASE ('smerid_cur') + call def_stream(elem2D, myDim_elem2D,'curr_v', 'surface meridional current', 'm/s', UV(2,1,1:myDim_elem2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + !___________________________________________________________________________________________________________________________________ ! output sea ice CASE ('uice ') diff --git a/work/job_ecmwfDYAMOND b/work/job_ecmwfDYAMOND new file mode 100644 index 000000000..216267233 --- /dev/null +++ b/work/job_ecmwfDYAMOND @@ -0,0 +1,53 @@ +#!/bin/bash +#PBS -S /usr/bin/ksh +#PBS -N fesom2DYMND +#PBS -q np +###PBS -q dp +###PBS -l EC_total_tasks=144 +#PBS -l EC_total_tasks=648 + +# optionally, specifiy that no OpenMP is used +#PBS -l EC_threads_per_task=1 + +#PBS -l EC_hyperthreading=0 +#PBS -l EC_user_defined_priority=99 +##PBS -l walltime=00:17:00 +#PBS -l walltime=03:00:00 + +##PBS -j oe #join out and err +#PBD -n +#PBS -o /scratch/rd/natr/run_dyamond/pbs.out +#PBS -e /scratch/rd/natr/run_dyamond/pbs.err + +#PBS -m abe +#PBS -M thomas.rackow@awi.de + +#queue suitable for target processors min/max processors per node memory limit wall-clock +#np parallel MOM+CN 1/72 not shared 72 120 GB 48 hours + +path=`pwd` +echo Initial path: $path + +cd /scratch/rd/natr/run_dyamond + +source $HOME/fesom2/env/ecaccess.ecmwf.int/shell + +# debug +set -x + +# did manually +#cp fesom.2020.???.restart.nc /scratch/rd/natr/run_dyamond/. +#cp fesom.clock /scratch/rd/natr/run_dyamond/. +#cp /fwsm/lb/project/fesom2/orca025-namelists/namelists-for-ERA5-forced-runs-with-newfesom2/namelist* /scratch/rd/natr/run_dyamond/. + +cp $HOME/fesom2/bin/fesom.x . +#ln -s $HOME/fesom2/bin/fesom.x . #../bin/fesom.x . # cp -n ../bin/fesom.x +#cp -n $HOME/fesom2/config/namelist.config . #../config/namelist.config . +#cp -n $HOME/fesom2/config/namelist.forcing . #../config/namelist.forcing . +#cp -n $HOME/fesom2/config/namelist.oce . #../config/namelist.oce . +#cp -n $HOME/fesom2/config/namelist.ice . #../config/namelist.ice . + +date +echo tasks_per_node, total_tasks, HT: $EC_tasks_per_node $EC_total_tasks $EC_hyperthreads +aprun -N $EC_tasks_per_node -n $EC_total_tasks -j $EC_hyperthreads ./fesom.x > "fesom2.out" +date From 7f09aeeda148e0f5806c07e16113de1c4234ff19 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sun, 19 Jul 2020 01:27:59 +0000 Subject: [PATCH 066/909] update environment file for ECMWF (load newer cmake version) --- env/ecaccess.ecmwf.int/shell | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/env/ecaccess.ecmwf.int/shell b/env/ecaccess.ecmwf.int/shell index 0ddf47b0c..188b6f172 100644 --- a/env/ecaccess.ecmwf.int/shell +++ b/env/ecaccess.ecmwf.int/shell @@ -1,4 +1,5 @@ -export PATH=/home/rd/natr/cmake-3.11.2-Linux-x86_64/bin:$PATH +#export PATH=/home/rd/natr/cmake-3.11.2-Linux-x86_64/bin:$PATH +module load cmake/3.16.4 module unload cray-hdf5 module load cray-netcdf From 1fe250e4fe9e12089c95a8ca2e2416d5ac087d2e Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sun, 19 Jul 2020 01:32:37 +0000 Subject: [PATCH 067/909] add mesh to ifs_interface which was stored in the module init2timestepping. We cannot pass mesh as argument to the subroutines since the interface is fixed - and IFS does not know about the mesh. Only associate variables we need without using full associate_mesh.h file. --- src/ifs_interface.F90 | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 index 045dee523..4467dfa9a 100644 --- a/src/ifs_interface.F90 +++ b/src/ifs_interface.F90 @@ -89,6 +89,7 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & myDim_edge2D, eDim_edge2D, myList_nod2D, myList_elem2D USE MOD_MESH !USE o_MESH, only: nod2D, elem2D + USE g_init2timestepping, only: meshinmod ! Initialize single executable coupling USE parinter @@ -108,7 +109,9 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & INTEGER :: iunit = 0 ! Local variables - type(t_mesh), target, save :: mesh + type(t_mesh), target :: mesh + integer , pointer :: nod2D + integer , pointer :: elem2D ! Namelist containing the file names of the weights CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & @@ -140,8 +143,11 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & INTEGER :: i,j,k,ierr LOGICAL :: lexists - ! associate the mesh -#include "associate_mesh.h" + ! associate the mesh, only what is needed here + ! #include "associate_mesh.h" + mesh = meshinmod + nod2D => mesh%nod2D + elem2D => mesh%elem2D ! here FESOM knows about the (total number of) MPI tasks @@ -342,6 +348,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & USE g_PARSUP, only: myDim_nod2D,eDim_nod2D, myDim_elem2D,eDim_elem2D,eXDim_elem2D !USE o_MESH, only: elem2D_nodes, coord_nod2D USE MOD_MESH + USE g_init2timestepping, only: meshinmod USE g_rotate_grid, only: vector_r2g USE parinter @@ -355,7 +362,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & REAL(wpIFS), DIMENSION(nopoints,3) :: pgistl LOGICAL :: licelvls - type(t_mesh), target, save :: mesh + type(t_mesh), target :: mesh real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D integer, dimension(:,:) , pointer :: elem2D_nodes @@ -375,6 +382,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & !#include "associate_mesh.h" ! associate what is needed only + mesh = meshinmod coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes @@ -544,6 +552,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex, eDim_nod2D, eDim_elem2D, eXDim_elem2D, myDim_edge2D, eDim_edge2D !USE o_MESH, only: coord_nod2D !elem2D_nodes USE MOD_MESH + USE g_init2timestepping, only: meshinmod !USE o_PARAM, ONLY : WP, use wpIFS from par_kind (IFS) USE g_rotate_grid, only: vector_r2g, vector_g2r USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & @@ -581,7 +590,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! QNS ice filter switch (requires tice_atm to be sent) LOGICAL, INTENT(IN) :: lqnsicefilt - type(t_mesh), target, save :: mesh + type(t_mesh), target :: mesh ! Local variables INTEGER :: n @@ -596,6 +605,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & !#include "associate_mesh.h" ! associate only the necessary things real(kind=WP), dimension(:,:), pointer :: coord_nod2D + mesh = meshinmod coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D ! =================================================================== ! From 231e2a2e2e77ca94cfd13d9d982c36423da6f4fe Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Wed, 1 Jul 2020 01:06:55 +0200 Subject: [PATCH 068/909] faster io on juwels through chunking (cherry picked from commit 9bce28a18cf4dd1902740008b7fd3b8029724c23) --- env.sh | 2 +- src/ice_thermo_cpl.F90 | 12 +++++++----- src/io_meandata.F90 | 7 +++++++ src/io_restart.F90 | 17 ++++++++++++++--- 4 files changed, 29 insertions(+), 9 deletions(-) diff --git a/env.sh b/env.sh index ff06a10c7..dd4de7ac0 100755 --- a/env.sh +++ b/env.sh @@ -45,7 +45,7 @@ elif [[ $LOGINHOST = ubuntu ]]; then STRATEGY="ubuntu" elif [[ $LOGINHOST = bsc ]]; then STRATEGY="bsc" -elif [[ $LOGINHOST =~ ^juwels[0-9]+\.fz\-juelich\.de$ ]]; then +elif [[ $LOGINHOST =~ ^juwels[0-9]+\.ib\.juwels\.fzj\.de$ ]]; then STRATEGY="juwels" elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then STRATEGY="ecaccess.ecmwf.int" diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index c710ed981..47027e3d2 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -117,15 +117,17 @@ subroutine thermodynamics(mesh) rsf = 0._WP #endif - !---- different lead closing parameter for NH and SH - call r2g(geolon, geolat, coord_nod2d(1,inod), coord_nod2d(2,inod)) +!#if defined (__oifs) +! !---- different lead closing parameter for NH and SH +! call r2g(geolon, geolat, coord_nod2d(1,inod), coord_nod2d(2,inod)) ! if (geolat.lt.0.) then -! h0min = 1.0 -! h0max = 1.5 -! else ! h0min = 0.75 ! h0max = 1.0 +! else +! h0min = 0.5 +! h0max = 0.75 ! endif +!#endif /* (__oifs) */ call ice_growth #if defined (__oifs) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 6b025d0c6..fc186670f 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -600,6 +600,13 @@ subroutine create_new_file(entry) entry%error_status(c) = nf_put_att_real(entry%ncid, entry%varID, 'scale_factor', NF_REAL, 1, entry%scale_factor); c=c+1 entry%error_status(c) = nf_put_att_real(entry%ncid, entry%varID, 'add_offset', NF_REAL, 1, entry%add_offset); c=c+1 endif + + if (entry%ndim==1) then + entry%error_status(c) = nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1/)); c=c+1 + elseif (entry%ndim==2) then + entry%error_status(c) = nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)); c=c+1 + endif + entry%error_status(c)=nf_put_att_text(entry%ncid, entry%varID, 'description', len_trim(entry%description), entry%description); c=c+1 entry%error_status(c)=nf_put_att_text(entry%ncid, entry%varID, 'units', len_trim(entry%units), entry%units); c=c+1 entry%error_status(c)=nf_close(entry%ncid); c=c+1 diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 39382b601..f868815f1 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -305,10 +305,14 @@ subroutine create_new_file(id) do l=1, id%ndim ! list all defined dimensions if (kdim==id%dim(l)%size) dimid(k)=id%dim(l)%code end do -!________write(*,*) kdim, ' -> ', dimid(k)__________________________________ + !write(*,*) "j",j,kdim, ' -> ', dimid(k) end do - id%error_status(c) = nf_def_var(id%ncid, trim(id%var(j)%name), NF_DOUBLE, id%var(j)%ndim+1, & - (/dimid(1:n), id%rec/), id%var(j)%code); c=c+1 + id%error_status(c) = nf_def_var(id%ncid, trim(id%var(j)%name), NF_DOUBLE, id%var(j)%ndim+1, (/dimid(1:n), id%rec/), id%var(j)%code); c=c+1 + if (n==1) then + id%error_status(c)=nf_def_var_chunking(id%ncid, id%var(j)%code, NF_CHUNKED, (/1/)); c=c+1 + elseif (n==2) then + id%error_status(c)=nf_def_var_chunking(id%ncid, id%var(j)%code, NF_CHUNKED, (/1, id%dim(1)%size/)); c=c+1 + end if id%error_status(c)=nf_put_att_text(id%ncid, id%var(j)%code, 'description', len_trim(id%var(j)%longname), id%var(j)%longname); c=c+1 id%error_status(c)=nf_put_att_text(id%ncid, id%var(j)%code, 'units', len_trim(id%var(j)%units), id%var(j)%units); c=c+1 end do @@ -428,6 +432,7 @@ subroutine write_restart(id, istep, mesh) real(kind=WP), allocatable :: aux(:), laux(:) integer :: i, lev, size1, size2, shape integer :: c + real(kind=WP) :: t0, t1, t2, t3 #include "associate_mesh.h" @@ -449,6 +454,7 @@ subroutine write_restart(id, istep, mesh) if (shape==1) then size1=id%var(i)%dims(1) if (mype==0) allocate(aux(size1)) + t0=MPI_Wtime() if (size1==nod2D) call gather_nod (id%var(i)%pt1, aux) if (size1==elem2D) call gather_elem(id%var(i)%pt1, aux) if (mype==0) then @@ -466,11 +472,16 @@ subroutine write_restart(id, istep, mesh) laux=id%var(i)%pt2(lev,:) ! if (size1==nod2D .or. size2==nod2D) call gather_nod (id%var(i)%pt2(lev,:), aux) ! if (size1==elem2D .or. size2==elem2D) call gather_elem(id%var(i)%pt2(lev,:), aux) + t0=MPI_Wtime() if (size1==nod2D .or. size2==nod2D) call gather_nod (laux, aux) if (size1==elem2D .or. size2==elem2D) call gather_elem(laux, aux) + t1=MPI_Wtime() if (mype==0) then id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/lev, 1, id%rec_count/), (/1, size2, 1/), aux, 1); c=c+1 end if + t2=MPI_Wtime() + if (mype==0 .and. size2==nod2D) write(*,*) 'nvar: ', i, 'lev: ', lev, 'gather_nod: ', t1-t0 + if (mype==0 .and. size2==nod2D) write(*,*) 'nvar: ', i, 'lev: ', lev, 'nf_put_var: ', t2-t1 end do deallocate(laux) if (mype==0) deallocate(aux) From 7feb4b51b8b52c56d0c586d99664ae756dd39fd1 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 31 Aug 2020 17:09:21 +0200 Subject: [PATCH 069/909] Revert "faster io on juwels through chunking" This reverts commit 231e2a2e2e77ca94cfd13d9d982c36423da6f4fe. --- env.sh | 2 +- src/ice_thermo_cpl.F90 | 12 +++++------- src/io_meandata.F90 | 7 ------- src/io_restart.F90 | 17 +++-------------- 4 files changed, 9 insertions(+), 29 deletions(-) diff --git a/env.sh b/env.sh index dd4de7ac0..ff06a10c7 100755 --- a/env.sh +++ b/env.sh @@ -45,7 +45,7 @@ elif [[ $LOGINHOST = ubuntu ]]; then STRATEGY="ubuntu" elif [[ $LOGINHOST = bsc ]]; then STRATEGY="bsc" -elif [[ $LOGINHOST =~ ^juwels[0-9]+\.ib\.juwels\.fzj\.de$ ]]; then +elif [[ $LOGINHOST =~ ^juwels[0-9]+\.fz\-juelich\.de$ ]]; then STRATEGY="juwels" elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then STRATEGY="ecaccess.ecmwf.int" diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 47027e3d2..c710ed981 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -117,17 +117,15 @@ subroutine thermodynamics(mesh) rsf = 0._WP #endif -!#if defined (__oifs) -! !---- different lead closing parameter for NH and SH -! call r2g(geolon, geolat, coord_nod2d(1,inod), coord_nod2d(2,inod)) + !---- different lead closing parameter for NH and SH + call r2g(geolon, geolat, coord_nod2d(1,inod), coord_nod2d(2,inod)) ! if (geolat.lt.0.) then +! h0min = 1.0 +! h0max = 1.5 +! else ! h0min = 0.75 ! h0max = 1.0 -! else -! h0min = 0.5 -! h0max = 0.75 ! endif -!#endif /* (__oifs) */ call ice_growth #if defined (__oifs) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index fc186670f..6b025d0c6 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -600,13 +600,6 @@ subroutine create_new_file(entry) entry%error_status(c) = nf_put_att_real(entry%ncid, entry%varID, 'scale_factor', NF_REAL, 1, entry%scale_factor); c=c+1 entry%error_status(c) = nf_put_att_real(entry%ncid, entry%varID, 'add_offset', NF_REAL, 1, entry%add_offset); c=c+1 endif - - if (entry%ndim==1) then - entry%error_status(c) = nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1/)); c=c+1 - elseif (entry%ndim==2) then - entry%error_status(c) = nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)); c=c+1 - endif - entry%error_status(c)=nf_put_att_text(entry%ncid, entry%varID, 'description', len_trim(entry%description), entry%description); c=c+1 entry%error_status(c)=nf_put_att_text(entry%ncid, entry%varID, 'units', len_trim(entry%units), entry%units); c=c+1 entry%error_status(c)=nf_close(entry%ncid); c=c+1 diff --git a/src/io_restart.F90 b/src/io_restart.F90 index f868815f1..39382b601 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -305,14 +305,10 @@ subroutine create_new_file(id) do l=1, id%ndim ! list all defined dimensions if (kdim==id%dim(l)%size) dimid(k)=id%dim(l)%code end do - !write(*,*) "j",j,kdim, ' -> ', dimid(k) +!________write(*,*) kdim, ' -> ', dimid(k)__________________________________ end do - id%error_status(c) = nf_def_var(id%ncid, trim(id%var(j)%name), NF_DOUBLE, id%var(j)%ndim+1, (/dimid(1:n), id%rec/), id%var(j)%code); c=c+1 - if (n==1) then - id%error_status(c)=nf_def_var_chunking(id%ncid, id%var(j)%code, NF_CHUNKED, (/1/)); c=c+1 - elseif (n==2) then - id%error_status(c)=nf_def_var_chunking(id%ncid, id%var(j)%code, NF_CHUNKED, (/1, id%dim(1)%size/)); c=c+1 - end if + id%error_status(c) = nf_def_var(id%ncid, trim(id%var(j)%name), NF_DOUBLE, id%var(j)%ndim+1, & + (/dimid(1:n), id%rec/), id%var(j)%code); c=c+1 id%error_status(c)=nf_put_att_text(id%ncid, id%var(j)%code, 'description', len_trim(id%var(j)%longname), id%var(j)%longname); c=c+1 id%error_status(c)=nf_put_att_text(id%ncid, id%var(j)%code, 'units', len_trim(id%var(j)%units), id%var(j)%units); c=c+1 end do @@ -432,7 +428,6 @@ subroutine write_restart(id, istep, mesh) real(kind=WP), allocatable :: aux(:), laux(:) integer :: i, lev, size1, size2, shape integer :: c - real(kind=WP) :: t0, t1, t2, t3 #include "associate_mesh.h" @@ -454,7 +449,6 @@ subroutine write_restart(id, istep, mesh) if (shape==1) then size1=id%var(i)%dims(1) if (mype==0) allocate(aux(size1)) - t0=MPI_Wtime() if (size1==nod2D) call gather_nod (id%var(i)%pt1, aux) if (size1==elem2D) call gather_elem(id%var(i)%pt1, aux) if (mype==0) then @@ -472,16 +466,11 @@ subroutine write_restart(id, istep, mesh) laux=id%var(i)%pt2(lev,:) ! if (size1==nod2D .or. size2==nod2D) call gather_nod (id%var(i)%pt2(lev,:), aux) ! if (size1==elem2D .or. size2==elem2D) call gather_elem(id%var(i)%pt2(lev,:), aux) - t0=MPI_Wtime() if (size1==nod2D .or. size2==nod2D) call gather_nod (laux, aux) if (size1==elem2D .or. size2==elem2D) call gather_elem(laux, aux) - t1=MPI_Wtime() if (mype==0) then id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/lev, 1, id%rec_count/), (/1, size2, 1/), aux, 1); c=c+1 end if - t2=MPI_Wtime() - if (mype==0 .and. size2==nod2D) write(*,*) 'nvar: ', i, 'lev: ', lev, 'gather_nod: ', t1-t0 - if (mype==0 .and. size2==nod2D) write(*,*) 'nvar: ', i, 'lev: ', lev, 'nf_put_var: ', t2-t1 end do deallocate(laux) if (mype==0) deallocate(aux) From ed9a1ccfe35b7db2f203b0a855ba8ecfff0c28ed Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 31 Aug 2020 15:10:19 +0000 Subject: [PATCH 070/909] comment some lines that shouldnt be called when coupled to IFS --- src/gen_forcing_couple.F90 | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 567f86243..c77424021 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -191,6 +191,8 @@ subroutine update_atm_forcing(istep, mesh) do_rotate_ice_wind=.false. end if #else + +#ifndef __ifsinterface call sbc_do(mesh) u_wind = atmdata(i_xwind,:) v_wind = atmdata(i_ywind,:) @@ -201,6 +203,9 @@ subroutine update_atm_forcing(istep, mesh) prec_rain = atmdata(i_prec ,:)/1000._WP prec_snow = atmdata(i_snow ,:)/1000._WP press_air = atmdata(i_mslp ,:) ! unit should be Pa +#endif +! IFS: press_air/Pair is new? Where is shum coming from? Couple D(q)/dT to T grid?? +! longwave..? ! second, compute exchange coefficients ! 1) drag coefficient From 34bdd1324599b8954b8a4453dd33262339466654 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 31 Aug 2020 16:45:53 +0000 Subject: [PATCH 071/909] use module for output_finalize() --- src/fvom_main.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 13298e05a..5d8338677 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -337,6 +337,7 @@ subroutine main_finalize USE g_PARSUP, only: mype, npes, par_ex, MPI_COMM_FESOM, MPIERR, MPI_Wtime use g_clock, only: clock_finish USE o_PARAM, only: WP + USE io_MEANDATA, only: finalize_output IMPLICIT NONE From e3322407b14e6fee801320eb489037e99d406e3d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Sep 2020 14:42:57 +0200 Subject: [PATCH 072/909] - work around an error with multi threaded calls and invalid variables on the cray environment - enable full cray-mpich thread support --- env/ecaccess.ecmwf.int/shell | 2 ++ src/io_meandata.F90 | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/env/ecaccess.ecmwf.int/shell b/env/ecaccess.ecmwf.int/shell index 188b6f172..345f54e81 100644 --- a/env/ecaccess.ecmwf.int/shell +++ b/env/ecaccess.ecmwf.int/shell @@ -7,4 +7,6 @@ module load cray-hdf5 #export CRAYPE_LINK_TYPE=dynamic +# enable full MPI thread support level (MPI_THREAD_MULTIPLE) +export MPICH_MAX_THREAD_SAFETY=multiple # to also switch to an alternative (probably with faster locking) multi threading implementation of the cray-mpich library, use the compiler flag -craympich-mt export FC=ftn CC=cc CXX=CC diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index cf183093e..248e56323 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -46,6 +46,7 @@ module io_MEANDATA real(real64), allocatable, dimension(:,:) :: local_values_r8_copy real(real32), allocatable, dimension(:,:) :: local_values_r4_copy real(kind=WP) :: ctime_copy + integer :: mype_workaround contains final destructor end type @@ -758,11 +759,15 @@ subroutine output(istep, mesh) subroutine do_output_callback(entry_index) +use g_PARSUP integer, intent(in) :: entry_index ! EO args type(Meandata), pointer :: entry + entry=>io_stream(entry_index) + mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) + call write_mean(entry, entry_index) end subroutine @@ -957,8 +962,13 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, call entry%thread%initialize(do_output_callback, entry_index) if(.not. async_netcdf_allowed) call entry%thread%disable_async() + ! check if we have multi thread support available in the MPI library + ! tough MPI_THREAD_FUNNELED should be enough here, at least on cray-mpich 7.5.3 async mpi calls fail if we do not have support level 'MPI_THREAD_MULTIPLE' + ! on cray-mpich we only get level 'MPI_THREAD_MULTIPLE' if 'MPICH_MAX_THREAD_SAFETY=multiple' is set in the environment call MPI_Query_thread(provided_mpi_thread_support_level, err) - if(provided_mpi_thread_support_level < MPI_THREAD_FUNNELED) call entry%thread%disable_async() + if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call entry%thread%disable_async() + + entry%mype_workaround = mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads end subroutine From a5f57af23243c7cbee3a6d3122df9a361c29fdf3 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Sep 2020 15:51:34 +0200 Subject: [PATCH 073/909] append to existing output files --- src/io_meandata.F90 | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 248e56323..2e891557f 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -719,8 +719,11 @@ subroutine output(istep, mesh) if(filepath /= trim(entry%filename)) then if("" /= trim(entry%filename)) call assert_nf(nf_close(entry%ncid), __LINE__) entry%filename = filepath - call create_new_file(entry) - call assert_nf( nf_open(entry%filename, nf_write, entry%ncid), __LINE__) + ! use any existing file with this name or create a new one + if( nf_open(entry%filename, nf_write, entry%ncid) /= nf_noerr ) then + call create_new_file(entry) + call assert_nf( nf_open(entry%filename, nf_write, entry%ncid), __LINE__) + end if call assoc_ids(entry) end if From 3a38ac68a32f4c76d513f3743df199f0c951d02e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Sep 2020 18:37:29 +0200 Subject: [PATCH 074/909] do not reset the fesom run loop counter "n", even if "main_timestepping" is called in increments --- src/fvom_main.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 5d8338677..3d1fd8507 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -219,6 +219,7 @@ subroutine main_timestepping(nsteps) real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing type(t_mesh), target :: mesh + integer, save :: fromstep = 1 mesh = meshinmod !===================== @@ -248,7 +249,7 @@ subroutine main_timestepping(nsteps) if (use_global_tides) then call foreph_ini(yearnew, month) end if - do n=1, nsteps + do n=fromstep, fromstep-1+nsteps if (use_global_tides) then call foreph(mesh) end if @@ -322,6 +323,7 @@ subroutine main_timestepping(nsteps) rtime_read_forcing = rtime_read_forcing + t1_frc - t0_frc end do + fromstep = fromstep+nsteps end subroutine main_timestepping From e741b38c961767ca80067fe5785b929ce5ef9ee1 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 29 Jan 2021 14:20:54 +0100 Subject: [PATCH 075/909] try to fix cavity related bug when computing the scalar area when cavity is used --- src/MOD_MESH.F90 | 2 +- src/associate_mesh.h | 4 +++- src/cavity_param.F90 | 17 ++++++++++++++-- src/gen_modules_diag.F90 | 3 ++- src/gen_support.F90 | 3 ++- src/ice_fct.F90 | 13 ++++++++++-- src/ice_oce_coupling.F90 | 21 +++++++++++++++----- src/oce_ale.F90 | 16 ++++++++++----- src/oce_ale_tracer.F90 | 34 +++++++++++++++---------------- src/oce_mesh.F90 | 43 ++++++++++++++++++++++++++++++++-------- src/oce_modules.F90 | 1 + src/oce_setup_step.F90 | 8 ++++++-- 12 files changed, 120 insertions(+), 45 deletions(-) diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index 53f45e1bd..795242a8d 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -18,7 +18,7 @@ MODULE MOD_MESH TYPE T_MESH integer :: nod2D ! the number of 2D nodes -real(kind=WP) :: ocean_area +real(kind=WP) :: ocean_area, ocean_areawithcav real(kind=WP), allocatable, dimension(:,:) :: coord_nod2D, geo_coord_nod2D integer :: edge2D ! the number of 2D edges integer :: edge2D_in ! the number of internal 2D edges diff --git a/src/associate_mesh.h b/src/associate_mesh.h index 554819e88..30611d44b 100644 --- a/src/associate_mesh.h +++ b/src/associate_mesh.h @@ -3,6 +3,7 @@ integer , pointer :: elem2D integer , pointer :: edge2D integer , pointer :: edge2D_in real(kind=WP) , pointer :: ocean_area +real(kind=WP) , pointer :: ocean_areawithcav integer , pointer :: nl real(kind=WP), dimension(:,:), pointer :: coord_nod2D, geo_coord_nod2D integer, dimension(:,:) , pointer :: elem2D_nodes @@ -35,7 +36,8 @@ nod2D => mesh%nod2D elem2D => mesh%elem2D edge2D => mesh%edge2D edge2D_in => mesh%edge2D_in -ocean_area => mesh%ocean_area +ocean_area => mesh%ocean_area +ocean_areawithcav => mesh%ocean_areawithcav nl => mesh%nl !!$coord_nod2D => mesh%coord_nod2D diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index ba6e2270c..68f1735e9 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -356,14 +356,14 @@ end subroutine cavity_heat_water_fluxes_2eq subroutine cavity_momentum_fluxes(mesh) use MOD_MESH use o_PARAM , only: density_0, C_d, WP - use o_ARRAYS, only: UV, stress_surf + use o_ARRAYS, only: UV, Unode, stress_surf, stress_node_surf use i_ARRAYS, only: u_w, v_w use g_PARSUP implicit none !___________________________________________________________________________ type(t_mesh), intent(inout) , target :: mesh - integer :: elem, elnodes(3), nzmin + integer :: elem, elnodes(3), nzmin, node real(kind=WP) :: aux #include "associate_mesh.h" @@ -381,6 +381,19 @@ subroutine cavity_momentum_fluxes(mesh) stress_surf(1,elem)=-aux*UV(1,nzmin,elem) stress_surf(2,elem)=-aux*UV(2,nzmin,elem) end do + + !___________________________________________________________________________ + do node=1,myDim_nod2D+eDim_nod2D + nzmin = ulevels_nod2d(node) + if(nzmin==1) cycle + + ! momentum stress: + ! need to check the sensitivity to the drag coefficient + ! here I use the bottom stress coefficient, which is 3e-3, for this FO2 work. + aux=sqrt(Unode(1,nzmin,node)**2+Unode(2,nzmin,node)**2)*density_0*C_d + stress_node_surf(1,node)=-aux*Unode(1,nzmin,node) + stress_node_surf(2,node)=-aux*Unode(2,nzmin,node) + end do end subroutine cavity_momentum_fluxes ! ! diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 63fd0c98e..c1e1ec3d8 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -134,7 +134,8 @@ subroutine diag_curl_stress_surf(mode, mesh) end if END DO DO n=1, myDim_nod2D+eDim_nod2D - curl_stress_surf(n)=curl_stress_surf(n)/area(1,n) + !!PS curl_stress_surf(n)=curl_stress_surf(n)/area(1,n) + curl_stress_surf(n)=curl_stress_surf(n)/area(ulevels_nod2D(n),n) END DO end subroutine diag_curl_stress_surf ! ============================================================== diff --git a/src/gen_support.F90 b/src/gen_support.F90 index df1af60bc..eee45c814 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -272,7 +272,8 @@ subroutine integrate_nod_2D(data, int2D, mesh) #include "associate_mesh.h" lval=0.0_WP do row=1, myDim_nod2D - lval=lval+data(row)*area(1,row) + !!PS lval=lval+data(row)*area(1,row) + lval=lval+data(row)*area(ulevels_nod2D(row),row) end do int2D=0.0_WP diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index dcee2c15c..e82f2e11e 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -663,6 +663,10 @@ SUBROUTINE ice_mass_matrix_fill(mesh) do n=1,3 row=elnodes(n) if(row>myDim_nod2D) cycle + !___________________________________________________________________ + ! if node is cavity cycle over + if(ulevels_nod2d(row)>1) cycle + ! Global-to-local neighbourhood correspondence DO q=1,nn_num(row) col_pos(nn_pos(q,row))=q @@ -670,6 +674,10 @@ SUBROUTINE ice_mass_matrix_fill(mesh) offset=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1) DO q=1,3 col=elnodes(q) + !___________________________________________________________________ + ! if node is cavity cycle over + if(ulevels_nod2d(col)>1) cycle + ipos=offset+col_pos(col) mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP if(q==n) then @@ -689,7 +697,8 @@ SUBROUTINE ice_mass_matrix_fill(mesh) offset=ssh_stiff%rowptr(q)-ssh_stiff%rowptr(1)+1 n=ssh_stiff%rowptr(q+1)-ssh_stiff%rowptr(1) aa=sum(mass_matrix(offset:n)) - if(abs(area(1,q)-aa)>.1_WP) then + !!PS if(abs(area(1,q)-aa)>.1_WP) then + if(abs(area(ulevels_nod2d(q),q)-aa)>.1_WP) then iflag=q flag=1 endif @@ -698,7 +707,7 @@ SUBROUTINE ice_mass_matrix_fill(mesh) offset=ssh_stiff%rowptr(iflag)-ssh_stiff%rowptr(1)+1 n=ssh_stiff%rowptr(iflag+1)-ssh_stiff%rowptr(1) aa=sum(mass_matrix(offset:n)) - write(*,*) '#### MASS MATRIX PROBLEM', mype, iflag, aa, area(1,iflag) + write(*,*) '#### MASS MATRIX PROBLEM', mype, iflag, aa, area(1,iflag), ulevels_nod2D(iflag) endif deallocate(col_pos) END SUBROUTINE ice_mass_matrix_fill diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 14c1fe29d..15db83c06 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -38,6 +38,10 @@ subroutine oce_fluxes_mom(mesh) stress_iceoce_x(n)=0.0_WP stress_iceoce_y(n)=0.0_WP end if + + ! total surface stress (iceoce+atmoce) on nodes + stress_node_surf(1,n) = stress_iceoce_x(n)*a_ice(n) + stress_atmoce_x(n)*(1.0_WP-a_ice(n)) + stress_node_surf(2,n) = stress_iceoce_y(n)*a_ice(n) + stress_atmoce_y(n)*(1.0_WP-a_ice(n)) end do !___________________________________________________________________________ @@ -48,10 +52,12 @@ subroutine oce_fluxes_mom(mesh) !_______________________________________________________________________ elnodes=elem2D_nodes(:,elem) - stress_surf(1,elem)=sum(stress_iceoce_x(elnodes)*a_ice(elnodes) + & - stress_atmoce_x(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP - stress_surf(2,elem)=sum(stress_iceoce_y(elnodes)*a_ice(elnodes) + & - stress_atmoce_y(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP + !!PS stress_surf(1,elem)=sum(stress_iceoce_x(elnodes)*a_ice(elnodes) + & + !!PS stress_atmoce_x(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP + !!PS stress_surf(2,elem)=sum(stress_iceoce_y(elnodes)*a_ice(elnodes) + & + !!PS stress_atmoce_y(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP + stress_surf(1,elem)=sum(stress_node_surf(1,elnodes))/3.0_WP + stress_surf(2,elem)=sum(stress_node_surf(2,elnodes))/3.0_WP END DO !___________________________________________________________________________ @@ -177,6 +183,9 @@ subroutine oce_fluxes(mesh) if (use_cavity) call cavity_heat_water_fluxes_3eq(mesh) !!PS if (use_cavity) call cavity_heat_water_fluxes_2eq(mesh) +!!PS where(ulevels_nod2D>1) heat_flux=0.0_WP +!!PS where(ulevels_nod2D>1) water_flux=0.0_WP + !___________________________________________________________________________ call exchange_nod(heat_flux, water_flux) @@ -268,7 +277,9 @@ subroutine oce_fluxes(mesh) ! here the + sign must be used because we switched up the sign of the ! water_flux with water_flux = -fresh_wa_flux, but evap, prec_... and runoff still ! have there original sign - water_flux=water_flux+net/ocean_area + ! if use_cavity=.false. --> ocean_area == ocean_areawithcav + !! water_flux=water_flux+net/ocean_area + water_flux=water_flux+net/ocean_areawithcav !___________________________________________________________________________ if (use_sw_pene) call cal_shortwave_rad(mesh) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 44cd3ea86..174697209 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -572,7 +572,10 @@ subroutine init_thickness_ale(mesh) end if !___________________________________________________________________________ ! Fill in ssh_rhs_old - ssh_rhs_old=(hbar-hbar_old)*area(1,:)/dt + !!PS ssh_rhs_old=(hbar-hbar_old)*area(1,:)/dt + do n=1,myDim_nod2D+eDim_nod2D + ssh_rhs_old(n)=(hbar(n)-hbar_old(n))*area(ulevels_nod2D(n),n)/dt + end do ! -->see equation (14) FESOM2:from finite elements to finie volume eta_n=alpha*hbar_old+(1.0_WP-alpha)*hbar @@ -1258,7 +1261,8 @@ subroutine init_stiff_mat_ale(mesh) ! Mass matrix part do row=1, myDim_nod2D offset = ssh_stiff%rowptr(row) - SSH_stiff%values(offset) = SSH_stiff%values(offset)+ area(1,row)/dt + !!PS SSH_stiff%values(offset) = SSH_stiff%values(offset)+ area(1,row)/dt + SSH_stiff%values(offset) = SSH_stiff%values(offset)+ area(ulevels_nod2D(row),row)/dt end do deallocate(n_pos,n_num) @@ -2550,9 +2554,11 @@ subroutine oce_timestep_ale(n, mesh) t0=MPI_Wtime() -!!PS water_flux = 0.0_WP -!!PS heat_flux = 0.0_WP -!!PS stress_surf= 0.0_WP + water_flux = 0.0_WP + heat_flux = 0.0_WP + stress_surf= 0.0_WP + stress_node_surf= 0.0_WP + !___________________________________________________________________________ ! calculate equation of state, density, pressure and mixed layer depths if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call pressure_bv'//achar(27)//'[0m' diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 34e2ff27d..24f77d628 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -613,13 +613,13 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) !_______________________________________________________________________ ! case of activated shortwave penetration into the ocean, ad 3d contribution - if (use_sw_pene .and. tr_num==1) then - !!PS do nz=1, nzmax-1 - do nz=nzmin, nzmax-1 - zinv=1.0_WP*dt !/(zbar(nz)-zbar(nz+1)) ale! - tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n)*area(nz+1,n)/area(nz,n))*zinv - end do - end if +!!PS if (use_sw_pene .and. tr_num==1) then +!!PS !!PS do nz=1, nzmax-1 +!!PS do nz=nzmin, nzmax-1 +!!PS zinv=1.0_WP*dt !/(zbar(nz)-zbar(nz+1)) ale! +!!PS tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n)*area(nz+1,n)/area(nz,n))*zinv +!!PS end do +!!PS end if !_______________________________________________________________________ ! The first row contains also the boundary condition from heatflux, @@ -634,7 +634,7 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! v (+) v (+) ! !!PS tr(1)= tr(1)+bc_surface(n, tracer_id(tr_num)) - tr(nzmin)= tr(nzmin)+bc_surface(n, tracer_id(tr_num),mesh) +!!PS tr(nzmin)= tr(nzmin)+bc_surface(n, tracer_id(tr_num),mesh) !_______________________________________________________________________ ! The forward sweep algorithm to solve the three-diagonal matrix @@ -719,15 +719,15 @@ subroutine diff_ver_part_redi_expl(mesh) Tx=0.0_WP Ty=0.0_WP do k=1, nod_in_elem2D_num(n) - elem=nod_in_elem2D(k,n) - !!PS if(nz.LE.(nlevels(elem)-1)) then - if( nz.LE.(nlevels(elem)-1) .and. nz.GE.(ulevels(elem))) then - Tx=Tx+tr_xy(1,nz,elem)*elem_area(elem) - Ty=Ty+tr_xy(2,nz,elem)*elem_area(elem) - endif - end do - tr_xynodes(1,nz,n)=tx/3.0_WP/area(nz,n) - tr_xynodes(2,nz,n)=ty/3.0_WP/area(nz,n) + elem=nod_in_elem2D(k,n) + !!PS if(nz.LE.(nlevels(elem)-1)) then + if( nz.LE.(nlevels(elem)-1) .and. nz.GE.(ulevels(elem))) then + Tx=Tx+tr_xy(1,nz,elem)*elem_area(elem) + Ty=Ty+tr_xy(2,nz,elem)*elem_area(elem) + endif + end do + tr_xynodes(1,nz,n)=tx/3.0_WP/area(nz,n) + tr_xynodes(2,nz,n)=ty/3.0_WP/area(nz,n) end do end do diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index e641b4de3..763c31020 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -1811,8 +1811,8 @@ SUBROUTINE mesh_areas(mesh) ! area(nl, myDim_nod2D) -integer :: n,j,q, elnodes(3), ed(2), elem, nz -real(kind=WP) :: a(2), b(2), ax, ay, lon, lat, vol +integer :: n,j,q, elnodes(3), ed(2), elem, nz,nzmin +real(kind=WP) :: a(2), b(2), ax, ay, lon, lat, vol, vol2 real(kind=WP), allocatable,dimension(:) :: work_array real(kind=WP) :: t0, t1 type(t_mesh), intent(inout), target :: mesh @@ -1855,13 +1855,12 @@ SUBROUTINE mesh_areas(mesh) DO n=1, myDim_nod2D DO j=1,mesh%nod_in_elem2D_num(n) elem=mesh%nod_in_elem2D(j,n) - !!PS DO nz=mesh%ulevels(elem),mesh%nlevels(elem)-1 - DO nz=1,mesh%nlevels(elem)-1 + DO nz=mesh%ulevels(elem),mesh%nlevels(elem)-1 + !!PS DO nz=1,mesh%nlevels(elem)-1 mesh%area(nz,n)=mesh%area(nz,n)+mesh%elem_area(elem)/3.0_WP END DO END DO END DO - ! Only areas through which there is exchange are counted ! =========== @@ -1870,7 +1869,17 @@ SUBROUTINE mesh_areas(mesh) mesh%elem_area=mesh%elem_area*r_earth*r_earth mesh%area=mesh%area*r_earth*r_earth - call exchange_nod(mesh%area) +call exchange_nod(mesh%area) + + +!!PS do n=1, myDim_nod2D +!!PS nzmin = mesh%ulevels_nod2d(n) +!!PS if (nzmin>1) then +!!PS write(*,*) ' --> mesh area:', mype, n, nzmin, mesh%area(nzmin,n),mesh%area(nzmin+1,n),mesh%area(nzmin+2,n) +!!PS end if +!!PS end do + + do n=1,myDim_nod2d+eDim_nod2D do nz=1,mesh%nl @@ -1884,9 +1893,19 @@ SUBROUTINE mesh_areas(mesh) ! coordinates are in radians, edge_dxdy are in meters, ! and areas are in m^2 +!!PS do n=1,myDim_nod2d+eDim_nod2D +!!PS mesh%area_inv(1:mesh%ulevels_nod2D(n)-1,n) = 0.0_WP +!!PS mesh%area(1:mesh%ulevels_nod2D(n)-1,n) = 0.0_WP +!!PS end do + + allocate(work_array(myDim_nod2D)) - mesh%mesh_resolution=sqrt(mesh%area(1, :)/pi)*2._WP + !!PS mesh%mesh_resolution=sqrt(mesh%area(1, :)/pi)*2._WP + do n=1,myDim_nod2d+eDim_nod2D + mesh%mesh_resolution(n)=sqrt(mesh%area(mesh%ulevels_nod2D(n), n)/pi)*2._WP + end do + DO q=1, 3 !apply mass matrix N times to smooth the field DO n=1, myDim_nod2D vol=0._WP @@ -1907,12 +1926,18 @@ SUBROUTINE mesh_areas(mesh) deallocate(work_array) vol=0.0_WP + vol2=0.0_WP do n=1, myDim_nod2D + vol2=vol2+mesh%area(mesh%ulevels_nod2D(n), n) + if (mesh%ulevels_nod2D(n)>1) cycle vol=vol+mesh%area(1, n) end do mesh%ocean_area=0.0 + mesh%ocean_areawithcav=0.0 call MPI_AllREDUCE(vol, mesh%ocean_area, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(vol2, mesh%ocean_areawithcav, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & + MPI_COMM_FESOM, MPIerr) if (mype==0) then write(*,*) mype, 'Mesh statistics:' @@ -1921,10 +1946,12 @@ SUBROUTINE mesh_areas(mesh) ' MinScArea ', minval(mesh%area(1,:)) write(*,*) mype, 'Edges: ', mesh%edge2D, ' internal ', mesh%edge2D_in if (mype==0) then - write(*,*) 'Total ocean area is: ', mesh%ocean_area, ' m^2' + write(*,*) 'Total ocean surface area is : ', mesh%ocean_area, ' m^2' + write(*,*) 'Total ocean surface area wth cavity is: ', mesh%ocean_areawithcav, ' m^2' end if endif + t1=MPI_Wtime() if (mype==0) then write(*,*) 'mesh_areas finished in ', t1-t0, ' seconds' diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 5ab678381..b1b646aba 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -205,6 +205,7 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: ssh_rhs(:), hpressure(:,:) real(kind=WP), allocatable :: CFL_z(:,:) real(kind=WP), allocatable :: stress_surf(:,:) +real(kind=WP), allocatable :: stress_node_surf(:,:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_x(:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_y(:) real(kind=WP), allocatable :: T_rhs(:,:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 236878576..fe4273765 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -247,6 +247,7 @@ SUBROUTINE array_setup(mesh) ! ================ allocate(Tclim(nl-1,node_size), Sclim(nl-1, node_size)) allocate(stress_surf(2,myDim_elem2D)) !!! Attention, it is shorter !!! +allocate(stress_node_surf(2,node_size)) allocate(stress_atmoce_x(node_size), stress_atmoce_y(node_size)) allocate(relax2clim(node_size)) allocate(heat_flux(node_size), Tsurf(node_size)) @@ -371,8 +372,11 @@ SUBROUTINE array_setup(mesh) Ssurf_old=0.0_WP !PS real_salt_flux=0.0_WP - stress_atmoce_x=0. - stress_atmoce_y=0. + + stress_surf =0.0_WP + stress_node_surf =0.0_WP + stress_atmoce_x =0.0_WP + stress_atmoce_y =0.0_WP tr_arr=0.0_WP tr_arr_old=0.0_WP From 215bec169c5530b87b61b9f2f0801a6914142d2d Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 3 Feb 2021 14:00:28 +0100 Subject: [PATCH 076/909] fix bug mesh partitioning, that caused in cavity case to have isolated elements --- src/fvom_init.F90 | 98 +++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 83 insertions(+), 15 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 810899d93..2a2c73972 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -831,7 +831,9 @@ subroutine find_levels_cavity(mesh) integer :: exit_flag, count_iter, count_neighb_open, nneighb, cavity_maxlev real(kind=WP) :: dmean character*200 :: file_name - type(t_mesh), intent(inout), target :: mesh + integer, allocatable, dimension(:,:) :: numelemtonode, idxelemtonode + integer, allocatable, dimension(:) :: elemreducelvl + type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" !___________________________________________________________________________ @@ -839,8 +841,7 @@ subroutine find_levels_cavity(mesh) ulevels => mesh%ulevels allocate(mesh%ulevels_nod2D(nod2D)) ulevels_nod2D => mesh%ulevels_nod2D -!!PS allocate(mesh%cavity_flag_n(nod2D)) -!!PS cavity_flag_n => mesh%cavity_flag_n + !___________________________________________________________________________ ! Compute level position of ocean-cavity boundary @@ -862,9 +863,9 @@ subroutine find_levels_cavity(mesh) ulevels(elem) = 1 do nz=1,nlevels(elem)-1 !!PS if(Z(nz) I need 4 valid full depth, 3 valid mid-depth levels + ulevels(elem)=nz ! to compute shechpetkin PGF exit end if end do @@ -872,14 +873,16 @@ subroutine find_levels_cavity(mesh) cavity_maxlev = max(cavity_maxlev,ulevels(elem)) end do + !___________________________________________________________________________ ! Eliminate cells that have two cavity boundary faces --> should not be ! possible in FESOM2.0 ! loop over all cavity levels + allocate(elemreducelvl(elem2d)) do nz=1,cavity_maxlev exit_flag=0 count_iter=0 - + elemreducelvl=0 !_______________________________________________________________________ ! iteration loop within each layer do while((exit_flag==0).and.(count_iter<1000)) @@ -915,6 +918,22 @@ subroutine find_levels_cavity(mesh) end if end do ! --> do i = 1, nneighb +!!PS if (elem==133438) then +!!PS write(*,*) +!!PS write(*,*) 'nz =', nz +!!PS write(*,*) 'elem =', elem +!!PS write(*,*) 'ulevels(elem) =', ulevels(elem) +!!PS write(*,*) 'nlevels(elem) =', nlevels(elem) +!!PS write(*,*) 'elemreducelvl(elem)=',elemreducelvl(elem) +!!PS write(*,*) 'elems =', elems +!!PS write(*,*) 'ulevels(elems) =', ulevels(elems) +!!PS write(*,*) 'nlevels(elems) =', nlevels(elems) +!!PS write(*,*) 'nlvl-ulvl =', nlevels(elems)-ulevels(elems) +!!PS write(*,*) 'elemreducelvl(elems)=',elemreducelvl(elems) +!!PS write(*,*) 'count_neighb_open =',count_neighb_open +!!PS write(*,*) +!!PS end if + !___________________________________________________________ ! check how many open faces to neighboring triangles the cell ! has, if there are less than 2 its isolated (a cell should @@ -925,13 +944,33 @@ subroutine find_levels_cavity(mesh) ! except when this levels would remain less than 3 valid ! bottom levels --> in case make the levels of all sorounding ! one level shallower - if (nlevels(elem)-(nz+1)<=3) then + if (nlevels(elem)-(nz+1)<3) then do j = 1, nneighb - if (elems(j)>0 .and. ulevels(elems(j))>1 ) ulevels(elems(j)) = min(ulevels(elems(j)),nz) + if (elems(j)>0) then + if (ulevels(elems(j))>1 .and. ulevels(elems(j))>ulevels(elem) ) then + ulevels(elems(j)) = min(ulevels(elems(j)),nz) + elemreducelvl(elems(j))=1 + end if + end if end do - else + + ! if ulevel of element was already made once shalower, do + ! not allow ti be deepen again + elseif (elemreducelvl(elem)==0) then ulevels(elem)=nz+1 + ! if ulevel of element still hasent enough neighbors, can not + ! be deepend any more (elemreducelvl(elem)==1), so allow + ! neighbouring elemnts to become shallower + else + do j = 1, nneighb + if (elems(j)>0) then + if (ulevels(elems(j))>1 .and. ulevels(elems(j))>ulevels(elem) ) then + ulevels(elems(j)) = min(ulevels(elems(j)),nz) + elemreducelvl(elems(j))=1 + end if + end if + end do end if !force recheck for all current ocean cells @@ -942,6 +981,7 @@ subroutine find_levels_cavity(mesh) end do ! --> do elem=1,elem2D end do ! --> do while((exit_flag==0).and.(count_iter<1000)) end do ! --> do nz=1,cavity_maxlev + deallocate(elemreducelvl) !___________________________________________________________________________ ! vertical vertice level index of cavity_ocean boundary @@ -952,11 +992,7 @@ subroutine find_levels_cavity(mesh) ! loop over neighbouring triangles do j=1,nneighb node=elem2D_nodes(j,elem) -!!PS if(ulevels_nod2D(node)<=ulevels(elem)) then -!!PS ulevels_nod2D(node)=ulevels(elem) -!!PS end if ulevels_nod2D(node)=min(ulevels_nod2D(node),ulevels(elem)) - end do end do @@ -972,6 +1008,7 @@ subroutine find_levels_cavity(mesh) write(*,*) ' ulevels,nlevels = ',ulevels(elem), nlevels(elem) write(*,*) ' ulevels(neighb) = ',ulevels(elem_neighbors(1:3,elem)) write(*,*) ' nlevels(neighb) = ',nlevels(elem_neighbors(1:3,elem)) + call par_ex(0) end if end do @@ -996,7 +1033,38 @@ subroutine find_levels_cavity(mesh) call par_ex(0) end if end do -!!PS !___________________________________________________________________________ + + !___________________________________________________________________________ + ! check how many triangle elements contribute to every vertice in every layer + ! every vertice in every layer should be connected to at least two triangle + ! elements ! + allocate(numelemtonode(nl,nod2D),idxelemtonode(nl,nod2D)) + numelemtonode=0 + idxelemtonode=0 + do node=1, nod2D + do j=1,nod_in_elem2D_num(node) + elem=nod_in_elem2D(j,node) + do nz=ulevels(elem),nlevels(elem)-1 + numelemtonode(nz,node) = numelemtonode(nz,node) + 1 + idxelemtonode(nz,node) = elem + end do + end do + end do + + exit_flag = 0 + do node=1, nod2D + do nz=1,nl + if (numelemtonode(nz,node)== 1) then + write(*,*) 'ERROR: found vertice with just one triangle:', mype, nz, 'node=',node, ulevels_nod2d(node), nlevels_nod2D(node), & + 'elem=', idxelemtonode(nz,node), ulevels(idxelemtonode(nz,node)), nlevels(idxelemtonode(nz,node)) + exit_flag = 1 + end if + end do + end do + deallocate(numelemtonode,idxelemtonode) + if (exit_flag == 1) call par_ex(0) + + !___________________________________________________________________________ !!PS ! compute nodal cavity flag: 1 yes cavity/ 0 no cavity !!PS cavity_flag = 0 !!PS do node=1,nod2D From 4f6b8a357a296737f861d57ebcec8dc9027d8ae9 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 17 Feb 2021 17:22:29 +0100 Subject: [PATCH 077/909] introduce new variable for the scalar cell area, make here the difference between the mid-scalar cell area and area of the upper scalar cell edge. In case on no cavity both are identical but with cavity they are different from each other. Since below the cavity, the scalar cell area is defined by the area of the lower edge of the scalar control volume --- src/MOD_MESH.F90 | 2 +- src/associate_mesh.h | 14 +- src/oce_mesh.F90 | 442 +++++++++++++++++++++++++++---------------- src/oce_modules.F90 | 2 +- 4 files changed, 294 insertions(+), 166 deletions(-) diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index 795242a8d..0e275067a 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -63,7 +63,7 @@ MODULE MOD_MESH ! ! !___horizontal mesh info________________________________________________________ -real(kind=WP), allocatable, dimension(:,:) :: area, area_inv +real(kind=WP), allocatable, dimension(:,:) :: area, area_inv, areasvol, areasvol_inv real(kind=WP), allocatable, dimension(:) :: mesh_resolution ! diff --git a/src/associate_mesh.h b/src/associate_mesh.h index 30611d44b..26c197ec6 100644 --- a/src/associate_mesh.h +++ b/src/associate_mesh.h @@ -23,7 +23,7 @@ real(kind=WP), dimension(:,:), pointer :: gradient_sca integer, dimension(:) , pointer :: bc_index_nod2D real(kind=WP), dimension(:) , pointer :: zbar, Z, elem_depth integer, dimension(:) , pointer :: nlevels, nlevels_nod2D, nlevels_nod2D_min -real(kind=WP), dimension(:,:), pointer :: area, area_inv +real(kind=WP), dimension(:,:), pointer :: area, area_inv, areasvol, areasvol_inv real(kind=WP), dimension(:) , pointer :: mesh_resolution real(kind=WP), dimension(:) , pointer :: lump2d_north, lump2d_south type(sparse_matrix) , pointer :: ssh_stiff @@ -47,6 +47,7 @@ nl => mesh%nl !!$edge_tri => mesh%edge_tri !!$elem_edges => mesh%elem_edges !!$elem_area => mesh%elem_area +!!$node_area => mesh%node_area !!$edge_dxdy => mesh%edge_dxdy !!$edge_cross_dxdy => mesh%edge_cross_dxdy !!$elem_cos => mesh%elem_cos @@ -66,7 +67,8 @@ nl => mesh%nl !!$nlevels => mesh%nlevels !!$nlevels_nod2D => mesh%nlevels_nod2D !!$nlevels_nod2D_min => mesh%nlevels_nod2D_min -!!$area => mesh%area +!!$area => mesh%area +!!$area2 => mesh%area2 !!$area_inv => mesh%area_inv !!$mesh_resolution => mesh%mesh_resolution !!$ssh_stiff => mesh%ssh_stiff @@ -84,7 +86,7 @@ elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes edges(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edges edge_tri(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_tri elem_edges(1:3,1:myDim_elem2D) => mesh%elem_edges -elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_area +elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_area edge_dxdy(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_dxdy edge_cross_dxdy(1:4,1:myDim_edge2D+eDim_edge2D) => mesh%edge_cross_dxdy elem_cos(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_cos @@ -104,8 +106,10 @@ elem_depth => mesh%elem_depth ! never used, not even allocated nlevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%nlevels nlevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D nlevels_nod2D_min(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D_min -area(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area -area_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area_inv +area(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area +areasvol(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol +area_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area_inv +areasvol_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol_inv mesh_resolution(1:myDim_nod2d+eDim_nod2D) => mesh%mesh_resolution ssh_stiff => mesh%ssh_stiff lump2d_north(1:myDim_nod2d) => mesh%lump2d_north diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 763c31020..2a6d5980a 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -118,13 +118,17 @@ SUBROUTINE mesh_setup(mesh) call set_mesh_transform_matrix !(rotated grid) call read_mesh(mesh) call set_par_support(mesh) - call find_levels(mesh) - - if (use_cavity) call find_levels_cavity(mesh) - +!!PS call find_levels(mesh) +!!PS +!!PS if (use_cavity) call find_levels_cavity(mesh) +!!PS call test_tri(mesh) call load_edges(mesh) call find_neighbors(mesh) + + call find_levels(mesh) + if (use_cavity) call find_levels_cavity(mesh) + call find_levels_min_e2n(mesh) call mesh_areas(mesh) call mesh_auxiliary_arrays(mesh) @@ -904,7 +908,8 @@ subroutine find_levels_cavity(mesh) integer, allocatable, dimension(:) :: ibuff real(kind=WP) :: t0, t1 logical :: file_exist=.False. - integer :: elem, elnodes(3), ule, uln(3) + integer :: elem, elnodes(3), ule, uln(3), node, j, nz + integer, allocatable, dimension(:,:) :: numelemtonode !NR Cannot include the pointers before the targets are allocated... !NR #include "associate_mesh.h" @@ -1283,6 +1288,30 @@ subroutine find_levels_cavity(mesh) end if end do + + !___________________________________________________________________________ + allocate(numelemtonode(mesh%nl,myDim_nod2d+eDim_nod2D)) + numelemtonode=0 + do node=1, myDim_nod2D+eDim_nod2D + do j=1,mesh%nod_in_elem2D_num(node) + elem=mesh%nod_in_elem2D(j,node) + do nz=mesh%ulevels(elem),mesh%nlevels(elem)-1 + numelemtonode(nz,node) = numelemtonode(nz,node) + 1 + end do + end do + end do + + ! check how many triangle elements contribute to every vertice in every layer + ! every vertice in every layer should be connected to at least two triangle + ! elements ! + do node=1, myDim_nod2D+eDim_nod2D + do nz=1,mesh%nl + if (numelemtonode(nz,node)== 1) then + write(*,*) 'ERROR A: found vertice with just one triangle:', mype, node, nz + end if + end do + end do + end subroutine find_levels_cavity ! ! @@ -1799,164 +1828,257 @@ subroutine elem_center(elem, x, y, mesh) end subroutine elem_center !========================================================================== SUBROUTINE mesh_areas(mesh) -USE MOD_MESH -USE o_PARAM -USE g_PARSUP -USE g_ROTATE_GRID -use g_comm_auto -IMPLICIT NONE -! Collects auxilliary information on the mesh -! Allocated and filled in are: -! elem_area(myDim_elem2D) -! area(nl, myDim_nod2D) - - -integer :: n,j,q, elnodes(3), ed(2), elem, nz,nzmin -real(kind=WP) :: a(2), b(2), ax, ay, lon, lat, vol, vol2 -real(kind=WP), allocatable,dimension(:) :: work_array -real(kind=WP) :: t0, t1 -type(t_mesh), intent(inout), target :: mesh - -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" - -t0=MPI_Wtime() - - allocate(mesh%elem_area(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) - !allocate(elem_area(myDim_elem2D)) - allocate(mesh%area(mesh%nl,myDim_nod2d+eDim_nod2D)) !! Extra size just for simplicity - !! in some further routines - allocate(mesh%area_inv(mesh%nl,myDim_nod2d+eDim_nod2D)) - allocate(mesh%mesh_resolution(myDim_nod2d+eDim_nod2D)) - ! ============ - ! The areas of triangles: - ! ============ - DO n=1, myDim_elem2D - !DO n=1, myDim_elem2D+eDim_elem2D+eXDim_elem2D - elnodes=mesh%elem2D_nodes(:,n) - ay=sum(mesh%coord_nod2D(2,elnodes))/3.0_WP - ay=cos(ay) - if (cartesian) ay=1.0_WP - a = mesh%coord_nod2D(:,elnodes(2))-mesh%coord_nod2D(:,elnodes(1)) - b = mesh%coord_nod2D(:,elnodes(3))-mesh%coord_nod2D(:,elnodes(1)) - call trim_cyclic(a(1)) - call trim_cyclic(b(1)) - a(1)=a(1)*ay - b(1)=b(1)*ay - mesh%elem_area(n)=0.5_WP*abs(a(1)*b(2)-b(1)*a(2)) - END DO - call exchange_elem(mesh%elem_area) - ! ============= - ! Scalar element - ! areas at different levels (there can be partly land) - ! ============= - - mesh%area=0.0_WP - DO n=1, myDim_nod2D - DO j=1,mesh%nod_in_elem2D_num(n) - elem=mesh%nod_in_elem2D(j,n) - DO nz=mesh%ulevels(elem),mesh%nlevels(elem)-1 - !!PS DO nz=1,mesh%nlevels(elem)-1 - mesh%area(nz,n)=mesh%area(nz,n)+mesh%elem_area(elem)/3.0_WP - END DO - END DO - END DO - ! Only areas through which there is exchange are counted - - ! =========== - ! Update to proper dimension - ! =========== - mesh%elem_area=mesh%elem_area*r_earth*r_earth - mesh%area=mesh%area*r_earth*r_earth - -call exchange_nod(mesh%area) - - -!!PS do n=1, myDim_nod2D -!!PS nzmin = mesh%ulevels_nod2d(n) -!!PS if (nzmin>1) then -!!PS write(*,*) ' --> mesh area:', mype, n, nzmin, mesh%area(nzmin,n),mesh%area(nzmin+1,n),mesh%area(nzmin+2,n) -!!PS end if -!!PS end do - - + USE MOD_MESH + USE o_PARAM + USE o_arrays, only: dum_3d_n + USE g_PARSUP + USE g_ROTATE_GRID + use g_comm_auto + IMPLICIT NONE + ! Collects auxilliary information on the mesh + ! Allocated and filled in are: + ! elem_area(myDim_elem2D) + ! area(nl, myDim_nod2D) + + integer :: n,j,q, elnodes(3), ed(2), elem, nz,nzmin, nzmax + real(kind=WP) :: a(2), b(2), ax, ay, lon, lat, vol, vol2 + real(kind=WP), allocatable,dimension(:) :: work_array + integer, allocatable,dimension(:,:) :: cavity_contribut + real(kind=WP) :: t0, t1 + type(t_mesh), intent(inout), target :: mesh + + !NR Cannot include the pointers before the targets are allocated... + !NR #include "associate_mesh.h" -do n=1,myDim_nod2d+eDim_nod2D - do nz=1,mesh%nl - if (mesh%area(nz,n) > 0._WP) then - mesh%area_inv(nz,n) = 1._WP/mesh%area(nz,n) - else - mesh%area_inv(nz,n) = 0._WP - end if - end do -end do - ! coordinates are in radians, edge_dxdy are in meters, - ! and areas are in m^2 - -!!PS do n=1,myDim_nod2d+eDim_nod2D -!!PS mesh%area_inv(1:mesh%ulevels_nod2D(n)-1,n) = 0.0_WP -!!PS mesh%area(1:mesh%ulevels_nod2D(n)-1,n) = 0.0_WP -!!PS end do + t0=MPI_Wtime() + + ! area of triangles + allocate(mesh%elem_area(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) + + ! area of upper edge and lower edge of scalar cell: size nl x node + allocate(mesh%area(mesh%nl,myDim_nod2d+eDim_nod2D)) + + ! "mid" area of scalar cell in case of cavity area \= areasvol, size: nl-1 x node + allocate(mesh%areasvol(mesh%nl-1,myDim_nod2d+eDim_nod2D)) + + ! area inverse + allocate(mesh%area_inv(mesh%nl,myDim_nod2d+eDim_nod2D)) + allocate(mesh%areasvol_inv(mesh%nl-1,myDim_nod2d+eDim_nod2D)) + + ! resolution at nodes + allocate(mesh%mesh_resolution(myDim_nod2d+eDim_nod2D)) + + !___compute triangle areas__________________________________________________ + do n=1, myDim_elem2D + elnodes=mesh%elem2D_nodes(:,n) + ay=sum(mesh%coord_nod2D(2,elnodes))/3.0_WP + ay=cos(ay) + if (cartesian) ay=1.0_WP + a = mesh%coord_nod2D(:,elnodes(2))-mesh%coord_nod2D(:,elnodes(1)) + b = mesh%coord_nod2D(:,elnodes(3))-mesh%coord_nod2D(:,elnodes(1)) + call trim_cyclic(a(1)) + call trim_cyclic(b(1)) + a(1)=a(1)*ay + b(1)=b(1)*ay + mesh%elem_area(n)=0.5_WP*abs(a(1)*b(2)-b(1)*a(2)) + end do + call exchange_elem(mesh%elem_area) + + !___compute areas of upper/lower scalar cell edge___________________________ + ! areas at different levels (there can be partly land) + ! --> only areas through which there is exchange are counted + ! + !-----------------------------~+~~~~~~~+~~~ + ! ############################ | | + ! ############################ | | layer k-3 + ! #################### ._______|_______|___area_k-2 + ! ## CAVITY ######## | / / / | | + ! #################### |/ /°/ /| | layer k-2 --> Transport: w_k-2*A_k-1 + ! ############ ._______|_/_/_/_|_______|___area_k-1 -> A_k-1 lower prisma area defines + ! ############ | | | | scalar area under the cavity + ! ############ | ° | | | layer k-1 + !______________|_______|_______|_______|___area_k + ! | | / / / | | | + ! | |/ /°/ /| | | layer k --> Transport: w_k*A_k + !______|_______|_/_/_/_|_______|_______|___area_k+1 -> A_k upper prisma face area defines + ! | | | | | scalar area of cell + ! | | ° | | | layer k+1 + !______|_______|_______|_______|_______|___area_k+2 + ! #############| | | | + ! #############| ° | | | layer k+2 + ! #############|_______|_______|_______|___area_k+3 + ! #####################| | | + ! #####################| | | layer k+3 + ! ## BOTTOM #########|_______|_______|___area_k+4 + ! #############################| | + ! #############################| | : + ! #############################|_______|___area_k+5 + ! ######################################### + mesh%area = 0.0_WP + mesh%areasvol = 0.0_WP + + if (use_cavity) then + allocate(cavity_contribut(mesh%nl,myDim_nod2d+eDim_nod2D)) + cavity_contribut = 0 + end if + + do n=1, myDim_nod2D+eDim_nod2D + do j=1,mesh%nod_in_elem2D_num(n) + elem=mesh%nod_in_elem2D(j,n) + + !___________________________________________________________________ + ! compute scalar area of prisms at different depth layers. In normal + ! case without cavity the area of the scalar cell corresponds to the + ! area of the upper edge of the prism --> if there is cavity its + ! different. Directly under the cavity the area of scalar cell + ! corresponds to the area of the lower edge + nzmin = mesh%ulevels(elem) + nzmax = mesh%nlevels(elem) + do nz=nzmin,nzmax + mesh%area(nz,n)=mesh%area(nz,n)+mesh%elem_area(elem)/3.0_WP + end do + + !___________________________________________________________________ + ! how many ocean-cavity triangles contribute to an upper edge of a + ! scalar area + if (use_cavity) then + do nz=1,nzmin-1 + cavity_contribut(nz,n)=cavity_contribut(nz,n)+1 + end do + end if + end do + end do + + !___compute "mid" scalar cell area__________________________________________ + ! for cavity case: redefine scalar cell area from upper edge of prism to + ! lower edge of prism if a cavity triangle is present at the upper scalar + ! cell edge + if (use_cavity) then + do n = 1, myDim_nod2D+eDim_nod2D + nzmin = mesh%ulevels_nod2d(n) + nzmax = mesh%nlevels_nod2d(n)-1 + do nz=nzmin,nzmax + if (cavity_contribut(nz,n)>0) then + mesh%areasvol(nz,n) = mesh%area(min(nz+1,nzmax),n) + end if + end do + end do + deallocate(cavity_contribut) + ! for non cavity case: the "mid" area of the scalar cell always corresponds to + ! the area of the upper scalar cell edge + else + do n = 1, myDim_nod2D+eDim_nod2D + nzmin = mesh%ulevels_nod2d(n) + nzmax = mesh%nlevels_nod2d(n)-1 + do nz=nzmin,nzmax + mesh%areasvol(nz,n) = mesh%area(nz,n) + end do + end do + end if + + ! update to proper dimension + ! coordinates are in radians, edge_dxdy are in meters, + ! and areas are in m^2 + mesh%elem_area = mesh%elem_area*r_earth*r_earth + mesh%area = mesh%area *r_earth*r_earth + mesh%areasvol = mesh%areasvol *r_earth*r_earth + call exchange_nod(mesh%area) + call exchange_nod(mesh%areasvol) + + !___compute inverse area____________________________________________________ + mesh%area_inv = 0.0_WP + do n=1,myDim_nod2d+eDim_nod2D + nzmin = mesh%ulevels_nod2d(n) + nzmax = mesh%nlevels_nod2d(n) + do nz=nzmin,nzmax + mesh%area_inv(nz,n) = 1._WP/mesh%area(nz,n) +!!PS if (mesh%area(nz,n) > 0._WP) then +!!PS mesh%area_inv(nz,n) = 1._WP/mesh%area(nz,n) +!!PS else +!!PS mesh%area_inv(nz,n) = 0._WP +!!PS end if + end do + end do + + if (use_cavity) then + mesh%areasvol_inv = 0.0_WP + do n=1,myDim_nod2d+eDim_nod2D + nzmin = mesh%ulevels_nod2d(n) + nzmax = mesh%nlevels_nod2d(n)-1 + do nz=nzmin,nzmax + mesh%areasvol_inv(nz,n) = 1._WP/mesh%areasvol(nz,n) +!!PS if (mesh%areasvol(nz,n) > 0._WP) then +!!PS mesh%areasvol_inv(nz,n) = 1._WP/mesh%areasvol(nz,n) +!!PS else +!!PS mesh%areasvol_inv(nz,n) = 0._WP +!!PS end if + end do + end do + else + mesh%areasvol_inv = mesh%area_inv + endif - - allocate(work_array(myDim_nod2D)) - !!PS mesh%mesh_resolution=sqrt(mesh%area(1, :)/pi)*2._WP - do n=1,myDim_nod2d+eDim_nod2D - mesh%mesh_resolution(n)=sqrt(mesh%area(mesh%ulevels_nod2D(n), n)/pi)*2._WP - end do + !___compute scalar cell resolution__________________________________________ + allocate(work_array(myDim_nod2D)) + !!PS mesh%mesh_resolution=sqrt(mesh%area(1, :)/pi)*2._WP + do n=1,myDim_nod2d+eDim_nod2D + mesh%mesh_resolution(n)=sqrt(mesh%areasvol(mesh%ulevels_nod2d(n),n)/pi)*2._WP + end do - DO q=1, 3 !apply mass matrix N times to smooth the field - DO n=1, myDim_nod2D - vol=0._WP - work_array(n)=0._WP - DO j=1, mesh%nod_in_elem2D_num(n) - elem=mesh%nod_in_elem2D(j, n) - elnodes=mesh%elem2D_nodes(:,elem) - work_array(n)=work_array(n)+sum(mesh%mesh_resolution(elnodes))/3._WP*mesh%elem_area(elem) - vol=vol+mesh%elem_area(elem) - END DO - work_array(n)=work_array(n)/vol + ! smooth resolution + DO q=1, 3 !apply mass matrix N times to smooth the field + DO n=1, myDim_nod2D + vol=0._WP + work_array(n)=0._WP + DO j=1, mesh%nod_in_elem2D_num(n) + elem=mesh%nod_in_elem2D(j, n) + elnodes=mesh%elem2D_nodes(:,elem) + work_array(n)=work_array(n)+sum(mesh%mesh_resolution(elnodes))/3._WP*mesh%elem_area(elem) + vol=vol+mesh%elem_area(elem) + END DO + work_array(n)=work_array(n)/vol + END DO + DO n=1,myDim_nod2D + mesh%mesh_resolution(n)=work_array(n) + ENDDO + call exchange_nod(mesh%mesh_resolution) END DO - DO n=1,myDim_nod2D - mesh%mesh_resolution(n)=work_array(n) - ENDDO - call exchange_nod(mesh%mesh_resolution) - END DO - deallocate(work_array) - - vol=0.0_WP - vol2=0.0_WP - do n=1, myDim_nod2D - vol2=vol2+mesh%area(mesh%ulevels_nod2D(n), n) - if (mesh%ulevels_nod2D(n)>1) cycle - vol=vol+mesh%area(1, n) - end do - mesh%ocean_area=0.0 - mesh%ocean_areawithcav=0.0 - call MPI_AllREDUCE(vol, mesh%ocean_area, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & - MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(vol2, mesh%ocean_areawithcav, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & - MPI_COMM_FESOM, MPIerr) - -if (mype==0) then - write(*,*) mype, 'Mesh statistics:' - write(*,*) mype, 'maxArea ',maxval(mesh%elem_area), ' MinArea ', minval(mesh%elem_area) - write(*,*) mype, 'maxScArea ',maxval(mesh%area(1,:)), & - ' MinScArea ', minval(mesh%area(1,:)) - write(*,*) mype, 'Edges: ', mesh%edge2D, ' internal ', mesh%edge2D_in - if (mype==0) then - write(*,*) 'Total ocean surface area is : ', mesh%ocean_area, ' m^2' - write(*,*) 'Total ocean surface area wth cavity is: ', mesh%ocean_areawithcav, ' m^2' - end if -endif - + deallocate(work_array) + + !___compute total ocean areas with/without cavity___________________________ + vol = 0.0_WP + vol2= 0.0_WP + do n=1, myDim_nod2D + vol2=vol2+mesh%area(mesh%ulevels_nod2D(n), n) ! area also under cavity + if (mesh%ulevels_nod2D(n)>1) cycle + vol=vol+mesh%area(1, n) ! area only surface + end do + mesh%ocean_area=0.0 + mesh%ocean_areawithcav=0.0 + call MPI_AllREDUCE(vol, mesh%ocean_area, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & + MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(vol2, mesh%ocean_areawithcav, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & + MPI_COMM_FESOM, MPIerr) + + !___write mesh statistics___________________________________________________ + if (mype==0) then + write(*,*) '____________________________________________________________________' + write(*,*) ' --> mesh statistics:', mype + write(*,*) mype, 'maxArea ',maxval(mesh%elem_area), ' MinArea ', minval(mesh%elem_area) + write(*,*) mype, 'maxScArea ',maxval(mesh%area(1,:)), & + ' MinScArea ', minval(mesh%area(1,:)) + write(*,*) mype, 'Edges: ', mesh%edge2D, ' internal ', mesh%edge2D_in + if (mype==0) then + write(*,*) ' > Total ocean surface area is : ', mesh%ocean_area, ' m^2' + write(*,*) ' > Total ocean surface area wth cavity is: ', mesh%ocean_areawithcav, ' m^2' + end if + endif -t1=MPI_Wtime() -if (mype==0) then - write(*,*) 'mesh_areas finished in ', t1-t0, ' seconds' - write(*,*) '=========================' -endif + t1=MPI_Wtime() + if (mype==0) then + write(*,*) ' > mesh_areas finished in ', t1-t0, ' seconds' + endif END SUBROUTINE mesh_areas !=================================================================== @@ -2288,10 +2410,12 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) do i=1, myDim_nod2D if (mesh%geo_coord_nod2D(2, i) > 0) then nn=nn+1 - mesh%lump2d_north(i)=mesh%area(1, i) +!!PS mesh%lump2d_north(i)=mesh%area(1, i)! --> TEST_cavity + mesh%lump2d_north(i)=mesh%node_area(i) else ns=ns+1 - mesh%lump2d_south(i)=mesh%area(1, i) +!!PS mesh%lump2d_south(i)=mesh%area(1, i)! --> TEST_cavity + mesh%lump2d_south(i)=mesh%node_area(i) end if end do diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index b1b646aba..460828137 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -304,7 +304,7 @@ MODULE o_ARRAYS !_______________________________________________________________________________ !!PS ! dummy arrays -!!PS real(kind=WP), allocatable,dimension(:,:) :: dum_3d_n, dum_3d_e +real(kind=WP), allocatable,dimension(:,:) :: dum_3d_n !, dum_3d_e !!PS real(kind=WP), allocatable,dimension(:) :: dum_2d_n, dum_2d_e !_______________________________________________________________________________ From 08d0d24d486f0c5b1006d1a07b5b0f7c63315056 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 17 Feb 2021 17:40:29 +0100 Subject: [PATCH 078/909] improve computation of mid scalar cell area in oce_mesh.F90 --- src/oce_mesh.F90 | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 2a6d5980a..cc5a9e599 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -1950,7 +1950,7 @@ SUBROUTINE mesh_areas(mesh) end do !___compute "mid" scalar cell area__________________________________________ - ! for cavity case: redefine scalar cell area from upper edge of prism to + ! for cavity case: redefine "mid" scalar cell area from upper edge of prism to ! lower edge of prism if a cavity triangle is present at the upper scalar ! cell edge if (use_cavity) then @@ -1960,6 +1960,8 @@ SUBROUTINE mesh_areas(mesh) do nz=nzmin,nzmax if (cavity_contribut(nz,n)>0) then mesh%areasvol(nz,n) = mesh%area(min(nz+1,nzmax),n) + else + mesh%areasvol(nz,n) = mesh%area(nz,n) end if end do end do @@ -2027,23 +2029,23 @@ SUBROUTINE mesh_areas(mesh) end do ! smooth resolution - DO q=1, 3 !apply mass matrix N times to smooth the field - DO n=1, myDim_nod2D - vol=0._WP - work_array(n)=0._WP - DO j=1, mesh%nod_in_elem2D_num(n) - elem=mesh%nod_in_elem2D(j, n) - elnodes=mesh%elem2D_nodes(:,elem) - work_array(n)=work_array(n)+sum(mesh%mesh_resolution(elnodes))/3._WP*mesh%elem_area(elem) - vol=vol+mesh%elem_area(elem) - END DO - work_array(n)=work_array(n)/vol - END DO - DO n=1,myDim_nod2D - mesh%mesh_resolution(n)=work_array(n) - ENDDO + do q=1, 3 !apply mass matrix N times to smooth the field + do n=1, myDim_nod2D + vol=0._WP + work_array(n)=0._WP + do j=1, mesh%nod_in_elem2D_num(n) + elem=mesh%nod_in_elem2D(j, n) + elnodes=mesh%elem2D_nodes(:,elem) + work_array(n)=work_array(n)+sum(mesh%mesh_resolution(elnodes))/3._WP*mesh%elem_area(elem) + vol=vol+mesh%elem_area(elem) + end do + work_array(n)=work_array(n)/vol + end do + do n=1,myDim_nod2D + mesh%mesh_resolution(n)=work_array(n) + end do call exchange_nod(mesh%mesh_resolution) - END DO + end do deallocate(work_array) !___compute total ocean areas with/without cavity___________________________ From d746b16c94d55b24d23e1d4f8dc50b855628d482 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 17 Feb 2021 17:44:02 +0100 Subject: [PATCH 079/909] exchange area_inv with areasvol_inv in subroutine momentum_adv_scalar --- src/oce_ale_vel_rhs.F90 | 312 ++++++++++++++++++++++------------------ 1 file changed, 174 insertions(+), 138 deletions(-) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index a362bd1fe..210c7f28d 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -168,150 +168,186 @@ subroutine momentum_adv_scalar(mesh) #include "associate_mesh.h" -!_______________________________________________________________________________ -do n=1,myDim_nod2d - nl1 = nlevels_nod2D(n)-1 - ul1 = ulevels_nod2D(n) - wu(1:nl1+1) = 0._WP - wv(1:nl1+1) = 0._WP - - do k=1,nod_in_elem2D_num(n) - el = nod_in_elem2D(k,n) - nle = nlevels(el)-1 - ule = ulevels(el) - !___________________________________________________________________________ - ! The vertical part for each element is collected - !!PS wu(1) = wu(1) + UV(1,1,el)*elem_area(el) - !!PS wv(1) = wv(1) + UV(2,1,el)*elem_area(el) - wu(ule) = wu(ule) + UV(1,ule,el)*elem_area(el) - wv(ule) = wv(ule) + UV(2,ule,el)*elem_area(el) - - !!PS wu(2:nle) = wu(2:nle) + 0.5_WP*(UV(1,2:nle,el)+UV(1,1:nle-1,el))*elem_area(el) - !!PS wv(2:nle) = wv(2:nle) + 0.5_WP*(UV(2,2:nle,el)+UV(2,1:nle-1,el))*elem_area(el) - wu(ule+1:nle) = wu(ule+1:nle) + 0.5_WP*(UV(1,ule+1:nle,el)+UV(1,ule:nle-1,el))*elem_area(el) - wv(ule+1:nle) = wv(ule+1:nle) + 0.5_WP*(UV(2,ule+1:nle,el)+UV(2,ule:nle-1,el))*elem_area(el) - enddo - - !!PS wu(1:nl1) = wu(1:nl1)*Wvel_e(1:nl1,n) - !!PS wv(1:nl1) = wv(1:nl1)*Wvel_e(1:nl1,n) - wu(ul1:nl1) = wu(ul1:nl1)*Wvel_e(ul1:nl1,n) - wv(ul1:nl1) = wv(ul1:nl1)*Wvel_e(ul1:nl1,n) - - !!PS do nz=1,nl1 - do nz=ul1,nl1 - ! Here 1/3 because 1/3 of the area is related to the node - Unode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) - Unode_rhs(2,nz,n) = - (wv(nz) - wv(nz+1) ) / (3._WP*hnode(nz,n)) - - enddo - - ! To get a clean checksum, set the remaining values to zero - Unode_rhs(1:2,nl1+1:nl-1,n) = 0._WP - Unode_rhs(1:2,1:ul1-1 ,n) = 0._WP -end do - - -!_______________________________________________________________________________ -DO ed=1, myDim_edge2D - nod = edges(:,ed) - el1 = edge_tri(1,ed) - el2 = edge_tri(2,ed) - nl1 = nlevels(el1)-1 - ul1 = ulevels(el1) - - !___________________________________________________________________________ - ! The horizontal part - !!PS un1(1:nl1) = UV(2,1:nl1,el1)*edge_cross_dxdy(1,ed) & - !!PS - UV(1,1:nl1,el1)*edge_cross_dxdy(2,ed) - un1(ul1:nl1) = UV(2,ul1:nl1,el1)*edge_cross_dxdy(1,ed) & - - UV(1,ul1:nl1,el1)*edge_cross_dxdy(2,ed) - !___________________________________________________________________________ - if (el2>0) then - nl2 = nlevels(el2)-1 - ul2 = ulevels(el2) - - !!PS un2(1:nl2) = - UV(2,1:nl2,el2)*edge_cross_dxdy(3,ed) & - !!PS + UV(1,1:nl2,el2)*edge_cross_dxdy(4,ed) - un2(ul2:nl2) = - UV(2,ul2:nl2,el2)*edge_cross_dxdy(3,ed) & - + UV(1,ul2:nl2,el2)*edge_cross_dxdy(4,ed) - - ! fill with zeros to combine the loops - ! Usually, no or only a very few levels have to be filled. In this case, - ! computing "zeros" is cheaper than the loop overhead. - un1(nl1+1:max(nl1,nl2)) = 0._WP - un2(nl2+1:max(nl1,nl2)) = 0._WP - un1(1:ul1-1) = 0._WP - un2(1:ul2-1) = 0._WP - - ! first edge node - ! Do not calculate on Halo nodes, as the result will not be used. - ! The "if" is cheaper than the avoided computiations. - if (nod(1) <= myDim_nod2d) then - !!PS do nz=1, max(nl1,nl2) - do nz=min(ul1,ul2), max(nl1,nl2) - Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) - Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) - end do - endif - - if (nod(2) <= myDim_nod2d) then - !!PS do nz=1, max(nl1,nl2) - do nz=min(ul1,ul2), max(nl1,nl2) - Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) - Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) - end do - endif + !___________________________________________________________________________ + ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz + do n=1,myDim_nod2d + nl1 = nlevels_nod2D(n)-1 + ul1 = ulevels_nod2D(n) + wu(1:nl1+1) = 0._WP + wv(1:nl1+1) = 0._WP + + !_______________________________________________________________________ + ! loop over adjacent elements of vertice n + do k=1,nod_in_elem2D_num(n) + el = nod_in_elem2D(k,n) + !___________________________________________________________________ + nle = nlevels(el)-1 + ule = ulevels(el) + + !___________________________________________________________________ + ! accumulate horizontal velocities at full depth levels (top and + ! bottom faces of prism) + ! account here also for boundary condition below cavity --> + ! horizontal velocity at cavity-ocean interce ule (if ule>1) must be + ! zero ??? + if (ule==1) then + wu(ule) = wu(ule) + UV(1,ule,el)*elem_area(el) + wv(ule) = wv(ule) + UV(2,ule,el)*elem_area(el) + end if + + ! interpolate horizontal velocity from mid-depth levels to full + ! depth levels of upper and lower prism faces and average over adjacent + ! elements of vertice n + wu(ule+1:nle) = wu(ule+1:nle) + 0.5_WP*(UV(1,ule+1:nle,el)+UV(1,ule:nle-1,el))*elem_area(el) + wv(ule+1:nle) = wv(ule+1:nle) + 0.5_WP*(UV(2,ule+1:nle,el)+UV(2,ule:nle-1,el))*elem_area(el) + enddo + + !_______________________________________________________________________ + ! multiply w*du and w*dv + wu(ul1:nl1) = wu(ul1:nl1)*Wvel_e(ul1:nl1,n) + wv(ul1:nl1) = wv(ul1:nl1)*Wvel_e(ul1:nl1,n) + + !_______________________________________________________________________ + ! compute w*du/dz, w*dv/dz + do nz=ul1,nl1 +!!PS if (ul1>1) write(*,*) mype, wu(ul1:nl1) + ! Here 1/3 because 1/3 of the area is related to the node --> comes from + ! averaging the elemental velocities + Unode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) + Unode_rhs(2,nz,n) = - (wv(nz) - wv(nz+1) ) / (3._WP*hnode(nz,n)) + + enddo + + !_______________________________________________________________________ + ! To get a clean checksum, set the remaining values to zero + Unode_rhs(1:2,nl1+1:nl-1,n) = 0._WP + Unode_rhs(1:2,1:ul1-1 ,n) = 0._WP + end do - else ! ed is a boundary edge, there is only the contribution from el1 - if (nod(1) <= myDim_nod2d) then - !!PS do nz=1, nl1 - do nz=ul1, nl1 + !___________________________________________________________________________ + ! 2nd. compute horizontal advection component: u*du/dx, u*dv/dx & v*du/dy, v*dv/dy + ! loop over triangle edges + do ed=1, myDim_edge2D + nod = edges(:,ed) + el1 = edge_tri(1,ed) + el2 = edge_tri(2,ed) + nl1 = nlevels(el1)-1 + ul1 = ulevels(el1) + + !_______________________________________________________________________ + ! compute horizontal normal velocity with respect to the edge from triangle + ! centroid towards triangel edge mid-pointe for element el1 + ! .o. + ! ./ \. + ! ./ el1 \. + ! ./ x \. + ! ./ |-------\.-----------------edge_cross_dxdy(1:2,ed) --> (dx,dy) + ! / |->n_vec \ + ! nod(1) o----------O----------o nod(2) + ! \. |->n_vec ./ + ! \. |------./------------------edge_cross_dxdy(3:4,ed) --> (dx,dy) + ! \. x ./ + ! \. el2 ./ + ! \. ./ + ! ° + un1(ul1:nl1) = UV(2,ul1:nl1,el1)*edge_cross_dxdy(1,ed) & + - UV(1,ul1:nl1,el1)*edge_cross_dxdy(2,ed) + + !_______________________________________________________________________ + ! compute horizontal normal velocity with respect to the edge from triangle + ! centroid towards triangel edge mid-pointe for element el2 when it is valid + ! --> if its a boundary triangle el2 will be not valid + if (el2>0) then ! --> el2 is valid element + nl2 = nlevels(el2)-1 + ul2 = ulevels(el2) - Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) - Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) - end do - endif - ! second edge node - if (nod(2) <= myDim_nod2d) then - !!PS do nz=1, nl1 - do nz=ul1, nl1 - Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - end do - endif - endif - -end do + un2(ul2:nl2) = - UV(2,ul2:nl2,el2)*edge_cross_dxdy(3,ed) & + + UV(1,ul2:nl2,el2)*edge_cross_dxdy(4,ed) + + ! fill with zeros to combine the loops + ! Usually, no or only a very few levels have to be filled. In this case, + ! computing "zeros" is cheaper than the loop overhead. + un1(nl1+1:max(nl1,nl2)) = 0._WP + un2(nl2+1:max(nl1,nl2)) = 0._WP + un1(1:ul1-1) = 0._WP + un2(1:ul2-1) = 0._WP + + ! first edge node + ! Do not calculate on Halo nodes, as the result will not be used. + ! The "if" is cheaper than the avoided computiations. + if (nod(1) <= myDim_nod2d) then + do nz=min(ul1,ul2), max(nl1,nl2) + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) + Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) + Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) + end do + endif + + ! second edge node + if (nod(2) <= myDim_nod2d) then + do nz=min(ul1,ul2), max(nl1,nl2) + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) + Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) + Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) + end do + endif + + else ! el2 is not a valid element --> ed is a boundary edge, there is only the contribution from el1 + ! first edge node + if (nod(1) <= myDim_nod2d) then + do nz=ul1, nl1 + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) + Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + end do ! --> do nz=ul1, nl1 + endif + + ! second edge node + if (nod(2) <= myDim_nod2d) then + !!PS do nz=1, nl1 + do nz=ul1, nl1 + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) + Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) + Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) + end do ! --> do nz=ul1, nl1 + endif + endif ! --> if (el2>0) then + end do ! --> do ed=1, myDim_edge2D -!_______________________________________________________________________________ -do n=1,myDim_nod2d - nl1 = nlevels_nod2D(n)-1 - ul1 = ulevels_nod2D(n) - - !!PS Unode_rhs(1,1:nl1,n) = Unode_rhs(1,1:nl1,n) *area_inv(1:nl1,n) - !!PS Unode_rhs(2,1:nl1,n) = Unode_rhs(2,1:nl1,n) *area_inv(1:nl1,n) - Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *area_inv(ul1:nl1,n) - Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) -end do + !___________________________________________________________________________ + ! divide total nodal advection by scalar area + do n=1,myDim_nod2d + nl1 = nlevels_nod2D(n)-1 + ul1 = ulevels_nod2D(n) +!!PS Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity +!!PS Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity + Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) + Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) + + IF (ANY(areasvol_inv(ul1:nl1,n) < 1.e-15)) THEN + WRITE(*,*) "BLA, BLA" + write(*,*) areasvol_inv(ul1:nl1,n) + write(*,*) area(ul1:nl1,n) + CALL PAR_EX + STOP + END IF + end do !-->do n=1,myDim_nod2d -!_______________________________________________________________________________ -call exchange_nod(Unode_rhs) + !___________________________________________________________________________ + call exchange_nod(Unode_rhs) -!_______________________________________________________________________________ -do el=1, myDim_elem2D - nl1 = nlevels(el)-1 - ul1 = ulevels(el) - !!PS UV_rhsAB(1:2,1:nl1,el) = UV_rhsAB(1:2,1:nl1,el) & - !!PS + elem_area(el)*(Unode_rhs(1:2,1:nl1,elem2D_nodes(1,el)) & - !!PS + Unode_rhs(1:2,1:nl1,elem2D_nodes(2,el)) & - !!PS + Unode_rhs(1:2,1:nl1,elem2D_nodes(3,el))) / 3.0_WP - UV_rhsAB(1:2,ul1:nl1,el) = UV_rhsAB(1:2,ul1:nl1,el) & - + elem_area(el)*(Unode_rhs(1:2,ul1:nl1,elem2D_nodes(1,el)) & - + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(2,el)) & - + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP - -end do + !___________________________________________________________________________ + ! convert total nodal advection from vertice --> elements + do el=1, myDim_elem2D + nl1 = nlevels(el)-1 + ul1 = ulevels(el) + UV_rhsAB(1:2,ul1:nl1,el) = UV_rhsAB(1:2,ul1:nl1,el) & + + elem_area(el)*(Unode_rhs(1:2,ul1:nl1,elem2D_nodes(1,el)) & + + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(2,el)) & + + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP + + end do ! --> do el=1, myDim_elem2D end subroutine momentum_adv_scalar From 9991f35b5242ef1353eeeda481e19c86f94cc5d6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 17 Feb 2021 17:53:21 +0100 Subject: [PATCH 080/909] improve computation of mid scalar cell area in oce_mesh.F90 --- src/oce_mesh.F90 | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index cc5a9e599..8e0fea2c1 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -2412,12 +2412,10 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) do i=1, myDim_nod2D if (mesh%geo_coord_nod2D(2, i) > 0) then nn=nn+1 -!!PS mesh%lump2d_north(i)=mesh%area(1, i)! --> TEST_cavity - mesh%lump2d_north(i)=mesh%node_area(i) + mesh%lump2d_north(i)=mesh%areasvol(mesh%ulevels_nod2d(i), i) else ns=ns+1 -!!PS mesh%lump2d_south(i)=mesh%area(1, i)! --> TEST_cavity - mesh%lump2d_south(i)=mesh%node_area(i) + mesh%lump2d_south(i)=mesh%area(mesh%ulevels_nod2d(i), i) end if end do @@ -2468,7 +2466,7 @@ SUBROUTINE check_mesh_consistency(mesh) aux=0._WP do n=1, myDim_nod2D do nz=mesh%ulevels_nod2D(n), mesh%nlevels_nod2D(n)-1 - aux(nz)=aux(nz)+mesh%area(nz, n) + aux(nz)=aux(nz)+mesh%areasvol(nz, n) end do end do call MPI_AllREDUCE(aux, vol_n, mesh%nl, MPI_DOUBLE_PRECISION, MPI_SUM, & From 8eca26748a514c4819b5398081a4ad46ba6a6690 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 17 Feb 2021 17:54:26 +0100 Subject: [PATCH 081/909] exchange area_inv with areasvol_inv in subroutine momentum_adv_scalar --- src/oce_ale_vel_rhs.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 210c7f28d..1b54398ac 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -328,7 +328,7 @@ subroutine momentum_adv_scalar(mesh) IF (ANY(areasvol_inv(ul1:nl1,n) < 1.e-15)) THEN WRITE(*,*) "BLA, BLA" write(*,*) areasvol_inv(ul1:nl1,n) - write(*,*) area(ul1:nl1,n) + write(*,*) areasvol(ul1:nl1,n) CALL PAR_EX STOP END IF From 4ff09f40272a8dad3fe1c7ffdb0400c70ea02aa4 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 12 Mar 2021 15:52:00 +0100 Subject: [PATCH 082/909] area definition for the scalar area and for the area of the upper edge of the scalar prism. If there is no cavity these two areas are identical but in presence of cavity they can be different --- src/oce_mesh.F90 | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 8e0fea2c1..2d8324ac3 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -1859,11 +1859,11 @@ SUBROUTINE mesh_areas(mesh) allocate(mesh%area(mesh%nl,myDim_nod2d+eDim_nod2D)) ! "mid" area of scalar cell in case of cavity area \= areasvol, size: nl-1 x node - allocate(mesh%areasvol(mesh%nl-1,myDim_nod2d+eDim_nod2D)) + allocate(mesh%areasvol(mesh%nl,myDim_nod2d+eDim_nod2D)) ! area inverse allocate(mesh%area_inv(mesh%nl,myDim_nod2d+eDim_nod2D)) - allocate(mesh%areasvol_inv(mesh%nl-1,myDim_nod2d+eDim_nod2D)) + allocate(mesh%areasvol_inv(mesh%nl,myDim_nod2d+eDim_nod2D)) ! resolution at nodes allocate(mesh%mesh_resolution(myDim_nod2d+eDim_nod2D)) @@ -1914,14 +1914,12 @@ SUBROUTINE mesh_areas(mesh) ! #############################| | : ! #############################|_______|___area_k+5 ! ######################################### - mesh%area = 0.0_WP - mesh%areasvol = 0.0_WP - if (use_cavity) then allocate(cavity_contribut(mesh%nl,myDim_nod2d+eDim_nod2D)) cavity_contribut = 0 end if + mesh%area = 0.0_WP do n=1, myDim_nod2D+eDim_nod2D do j=1,mesh%nod_in_elem2D_num(n) elem=mesh%nod_in_elem2D(j,n) @@ -1933,7 +1931,7 @@ SUBROUTINE mesh_areas(mesh) ! different. Directly under the cavity the area of scalar cell ! corresponds to the area of the lower edge nzmin = mesh%ulevels(elem) - nzmax = mesh%nlevels(elem) + nzmax = mesh%nlevels(elem)-1 do nz=nzmin,nzmax mesh%area(nz,n)=mesh%area(nz,n)+mesh%elem_area(elem)/3.0_WP end do @@ -1953,6 +1951,7 @@ SUBROUTINE mesh_areas(mesh) ! for cavity case: redefine "mid" scalar cell area from upper edge of prism to ! lower edge of prism if a cavity triangle is present at the upper scalar ! cell edge + mesh%areasvol = 0.0_WP if (use_cavity) then do n = 1, myDim_nod2D+eDim_nod2D nzmin = mesh%ulevels_nod2d(n) @@ -1994,12 +1993,12 @@ SUBROUTINE mesh_areas(mesh) nzmin = mesh%ulevels_nod2d(n) nzmax = mesh%nlevels_nod2d(n) do nz=nzmin,nzmax - mesh%area_inv(nz,n) = 1._WP/mesh%area(nz,n) -!!PS if (mesh%area(nz,n) > 0._WP) then -!!PS mesh%area_inv(nz,n) = 1._WP/mesh%area(nz,n) -!!PS else -!!PS mesh%area_inv(nz,n) = 0._WP -!!PS end if +!!PS mesh%area_inv(nz,n) = 1._WP/mesh%area(nz,n) + if (mesh%area(nz,n) > 0._WP) then + mesh%area_inv(nz,n) = 1._WP/mesh%area(nz,n) + else + mesh%area_inv(nz,n) = 0._WP + end if end do end do @@ -2009,12 +2008,12 @@ SUBROUTINE mesh_areas(mesh) nzmin = mesh%ulevels_nod2d(n) nzmax = mesh%nlevels_nod2d(n)-1 do nz=nzmin,nzmax - mesh%areasvol_inv(nz,n) = 1._WP/mesh%areasvol(nz,n) -!!PS if (mesh%areasvol(nz,n) > 0._WP) then -!!PS mesh%areasvol_inv(nz,n) = 1._WP/mesh%areasvol(nz,n) -!!PS else -!!PS mesh%areasvol_inv(nz,n) = 0._WP -!!PS end if +!!PS mesh%areasvol_inv(nz,n) = 1._WP/mesh%areasvol(nz,n) + if (mesh%areasvol(nz,n) > 0._WP) then + mesh%areasvol_inv(nz,n) = 1._WP/mesh%areasvol(nz,n) + else + mesh%areasvol_inv(nz,n) = 0._WP + end if end do end do else From 83887097c8e6153b09400cb90af73ef785560ca5 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 12 Mar 2021 15:57:56 +0100 Subject: [PATCH 083/909] exchange area with areasvol where it is neccessary --- src/oce_ale.F90 | 33 ++++++++--------- src/oce_ale_tracer.F90 | 77 ++++++++++++++++++++++------------------ src/oce_ale_vel_rhs.F90 | 8 ----- src/oce_vel_rhs_vinv.F90 | 4 +-- 4 files changed, 62 insertions(+), 60 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 174697209..967c74dc2 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -574,7 +574,7 @@ subroutine init_thickness_ale(mesh) ! Fill in ssh_rhs_old !!PS ssh_rhs_old=(hbar-hbar_old)*area(1,:)/dt do n=1,myDim_nod2D+eDim_nod2D - ssh_rhs_old(n)=(hbar(n)-hbar_old(n))*area(ulevels_nod2D(n),n)/dt + ssh_rhs_old(n)=(hbar(n)-hbar_old(n))*areasvol(ulevels_nod2D(n),n)/dt ! --> TEST_cavity end do ! -->see equation (14) FESOM2:from finite elements to finie volume @@ -1260,9 +1260,12 @@ subroutine init_stiff_mat_ale(mesh) ! 2nd do first term of lhs od equation (18) of "FESOM2 from finite element to finite volumes" ! Mass matrix part do row=1, myDim_nod2D + ! if cavity no time derivative for eta in case of rigid lid approximation at + ! thee cavity-ocean interface, which means cavity-ocean interface is not allowed + ! to move vertically. + if (ulevels_nod2D(row)>1) cycle offset = ssh_stiff%rowptr(row) - !!PS SSH_stiff%values(offset) = SSH_stiff%values(offset)+ area(1,row)/dt - SSH_stiff%values(offset) = SSH_stiff%values(offset)+ area(ulevels_nod2D(row),row)/dt + SSH_stiff%values(offset) = SSH_stiff%values(offset)+ areasvol(ulevels_nod2D(row),row)/dt end do deallocate(n_pos,n_num) @@ -1551,8 +1554,7 @@ subroutine compute_ssh_rhs_ale(mesh) if ( .not. trim(which_ALE)=='linfs') then do n=1,myDim_nod2D nzmin = ulevels_nod2D(n) - ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*area(nzmin,n)+(1.0_WP-alpha)*ssh_rhs_old(n) - !!PS ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*area(1,n)+(1.0_WP-alpha)*ssh_rhs_old(n) + ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*areasvol(nzmin,n)+(1.0_WP-alpha)*ssh_rhs_old(n) end do else do n=1,myDim_nod2D @@ -1646,7 +1648,7 @@ subroutine compute_hbar_ale(mesh) !!PS end if if (.not. trim(which_ALE)=='linfs') then do n=1,myDim_nod2D - ssh_rhs_old(n)=ssh_rhs_old(n)-water_flux(n)*area(ulevels_nod2D(n),n) + ssh_rhs_old(n)=ssh_rhs_old(n)-water_flux(n)*areasvol(ulevels_nod2D(n),n) end do call exchange_nod(ssh_rhs_old) end if @@ -1658,7 +1660,7 @@ subroutine compute_hbar_ale(mesh) !!PS call exchange_nod(hbar) hbar_old=hbar do n=1,myDim_nod2D - hbar(n)=hbar_old(n)+ssh_rhs_old(n)*dt/area(ulevels_nod2D(n),n) + hbar(n)=hbar_old(n)+ssh_rhs_old(n)*dt/areasvol(ulevels_nod2D(n),n) end do call exchange_nod(hbar) @@ -1702,7 +1704,7 @@ subroutine vert_vel_ale(mesh) use g_forcing_arrays !!PS implicit none - integer :: el(2), enodes(2), n, nz, ed, nzmin, nzmax + integer :: el(2), enodes(2), n, nz, ed, nzmin, nzmax, uln1, uln2, nln1, nln2 real(kind=WP) :: c1, c2, deltaX1, deltaY1, deltaX2, deltaY2, dd, dd1, dddt, cflmax !_______________________________ @@ -1738,7 +1740,7 @@ subroutine vert_vel_ale(mesh) ! do it with gauss-law: int( div(u_vec)*dV) = int( u_vec * n_vec * dS ) nzmin = ulevels(el(1)) nzmax = nlevels(el(1))-1 - !!PS do nz=nlevels(el(1))-1,1,-1 + do nz = nzmax, nzmin, -1 ! --> h * u_vec * n_vec ! --> e_vec = (dx,dy), n_vec = (-dy,dx); @@ -1754,7 +1756,6 @@ subroutine vert_vel_ale(mesh) fer_Wvel(nz,enodes(1))=fer_Wvel(nz,enodes(1))+c1 fer_Wvel(nz,enodes(2))=fer_Wvel(nz,enodes(2))-c1 end if - end do !_______________________________________________________________________ @@ -1764,8 +1765,8 @@ subroutine vert_vel_ale(mesh) deltaX2=edge_cross_dxdy(3,ed) deltaY2=edge_cross_dxdy(4,ed) nzmin = ulevels(el(2)) - nzmax = nlevels(el(2))-1 - !!PS do nz=nlevels(el(2))-1,1,-1 + nzmax = nlevels(el(2))-1 + do nz = nzmax, nzmin, -1 c2=-(UV(2,nz,el(2))*deltaX2 - UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) Wvel(nz,enodes(1))=Wvel(nz,enodes(1))+c2 @@ -1790,7 +1791,7 @@ subroutine vert_vel_ale(mesh) do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2d(n)-1 - !!PS do nz=nl-1,1,-1 + do nz=nzmax,nzmin,-1 Wvel(nz,n)=Wvel(nz,n)+Wvel(nz+1,n) if (Fer_GM) then @@ -1805,11 +1806,11 @@ subroutine vert_vel_ale(mesh) do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2d(n)-1 - !!PS do nz=1,nlevels_nod2D(n)-1 + do nz=nzmin,nzmax Wvel(nz,n)=Wvel(nz,n)/area(nz,n) if (Fer_GM) then - fer_Wvel(nz,n)=fer_Wvel(nz,n)/area(nz,n) + fer_Wvel(nz,n)=fer_Wvel(nz,n)/area(nz,n) end if end do end do @@ -2744,7 +2745,7 @@ subroutine oce_timestep_ale(n, mesh) !___________________________________________________________________________ ! solve tracer equation if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call solve_tracers_ale'//achar(27)//'[0m' - call solve_tracers_ale(mesh) +!!PS call solve_tracers_ale(mesh) t8=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 24f77d628..7e57cfbc8 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -364,9 +364,9 @@ subroutine diff_ver_part_expl_ale(tr_num, mesh) !_______________________________________________________________________ !!PS do nz=1,nl1-1 do nz=ul1,nl1-1 - del_ttf(nz,n) = del_ttf(nz,n) + (vd_flux(nz) - vd_flux(nz+1))/(zbar_3d_n(nz,n)-zbar_3d_n(nz+1,n))*dt/area(nz,n) + del_ttf(nz,n) = del_ttf(nz,n) + (vd_flux(nz) - vd_flux(nz+1))/(zbar_3d_n(nz,n)-zbar_3d_n(nz+1,n))*dt/areasvol(nz,n) end do - del_ttf(nl1,n) = del_ttf(nl1,n) + (vd_flux(nl1)/(zbar_3d_n(nl1,n)-zbar_3d_n(nl1+1,n)))*dt/area(nl1,n) + del_ttf(nl1,n) = del_ttf(nl1,n) + (vd_flux(nl1)/(zbar_3d_n(nl1,n)-zbar_3d_n(nl1+1,n)))*dt/areasvol(nl1,n) end do ! --> do n=1, myDim_nod2D end subroutine diff_ver_part_expl_ale @@ -413,7 +413,7 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! --> h^(n+0.5)* (dTnew) = dt*(K_33*d/dz*dTnew) + RHS ! --> solve for dT_new ! - ! ----------- zbar_1, V_1 (Volume eq. to Area) + ! ----------- zbar_1, V_1 (Skalar Volume), A_1 (Area of edge), no Cavity A1==V1, yes Cavity A1 !=V1 ! Z_1 o T_1 ! ----------- zbar_2, V_2 ! Z_2 o T_2 @@ -422,8 +422,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! ----------- zbar_4 ! : ! --> Difference Quotient at Volume _2: ddTnew_2/dt + d/dz*K_33 d/dz*dTnew_2 = 0 --> homogene solution - ! V2*dTnew_2 *h^(n+0.5) = -dt * [ (dTnew_1-dTnew_2)/(Z_1-Z_2)*V_2 + (dTnew_2-dTnew_3)/(Z_2-Z_3)*V_3 ] + RHS - ! dTnew_2 *h^(n+0.5) = -dt * [ (dTnew_1-dTnew_2)/(Z_1-Z_2)*V_2 + (dTnew_2-dTnew_3)/(Z_2-Z_3)*V_3/V_2 ] + RHS + ! V2*dTnew_2 *h^(n+0.5) = -dt * [ (dTnew_1-dTnew_2)/(Z_1-Z_2)*A_2 + (dTnew_2-dTnew_3)/(Z_2-Z_3)*A_3 ] + RHS + ! dTnew_2 *h^(n+0.5) = -dt * [ (dTnew_1-dTnew_2)/(Z_1-Z_2)*A_2/V_2 + (dTnew_2-dTnew_3)/(Z_2-Z_3)*A_3/V_2 ] + RHS ! | | ! v v ! diffusive flux towards diffusive flux towards @@ -432,11 +432,11 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! --> solve coefficents for homogene part ! dTnew_2 *h^(n+0.5) = -dt * [ a*dTnew_1 + b*dTnew_2 + c*dTnew_3 ] ! - ! --> a = -dt*K_33/(Z_1-Z_2) + ! --> a = -dt*K_33/(Z_1-Z_2)*A_2/V_2 ! - ! --> c = -dt*K_33/(Z_2-Z_3)*V_3/V_2 + ! --> c = -dt*K_33/(Z_2-Z_3)*A_3/V_2 ! - ! --> b = h^(n+0.5) -[ dt*K_33/(Z_1-Z_2) + dt*K_33/(Z_2-Z_3)*V_3/V_2 ] = -(a+c) + h^(n+0.5) + ! --> b = h^(n+0.5) -[ dt*K_33/(Z_1-Z_2)*A_2/V_2 + dt*K_33/(Z_2-Z_3)*A_3/V_2 ] = -(a+c) + h^(n+0.5) !___________________________________________________________________________ ! loop over local nodes @@ -490,15 +490,21 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! layer dependent coefficients for for solving dT(1)/dt+d/dz*K_33*d/dz*T(1) = ... a(nz)=0.0_WP !!PS c(nz)=-(Kv(2,n)+Ty1)*zinv2*zinv*area(nz+1,n)/area(nz,n) - c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/area(nz,n) + c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/areasvol(nz,n) b(nz)=-c(nz)+hnode_new(nz,n) ! ale ! update from the vertical advection --> comes from splitting of vert ! velocity into explicite and implicite contribution if (do_wimpl) then - v_adv=zinv*area(nz+1,n)/area(nz,n) - b(nz)=b(nz)+Wvel_i(nz, n)*zinv-min(0._WP, Wvel_i(nz+1, n))*v_adv - c(nz)=c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv + !!PS v_adv =zinv*area(nz+1,n)/areasvol(nz,n) + !!PS b(nz) =b(nz)+Wvel_i(nz, n)*zinv-min(0._WP, Wvel_i(nz+1, n))*v_adv + !!PS c(nz) =c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv + v_adv =zinv*area(nz ,n)/areasvol(nz,n) + b(nz) =b(nz)+Wvel_i(nz, n)*v_adv + + v_adv =zinv*area(nz+1,n)/areasvol(nz,n) + b(nz) =b(nz)-min(0._WP, Wvel_i(nz+1, n))*v_adv + c(nz) =c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv end if ! backup zinv2 for next depth level zinv1=zinv2 @@ -518,8 +524,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) Ty =Ty *isredi Ty1=Ty1*isredi ! layer dependent coefficients for for solving dT(nz)/dt+d/dz*K_33*d/dz*T(nz) = ... - a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv - c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/area(nz,n) + a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv*area(nz ,n)/areasvol(nz,n) + c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/areasvol(nz,n) b(nz)=-a(nz)-c(nz)+hnode_new(nz,n) ! backup zinv2 for next depth level @@ -527,10 +533,12 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! update from the vertical advection if (do_wimpl) then - v_adv=zinv + !!PS v_adv=zinv + v_adv=zinv*area(nz ,n)/areasvol(nz,n) a(nz)=a(nz)+min(0._WP, Wvel_i(nz, n))*v_adv b(nz)=b(nz)+max(0._WP, Wvel_i(nz, n))*v_adv - v_adv=v_adv*area(nz+1,n)/area(nz,n) + !!PS v_adv=v_adv*areasvol(nz+1,n)/areasvol(nz,n) + v_adv=zinv*area(nz+1,n)/areasvol(nz,n) b(nz)=b(nz)-min(0._WP, Wvel_i(nz+1, n))*v_adv c(nz)=c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv end if @@ -547,13 +555,14 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) (zbar_n(nz)-Z_n(nz)) *zinv1 *slope_tapered(3,nz,n)**2 *Ki(nz,n) Ty =Ty *isredi ! layer dependent coefficients for for solving dT(nz)/dt+d/dz*K_33*d/dz*T(nz) = ... - a(nz)=-(Kv(nz,n)+Ty)*zinv1*zinv + a(nz)=-(Kv(nz,n)+Ty)*zinv1*zinv*area(nz ,n)/areasvol(nz,n) c(nz)=0.0_WP b(nz)=-a(nz)+hnode_new(nz,n) ! update from the vertical advection if (do_wimpl) then - v_adv=zinv + !!PS v_adv=zinv + v_adv=zinv*area(nz ,n)/areasvol(nz,n) a(nz)=a(nz)+min(0._WP, Wvel_i(nz, n))*v_adv b(nz)=b(nz)+max(0._WP, Wvel_i(nz, n))*v_adv end if @@ -613,13 +622,13 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) !_______________________________________________________________________ ! case of activated shortwave penetration into the ocean, ad 3d contribution -!!PS if (use_sw_pene .and. tr_num==1) then -!!PS !!PS do nz=1, nzmax-1 -!!PS do nz=nzmin, nzmax-1 -!!PS zinv=1.0_WP*dt !/(zbar(nz)-zbar(nz+1)) ale! -!!PS tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n)*area(nz+1,n)/area(nz,n))*zinv -!!PS end do -!!PS end if + if (use_sw_pene .and. tr_num==1) then + !!PS do nz=1, nzmax-1 + do nz=nzmin, nzmax-1 + zinv=1.0_WP*dt !/(zbar(nz)-zbar(nz+1)) ale! + tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n)*area(nz+1,n)/areasvol(nz,n))*zinv + end do + end if !_______________________________________________________________________ ! The first row contains also the boundary condition from heatflux, @@ -634,7 +643,7 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! v (+) v (+) ! !!PS tr(1)= tr(1)+bc_surface(n, tracer_id(tr_num)) -!!PS tr(nzmin)= tr(nzmin)+bc_surface(n, tracer_id(tr_num),mesh) + tr(nzmin)= tr(nzmin)+bc_surface(n, tracer_id(tr_num),mesh) !_______________________________________________________________________ ! The forward sweep algorithm to solve the three-diagonal matrix @@ -726,8 +735,8 @@ subroutine diff_ver_part_redi_expl(mesh) Ty=Ty+tr_xy(2,nz,elem)*elem_area(elem) endif end do - tr_xynodes(1,nz,n)=tx/3.0_WP/area(nz,n) - tr_xynodes(2,nz,n)=ty/3.0_WP/area(nz,n) + tr_xynodes(1,nz,n)=tx/3.0_WP/areasvol(nz,n) + tr_xynodes(2,nz,n)=ty/3.0_WP/areasvol(nz,n) end do end do @@ -756,13 +765,13 @@ subroutine diff_ver_part_redi_expl(mesh) !!PS do nz=2,nl1 do nz=ul1+1,nl1 vd_flux(nz)=(Z_n(nz-1)-zbar_n(nz))*(slope_tapered(1,nz-1,n)*tr_xynodes(1,nz-1,n)+slope_tapered(2,nz-1,n)*tr_xynodes(2,nz-1,n))*Ki(nz-1,n) - vd_flux(nz)=vd_flux(nz)+& - (zbar_n(nz)-Z_n(nz)) *(slope_tapered(1,nz,n) *tr_xynodes(1,nz,n) +slope_tapered(2,nz,n) *tr_xynodes(2,nz,n)) *Ki(nz,n) - vd_flux(nz)=vd_flux(nz)/(Z_n(nz-1)-Z_n(nz))*area(nz,n) + vd_flux(nz)=vd_flux(nz)+& + (zbar_n(nz)-Z_n(nz)) *(slope_tapered(1,nz,n) *tr_xynodes(1,nz,n) +slope_tapered(2,nz,n) *tr_xynodes(2,nz,n)) *Ki(nz,n) + vd_flux(nz)=vd_flux(nz)/(Z_n(nz-1)-Z_n(nz))*area(nz,n) enddo !!PS do nz=1,nl1 do nz=ul1,nl1 - del_ttf(nz,n) = del_ttf(nz,n)+(vd_flux(nz) - vd_flux(nz+1))*dt/area(nz,n) + del_ttf(nz,n) = del_ttf(nz,n)+(vd_flux(nz) - vd_flux(nz+1))*dt/areasvol(nz,n) enddo end do end subroutine diff_ver_part_redi_expl! @@ -908,8 +917,8 @@ subroutine diff_part_hor_redi(mesh) if (ul2>0) ul12=min(ul1,ul2) !!PS del_ttf(1:nl12,enodes(1))=del_ttf(1:nl12,enodes(1))+rhs1(1:nl12)*dt/area(1:nl12,enodes(1)) !!PS del_ttf(1:nl12,enodes(2))=del_ttf(1:nl12,enodes(2))+rhs2(1:nl12)*dt/area(1:nl12,enodes(2)) - del_ttf(ul12:nl12,enodes(1))=del_ttf(ul12:nl12,enodes(1))+rhs1(ul12:nl12)*dt/area(ul12:nl12,enodes(1)) - del_ttf(ul12:nl12,enodes(2))=del_ttf(ul12:nl12,enodes(2))+rhs2(ul12:nl12)*dt/area(ul12:nl12,enodes(2)) + del_ttf(ul12:nl12,enodes(1))=del_ttf(ul12:nl12,enodes(1))+rhs1(ul12:nl12)*dt/areasvol(ul12:nl12,enodes(1)) + del_ttf(ul12:nl12,enodes(2))=del_ttf(ul12:nl12,enodes(2))+rhs2(ul12:nl12)*dt/areasvol(ul12:nl12,enodes(2)) end do end subroutine diff_part_hor_redi diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 1b54398ac..0f5b0ac6e 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -324,14 +324,6 @@ subroutine momentum_adv_scalar(mesh) !!PS Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) - - IF (ANY(areasvol_inv(ul1:nl1,n) < 1.e-15)) THEN - WRITE(*,*) "BLA, BLA" - write(*,*) areasvol_inv(ul1:nl1,n) - write(*,*) areasvol(ul1:nl1,n) - CALL PAR_EX - STOP - END IF end do !-->do n=1,myDim_nod2d !___________________________________________________________________________ diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index a09e6658e..849b5aea9 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -92,7 +92,7 @@ subroutine relative_vorticity(mesh) nl1 = nlevels_nod2D(n) !!PS DO nz=1,nlevels_nod2D(n)-1 DO nz=ul1,nl1-1 - vorticity(nz,n)=vorticity(nz,n)/area(nz,n) + vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) END DO END DO @@ -151,7 +151,7 @@ subroutine compute_vel_rhs_vinv(mesh) !vector invariant !!PS DO nz=1, nlevels_nod2D(n)-1 DO nz=nzmin, nzmax-1 !DO nz=1, nl-1 - KE_node(nz,n)=KE_node(nz,n)/(6._WP*area(nz,n)) !NR divide by 6 here + KE_node(nz,n)=KE_node(nz,n)/(6._WP*areasvol(nz,n)) !NR divide by 6 here END DO END DO From 4152b2d1b6801eb42c2035c261dd57f4f93db0c3 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 12 Mar 2021 15:58:59 +0100 Subject: [PATCH 084/909] exchange area with areasvol where it is neccessary --- src/oce_adv_tra_driver.F90 | 8 ++++---- src/oce_adv_tra_fct.F90 | 4 ++-- src/oce_adv_tra_ver.F90 | 27 ++++++++++++++++++++------- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 9454a007b..988b6a090 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -117,7 +117,7 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v nl1 = nlevels_nod2D(n) !!PS do nz=1, nlevels_nod2D(n)-1 do nz= nu1, nl1-1 - fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/area(nz,n))/hnode_new(nz,n) + fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n) end do end do @@ -237,7 +237,7 @@ subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) do nz=nu1,nl1-1 - dttf_v(nz,n)=dttf_v(nz,n) + (flux_v(nz,n)-flux_v(nz+1,n))*dt/area(nz,n) + dttf_v(nz,n)=dttf_v(nz,n) + (flux_v(nz,n)-flux_v(nz+1,n))*dt/areasvol(nz,n) end do end do @@ -262,8 +262,8 @@ subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo !!PS do nz=1, max(nl1, nl2) do nz=nu12, nl12 - dttf_h(nz,enodes(1))=dttf_h(nz,enodes(1))+flux_h(nz,edge)*dt/area(nz,enodes(1)) - dttf_h(nz,enodes(2))=dttf_h(nz,enodes(2))-flux_h(nz,edge)*dt/area(nz,enodes(2)) + dttf_h(nz,enodes(1))=dttf_h(nz,enodes(1))+flux_h(nz,edge)*dt/areasvol(nz,enodes(1)) + dttf_h(nz,enodes(2))=dttf_h(nz,enodes(2))-flux_h(nz,edge)*dt/areasvol(nz,enodes(2)) end do end do end subroutine oce_tra_adv_flux2dtracer diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 4af76fdf7..62db9a7d3 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -268,9 +268,9 @@ subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) nu1=ulevels_nod2D(n) nl1=nlevels_nod2D(n) do nz=nu1,nl1-1 - flux=fct_plus(nz,n)*dt/area(nz,n)+flux_eps + flux=fct_plus(nz,n)*dt/areasvol(nz,n)+flux_eps fct_plus(nz,n)=min(1.0_WP,fct_ttf_max(nz,n)/flux) - flux=fct_minus(nz,n)*dt/area(nz,n)-flux_eps + flux=fct_minus(nz,n)*dt/areasvol(nz,n)-flux_eps fct_minus(nz,n)=min(1.0_WP,fct_ttf_min(nz,n)/flux) end do end do diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 8bc872575..7cdf4acbc 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -146,19 +146,28 @@ subroutine adv_tra_vert_impl(ttf, w, mesh) ! 1/dz(nz) zinv=1.0_WP*dt ! no .../(zbar(1)-zbar(2)) because of ALE + !!PS a(nz)=0.0_WP + !!PS v_adv=zinv*areasvol(nz+1,n)/areasvol(nz,n) + !!PS b(nz)= hnode_new(nz,n)+W(nz, n)*zinv-min(0._WP, W(nz+1, n))*v_adv + !!PS c(nz)=-max(0._WP, W(nz+1, n))*v_adv + a(nz)=0.0_WP - v_adv=zinv*area(nz+1,n)/area(nz,n) - b(nz)= hnode_new(nz,n)+W(nz, n)*zinv-min(0._WP, W(nz+1, n))*v_adv + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + b(nz)= hnode_new(nz,n)+W(nz, n)*v_adv + + v_adv=zinv*area(nz+1,n)/areasvol(nz,n) + b(nz)= b(nz)-min(0._WP, W(nz+1, n))*v_adv c(nz)=-max(0._WP, W(nz+1, n))*v_adv !_______________________________________________________________________ ! Regular part of coefficients: --> 2nd...nl-2 layer do nz=nzmin+1, nzmax-2 ! update from the vertical advection - a(nz)=min(0._WP, W(nz, n))*zinv - b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*zinv + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + a(nz)=min(0._WP, W(nz, n))*v_adv + b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*v_adv - v_adv=zinv*area(nz+1,n)/area(nz,n) + v_adv=zinv*area(nz+1,n)/areasvol(nz,n) b(nz)=b(nz)-min(0._WP, W(nz+1, n))*v_adv c(nz)= -max(0._WP, W(nz+1, n))*v_adv end do ! --> do nz=2, nzmax-2 @@ -167,8 +176,12 @@ subroutine adv_tra_vert_impl(ttf, w, mesh) ! Regular part of coefficients: --> nl-1 layer nz=nzmax-1 ! update from the vertical advection - a(nz)= min(0._WP, W(nz, n))*zinv - b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*zinv + !!PS a(nz)= min(0._WP, W(nz, n))*zinv + !!PS b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*zinv + !!PS c(nz)=0.0_WP + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + a(nz)= min(0._WP, W(nz, n))*v_adv + b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*v_adv c(nz)=0.0_WP !_______________________________________________________________________ From 067c582f832669a85b00862359d1bac4834dbbb8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 12 Mar 2021 15:59:38 +0100 Subject: [PATCH 085/909] exchange area with areasvol where it is neccessary --- src/gen_support.F90 | 4 +++- src/oce_spp.F90 | 7 ++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index eee45c814..7a5a2752e 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -274,6 +274,7 @@ subroutine integrate_nod_2D(data, int2D, mesh) do row=1, myDim_nod2D !!PS lval=lval+data(row)*area(1,row) lval=lval+data(row)*area(ulevels_nod2D(row),row) +!!PS lval=lval+data(row)*node_area(row) ! --> TEST_cavity end do int2D=0.0_WP @@ -300,7 +301,8 @@ subroutine integrate_nod_3D(data, int3D, mesh) do row=1, myDim_nod2D !!PS do k=1, nlevels_nod2D(row)-1 do k=ulevels_nod2D(row), nlevels_nod2D(row)-1 - lval=lval+data(k, row)*area(k,row)*hnode_new(k,row) + lval=lval+data(k, row)*area(k,row)*hnode_new(k,row) ! --> TEST_cavity +!!PS lval=lval+data(k, row)*area2(k,row)*hnode_new(k,row) end do end do int3D=0.0_WP diff --git a/src/oce_spp.F90 b/src/oce_spp.F90 index 83cc17e36..26de0ef8e 100644 --- a/src/oce_spp.F90 +++ b/src/oce_spp.F90 @@ -28,7 +28,7 @@ subroutine cal_rejected_salt(mesh) aux=rhoice/rhowat*dt do row=1, myDim_nod2d +eDim_nod2D! myDim is sufficient - if (thdgr(row)>0.0_WP) then + if (thdgr(row)>0.0_WP .and. ulevels_nod2D(row)==1) then ice_rejected_salt(row)= & (S_oc_array(row)-Sice)*thdgr(row)*aux*area(1, row) !unit: psu m3 @@ -63,6 +63,7 @@ subroutine app_rejected_salt(mesh) #include "associate_mesh.h" do row=1,myDim_nod2d+eDim_nod2D ! myDim is sufficient + if (ulevels_nod2D(row)>1) cycle if (ice_rejected_salt(row)<=0.0_WP) cycle ! do not parameterize brine rejection in regions with low salinity ! 1. it leads to further decrease of SSS @@ -92,10 +93,10 @@ subroutine app_rejected_salt(mesh) !!PS end do !!PS endif if (kml>nzmin) then - tr_arr(nzmin,row,2)=tr_arr(nzmin,row,2)-ice_rejected_salt(row)/area(1,row)/hnode(1,row) + tr_arr(nzmin,row,2)=tr_arr(nzmin,row,2)-ice_rejected_salt(row)/areasvol(1,row)/hnode(1,row) spar(nzmin+1:kml)=spar(nzmin+1:kml)/sum(spar(nzmin+1:kml)) do k=nzmin+1,kml - tr_arr(k,row,2)=tr_arr(k,row,2)+ice_rejected_salt(row)*spar(k)/area(k,row)/hnode(k,row) + tr_arr(k,row,2)=tr_arr(k,row,2)+ice_rejected_salt(row)*spar(k)/areasvol(k,row)/hnode(k,row) end do endif endif From 69f71684ad643d335e745689b798c41c864204bf Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 12 Mar 2021 18:14:37 +0100 Subject: [PATCH 086/909] exchange area with areasvol where it is neccessary --- src/gen_modules_cvmix_idemix.F90 | 26 ++++++++++++++------------ src/gen_modules_diag.F90 | 4 ++-- src/gen_support.F90 | 6 ++---- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/gen_modules_cvmix_idemix.F90 b/src/gen_modules_cvmix_idemix.F90 index 98929506d..e8203868b 100644 --- a/src/gen_modules_cvmix_idemix.F90 +++ b/src/gen_modules_cvmix_idemix.F90 @@ -272,7 +272,7 @@ subroutine calc_cvmix_idemix(mesh) implicit none type(t_mesh), intent(in), target :: mesh integer :: node, elem, edge, node_size - integer :: nz, nln, nl1, nl2, nl12, nu1, nu2, nu12 + integer :: nz, nln, nl1, nl2, nl12, nu1, nu2, nu12, uln integer :: elnodes1(3), elnodes2(3), el(2), ednodes(2) real(kind=WP) :: dz_trr(mesh%nl), dz_trr2(mesh%nl), bvfreq2(mesh%nl), vflux, dz_el, aux, cflfac real(kind=WP) :: grad_v0Eiw(2), deltaX1, deltaY1, deltaX2, deltaY2 @@ -286,19 +286,20 @@ subroutine calc_cvmix_idemix(mesh) node_size = myDim_nod2D do node = 1,node_size nln = nlevels_nod2D(node)-1 + uln = ulevels_nod2D(node) !___________________________________________________________________ ! calculate for TKE square of Brünt-Väisälä frequency, be aware that ! bvfreq contains already the squared brünt Väisälä frequency ... bvfreq2 = 0.0_WP - bvfreq2(2:nln) = bvfreq(2:nln,node) + bvfreq2(uln:nln) = bvfreq(uln:nln,node) !___________________________________________________________________ ! dz_trr distance between tracer points, surface and bottom dz_trr is half ! the layerthickness ... dz_trr = 0.0_WP - dz_trr(2:nln) = abs(Z_3d_n(1:nln-1,node)-Z_3d_n(2:nln,node)) - dz_trr(1) = hnode(1,node)/2.0_WP + dz_trr(uln+1:nln) = abs(Z_3d_n(uln:nln-1,node)-Z_3d_n(uln+1:nln,node)) + dz_trr(uln) = hnode(uln,node)/2.0_WP dz_trr(nln+1) = hnode(nln,node)/2.0_WP !___________________________________________________________________ @@ -389,23 +390,24 @@ subroutine calc_cvmix_idemix(mesh) ! number of above bottom levels at node nln = nlevels_nod2D(node)-1 + uln = ulevels_nod2D(node) ! thickness of mid-level to mid-level interface at node dz_trr = 0.0_WP - dz_trr(2:nln) = Z_3d_n(1:nln-1,node)-Z_3d_n(2:nln,node) - dz_trr(1) = hnode(1,node)/2.0_WP + dz_trr(uln+1:nln) = Z_3d_n(uln:nln-1,node)-Z_3d_n(uln+1:nln,node) + dz_trr(uln) = hnode(uln,node)/2.0_WP dz_trr(nln+1) = hnode(nln,node)/2.0_WP ! surface cell - vol_wcelli(1,node) = 1/(area(1,node)*dz_trr(1)) - aux = sqrt(cflfac*(area(1,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) - iwe_v0(1,node) = min(iwe_v0(1,node),aux) + vol_wcelli(uln,node) = 1/(areasvol(uln,node)*dz_trr(uln)) + aux = sqrt(cflfac*(area(uln,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) + iwe_v0(uln,node) = min(iwe_v0(uln,node),aux) ! bulk cells !!PS do nz=2,nln - do nz=ulevels_nod2D(node)+1,nln + do nz=uln+1,nln ! inverse volumne - vol_wcelli(nz,node) = 1/(area(nz-1,node)*dz_trr(nz)) + vol_wcelli(nz,node) = 1/(areasvol(nz-1,node)*dz_trr(nz)) ! restrict iwe_v0 aux = sqrt(cflfac*(area(nz-1,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) @@ -415,7 +417,7 @@ subroutine calc_cvmix_idemix(mesh) end do ! bottom cell - vol_wcelli(nln+1,node) = 1/(area(nln,node)*dz_trr(nln+1)) + vol_wcelli(nln+1,node) = 1/(areasvol(nln,node)*dz_trr(nln+1)) aux = sqrt(cflfac*(area(nln,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) iwe_v0(nln+1,node) = min(iwe_v0(nln+1,node),aux) diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index c1e1ec3d8..a8477094e 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -135,7 +135,7 @@ subroutine diag_curl_stress_surf(mode, mesh) END DO DO n=1, myDim_nod2D+eDim_nod2D !!PS curl_stress_surf(n)=curl_stress_surf(n)/area(1,n) - curl_stress_surf(n)=curl_stress_surf(n)/area(ulevels_nod2D(n),n) + curl_stress_surf(n)=curl_stress_surf(n)/areasvol(ulevels_nod2D(n),n) END DO end subroutine diag_curl_stress_surf ! ============================================================== @@ -210,7 +210,7 @@ subroutine diag_curl_vel3(mode, mesh) DO n=1, myDim_nod2D !!PS DO nz=1, nlevels_nod2D(n)-1 DO nz=ulevels_nod2D(n), nlevels_nod2D(n)-1 - curl_vel3(nz,n)=curl_vel3(nz,n)/area(nz,n) + curl_vel3(nz,n)=curl_vel3(nz,n)/areasvol(nz,n) END DO END DO end subroutine diag_curl_vel3 diff --git a/src/gen_support.F90 b/src/gen_support.F90 index 7a5a2752e..bb9e24ad6 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -273,8 +273,7 @@ subroutine integrate_nod_2D(data, int2D, mesh) lval=0.0_WP do row=1, myDim_nod2D !!PS lval=lval+data(row)*area(1,row) - lval=lval+data(row)*area(ulevels_nod2D(row),row) -!!PS lval=lval+data(row)*node_area(row) ! --> TEST_cavity + lval=lval+data(row)*areasvol(ulevels_nod2D(row),row) end do int2D=0.0_WP @@ -301,8 +300,7 @@ subroutine integrate_nod_3D(data, int3D, mesh) do row=1, myDim_nod2D !!PS do k=1, nlevels_nod2D(row)-1 do k=ulevels_nod2D(row), nlevels_nod2D(row)-1 - lval=lval+data(k, row)*area(k,row)*hnode_new(k,row) ! --> TEST_cavity -!!PS lval=lval+data(k, row)*area2(k,row)*hnode_new(k,row) + lval=lval+data(k, row)*areasvol(k,row)*hnode_new(k,row) ! --> TEST_cavity end do end do int3D=0.0_WP From 452f597b7313649151671a8e46cb94366a5674ca Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 12 Mar 2021 18:16:26 +0100 Subject: [PATCH 087/909] only print info where is cavity in write_step_info.F90 --- src/write_step_info.F90 | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index d3d7d47fd..3be6a08cd 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -268,29 +268,29 @@ subroutine check_blowup(istep, mesh) write(*,*) write(*,*) 'm_ice = ',m_ice(n),', m_ice_old = ',m_ice_old(n) write(*,*) 'a_ice = ',a_ice(n),', a_ice_old = ',a_ice_old(n) - write(*,*) 'thdgr = ',thdgr(n),', thdgr_old = ',thdgr_old(n) - write(*,*) 'thdgrsn = ',thdgrsn(n) +!!PS write(*,*) 'thdgr = ',thdgr(n),', thdgr_old = ',thdgr_old(n) +!!PS write(*,*) 'thdgrsn = ',thdgrsn(n) write(*,*) - if (lcurt_stress_surf) then - write(*,*) 'curl_stress_surf = ',curl_stress_surf(n) - write(*,*) - endif - do el=1,nod_in_elem2d_num(n) - elidx = nod_in_elem2D(el,n) - write(*,*) ' elem#=',el,', elemidx=',elidx - write(*,*) ' pgf_x =',pgf_x(:,elidx) - write(*,*) ' pgf_y =',pgf_y(:,elidx) -! write(*,*) ' U =',UV(1,:,elidx) -! write(*,*) ' V =',UV(2,:,elidx) - write(*,*) - enddo - write(*,*) 'Wvel(1, n) = ',Wvel(1,n) - write(*,*) 'Wvel(:, n) = ',Wvel(:,n) +!!PS if (lcurt_stress_surf) then +!!PS write(*,*) 'curl_stress_surf = ',curl_stress_surf(n) +!!PS write(*,*) +!!PS endif +!!PS do el=1,nod_in_elem2d_num(n) +!!PS elidx = nod_in_elem2D(el,n) +!!PS write(*,*) ' elem#=',el,', elemidx=',elidx +!!PS write(*,*) ' pgf_x =',pgf_x(:,elidx) +!!PS write(*,*) ' pgf_y =',pgf_y(:,elidx) +!!PS ! write(*,*) ' U =',UV(1,:,elidx) +!!PS ! write(*,*) ' V =',UV(2,:,elidx) +!!PS write(*,*) +!!PS enddo +!!PS write(*,*) 'Wvel(1, n) = ',Wvel(,n) + write(*,*) 'Wvel(:, n) = ',Wvel(ulevels_nod2D(n):nlevels_nod2D(n),n) write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) + write(*,*) 'CFL_z(:,n) = ',CFL_z(ulevels_nod2D(n):nlevels_nod2D(n),n) write(*,*) - write(*,*) 'hnode(1, n) = ',hnode(1,n) - write(*,*) 'hnode(:, n) = ',hnode(:,n) +!!PS write(*,*) 'hnode(1, n) = ',hnode(1,n) + write(*,*) 'hnode(:, n) = ',hnode(ulevels_nod2D(n):nlevels_nod2D(n),n) write(*,*) endif From d4f351ee6a8b2e3826a7eb613a036935df0b03cd Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 13 Mar 2021 18:32:47 +0100 Subject: [PATCH 088/909] switch fluxes back on --- src/oce_ale.F90 | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 967c74dc2..360243118 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2555,10 +2555,10 @@ subroutine oce_timestep_ale(n, mesh) t0=MPI_Wtime() - water_flux = 0.0_WP - heat_flux = 0.0_WP - stress_surf= 0.0_WP - stress_node_surf= 0.0_WP +!!PS water_flux = 0.0_WP +!!PS heat_flux = 0.0_WP +!!PS stress_surf= 0.0_WP +!!PS stress_node_surf= 0.0_WP !___________________________________________________________________________ ! calculate equation of state, density, pressure and mixed layer depths @@ -2745,7 +2745,7 @@ subroutine oce_timestep_ale(n, mesh) !___________________________________________________________________________ ! solve tracer equation if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call solve_tracers_ale'//achar(27)//'[0m' -!!PS call solve_tracers_ale(mesh) + call solve_tracers_ale(mesh) t8=MPI_Wtime() !___________________________________________________________________________ From 0cead93d603a4cf05d7b3f317105a93cd657199c Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 15 Mar 2021 12:40:25 +0100 Subject: [PATCH 089/909] add routine to compute total vertice/elemental ocean volume --- src/oce_mesh.F90 | 72 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 66 insertions(+), 6 deletions(-) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 2d8324ac3..7cd5cf5c5 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -90,7 +90,14 @@ subroutine find_levels_min_e2n(mesh) end subroutine end interface end module - +module check_total_volume_interface + interface + subroutine check_total_volume(mesh) + use mod_mesh + type(t_mesh), intent(inout) , target :: mesh + end subroutine + end interface +end module ! Driving routine. The distributed mesh information and mesh proper ! are read from files. @@ -2440,9 +2447,9 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) endif END SUBROUTINE mesh_auxiliary_arrays - -!=================================================================== - +! +! +!_______________________________________________________________________________ SUBROUTINE check_mesh_consistency(mesh) USE MOD_MESH USE o_PARAM @@ -2475,7 +2482,7 @@ SUBROUTINE check_mesh_consistency(mesh) do elem=1, myDim_elem2D elnodes=mesh%elem2D_nodes(:, elem) if (elnodes(1) > myDim_nod2D) CYCLE - do nz=mesh%ulevels(elem), mesh%nlevels(elem) + do nz=mesh%ulevels(elem), mesh%nlevels(elem)-1 aux(nz)=aux(nz)+mesh%elem_area(elem) end do end do @@ -2493,4 +2500,57 @@ SUBROUTINE check_mesh_consistency(mesh) !call par_ex !stop END SUBROUTINE check_mesh_consistency -!================================================================== +! +! +!_______________________________________________________________________________ +subroutine check_total_volume(mesh) + USE MOD_MESH + USE o_PARAM + USE g_PARSUP + use g_comm_auto + use o_ARRAYS + + IMPLICIT NONE + type(t_mesh), intent(inout), target :: mesh + integer :: nz, n, elem , elnodes(3) + real(kind=WP) :: vol_n, vol_e, aux + +#include "associate_mesh.h" + + !___________________________________________________________________________ + vol_n=0._WP + vol_e=0._WP + !___________________________________________________________________________ + ! total ocean volume on nodes + aux=0._WP + do n=1, myDim_nod2D + do nz=ulevels_nod2D(n), nlevels_nod2D(n)-1 + aux=aux+areasvol(nz, n)*hnode(nz,n) + end do + end do + call MPI_AllREDUCE(aux, vol_n, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + !___________________________________________________________________________ + ! total ocean volume on elements + aux=0._WP + do elem=1, myDim_elem2D + elnodes=elem2D_nodes(:, elem) + if (elnodes(1) > myDim_nod2D) cycle + do nz=ulevels(elem), nlevels(elem)-1 + aux=aux+elem_area(elem)*helem(nz,elem) + end do + end do + call MPI_AllREDUCE(aux, vol_e, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + + !___write mesh statistics___________________________________________________ + if (mype==0) then + write(*,*) '____________________________________________________________________' + write(*,*) ' --> ocean volume check:', mype + write(*,*) ' > Total ocean volume node:', vol_n, ' m^3' + write(*,*) ' > Total ocean volume elem:', vol_e, ' m^3' + + end if + +end subroutine check_total_volume +! +! +!_______________________________________________________________________________ From ade88b707453f52bd8c1161fef0112ede0bca55c Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 15 Mar 2021 12:43:09 +0100 Subject: [PATCH 090/909] check for total ocean volume in init_thickness_ale --- src/oce_ale.F90 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 360243118..580f36e4d 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -790,6 +790,8 @@ subroutine init_thickness_ale(mesh) !___________________________________________________________________________ hnode_new=hnode ! Should be initialized, because only variable part is updated. + !!PS call check_total_volume(mesh) + end subroutine init_thickness_ale ! ! From 8144937bec32aa3088fbb95403cd7538d41e7064 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Mar 2021 16:33:13 +0100 Subject: [PATCH 091/909] exclude ice operations on cavity vertices and elements --- src/ice_EVP.F90 | 4 ++-- src/ice_fct.F90 | 53 ++++++++++++++++++++++++------------------ src/ice_maEVP.F90 | 24 +++++++++++-------- src/ice_setup_step.F90 | 8 +++++-- src/ice_thermo_oce.F90 | 39 ++++++++++++++++++++----------- 5 files changed, 78 insertions(+), 50 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index e9357e80d..f5267bd69 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -458,7 +458,7 @@ subroutine EVPdynamics(mesh) elnodes = elem2D_nodes(:,el) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(el) > 1) cycle !_______________________________________________________________________ @@ -508,7 +508,7 @@ subroutine EVPdynamics(mesh) elnodes = elem2D_nodes(:,el) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(el) > 1) cycle !_______________________________________________________________________ diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index e82f2e11e..501b9ac36 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -62,11 +62,13 @@ subroutine ice_TG_rhs(mesh) ! Velocities at nodes do elem=1,myDim_elem2D !assembling rhs over elements + elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if cavity element skip it if (ulevels(elem)>1) cycle + if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 + - elnodes=elem2D_nodes(:,elem) !derivatives dx=gradient_sca(1:3,elem) dy=gradient_sca(4:6,elem) @@ -80,6 +82,7 @@ subroutine ice_TG_rhs(mesh) diff=ice_diff*sqrt(elem_area(elem)/scale_area) DO n=1,3 row=elnodes(n) +!!PS if (ulevels_nod2D(row)>1) cycle DO q = 1,3 !entries(q)= vol*dt*((dx(n)*um+dy(n)*vm)/3.0_WP - & ! diff*(dx(n)*dx(q)+ dy(n)*dy(q))- & @@ -372,7 +375,7 @@ subroutine ice_fem_fct(tr_array_id, mesh) !_______________________________________________________________________ ! if cavity cycle over - !!PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 + if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ @@ -421,6 +424,7 @@ subroutine ice_fem_fct(tr_array_id, mesh) !========================== if (tr_array_id==1) then do row=1, myDim_nod2D + if (ulevels_nod2d(row)>1) cycle n=nn_num(row) tmax(row)=maxval(m_icel(nn_pos(1:n,row))) tmin(row)=minval(m_icel(nn_pos(1:n,row))) @@ -432,6 +436,7 @@ subroutine ice_fem_fct(tr_array_id, mesh) if (tr_array_id==2) then do row=1, myDim_nod2D + if (ulevels_nod2d(row)>1) cycle n=nn_num(row) tmax(row)=maxval(a_icel(nn_pos(1:n,row))) tmin(row)=minval(a_icel(nn_pos(1:n,row))) @@ -443,6 +448,7 @@ subroutine ice_fem_fct(tr_array_id, mesh) if (tr_array_id==3) then do row=1, myDim_nod2D + if (ulevels_nod2d(row)>1) cycle n=nn_num(row) tmax(row)=maxval(m_snowl(nn_pos(1:n,row))) tmin(row)=minval(m_snowl(nn_pos(1:n,row))) @@ -455,6 +461,7 @@ subroutine ice_fem_fct(tr_array_id, mesh) #if defined (__oifs) if (tr_array_id==4) then do row=1, myDim_nod2D + if (ulevels_nod2d(row)>1) cycle n=nn_num(row) tmax(row)=maxval(m_templ(nn_pos(1:n,row))) tmin(row)=minval(m_templ(nn_pos(1:n,row))) @@ -476,7 +483,7 @@ subroutine ice_fem_fct(tr_array_id, mesh) !_______________________________________________________________________ ! if cavity cycle over - !!PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 + if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ @@ -525,7 +532,7 @@ subroutine ice_fem_fct(tr_array_id, mesh) !_______________________________________________________________________ ! if cavity cycle over - !!PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 + if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ @@ -548,12 +555,13 @@ subroutine ice_fem_fct(tr_array_id, mesh) m_ice(n)=m_icel(n) end do do elem=1, myDim_elem2D + elnodes=elem2D_nodes(:,elem) + !___________________________________________________________________ ! if cavity cycle over - !PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 + if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 - elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) m_ice(n)=m_ice(n)+icefluxes(elem,q) @@ -567,12 +575,13 @@ subroutine ice_fem_fct(tr_array_id, mesh) a_ice(n)=a_icel(n) end do do elem=1, myDim_elem2D + elnodes=elem2D_nodes(:,elem) + !___________________________________________________________________ ! if cavity cycle over - !!PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 + if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 - elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) a_ice(n)=a_ice(n)+icefluxes(elem,q) @@ -587,14 +596,15 @@ subroutine ice_fem_fct(tr_array_id, mesh) end do do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) + !___________________________________________________________________ ! if cavity cycle over - !!PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 + if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 do q=1,3 - n=elnodes(q) - m_snow(n)=m_snow(n)+icefluxes(elem,q) + n=elnodes(q) + m_snow(n)=m_snow(n)+icefluxes(elem,q) end do end do end if @@ -602,12 +612,14 @@ subroutine ice_fem_fct(tr_array_id, mesh) #if defined (__oifs) if(tr_array_id==4) then do n=1,myDim_nod2D + if(ulevels_nod2D(n)>1) cycle !LK89140 ice_temp(n)=m_templ(n) end do do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) !___________________________________________________________________ ! if cavity cycle over + if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 do q=1,3 @@ -656,17 +668,11 @@ SUBROUTINE ice_mass_matrix_fill(mesh) DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) - !___________________________________________________________________ - ! if cavity cycle over - if(ulevels(elem)>1) cycle - + !_______________________________________________________________________ do n=1,3 row=elnodes(n) if(row>myDim_nod2D) cycle !___________________________________________________________________ - ! if node is cavity cycle over - if(ulevels_nod2d(row)>1) cycle - ! Global-to-local neighbourhood correspondence DO q=1,nn_num(row) col_pos(nn_pos(q,row))=q @@ -674,9 +680,9 @@ SUBROUTINE ice_mass_matrix_fill(mesh) offset=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1) DO q=1,3 col=elnodes(q) - !___________________________________________________________________ - ! if node is cavity cycle over - if(ulevels_nod2d(col)>1) cycle + !_______________________________________________________________ + ! if element is cavity cycle over + if(ulevels(elem)>1) cycle ipos=offset+col_pos(col) mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP @@ -749,12 +755,12 @@ subroutine ice_TG_rhs_div(mesh) #endif /* (__oifs) */ END DO do elem=1,myDim_elem2D !assembling rhs over elements + elnodes=elem2D_nodes(:,elem) !___________________________________________________________________________ ! if cavity element skip it if (ulevels(elem)>1) cycle + if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 - !! elem=myList_elem2D(m) - elnodes=elem2D_nodes(:,elem) !derivatives dx=gradient_sca(1:3,elem) dy=gradient_sca(4:6,elem) @@ -769,6 +775,7 @@ subroutine ice_TG_rhs_div(mesh) c4=sum(dx*u_ice(elnodes)+dy*v_ice(elnodes)) DO n=1,3 row=elnodes(n) +!!PS if(ulevels_nod2D(row)>1) cycle !LK89140 DO q = 1,3 entries(q)= vol*ice_dt*((1.0_WP-0.5_WP*ice_dt*c4)*(dx(n)*(um+u_ice(elnodes(q)))+ & dy(n)*(vm+v_ice(elnodes(q))))/12.0_WP - & diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index b10d4ff11..cfe1b0bac 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -61,7 +61,7 @@ subroutine stress_tensor_m(mesh) elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle msum=sum(m_ice(elnodes))*val3 @@ -147,7 +147,7 @@ subroutine ssh2rhs(mesh) elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle !_______________________________________________________________________ @@ -174,7 +174,7 @@ subroutine ssh2rhs(mesh) elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle vol=elem_area(elem) @@ -224,7 +224,7 @@ subroutine stress2rhs_m(mesh) elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle if(sum(a_ice(elnodes)) < 0.01_WP) cycle !DS @@ -325,7 +325,7 @@ subroutine EVPdynamics_m(mesh) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(el) > 1) cycle !_______________________________________________________________________ @@ -354,7 +354,7 @@ subroutine EVPdynamics_m(mesh) elnodes=elem2D_nodes(:,el) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(el) > 1) cycle vol=elem_area(el) @@ -401,7 +401,7 @@ subroutine EVPdynamics_m(mesh) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(el) > 1) cycle msum=sum(m_ice(elnodes))*val3 @@ -434,7 +434,7 @@ subroutine EVPdynamics_m(mesh) do el=1,myDim_elem2D !__________________________________________________________________________ if (ulevels(el)>1) cycle - + if ( any(ulevels_nod2d(elnodes)>1) ) cycle !__________________________________________________________________________ if(ice_el(el)) then @@ -580,7 +580,7 @@ subroutine find_alpha_field_a(mesh) elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle msum=sum(m_ice(elnodes))*val3 @@ -647,7 +647,7 @@ subroutine stress_tensor_a(mesh) do elem=1,myDim_elem2D !__________________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle + if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle !__________________________________________________________________________ @@ -798,6 +798,10 @@ subroutine find_beta_field_a(mesh) #include "associate_mesh.h" DO n=1, myDim_nod2D + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels_nod2d(n)>1) cycle + ! ============== ! FESOM1.4 and stand-alone FESIM ! beta_evp_array(n) = maxval(alpha_evp_array(nod_in_elem2D(n)%addresses(1:nod_in_elem2D(n)%nmb))) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 50a8f4cc3..ff9ea9944 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -216,7 +216,7 @@ subroutine ice_timestep(step, mesh) end do #endif /* (__oifs) */ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call cut_off...'//achar(27)//'[0m' - call cut_off + call cut_off(mesh) if (use_cavity) call cavity_ice_clean_ma(mesh) t2=MPI_Wtime() @@ -270,7 +270,11 @@ subroutine ice_initial_state(mesh) do i=1,myDim_nod2D+eDim_nod2D !_______________________________________________________________________ - if (ulevels_nod2d(i)>1) cycle ! --> if cavity, no sea ice, no initial state + if (ulevels_nod2d(i)>1) then + !!PS m_ice(i) = 1.0e15_WP + !!PS m_snow(i) = 0.1e15_WP + cycle ! --> if cavity, no sea ice, no initial state + endif !_______________________________________________________________________ if (tr_arr(1,i,1)< 0.0_WP) then diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index a6fef3ea1..63e0f9074 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -1,13 +1,19 @@ !=================================================================== -subroutine cut_off() -use o_param -use i_arrays -implicit none +subroutine cut_off(mesh) + use o_param + use i_arrays + use MOD_MESH + use g_parsup + implicit none + type(t_mesh), intent(in) , target :: mesh -where(a_ice>1.0_WP) - a_ice=1.0_WP -end where +#include "associate_mesh.h" +!_______________________________________________________________________________ +! lower cutoff: a_ice +where(a_ice>1.0_WP) a_ice=1.0_WP + +! upper cutoff: a_ice where(a_ice<0.1e-8_WP) a_ice=0.0_WP #if defined (__oifs) @@ -17,6 +23,8 @@ subroutine cut_off() #endif /* (__oifs) */ end where +!_______________________________________________________________________________ +! lower cutoff: m_ice where(m_ice<0.1e-8_WP) m_ice=0.0_WP #if defined (__oifs) @@ -25,17 +33,22 @@ subroutine cut_off() ice_temp=273.15_WP #endif /* (__oifs) */ end where +! upper cutoff: m_ice +where(m_ice>10.0_WP .and. ulevels_nod2d==1) m_ice=10.0_WP + +!_______________________________________________________________________________ +! lower cutoff: m_snow +where(m_snow<0.1e-8_WP) m_snow=0.0_WP +! upper cutoff: m_snow +where(m_snow>2.5_WP .and. ulevels_nod2d==1) m_snow=2.5_WP +!_______________________________________________________________________________ #if defined (__oifs) -where(ice_temp>273.15_WP) - ice_temp=273.15_WP -end where +where(ice_temp>273.15_WP) ice_temp=273.15_WP #endif /* (__oifs) */ #if defined (__oifs) -where(ice_temp < 173.15_WP .and. a_ice >= 0.1e-8_WP) - ice_temp=271.35_WP -end where +where(ice_temp < 173.15_WP .and. a_ice >= 0.1e-8_WP) ice_temp=271.35_WP #endif /* (__oifs) */ end subroutine cut_off From 92cb917d85349246a0ca23d53b0297ec545d3ab5 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Mar 2021 16:40:36 +0100 Subject: [PATCH 092/909] ommit term from ssh_rhs_old when computing ssh_rhs, after talking to sergey --> leads to rigid lid approximation under the cavity --> no moving surface possible --- src/oce_ale.F90 | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 580f36e4d..1498b2bd8 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1210,7 +1210,7 @@ subroutine init_stiff_mat_ale(mesh) if (el(i)<1) cycle ! if el(i)<1, it means its an outer boundary edge this ! has only one triangle element to which it contribute - + ! which three nodes span up triangle el(i) ! elnodes ... node indices elnodes=elem2D_nodes(:,el(i)) @@ -1555,11 +1555,13 @@ subroutine compute_ssh_rhs_ale(mesh) ! shown in eq (11) rhs of "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (11) rhs if ( .not. trim(which_ALE)=='linfs') then do n=1,myDim_nod2D + if (ulevels_nod2D(n)>1) cycle nzmin = ulevels_nod2D(n) ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*areasvol(nzmin,n)+(1.0_WP-alpha)*ssh_rhs_old(n) end do else do n=1,myDim_nod2D + if (ulevels_nod2D(n)>1) cycle ssh_rhs(n)=ssh_rhs(n)+(1.0_WP-alpha)*ssh_rhs_old(n) end do end if @@ -1639,15 +1641,10 @@ subroutine compute_hbar_ale(mesh) !_______________________________________________________________________ ssh_rhs_old(enodes(1))=ssh_rhs_old(enodes(1))+(c1+c2) ssh_rhs_old(enodes(2))=ssh_rhs_old(enodes(2))-(c1+c2) - end do !___________________________________________________________________________ ! take into account water flux -!!PS if (.not. trim(which_ALE)=='linfs') then -!!PS ssh_rhs_old(1:myDim_nod2D)=ssh_rhs_old(1:myDim_nod2D)-water_flux(1:myDim_nod2D)*area(1,1:myDim_nod2D) -!!PS call exchange_nod(ssh_rhs_old) -!!PS end if if (.not. trim(which_ALE)=='linfs') then do n=1,myDim_nod2D ssh_rhs_old(n)=ssh_rhs_old(n)-water_flux(n)*areasvol(ulevels_nod2D(n),n) @@ -1657,9 +1654,6 @@ subroutine compute_hbar_ale(mesh) !___________________________________________________________________________ ! update the thickness -!!PS hbar_old=hbar -!!PS hbar(1:myDim_nod2D)=hbar_old(1:myDim_nod2D)+ssh_rhs_old(1:myDim_nod2D)*dt/area(1,1:myDim_nod2D) -!!PS call exchange_nod(hbar) hbar_old=hbar do n=1,myDim_nod2D hbar(n)=hbar_old(n)+ssh_rhs_old(n)*dt/areasvol(ulevels_nod2D(n),n) @@ -2669,6 +2663,11 @@ subroutine oce_timestep_ale(n, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_vel_rhs'//achar(27)//'[0m' +!!PS if (any(UV_rhs/=UV_rhs)) write(*,*) mype,' --> found NaN UV_rhs before compute_vel_rhs' +!!PS if (any(UV/=UV)) write(*,*) mype,' --> found NaN UV before compute_vel_rhs' +!!PS if (any(ssh_rhs/=ssh_rhs))write(*,*) mype,' --> found NaN ssh_rhs before compute_ssh_rhs_ale' +!!PS if (any(ssh_rhs_old/=ssh_rhs_old))write(*,*) mype,' --> found NaN ssh_rhs_old before compute_ssh_rhs_ale' + if(mom_adv/=3) then call compute_vel_rhs(mesh) else @@ -2676,7 +2675,6 @@ subroutine oce_timestep_ale(n, mesh) end if !___________________________________________________________________________ - if (any(UV_rhs/=UV_rhs)) write(*,*) ' --> found NaN UV_rhs MARK 2' call viscosity_filter(visc_option, mesh) !___________________________________________________________________________ @@ -2717,7 +2715,7 @@ subroutine oce_timestep_ale(n, mesh) ! Current dynamic elevation alpha*hbar(n+1/2)+(1-alpha)*hbar(n-1/2) ! equation (14) Danlov et.al "the finite volume sea ice ocean model FESOM2 ! ...if we do it here we don't need to write hbar_old into a restart file... - eta_n=alpha*hbar+(1.0_WP-alpha)*hbar_old + where(ulevels_nod2D==1) eta_n=alpha*hbar+(1.0_WP-alpha)*hbar_old ! --> eta_(n) ! call zero_dynamics !DS, zeros several dynamical variables; to be used for testing new implementations! From 98ecb193df9e339bf2e5a198febb9fc1f8f43142 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Mar 2021 16:42:39 +0100 Subject: [PATCH 093/909] add also d_eta = NaN to blowup check --- src/write_step_info.F90 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 3be6a08cd..35a5ba69d 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -237,7 +237,8 @@ subroutine check_blowup(istep, mesh) !___________________________________________________________________ ! check ssh if ( ((eta_n(n) /= eta_n(n)) .or. & - eta_n(n)<-50.0 .or. eta_n(n)>50.0)) then + eta_n(n)<-50.0 .or. eta_n(n)>50.0 .or. & + (d_eta(n) /= d_eta(n)) ) ) then !!PS eta_n(n)<-10.0 .or. eta_n(n)>10.0)) then found_blowup_loc=1 write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep @@ -292,7 +293,8 @@ subroutine check_blowup(istep, mesh) !!PS write(*,*) 'hnode(1, n) = ',hnode(1,n) write(*,*) 'hnode(:, n) = ',hnode(ulevels_nod2D(n):nlevels_nod2D(n),n) write(*,*) - endif + + endif !___________________________________________________________________ ! check surface vertical velocity --> in case of zlevel and zstar From 72c6343bef77ec4981005dd12b6cc8b4ad45f020 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 22 Mar 2021 22:02:02 +0100 Subject: [PATCH 094/909] clean up ice_thermo_oce.F90 --- src/ice_thermo_oce.F90 | 62 ++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 29 deletions(-) diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 63e0f9074..a37a30a82 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -9,46 +9,50 @@ subroutine cut_off(mesh) #include "associate_mesh.h" -!_______________________________________________________________________________ -! lower cutoff: a_ice -where(a_ice>1.0_WP) a_ice=1.0_WP + !___________________________________________________________________________ + ! lower cutoff: a_ice + where(a_ice>1.0_WP) a_ice=1.0_WP -! upper cutoff: a_ice -where(a_ice<0.1e-8_WP) - a_ice=0.0_WP + ! upper cutoff: a_ice + where(a_ice<0.1e-8_WP) + a_ice=0.0_WP #if defined (__oifs) - m_ice=0.0_WP - m_snow=0.0_WP - ice_temp=273.15_WP + m_ice=0.0_WP + m_snow=0.0_WP + ice_temp=273.15_WP #endif /* (__oifs) */ -end where + end where -!_______________________________________________________________________________ -! lower cutoff: m_ice -where(m_ice<0.1e-8_WP) - m_ice=0.0_WP + !___________________________________________________________________________ + ! lower cutoff: m_ice + where(m_ice<0.1e-8_WP) + m_ice=0.0_WP #if defined (__oifs) - m_snow=0.0_WP - a_ice=0.0_WP - ice_temp=273.15_WP + m_snow=0.0_WP + a_ice=0.0_WP + ice_temp=273.15_WP #endif /* (__oifs) */ -end where -! upper cutoff: m_ice -where(m_ice>10.0_WP .and. ulevels_nod2d==1) m_ice=10.0_WP - -!_______________________________________________________________________________ -! lower cutoff: m_snow -where(m_snow<0.1e-8_WP) m_snow=0.0_WP -! upper cutoff: m_snow -where(m_snow>2.5_WP .and. ulevels_nod2d==1) m_snow=2.5_WP + end where + + ! upper cutoff SH: m_ice + where(m_ice>5.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)<0.0_WP) m_ice=5.0_WP + ! upper cutoff NH: m_ice + where(m_ice>10.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)>0.0_WP) m_ice=10.0_WP + + !___________________________________________________________________________ + ! lower cutoff: m_snow + where(m_snow<0.1e-8_WP) m_snow=0.0_WP + + ! upper cutoff: m_snow + where(m_snow>2.5_WP .and. ulevels_nod2d==1) m_snow=2.5_WP -!_______________________________________________________________________________ + !___________________________________________________________________________ #if defined (__oifs) -where(ice_temp>273.15_WP) ice_temp=273.15_WP + where(ice_temp>273.15_WP) ice_temp=273.15_WP #endif /* (__oifs) */ #if defined (__oifs) -where(ice_temp < 173.15_WP .and. a_ice >= 0.1e-8_WP) ice_temp=271.35_WP + where(ice_temp < 173.15_WP .and. a_ice >= 0.1e-8_WP) ice_temp=271.35_WP #endif /* (__oifs) */ end subroutine cut_off From 2ea94531e2b65c642cdb9c64b6d0230ae26cff55 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 23 Mar 2021 13:55:54 +0100 Subject: [PATCH 095/909] fix bug regarding cavity rigid lid approximation for zstar --- src/oce_ale.F90 | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 1498b2bd8..7fa249ff5 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1555,9 +1555,12 @@ subroutine compute_ssh_rhs_ale(mesh) ! shown in eq (11) rhs of "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (11) rhs if ( .not. trim(which_ALE)=='linfs') then do n=1,myDim_nod2D - if (ulevels_nod2D(n)>1) cycle nzmin = ulevels_nod2D(n) - ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*areasvol(nzmin,n)+(1.0_WP-alpha)*ssh_rhs_old(n) + if (ulevels_nod2D(n)>1) then + ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*areasvol(nzmin,n) + else + ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*areasvol(nzmin,n)+(1.0_WP-alpha)*ssh_rhs_old(n) + end if end do else do n=1,myDim_nod2D From ced4a3e77b6a7736bf492c557cef114f7148787e Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 6 Apr 2021 12:30:53 +0200 Subject: [PATCH 096/909] improve partitioning routine, so that the cavity geometric constrains fully converge --- src/fvom_init.F90 | 574 +++++++++++++++++++++---------------- src/gen_modules_config.F90 | 1 + 2 files changed, 329 insertions(+), 246 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 2a2c73972..7069fa29e 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -149,6 +149,11 @@ subroutine read_mesh_ini(mesh) ! =================== ! Surface mesh ! =================== + if (mype==0) then + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: read elem2d.out & nod2d.out '//achar(27)//'[0m' + end if + open (20,file=trim(meshpath)//'nod2d.out', status='old') open (21,file=trim(meshpath)//'elem2d.out', status='old') READ(20,*) mesh%nod2D @@ -217,8 +222,8 @@ subroutine read_mesh_cavity(mesh) !___________________________________________________________________________ if (mype==0) then - write(*,*) '____________________________________________________________' - write(*,*) ' --> read cavity depth' + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: read cavity depth '//achar(27)//'[0m' end if !___________________________________________________________________________ @@ -324,6 +329,10 @@ end subroutine elem_center ! (a) find edges. To make the procedure fast ! one needs neighbourhood arrays ! ==================== +if (mype==0) then + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute edge connectivity '//achar(27)//'[0m' +end if allocate(ne_num(nod2d), ne_pos(MAX_ADJACENT, nod2D), nn_num(nod2D)) ne_num=0 @@ -632,47 +641,59 @@ END SUBROUTINE find_edges_ini !> Fixes rough topography, by converting some oceans cells to ground cell(reflected by changing levels arrays) !> Creates 2 files: elvls.out, nlvls.out subroutine find_levels(mesh) -use g_config -use mod_mesh -use g_parsup -implicit none -INTEGER :: nodes(3), elems(3), eledges(3) -integer :: elem, elem1, j, n, q, node, enum,count1,count2,exit_flag,i,nz,fileID=111 -real(kind=WP) :: x,dmean -integer :: thers_lev=5 -character*200 :: file_name -type(t_mesh), intent(inout), target :: mesh + use g_config + use mod_mesh + use g_parsup + implicit none + INTEGER :: nodes(3), elems(3), eledges(3) + integer :: elem, elem1, j, n, nneighb,q , node, i, nz + integer :: count_iter, count_neighb_open, exit_flag, fileID=111 + real(kind=WP) :: x,dmean + integer :: max_iter=1000 + character*200 :: file_name + type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" + if (mype==0) then + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: read bottom depth '//achar(27)//'[0m' + end if + + ALLOCATE(mesh%depth(nod2D)) + depth => mesh%depth !required after the allocation, otherwise the pointer remains undefined + file_name=trim(meshpath)//'aux3d.out' + open(fileID, file=file_name) + read(fileID,*) nl ! the number of levels + allocate(mesh%zbar(nl)) ! their standard depths + + zbar => mesh%zbar !required after the allocation, otherwise the pointer remains undefined + read(fileID,*) zbar + if(zbar(2)>0) zbar=-zbar ! zbar is negative + + allocate(mesh%Z(nl-1)) + Z => mesh%Z !required after the allocation, otherwise the pointer remains undefined + Z=zbar(1:nl-1)+zbar(2:nl) ! mid-depths of cells + Z=0.5_WP*Z + DO n=1,nod2D + read(fileID,*) x + if (x>0) x=-x + if (x>zbar(thers_zbar_lev)) x=zbar(thers_zbar_lev) !TODO KK threshholding for depth + depth(n)=x + END DO + close(fileID) + + if(depth(2)>0) depth=-depth ! depth is negative -ALLOCATE(mesh%depth(nod2D)) -depth => mesh%depth !required after the allocation, otherwise the pointer remains undefined -file_name=trim(meshpath)//'aux3d.out' -open(fileID, file=file_name) -read(fileID,*) nl ! the number of levels -allocate(mesh%zbar(nl)) ! their standard depths -zbar => mesh%zbar !required after the allocation, otherwise the pointer remains undefined -read(fileID,*) zbar -if(zbar(2)>0) zbar=-zbar ! zbar is negative -allocate(mesh%Z(nl-1)) -Z => mesh%Z !required after the allocation, otherwise the pointer remains undefined -Z=zbar(1:nl-1)+zbar(2:nl) ! mid-depths of cells -Z=0.5_WP*Z -DO n=1,nod2D - read(fileID,*) x - if (x>0) x=-x - if (x>zbar(thers_lev)) x=zbar(thers_lev) !TODO KK threshholding for depth - depth(n)=x -END DO -close(fileID) - -if(depth(2)>0) depth=-depth ! depth is negative - - -allocate(mesh%nlevels(elem2D)) -nlevels => mesh%nlevels !required after the allocation, otherwise the pointer remains undefined -allocate(mesh%nlevels_nod2D(nod2D)) -nlevels_nod2D => mesh%nlevels_nod2D !required after the allocation, otherwise the pointer remains undefined + !___________________________________________________________________________ + if (mype==0) then + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute elem, vertice bottom depth index '//achar(27)//'[0m' + end if + + allocate(mesh%nlevels(elem2D)) + nlevels => mesh%nlevels !required after the allocation, otherwise the pointer remains undefined + allocate(mesh%nlevels_nod2D(nod2D)) + nlevels_nod2D => mesh%nlevels_nod2D !required after the allocation, otherwise the pointer remains undefined !___________________________________________________________________________ ! Compute the initial number number of elementa levels, based on the vertice @@ -699,25 +720,38 @@ subroutine find_levels(mesh) end if end do if((exit_flag==0).and.(dmean<0)) nlevels(n)=nl - if(dmean>=0) nlevels(n)=thers_lev + if(dmean>=0) nlevels(n)=thers_zbar_lev ! set minimum number of levels to --> thers_lev=5 - if(nlevels(n) do n=1, elem2D + + !___________________________________________________________________________ + ! write out vertical level indices before iterative geometric adaption to + ! exclude isolated cells + if (mype==0) then + !_______________________________________________________________________ + file_name=trim(meshpath)//'elvls_raw.out' + open(fileID, file=file_name) + do n=1,elem2D + write(fileID,*) nlevels(n) + end do + close(fileID) + endif !___________________________________________________________________________ ! check for isolated cells (cells with at least two boundary faces or three ! boundary vertices) and eliminate them --> FESOM2.0 doesn't like these kind ! of cells - do nz=4,nl + do nz=thers_zbar_lev+1,nl exit_flag=0 - count1=0 + count_iter=0 !_______________________________________________________________________ ! iteration loop within each layer - do while((exit_flag==0).and.(count1<1000)) + do while((exit_flag==0).and.(count_iter if elem2D_nodes(1,n) == elem2D_nodes(4,n): True --> q=3 --> triangular mesh ! --> if elem2D_nodes(1,n) == elem2D_nodes(4,n): False --> q=4 --> quad mesh - q = merge(3,4,elem2D_nodes(1,n) == elem2D_nodes(4,n)) + nneighb = merge(3,4,elem2D_nodes(1,n) == elem2D_nodes(4,n)) ! ! +---isolated bottom cell ! ._______________ | _______________________. @@ -736,16 +770,15 @@ subroutine find_levels(mesh) ! |###|###|###|###|###|###|###|###|###|###|###|###|###|###| ! if (nlevels(n)>=nz) then - count2=0 + count_neighb_open=0 elems=elem_neighbors(1:3,n) - !___________________________________________________________ ! loop over neighbouring triangles - do i=1,q + do i=1,nneighb if (elems(i)>1) then if (nlevels(elems(i))>=nz) then !count neighbours - count2=count2+1 + count_neighb_open=count_neighb_open+1 endif endif enddo @@ -754,15 +787,18 @@ subroutine find_levels(mesh) ! check how many open faces to neighboring triangles the cell ! has, if there are less than 2 its isolated (a cell should ! have at least 2 valid neighbours) - if (count2<2) then + if (count_neighb_open<2) then ! if cell is "isolated", and the one levels shallower bottom ! cell would be shallower than the minimum vertical level ! treshhold (thers_lev). --> in this make sorrounding elements ! one level deeper to reconnect the isolated cell - if (nz-11) nlevels(elems(i)) = max(nlevels(elems(i)),nz) + if (nz-10) then + nlevels(elems(i)) = max(nlevels(elems(i)),nz) + end if end do + !if cell is "isolated" convert to one level shallower bottom cell else nlevels(n)=nz-1 @@ -774,6 +810,7 @@ subroutine find_levels(mesh) end if ! --> if (nlevels(n)>=nz) then end do ! --> do n=1,elem2D end do ! --> do while((exit_flag==0).and.(count1<1000)) + write(*,*) ' -[iter ]->: nlevel',count_iter,'/',max_iter,', nz=',nz end do ! --> do nz=4,nl !___________________________________________________________________________ @@ -827,8 +864,9 @@ subroutine find_levels_cavity(mesh) use g_parsup implicit none integer :: nodes(3), elems(3) - integer :: elem, node, nz, j - integer :: exit_flag, count_iter, count_neighb_open, nneighb, cavity_maxlev + integer :: elem, node, nz, j, idx + integer :: count_neighb_open, nneighb, cavity_maxlev, count_isoelem + integer :: exit_flag1, count_iter, max_iter=1000, exit_flag2, count_iter2, max_iter2=10 real(kind=WP) :: dmean character*200 :: file_name integer, allocatable, dimension(:,:) :: numelemtonode, idxelemtonode @@ -836,13 +874,17 @@ subroutine find_levels_cavity(mesh) type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" + if (mype==0) then + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute elem,vertice cavity depth index '//achar(27)//'[0m' + end if + !___________________________________________________________________________ allocate(mesh%ulevels(elem2D)) ulevels => mesh%ulevels allocate(mesh%ulevels_nod2D(nod2D)) ulevels_nod2D => mesh%ulevels_nod2D - - + !___________________________________________________________________________ ! Compute level position of ocean-cavity boundary cavity_maxlev=0 @@ -859,217 +901,248 @@ subroutine find_levels_cavity(mesh) !_______________________________________________________________________ ! vertical elem level index of cavity-ocean boundary - exit_flag=0 ulevels(elem) = 1 + if (dmean<0.0_WP) ulevels(elem) = 2 + do nz=1,nlevels(elem)-1 - !!PS if(Z(nz) I need 4 valid full depth, 3 valid mid-depth levels ulevels(elem)=nz ! to compute shechpetkin PGF exit end if end do - if ((exit_flag==0).and.(dmean<0)) ulevels(elem)=nlevels(elem) cavity_maxlev = max(cavity_maxlev,ulevels(elem)) end do + !___________________________________________________________________________ + ! write out cavity mesh files for vertice and elemental position of + ! vertical cavity-ocean boundary before the iterative geometric adaption to + ! eliminate isolated cells + if (mype==0) then + ! write out elemental cavity-ocean boundary level + file_name=trim(meshpath)//'cavity_elvls_raw.out' + open(20, file=file_name) + do elem=1,elem2D + write(20,*) ulevels(elem) + enddo + close(20) + endif !___________________________________________________________________________ ! Eliminate cells that have two cavity boundary faces --> should not be ! possible in FESOM2.0 ! loop over all cavity levels allocate(elemreducelvl(elem2d)) - do nz=1,cavity_maxlev - exit_flag=0 - count_iter=0 + allocate(numelemtonode(nl,nod2D),idxelemtonode(nl,nod2D)) + + !___________________________________________________________________________ + ! outer iteration loop + count_iter2 = 0 + exit_flag2 = 0 + do while((exit_flag2==0) .and. (count_iter2 tri mesh, nneighb = 4 --> quad mesh - nneighb = merge(3,4,elem2D_nodes(1,elem) == elem2D_nodes(4,elem)) - ! - ! .___________________________.~~~~~~~~~~~~~~~~~~~~~~~~~~ - ! |###|###|###|###|###|###|###| - ! |# CAVITY |###| . |###|###| OCEAN - ! |###|###|###| /|\|###| - ! |###|###| | - ! |###| +-- Not good can lead to isolated cells - ! - if (nz >= ulevels(elem)) then - count_neighb_open=0 - elems=elem_neighbors(1:3,elem) - - !___________________________________________________________ - ! loop over neighbouring triangles - do j = 1, nneighb - if (elems(j)>0) then ! if its a valid boundary triangle, 0=missing value - ! check for isolated cell - if (ulevels(elems(j))<=nz) then - !count the open faces to neighboring cells - count_neighb_open=count_neighb_open+1 - endif - end if - end do ! --> do i = 1, nneighb - -!!PS if (elem==133438) then -!!PS write(*,*) -!!PS write(*,*) 'nz =', nz -!!PS write(*,*) 'elem =', elem -!!PS write(*,*) 'ulevels(elem) =', ulevels(elem) -!!PS write(*,*) 'nlevels(elem) =', nlevels(elem) -!!PS write(*,*) 'elemreducelvl(elem)=',elemreducelvl(elem) -!!PS write(*,*) 'elems =', elems -!!PS write(*,*) 'ulevels(elems) =', ulevels(elems) -!!PS write(*,*) 'nlevels(elems) =', nlevels(elems) -!!PS write(*,*) 'nlvl-ulvl =', nlevels(elems)-ulevels(elems) -!!PS write(*,*) 'elemreducelvl(elems)=',elemreducelvl(elems) -!!PS write(*,*) 'count_neighb_open =',count_neighb_open -!!PS write(*,*) -!!PS end if - - !___________________________________________________________ - ! check how many open faces to neighboring triangles the cell - ! has, if there are less than 2 its isolated (a cell should - ! have at least 2 valid neighbours) - ! --> in this case shift cavity-ocean interface one level down - if (count_neighb_open<2) then - ! if cell is isolated convert it to a deeper ocean levels - ! except when this levels would remain less than 3 valid - ! bottom levels --> in case make the levels of all sorounding - ! one level shallower - if (nlevels(elem)-(nz+1)<3) then - do j = 1, nneighb - if (elems(j)>0) then - if (ulevels(elems(j))>1 .and. ulevels(elems(j))>ulevels(elem) ) then - ulevels(elems(j)) = min(ulevels(elems(j)),nz) - elemreducelvl(elems(j))=1 - end if - end if - end do + ! iteration loop within each layer + do while((exit_flag1==0).and.(count_iter tri mesh, nneighb = 4 --> quad mesh + nneighb = merge(3,4,elem2D_nodes(1,elem) == elem2D_nodes(4,elem)) + elems = elem_neighbors(1:3,elem) + ! + ! .___________________________.~~~~~~~~~~~~~~~~~~~~~~~~~~ + ! |###|###|###|###|###|###|###| + ! |# CAVITY |###| . |###|###| OCEAN + ! |###|###|###| /|\|###| + ! |###|###| | + ! |###| +-- Not good can lead to isolated cells + ! + if ( nz >= ulevels(elem) .and. nz0) then - if (ulevels(elems(j))>1 .and. ulevels(elems(j))>ulevels(elem) ) then - ulevels(elems(j)) = min(ulevels(elems(j)),nz) - elemreducelvl(elems(j))=1 - end if - end if - end do - end if + !_______________________________________________________ + ! loop over neighbouring triangles + do j = 1, nneighb + if (elems(j)>0) then ! if its a valid boundary triangle, 0=missing value + ! check for isolated cell + if ( ulevels(elems(j))<= nz .and. & + nlevels(elems(j))> nz ) then + !count the open faces to neighboring cells + count_neighb_open=count_neighb_open+1 + endif + end if + end do ! --> do i = 1, nneighb - !force recheck for all current ocean cells - exit_flag=0 - endif ! --> if (count_neighb_open<2) then - - end if ! --> if (nz >= ulevels(elem)) then - end do ! --> do elem=1,elem2D - end do ! --> do while((exit_flag==0).and.(count_iter<1000)) - end do ! --> do nz=1,cavity_maxlev - deallocate(elemreducelvl) - - !___________________________________________________________________________ - ! vertical vertice level index of cavity_ocean boundary - ulevels_nod2D = nl - do elem=1,elem2D - nneighb = merge(3,4,elem2D_nodes(1,elem) == elem2D_nodes(4,elem)) + !_______________________________________________________ + ! check how many open faces to neighboring triangles the cell + ! has, if there are less than 2 its isolated (a cell should + ! have at least 2 valid neighbours) + ! --> in this case shift cavity-ocean interface one level down + if (count_neighb_open<2) then + count_isoelem = count_isoelem+1 + ! if cell is isolated convert it to a deeper ocean levels + ! except when this levels would remain less than 3 valid + ! bottom levels --> in case make the levels of all sorounding + ! triangles shallower + if ( (nlevels(elem)-(nz+1))>=3 .and. elemreducelvl(elem)==0 ) then + ulevels(elem)=nz+1 + else + ! --> can not increase depth anymore to eleminate isolated + ! cell, otherwise lessthan 3 valid layers + ! --> therefor reduce depth of ONE!!! of the neighbouring + ! triangles. Choose trinagle whos depth is already closest + ! to nz + idx = minloc(ulevels(elems)-nz, 1, MASK=( (elems>0) .and. ((ulevels(elems)-nz)>0) ) ) + ulevels(elems(idx)) = nz-1 + elemreducelvl(elems(idx)) = elemreducelvl(elems(idx))+1 + end if + + !force recheck for all current ocean cells + exit_flag1=0 + end if ! --> if (count_neighb_open<2) then + end if ! --> if (nz >= ulevels(elem)) then + end do ! --> do elem=1,elem2D + end do ! --> do while((exit_flag==0).and.(count_iter<1000)) + write(*,*) ' -[iter ]->: ulevel',count_iter,'/',max_iter,', nz=',nz + end do ! --> do nz=1,cavity_maxlev + !_______________________________________________________________________ - ! loop over neighbouring triangles - do j=1,nneighb - node=elem2D_nodes(j,elem) - ulevels_nod2D(node)=min(ulevels_nod2D(node),ulevels(elem)) - end do - end do - - !___________________________________________________________________________ - ! check ulevels if ulevels=nlevels(elem)) then - if (mype==0) write(*,*) ' ERROR: found element cavity depth deeper or equal bottom depth' - call par_ex(0) - end if - if (nlevels(elem)-ulevels(elem)<3) then - write(*,*) ' ERROR: found less than three valid element ocean layers' - write(*,*) ' ulevels,nlevels = ',ulevels(elem), nlevels(elem) - write(*,*) ' ulevels(neighb) = ',ulevels(elem_neighbors(1:3,elem)) - write(*,*) ' nlevels(neighb) = ',nlevels(elem_neighbors(1:3,elem)) - call par_ex(0) - end if - end do - - !___________________________________________________________________________ - ! check ulevels_nod2d if ulevels_nod2d=nlevels_nod2D(elem)) then - if (mype==0) write(*,*) ' ERROR: found vertice cavity depth deeper or equal bottom depth' - call par_ex(0) - end if - if (nlevels_nod2D(elem)-ulevels_nod2D(elem)<3) then - if (mype==0) write(*,*) ' ERROR: found less than three valid vertice ocean layers' - end if - end do - - do elem=1,elem2D - if (ulevels(elem)< maxval(ulevels_nod2D(elem2D_nodes(:,elem))) ) then - if (mype==0) then - write(*,*) ' ERROR: found element cavity depth that is shallower than its valid maximum cavity vertice depths' - write(*,*) ' ule | uln = ',ulevels(elem),' | ',ulevels_nod2D(elem2D_nodes(:,elem)) - end if - call par_ex(0) - end if - end do - - !___________________________________________________________________________ - ! check how many triangle elements contribute to every vertice in every layer - ! every vertice in every layer should be connected to at least two triangle - ! elements ! - allocate(numelemtonode(nl,nod2D),idxelemtonode(nl,nod2D)) - numelemtonode=0 - idxelemtonode=0 - do node=1, nod2D - do j=1,nod_in_elem2D_num(node) - elem=nod_in_elem2D(j,node) - do nz=ulevels(elem),nlevels(elem)-1 - numelemtonode(nz,node) = numelemtonode(nz,node) + 1 - idxelemtonode(nz,node) = elem + ! vertical vertice level index of cavity_ocean boundary + ulevels_nod2D = nl + do elem=1,elem2D + nneighb = merge(3,4,elem2D_nodes(1,elem) == elem2D_nodes(4,elem)) + !___________________________________________________________________ + ! loop over neighbouring triangles + do j=1,nneighb + node=elem2D_nodes(j,elem) + ulevels_nod2D(node)=min(ulevels_nod2D(node),ulevels(elem)) end do end do - end do - - exit_flag = 0 - do node=1, nod2D - do nz=1,nl - if (numelemtonode(nz,node)== 1) then - write(*,*) 'ERROR: found vertice with just one triangle:', mype, nz, 'node=',node, ulevels_nod2d(node), nlevels_nod2D(node), & - 'elem=', idxelemtonode(nz,node), ulevels(idxelemtonode(nz,node)), nlevels(idxelemtonode(nz,node)) - exit_flag = 1 + + !_______________________________________________________________________ + ! check ulevels if ulevels=nlevels(elem)) then + if (mype==0) write(*,*) ' -[check]->: elem cavity depth deeper or equal bottom depth, elem=',elem + exit_flag2 = 0 + end if + + if (nlevels(elem)-ulevels(elem)<3) then + write(*,*) ' -[check]->: less than three valid elem ocean layers, elem=',elem + !!PS write(*,*) ' elem = ',elem + !!PS write(*,*) ' elem_neighbors = ',elem_neighbors(1:3,elem) + !!PS write(*,*) ' ulevels,nlevels = ',ulevels(elem), nlevels(elem) + !!PS write(*,*) ' ulevels(neighb) = ',ulevels(elem_neighbors(1:3,elem)) + !!PS write(*,*) ' nlevels(neighb) = ',nlevels(elem_neighbors(1:3,elem)) + !!PS write(*,*) ' lon_node = ',mesh%coord_nod2D(1,elem2D_nodes(1:3,elem))/rad + !!PS write(*,*) ' lat_node = ',mesh%coord_nod2D(2,elem2D_nodes(1:3,elem))/rad + !!PS write(*,*) ' lon_elem = ',sum(mesh%coord_nod2D(1,elem2D_nodes(1:3,elem))/rad)/3.0 + !!PS write(*,*) ' lat_elem = ',sum(mesh%coord_nod2D(2,elem2D_nodes(1:3,elem))/rad)/3.0 + exit_flag2 = 0 + + end if + end do + + !_______________________________________________________________________ + ! check ulevels_nod2d if ulevels_nod2d=nlevels_nod2D(elem)) then + if (mype==0) write(*,*) ' -[check]->: vertice cavity depth deeper or equal bottom depth, node=', elem2d + exit_flag2 = 0 + + end if + if (nlevels_nod2D(elem)-ulevels_nod2D(elem)<3) then + if (mype==0) write(*,*) ' -[check]->: less than three valid vertice ocean layers, node=', elem2d + exit_flag2 = 0 + + end if + end do + + do elem=1,elem2D + if (ulevels(elem)< maxval(ulevels_nod2D(elem2D_nodes(:,elem))) ) then + if (mype==0) then + write(*,*) ' -[check]->: found elem cavity shallower than its valid maximum cavity vertice depths, elem=', elem2d + !!PS write(*,*) ' ule | uln = ',ulevels(elem),' | ',ulevels_nod2D(elem2D_nodes(:,elem)) + end if + exit_flag2 = 0 + + end if + end do + + !_______________________________________________________________________ + ! check how many triangle elements contribute to every vertice in every layer + ! every vertice in every layer should be connected to at least two triangle + ! elements ! + numelemtonode=0 + idxelemtonode=0 + do node=1, nod2D + do j=1,nod_in_elem2D_num(node) + elem=nod_in_elem2D(j,node) + do nz=ulevels(elem),nlevels(elem)-1 + numelemtonode(nz,node) = numelemtonode(nz,node) + 1 + idxelemtonode(nz,node) = elem + end do + end do + end do + + count_iter=0 + do node=1, nod2D + do nz=1,nl + if (numelemtonode(nz,node)== 1) then + exit_flag2 = 0 + count_iter = count_iter+1 + write(*,*) ' -[check]->: node has only 1 triangle: n=', node, ',nz=',nz +!!PS write(*,*) ' node=', node, ulevels_nod2d(node), nlevels_nod2D(node) +!!PS write(*,*) ' elem=', idxelemtonode(nz,node), ulevels(idxelemtonode(nz,node)), nlevels(idxelemtonode(nz,node)) +!!PS write(*,*) ' lon/lat_n = ',mesh%coord_nod2D(1,node)/rad, mesh%coord_nod2D(2,node)/rad +!!PS write(*,*) ' n_eneighb =', nod_in_elem2D(1:nod_in_elem2D_num(node),node) +!!PS write(*,*) ' ulvls(n_eneighb)=', ulevels(nod_in_elem2D(1:nod_in_elem2D_num(node),node)) +!!PS write(*,*) ' nlvls(n_eneighb)=', nlevels(nod_in_elem2D(1:nod_in_elem2D_num(node),node)) +!!PS write(*,*) + exit + end if + end do end do - end do + + !_______________________________________________________________________ + if (exit_flag2 == 0) then + print *, achar(27)//'[33m' //'____________________________________________________________'//achar(27)//'[0m' + print *, ' -['//achar(27)//'[33m'//'WARN'//achar(27)//'[0m'//']->: Cavity geom. not converged yet, do further outer iteration' + write(*,*) ' iter step ', count_iter2,' out of ', max_iter2 + write(*,*) + end if + + !_______________________________________________________________________ + end do + deallocate(elemreducelvl) deallocate(numelemtonode,idxelemtonode) - if (exit_flag == 1) call par_ex(0) !___________________________________________________________________________ -!!PS ! compute nodal cavity flag: 1 yes cavity/ 0 no cavity -!!PS cavity_flag = 0 -!!PS do node=1,nod2D -!!PS if (ulevels_nod2D(node)>1) cavity_flag(node)=1 -!!PS end do + if (exit_flag2 == 0) then + write(*,*) + print *, achar(27)//'[31m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;31m'//' -[ERROR]->: Cavity geometry constrains did not converge !!!'//achar(27)//'[0m' + write(*,*) + call par_ex(0) + else + write(*,*) + print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' + print *, ' -['//achar(27)//'[7;32m'//' OK '//achar(27)//'[0m'//']->: Cavity geometry constrains did converge!!!' + write(*,*) + end if + !___________________________________________________________________________ ! write out cavity mesh files for vertice and elemental position of @@ -1086,14 +1159,10 @@ subroutine find_levels_cavity(mesh) ! write out vertice cavity-ocean boundary level + yes/no cavity flag file_name=trim(meshpath)//'cavity_nlvls.out' open(20, file=file_name) -!!PS file_name=trim(meshpath)//'cavity_flag.out' -!!PS open(21, file=file_name) do node=1,nod2D write(20,*) ulevels_nod2D(node) -!!PS write(21,*) cavity_flag(node) enddo close(20) -!!PS close(21) endif end subroutine find_levels_cavity @@ -1322,6 +1391,10 @@ subroutine communication_ini(mesh) type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" + if (mype==0) then + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute communication arrays '//achar(27)//'[0m' + end if ! Create the distributed mesh subdirectory write(npes_string,"(I10)") npes dist_mesh_dir=trim(meshpath)//'dist_'//trim(ADJUSTL(npes_string))//'/' @@ -1381,6 +1454,11 @@ end subroutine partit end interface #include "associate_mesh_ini.h" + if (mype==0) then + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute partitioning '//achar(27)//'[0m' + end if + ! Construct partitioning vector if (n_levels<1 .OR. n_levels>10) then print *,'Number of hierarchic partition levels is out of range [1-10]! Aborting...' @@ -1446,7 +1524,11 @@ subroutine check_partitioning(mesh) integer, allocatable :: ne_part(:), ne_part_num(:), ne_part_load(:,:) type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" - + + if (mype==0) then + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: check partitioning '//achar(27)//'[0m' + end if ! Check load balancing do i=0,npes-1 nod_per_partition(1,i) = count(part(:) == i) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index 25ba9be26..4c2df68b8 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -80,6 +80,7 @@ module g_config real(kind=WP) :: gammaEuler=-90. ! then around new z. ! Set to zeros to work with ! geographical coordinates + integer :: thers_zbar_lev=5 ! minimum number of levels to be character(len=5) :: which_depth_n2e='mean' namelist /geometry/ cartesian, fplane, & cyclic_length, rotated_grid, alphaEuler, betaEuler, gammaEuler, force_rotation, which_depth_n2e From 4b9b0d13ebaec087aa24abe773c4d5e133a234e8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 6 Apr 2021 12:31:51 +0200 Subject: [PATCH 097/909] fix bug in zlevel found by thomas rackow --- src/oce_ale.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 7fa249ff5..52fcd0a51 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1870,7 +1870,7 @@ subroutine vert_vel_ale(mesh) ! layer reached already minimum layerthickness) max_dhbar2distr = 0.0_WP !max_dhbar2distr = (zbar(1:lzstar_lev)-zbar(2:lzstar_lev+1))*min_hnode - hnode(1:lzstar_lev,n); - max_dhbar2distr = (zbar(nzmin:nzmin+lzstar_lev-1)-zbar(nzmin:nzmin+lzstar_lev-1+1))*min_hnode - hnode(nzmin:nzmin+lzstar_lev-1,n); + max_dhbar2distr = (zbar(nzmin:nzmin+lzstar_lev-1)-zbar(nzmin+1:nzmin+lzstar_lev-1+1))*min_hnode - hnode(nzmin:nzmin+lzstar_lev-1,n); where (max_dhbar2distr>=0.0_WP) max_dhbar2distr=0.0_WP !_______________________________________________________________ From b5aea4cfc68fcfcb6a56693dc99f3959f08010a6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 8 Apr 2021 11:07:00 +0200 Subject: [PATCH 098/909] fix bug in cavity partitioning --- src/fvom_init.F90 | 135 ++++++++++++++++++++++++++-------------------- 1 file changed, 78 insertions(+), 57 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 7069fa29e..cf690ae03 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -810,7 +810,7 @@ subroutine find_levels(mesh) end if ! --> if (nlevels(n)>=nz) then end do ! --> do n=1,elem2D end do ! --> do while((exit_flag==0).and.(count1<1000)) - write(*,*) ' -[iter ]->: nlevel',count_iter,'/',max_iter,', nz=',nz + write(*,"(A, I5, A, i5, A, I3)") ' -[iter ]->: nlevel, iter/maxiter=',count_iter,'/',max_iter,', nz=',nz end do ! --> do nz=4,nl !___________________________________________________________________________ @@ -854,7 +854,8 @@ subroutine find_levels(mesh) endif end subroutine find_levels - +! +! !_______________________________________________________________________________ ! finds elemental and nodal levels of cavity-ocean boundary. ! Creates 2 files: cavity_elvls.out, cavity_nlvls.out @@ -870,10 +871,10 @@ subroutine find_levels_cavity(mesh) real(kind=WP) :: dmean character*200 :: file_name integer, allocatable, dimension(:,:) :: numelemtonode, idxelemtonode - integer, allocatable, dimension(:) :: elemreducelvl + integer, allocatable, dimension(:) :: elemreducelvl, elemfixlvl type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" - + !___________________________________________________________________________ if (mype==0) then print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[7;1m' //' -->: compute elem,vertice cavity depth index '//achar(27)//'[0m' @@ -931,21 +932,22 @@ subroutine find_levels_cavity(mesh) ! Eliminate cells that have two cavity boundary faces --> should not be ! possible in FESOM2.0 ! loop over all cavity levels - allocate(elemreducelvl(elem2d)) + allocate(elemreducelvl(elem2d),elemfixlvl(elem2d)) allocate(numelemtonode(nl,nod2D),idxelemtonode(nl,nod2D)) !___________________________________________________________________________ ! outer iteration loop count_iter2 = 0 exit_flag2 = 0 + elemfixlvl = 0 do while((exit_flag2==0) .and. (count_iter20) then ! if its a valid boundary triangle, 0=missing value ! check for isolated cell - if ( ulevels(elems(j))<= nz .and. & + if ( ulevels(elems(j))<= nz .and. & nlevels(elems(j))> nz ) then !count the open faces to neighboring cells count_neighb_open=count_neighb_open+1 @@ -993,7 +995,7 @@ subroutine find_levels_cavity(mesh) ! except when this levels would remain less than 3 valid ! bottom levels --> in case make the levels of all sorounding ! triangles shallower - if ( (nlevels(elem)-(nz+1))>=3 .and. elemreducelvl(elem)==0 ) then + if ( (nlevels(elem)-(nz+1))>=3 .and. elemreducelvl(elem)==0 .and. elemfixlvl(elem)==0) then ulevels(elem)=nz+1 else ! --> can not increase depth anymore to eleminate isolated @@ -1009,10 +1011,13 @@ subroutine find_levels_cavity(mesh) !force recheck for all current ocean cells exit_flag1=0 end if ! --> if (count_neighb_open<2) then - end if ! --> if (nz >= ulevels(elem)) then + + end if ! --> if ( nz >= ulevels(elem) .and. nz do elem=1,elem2D + end do ! --> do while((exit_flag==0).and.(count_iter<1000)) - write(*,*) ' -[iter ]->: ulevel',count_iter,'/',max_iter,', nz=',nz + write(*,"(A, I5, A, i5, A, I3)") ' -[iter ]->: ulevel, iter/maxiter=',count_iter,'/',max_iter,', nz=',nz end do ! --> do nz=1,cavity_maxlev !_______________________________________________________________________ @@ -1026,7 +1031,7 @@ subroutine find_levels_cavity(mesh) node=elem2D_nodes(j,elem) ulevels_nod2D(node)=min(ulevels_nod2D(node),ulevels(elem)) end do - end do + end do ! --> do elem=1,elem2D !_______________________________________________________________________ ! check ulevels if ulevels: less than three valid elem ocean layers, elem=',elem - !!PS write(*,*) ' elem = ',elem - !!PS write(*,*) ' elem_neighbors = ',elem_neighbors(1:3,elem) - !!PS write(*,*) ' ulevels,nlevels = ',ulevels(elem), nlevels(elem) - !!PS write(*,*) ' ulevels(neighb) = ',ulevels(elem_neighbors(1:3,elem)) - !!PS write(*,*) ' nlevels(neighb) = ',nlevels(elem_neighbors(1:3,elem)) - !!PS write(*,*) ' lon_node = ',mesh%coord_nod2D(1,elem2D_nodes(1:3,elem))/rad - !!PS write(*,*) ' lat_node = ',mesh%coord_nod2D(2,elem2D_nodes(1:3,elem))/rad - !!PS write(*,*) ' lon_elem = ',sum(mesh%coord_nod2D(1,elem2D_nodes(1:3,elem))/rad)/3.0 - !!PS write(*,*) ' lat_elem = ',sum(mesh%coord_nod2D(2,elem2D_nodes(1:3,elem))/rad)/3.0 exit_flag2 = 0 end if - end do + end do ! --> do elem=1,elem2D !_______________________________________________________________________ ! check ulevels_nod2d if ulevels_nod2d=nlevels_nod2D(elem)) then - if (mype==0) write(*,*) ' -[check]->: vertice cavity depth deeper or equal bottom depth, node=', elem2d + do node=1,nod2D + !___________________________________________________________________ + if (ulevels_nod2D(node)>=nlevels_nod2D(node)) then + if (mype==0) write(*,*) ' -[check]->: vertice cavity depth deeper or equal bottom depth, node=', node exit_flag2 = 0 - end if - if (nlevels_nod2D(elem)-ulevels_nod2D(elem)<3) then - if (mype==0) write(*,*) ' -[check]->: less than three valid vertice ocean layers, node=', elem2d + + !___________________________________________________________________ + if (nlevels_nod2D(node)-ulevels_nod2D(node)<3) then + if (mype==0) write(*,*) ' -[check]->: less than three valid vertice ocean layers, node=', node exit_flag2 = 0 - end if - end do + end do ! --> do node=1,nod2D do elem=1,elem2D if (ulevels(elem)< maxval(ulevels_nod2D(elem2D_nodes(:,elem))) ) then - if (mype==0) then - write(*,*) ' -[check]->: found elem cavity shallower than its valid maximum cavity vertice depths, elem=', elem2d - !!PS write(*,*) ' ule | uln = ',ulevels(elem),' | ',ulevels_nod2D(elem2D_nodes(:,elem)) - end if + if (mype==0) write(*,*) ' -[check]->: found elem cavity shallower than its valid maximum cavity vertice depths, elem=', elem2d exit_flag2 = 0 - end if - end do + end do ! --> do elem=1,elem2D !_______________________________________________________________________ - ! check how many triangle elements contribute to every vertice in every layer - ! every vertice in every layer should be connected to at least two triangle - ! elements ! + ! compute how many triangle elements contribute to every vertice in every layer numelemtonode=0 idxelemtonode=0 do node=1, nod2D @@ -1095,32 +1086,59 @@ subroutine find_levels_cavity(mesh) idxelemtonode(nz,node) = elem end do end do - end do + end do ! --> do node=1, nod2D + !_______________________________________________________________________ + ! check if every vertice in every layer should be connected to at least + ! two triangle elements ! count_iter=0 do node=1, nod2D - do nz=1,nl - if (numelemtonode(nz,node)== 1) then + + !___________________________________________________________________ + do nz = ulevels_nod2D(node), nlevels_nod2D(node)-1 + + !_______________________________________________________________ + ! nodes has zero neighbouring triangles and is completely isolated + ! need to adapt ulevels by hand --> inflicts another outher + ! iteration loop (exit_flag2=0) + if (numelemtonode(nz,node)==0) then exit_flag2 = 0 count_iter = count_iter+1 - write(*,*) ' -[check]->: node has only 1 triangle: n=', node, ',nz=',nz -!!PS write(*,*) ' node=', node, ulevels_nod2d(node), nlevels_nod2D(node) -!!PS write(*,*) ' elem=', idxelemtonode(nz,node), ulevels(idxelemtonode(nz,node)), nlevels(idxelemtonode(nz,node)) -!!PS write(*,*) ' lon/lat_n = ',mesh%coord_nod2D(1,node)/rad, mesh%coord_nod2D(2,node)/rad -!!PS write(*,*) ' n_eneighb =', nod_in_elem2D(1:nod_in_elem2D_num(node),node) -!!PS write(*,*) ' ulvls(n_eneighb)=', ulevels(nod_in_elem2D(1:nod_in_elem2D_num(node),node)) -!!PS write(*,*) ' nlvls(n_eneighb)=', nlevels(nod_in_elem2D(1:nod_in_elem2D_num(node),node)) -!!PS write(*,*) - exit + write(*,"( A, I1, A, I7, A, I3)") ' -[check]->: node has only ', numelemtonode(nz,node) ,' triangle: n=', node, ', nz=',nz + !___________________________________________________________ + ! if node has no neighboring triangle somewhere in the middle + ! of the water column at nz (can happen but seldom) than set + ! all ulevels(elem) of sorounding trinagles whos ulevel is + ! depper than nz, equal to nz and fix that value to forbit it + ! to be changed (elemfixlvl > 0) + do j=1,nod_in_elem2D_num(node) + elem=nod_in_elem2D(j,node) + if (ulevels(elem)>nz) then + ulevels(elem) = nz + elemfixlvl(elem) = elemfixlvl(elem)+1 + end if + end do + end if + + !_______________________________________________________________ + ! nodes has just one neighbouring triangle --> but needs two --> + ! inflicts another outher iteration loop (exit_flag2=0) + if (numelemtonode(nz,node)==1) then + exit_flag2 = 0 + count_iter = count_iter+1 + write(*,"( A, I1, A, I7, A, I3)") ' -[check]->: node has only ', numelemtonode(nz,node) ,' triangle: n=', node, ', nz=',nz end if - end do - end do + + end do ! --> do nz = ulevels_nod2D(node), nlevels_nod2D(node)-1 + + end do ! --> do node=1, nod2D !_______________________________________________________________________ + ! check if cavity geometry did converge if (exit_flag2 == 0) then print *, achar(27)//'[33m' //'____________________________________________________________'//achar(27)//'[0m' print *, ' -['//achar(27)//'[33m'//'WARN'//achar(27)//'[0m'//']->: Cavity geom. not converged yet, do further outer iteration' - write(*,*) ' iter step ', count_iter2,' out of ', max_iter2 + write(*,"(A, I3, A, I3)") ' iter step ', count_iter2,'/', max_iter2 write(*,*) end if @@ -1130,6 +1148,8 @@ subroutine find_levels_cavity(mesh) deallocate(numelemtonode,idxelemtonode) !___________________________________________________________________________ + ! check if cavity geometry totaly converged or failed to converge in the later + ! case will break up model if (exit_flag2 == 0) then write(*,*) print *, achar(27)//'[31m' //'____________________________________________________________'//achar(27)//'[0m' @@ -1139,7 +1159,8 @@ subroutine find_levels_cavity(mesh) else write(*,*) print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' - print *, ' -['//achar(27)//'[7;32m'//' OK '//achar(27)//'[0m'//']->: Cavity geometry constrains did converge!!!' + print *, ' -['//achar(27)//'[7;32m'//' OK '//achar(27)//'[0m'//']->: Cavity geometry constrains did converge, Yippee-Ki-Yay, Beep!!!' + write(*,*) end if From b489384b9d255b23d561e4cebd468b3bb0f5017f Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 8 Apr 2021 12:34:47 +0200 Subject: [PATCH 099/909] delete some check marks --- src/oce_ale.F90 | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 52fcd0a51..9d2fbb2a4 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1811,6 +1811,7 @@ subroutine vert_vel_ale(mesh) if (Fer_GM) then fer_Wvel(nz,n)=fer_Wvel(nz,n)/area(nz,n) end if + end do end do ! | @@ -2400,16 +2401,7 @@ subroutine impl_vert_visc_ale(mesh) b(nz)=b(nz)-min(0._WP, wd)*zinv c(nz)=c(nz)-max(0._WP, wd)*zinv - if (a(nz)/=a(nz) .or. b(nz)/=b(nz) .or. c(nz)/=c(nz)) then - write(*,*) ' --> found a,b,c is NaN' - write(*,*) 'mype=',mype - write(*,*) 'nz=',nz - write(*,*) 'a(nz), b(nz), c(nz)=',a(nz), b(nz), c(nz) - write(*,*) 'Av(nz,elem)=',Av(nz,elem) - write(*,*) 'Av(nz+1,elem)=',Av(nz+1,elem) - write(*,*) 'Z_n(nz-1:nz+1)=',Z_n(nz-1:nz+1) - write(*,*) 'zbar_n(nz:nz+1)=',zbar_n(nz:nz+1) - endif + end do ! The last row zinv=1.0_WP*dt/(zbar_n(nzmax-1)-zbar_n(nzmax)) @@ -2666,10 +2658,12 @@ subroutine oce_timestep_ale(n, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_vel_rhs'//achar(27)//'[0m' -!!PS if (any(UV_rhs/=UV_rhs)) write(*,*) mype,' --> found NaN UV_rhs before compute_vel_rhs' -!!PS if (any(UV/=UV)) write(*,*) mype,' --> found NaN UV before compute_vel_rhs' -!!PS if (any(ssh_rhs/=ssh_rhs))write(*,*) mype,' --> found NaN ssh_rhs before compute_ssh_rhs_ale' -!!PS if (any(ssh_rhs_old/=ssh_rhs_old))write(*,*) mype,' --> found NaN ssh_rhs_old before compute_ssh_rhs_ale' + +!!PS if (any(UV_rhs/=UV_rhs)) write(*,*) n, mype,' --> found NaN UV_rhs before compute_vel_rhs' +!!PS if (any(UV/=UV)) write(*,*) n, mype,' --> found NaN UV before compute_vel_rhs' +!!PS if (any(ssh_rhs/=ssh_rhs)) write(*,*) n, mype,' --> found NaN ssh_rhs before compute_vel_rhs' +!!PS if (any(ssh_rhs_old/=ssh_rhs_old)) write(*,*) n, mype,' --> found NaN ssh_rhs_old before compute_vel_rhs' +!!PS if (any(abs(Wvel_e)>1.0e20)) write(*,*) n, mype,' --> found Inf Wvel_e before compute_vel_rhs' if(mom_adv/=3) then call compute_vel_rhs(mesh) @@ -2684,7 +2678,7 @@ subroutine oce_timestep_ale(n, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call impl_vert_visc_ale'//achar(27)//'[0m' if(i_vert_visc) call impl_vert_visc_ale(mesh) t2=MPI_Wtime() - + !___________________________________________________________________________ ! >->->->->->->->->->->->-> ALE-part starts <-<-<-<-<-<-<-<-<-<-<-<- !___________________________________________________________________________ @@ -2699,6 +2693,7 @@ subroutine oce_timestep_ale(n, mesh) ! Take updated ssh matrix and solve --> new ssh! t30=MPI_Wtime() call solve_ssh_ale(mesh) + if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(mesh) t3=MPI_Wtime() From 4568b03e0b8f87105d3daeb4806ae7737e419682 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 14 Apr 2021 17:45:19 +0200 Subject: [PATCH 100/909] change some screen output when partition --- src/fvom_init.F90 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index cf690ae03..63ad3baf8 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -815,6 +815,7 @@ subroutine find_levels(mesh) !___________________________________________________________________________ ! vertical vertice level index of ocean bottom boundary + write(*,"(A)" ) ' -[compu]->: nlevels_nod2D ' nlevels_nod2D=0 do n=1,elem2D q = merge(3,4,elem2D_nodes(1,n) == elem2D_nodes(4,n)) @@ -1022,6 +1023,7 @@ subroutine find_levels_cavity(mesh) !_______________________________________________________________________ ! vertical vertice level index of cavity_ocean boundary + write(*,"(A)" ) ' -[compu]->: ulevels_nod2D ' ulevels_nod2D = nl do elem=1,elem2D nneighb = merge(3,4,elem2D_nodes(1,elem) == elem2D_nodes(4,elem)) @@ -1153,13 +1155,13 @@ subroutine find_levels_cavity(mesh) if (exit_flag2 == 0) then write(*,*) print *, achar(27)//'[31m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;31m'//' -[ERROR]->: Cavity geometry constrains did not converge !!!'//achar(27)//'[0m' + print *, achar(27)//'[7;31m'//' -[ERROR]->: Cavity geometry constrains did not converge !!! *\(>︿<)/*'//achar(27)//'[0m' write(*,*) call par_ex(0) else write(*,*) print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' - print *, ' -['//achar(27)//'[7;32m'//' OK '//achar(27)//'[0m'//']->: Cavity geometry constrains did converge, Yippee-Ki-Yay, Beep!!!' + print *, ' -['//achar(27)//'[7;32m'//' OK '//achar(27)//'[0m'//']->: Cavity geometry constrains did converge !!! *\(^o^)/*' write(*,*) end if From d7201e8180fa2d81a065da6360d595b344cbcfe3 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 14 Apr 2021 17:46:05 +0200 Subject: [PATCH 101/909] small change in ../src/write_step_info.F90 --- src/write_step_info.F90 | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 35a5ba69d..01de0d6d3 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -56,11 +56,12 @@ subroutine write_step_info(istep,outfreq, mesh) loc =0. !_______________________________________________________________________ do n=1, myDim_nod2D - loc_eta = loc_eta + area(1, n)*eta_n(n) - loc_hbar = loc_hbar + area(1, n)*hbar(n) - loc_deta = loc_deta + area(1, n)*d_eta(n) - loc_dhbar = loc_dhbar + area(1, n)*(hbar(n)-hbar_old(n)) - loc_wflux = loc_wflux + area(1, n)*water_flux(n) + if (ulevels_nod2D(n)>1) cycle + loc_eta = loc_eta + area(ulevels_nod2D(n), n)*eta_n(n) + loc_hbar = loc_hbar + area(ulevels_nod2D(n), n)*hbar(n) + loc_deta = loc_deta + area(ulevels_nod2D(n), n)*d_eta(n) + loc_dhbar = loc_dhbar + area(ulevels_nod2D(n), n)*(hbar(n)-hbar_old(n)) + loc_wflux = loc_wflux + area(ulevels_nod2D(n), n)*water_flux(n) !!PS loc_hflux = loc_hflux + area(1, n)*heat_flux(n) !!PS loc_temp = loc_temp + area(1, n)*sum(tr_arr(:,n,1))/(nlevels_nod2D(n)-1) !!PS loc_salt = loc_salt + area(1, n)*sum(tr_arr(:,n,2))/(nlevels_nod2D(n)-1) @@ -75,11 +76,19 @@ subroutine write_step_info(istep,outfreq, mesh) !!PS call MPI_AllREDUCE(loc_hflux, int_hflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) !!PS call MPI_AllREDUCE(loc_temp , int_temp , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) !!PS call MPI_AllREDUCE(loc_salt , int_salt , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + int_eta = int_eta /ocean_area int_hbar = int_hbar /ocean_area int_deta = int_deta /ocean_area int_dhbar= int_dhbar/ocean_area int_wflux= int_wflux/ocean_area + +!!PS int_eta = int_eta /ocean_areawithcav +!!PS int_hbar = int_hbar /ocean_areawithcav +!!PS int_deta = int_deta /ocean_areawithcav +!!PS int_dhbar= int_dhbar/ocean_areawithcav +!!PS int_wflux= int_wflux/ocean_areawithcav + !!PS int_hflux= int_hflux/ocean_area !!PS int_temp = int_temp /ocean_area !!PS int_salt = int_salt /ocean_area @@ -300,19 +309,20 @@ subroutine check_blowup(istep, mesh) ! check surface vertical velocity --> in case of zlevel and zstar ! vertical coordinate its indicator if Volume is conserved for ! Wvel(1,n)~maschine preccision - if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) .or. abs(Wvel(1,n))>1e-12 )) then +!!PS if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) .or. abs(Wvel(1,n))>1e-12 )) then + if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) )) then found_blowup_loc=1 write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep write(*,*) ' --STOP--> found surface layer vertical velocity becomes NaN or >1e-12' write(*,*) 'mype = ',mype write(*,*) 'mstep = ',istep write(*,*) 'node = ',n + write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad write(*,*) write(*,*) 'Wvel(1, n) = ',Wvel(1,n) write(*,*) 'Wvel(:, n) = ',Wvel(:,n) write(*,*) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) write(*,*) 'hnode(1, n) = ',hnode(1,n) write(*,*) 'hnode(:, n) = ',hnode(:,n) write(*,*) 'hflux = ',heat_flux(n) From b07e7f9bd54b21dd631ba3fdc1a58bf5aaff4ad4 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 19 Apr 2021 12:34:08 +0200 Subject: [PATCH 102/909] switch back compuation of total surface windstress on elements --- src/ice_oce_coupling.F90 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 15db83c06..e85fec10f 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -52,12 +52,12 @@ subroutine oce_fluxes_mom(mesh) !_______________________________________________________________________ elnodes=elem2D_nodes(:,elem) - !!PS stress_surf(1,elem)=sum(stress_iceoce_x(elnodes)*a_ice(elnodes) + & - !!PS stress_atmoce_x(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP - !!PS stress_surf(2,elem)=sum(stress_iceoce_y(elnodes)*a_ice(elnodes) + & - !!PS stress_atmoce_y(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP - stress_surf(1,elem)=sum(stress_node_surf(1,elnodes))/3.0_WP - stress_surf(2,elem)=sum(stress_node_surf(2,elnodes))/3.0_WP + stress_surf(1,elem)=sum(stress_iceoce_x(elnodes)*a_ice(elnodes) + & + stress_atmoce_x(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP + stress_surf(2,elem)=sum(stress_iceoce_y(elnodes)*a_ice(elnodes) + & + stress_atmoce_y(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP + !!PS stress_surf(1,elem)=sum(stress_node_surf(1,elnodes))/3.0_WP + !!PS stress_surf(2,elem)=sum(stress_node_surf(2,elnodes))/3.0_WP END DO !___________________________________________________________________________ From 7502300e24161b3426c32e996ccb2821a1e80c88 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 20 Apr 2021 14:28:17 +0200 Subject: [PATCH 103/909] comment lower cutoff for m_snow --- src/ice_thermo_oce.F90 | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index a37a30a82..ab1e609ac 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -3,6 +3,7 @@ subroutine cut_off(mesh) use o_param use i_arrays use MOD_MESH + use g_config, only: use_cavity use g_parsup implicit none type(t_mesh), intent(in) , target :: mesh @@ -34,18 +35,22 @@ subroutine cut_off(mesh) #endif /* (__oifs) */ end where - ! upper cutoff SH: m_ice - where(m_ice>5.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)<0.0_WP) m_ice=5.0_WP - ! upper cutoff NH: m_ice - where(m_ice>10.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)>0.0_WP) m_ice=10.0_WP - !___________________________________________________________________________ - ! lower cutoff: m_snow - where(m_snow<0.1e-8_WP) m_snow=0.0_WP + if (use_cavity) then + ! upper cutoff SH: m_ice + where(m_ice>5.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)<0.0_WP) m_ice=5.0_WP + + ! upper cutoff NH: m_ice + where(m_ice>10.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)>0.0_WP) m_ice=10.0_WP + + ! upper cutoff: m_snow + where(m_snow>2.5_WP .and. ulevels_nod2d==1) m_snow=2.5_WP + + !___________________________________________________________________________ + ! lower cutoff: m_snow + !!PS where(m_snow<0.1e-8_WP) m_snow=0.0_WP + end if - ! upper cutoff: m_snow - where(m_snow>2.5_WP .and. ulevels_nod2d==1) m_snow=2.5_WP - !___________________________________________________________________________ #if defined (__oifs) where(ice_temp>273.15_WP) ice_temp=273.15_WP From 99ddb3dc47def0583ba11299dcde30b4660c5763 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 20 Apr 2021 18:11:05 +0200 Subject: [PATCH 104/909] check for numeric consistency --- src/oce_ale_tracer.F90 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 7e57cfbc8..914abb2f1 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -524,7 +524,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) Ty =Ty *isredi Ty1=Ty1*isredi ! layer dependent coefficients for for solving dT(nz)/dt+d/dz*K_33*d/dz*T(nz) = ... - a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv*area(nz ,n)/areasvol(nz,n) +!!PS numerics a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv*area(nz ,n)/areasvol(nz,n) + a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv !!PS numerics *area(nz ,n)/areasvol(nz,n) c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/areasvol(nz,n) b(nz)=-a(nz)-c(nz)+hnode_new(nz,n) From dcf37362c2fc2f56ed505873390ea957fc6ca5e6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 20 Apr 2021 22:36:10 +0200 Subject: [PATCH 105/909] try to fix numerical deviation from github testcase --- src/oce_ale_pressure_bv.F90 | 12 +++++++- src/oce_ale_tracer.F90 | 58 ++++++++++++++++++++++++++++++++----- 2 files changed, 62 insertions(+), 8 deletions(-) diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index c4ce88e0c..0feb5cd0a 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -2761,6 +2761,7 @@ subroutine sw_alpha_beta(TF1,SF1, mesh) use o_arrays use g_parsup use o_param + use g_comm_auto implicit none ! type(t_mesh), intent(in) , target :: mesh @@ -2815,6 +2816,8 @@ subroutine sw_alpha_beta(TF1,SF1, mesh) sw_alpha(nz,n) = a_over_b*sw_beta(nz,n) end do end do +call exchange_nod(sw_alpha) +call exchange_nod(sw_beta) end subroutine sw_alpha_beta ! ! @@ -2992,6 +2995,7 @@ SUBROUTINE density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, mesh) USE o_ARRAYS USE o_PARAM use g_PARSUP !, only: par_ex,pe_status +use g_config !, only: which_toy, toy_ocean IMPLICIT NONE real(kind=WP), intent(IN) :: t,s @@ -3005,7 +3009,13 @@ SUBROUTINE density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, mesh) bulk_0 = 1 bulk_pz = 0 bulk_pz2 = 0 - rho_out = density_0 + 0.8_WP*(s - 34.0_WP) - 0.2_WP*(t - 20.0_WP) + + IF((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) THEN + rho_out = density_0 - 0.00025_WP*(t - 10.0_WP)*density_0 + ELSE + rho_out = density_0 + 0.8_WP*(s - 34.0_WP) - 0.2*(t - 20.0_WP) + END IF + end subroutine density_linear ! ! diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 914abb2f1..a75cf140c 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -499,7 +499,17 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) !!PS v_adv =zinv*area(nz+1,n)/areasvol(nz,n) !!PS b(nz) =b(nz)+Wvel_i(nz, n)*zinv-min(0._WP, Wvel_i(nz+1, n))*v_adv !!PS c(nz) =c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv - v_adv =zinv*area(nz ,n)/areasvol(nz,n) + + !___________________________________________________________________ + ! do this here for numerical reasons since area(nz ,n)/areasvol(nz,n) + ! is not exactly 1.0_WP when no cavity is used leads to not binary + ! identical results with github testcase + if (.not. use_cavity) then + !!PS v_adv =zinv!!PS numeric *area(nz ,n)/areasvol(nz,n) + v_adv =zinv + else + v_adv =zinv*area(nz ,n)/areasvol(nz,n) + end if b(nz) =b(nz)+Wvel_i(nz, n)*v_adv v_adv =zinv*area(nz+1,n)/areasvol(nz,n) @@ -523,9 +533,18 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) (zbar_n(nz+1)-Z_n(nz+1 ))*zinv2 *slope_tapered(3,nz+1,n)**2*Ki(nz+1,n) Ty =Ty *isredi Ty1=Ty1*isredi + ! layer dependent coefficients for for solving dT(nz)/dt+d/dz*K_33*d/dz*T(nz) = ... -!!PS numerics a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv*area(nz ,n)/areasvol(nz,n) - a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv !!PS numerics *area(nz ,n)/areasvol(nz,n) + !___________________________________________________________________ + ! do this here for numerical reasons since area(nz ,n)/areasvol(nz,n) + ! is not exactly 1.0_WP when no cavity is used leads to not binary + ! identical results with github testcase + if (.not. use_cavity) then + a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv + else + a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv*area(nz ,n)/areasvol(nz,n) + end if + c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/areasvol(nz,n) b(nz)=-a(nz)-c(nz)+hnode_new(nz,n) @@ -534,8 +553,15 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! update from the vertical advection if (do_wimpl) then - !!PS v_adv=zinv - v_adv=zinv*area(nz ,n)/areasvol(nz,n) + !___________________________________________________________________ + ! do this here for numerical reasons since area(nz ,n)/areasvol(nz,n) + ! is not exactly 1.0_WP when no cavity is used leads to not binary + ! identical results with github testcase + if (.not. use_cavity) then + v_adv=zinv + else + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + end if a(nz)=a(nz)+min(0._WP, Wvel_i(nz, n))*v_adv b(nz)=b(nz)+max(0._WP, Wvel_i(nz, n))*v_adv !!PS v_adv=v_adv*areasvol(nz+1,n)/areasvol(nz,n) @@ -556,14 +582,32 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) (zbar_n(nz)-Z_n(nz)) *zinv1 *slope_tapered(3,nz,n)**2 *Ki(nz,n) Ty =Ty *isredi ! layer dependent coefficients for for solving dT(nz)/dt+d/dz*K_33*d/dz*T(nz) = ... - a(nz)=-(Kv(nz,n)+Ty)*zinv1*zinv*area(nz ,n)/areasvol(nz,n) + + !___________________________________________________________________ + ! do this here for numerical reasons since area(nz ,n)/areasvol(nz,n) + ! is not exactly 1.0_WP when no cavity is used leads to not binary + ! identical results with github testcase + if (.not. use_cavity) then + a(nz)=-(Kv(nz,n)+Ty)*zinv1*zinv + else + a(nz)=-(Kv(nz,n)+Ty)*zinv1*zinv*area(nz ,n)/areasvol(nz,n) + end if c(nz)=0.0_WP b(nz)=-a(nz)+hnode_new(nz,n) ! update from the vertical advection if (do_wimpl) then !!PS v_adv=zinv - v_adv=zinv*area(nz ,n)/areasvol(nz,n) + !___________________________________________________________________ + ! do this here for numerical reasons since area(nz ,n)/areasvol(nz,n) + ! is not exactly 1.0_WP when no cavity is used leads to not binary + ! identical results with github testcase + if (.not. use_cavity) then + !!PS v_adv=zinv!!PS numeric *area(nz ,n)/areasvol(nz,n) + v_adv=zinv + else + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + end if a(nz)=a(nz)+min(0._WP, Wvel_i(nz, n))*v_adv b(nz)=b(nz)+max(0._WP, Wvel_i(nz, n))*v_adv end if From 72f79faa71bd096968d239befe01dd66f6b99487 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 21 Apr 2021 16:41:00 +0200 Subject: [PATCH 106/909] simplify solution to overcome numerical problem when computing area(nz ,n)/areasvol(nz,n) --- src/oce_ale_tracer.F90 | 66 ++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 45 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 61505c100..b0ce14032 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -516,15 +516,10 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) !!PS c(nz) =c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv !___________________________________________________________________ - ! do this here for numerical reasons since area(nz ,n)/areasvol(nz,n) - ! is not exactly 1.0_WP when no cavity is used leads to not binary - ! identical results with github testcase - if (.not. use_cavity) then - !!PS v_adv =zinv!!PS numeric *area(nz ,n)/areasvol(nz,n) - v_adv =zinv - else - v_adv =zinv*area(nz ,n)/areasvol(nz,n) - end if + ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for + ! numerical reasons, to gurante that area/areasvol in case of no + ! cavity is ==1.0_WP + v_adv =zinv* ( area(nz ,n)/areasvol(nz,n) ) b(nz) =b(nz)+Wvel_i(nz, n)*v_adv v_adv =zinv*area(nz+1,n)/areasvol(nz,n) @@ -551,15 +546,10 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! layer dependent coefficients for for solving dT(nz)/dt+d/dz*K_33*d/dz*T(nz) = ... !___________________________________________________________________ - ! do this here for numerical reasons since area(nz ,n)/areasvol(nz,n) - ! is not exactly 1.0_WP when no cavity is used leads to not binary - ! identical results with github testcase - if (.not. use_cavity) then - a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv - else - a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv*area(nz ,n)/areasvol(nz,n) - end if - + ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for + ! numerical reasons, to gurante that area/areasvol in case of no + ! cavity is ==1.0_WP + a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv* ( area(nz ,n)/areasvol(nz,n) ) c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/areasvol(nz,n) b(nz)=-a(nz)-c(nz)+hnode_new(nz,n) @@ -568,15 +558,11 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! update from the vertical advection if (do_wimpl) then - !___________________________________________________________________ - ! do this here for numerical reasons since area(nz ,n)/areasvol(nz,n) - ! is not exactly 1.0_WP when no cavity is used leads to not binary - ! identical results with github testcase - if (.not. use_cavity) then - v_adv=zinv - else - v_adv=zinv*area(nz ,n)/areasvol(nz,n) - end if + !_______________________________________________________________ + ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for + ! numerical reasons, to gurante that area/areasvol in case of no + ! cavity is ==1.0_WP + v_adv=zinv* ( area(nz ,n)/areasvol(nz,n) ) a(nz)=a(nz)+min(0._WP, Wvel_i(nz, n))*v_adv b(nz)=b(nz)+max(0._WP, Wvel_i(nz, n))*v_adv !!PS v_adv=v_adv*areasvol(nz+1,n)/areasvol(nz,n) @@ -599,30 +585,20 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! layer dependent coefficients for for solving dT(nz)/dt+d/dz*K_33*d/dz*T(nz) = ... !___________________________________________________________________ - ! do this here for numerical reasons since area(nz ,n)/areasvol(nz,n) - ! is not exactly 1.0_WP when no cavity is used leads to not binary - ! identical results with github testcase - if (.not. use_cavity) then - a(nz)=-(Kv(nz,n)+Ty)*zinv1*zinv - else - a(nz)=-(Kv(nz,n)+Ty)*zinv1*zinv*area(nz ,n)/areasvol(nz,n) - end if + ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for + ! numerical reasons, to gurante that area/areasvol in case of no + ! cavity is ==1.0_WP + a(nz)=-(Kv(nz,n)+Ty)*zinv1*zinv* ( area(nz ,n)/areasvol(nz,n) ) c(nz)=0.0_WP b(nz)=-a(nz)+hnode_new(nz,n) ! update from the vertical advection if (do_wimpl) then - !!PS v_adv=zinv !___________________________________________________________________ - ! do this here for numerical reasons since area(nz ,n)/areasvol(nz,n) - ! is not exactly 1.0_WP when no cavity is used leads to not binary - ! identical results with github testcase - if (.not. use_cavity) then - !!PS v_adv=zinv!!PS numeric *area(nz ,n)/areasvol(nz,n) - v_adv=zinv - else - v_adv=zinv*area(nz ,n)/areasvol(nz,n) - end if + ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for + ! numerical reasons, to gurante that area/areasvol in case of no + ! cavity is ==1.0_WP + v_adv=zinv* ( area(nz ,n)/areasvol(nz,n) ) a(nz)=a(nz)+min(0._WP, Wvel_i(nz, n))*v_adv b(nz)=b(nz)+max(0._WP, Wvel_i(nz, n))*v_adv end if From 4a95870acaaa3fd89c93c8ce3b6dd4ce81feaa8a Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 28 Apr 2021 10:17:02 +0200 Subject: [PATCH 107/909] density / buoyancy flux computation is addad (no cost, always computed) --- src/gen_modules_diag.F90 | 12 ++++++------ src/ice_oce_coupling.F90 | 7 ++++++- src/io_meandata.F90 | 4 +++- src/oce_modules.F90 | 1 + src/oce_setup_step.F90 | 6 ++++-- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 7dc0540b8..22498f7b3 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -18,7 +18,7 @@ module diagnostics public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, & compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, v_surf, u_bott, v_bott, & - std_dens_min, std_dens_max, std_dens_N, std_dens, std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, dens_flux, & + std_dens_min, std_dens_max, std_dens_N, std_dens, std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, dens_flux_e, & compute_diag_dvd_2ndmoment_klingbeil_etal_2014, compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd ! Arrays used for diagnostics, some shall be accessible to the I/O ! 1. solver diagnostics: A*x=rhs? @@ -50,7 +50,7 @@ module diagnostics real(kind=WP), save, target :: std_dd(std_dens_N-1) real(kind=WP), save, target :: std_dens_min=1030., std_dens_max=1040. real(kind=WP), save, allocatable, target :: std_dens_UVDZ(:,:,:), std_dens_flux(:,:,:), std_dens_dVdT(:,:), std_dens_DIV(:,:), std_dens_Z(:,:) - real(kind=WP), save, allocatable, target :: dens_flux(:) + real(kind=WP), save, allocatable, target :: dens_flux_e(:) logical :: ldiag_solver =.false. logical :: lcurt_stress_surf=.false. @@ -408,7 +408,7 @@ subroutine diag_densMOC(mode, mesh) allocate(std_dens_VOL2( std_dens_N, myDim_elem2D)) allocate(std_dens_flux(3,std_dens_N, myDim_elem2D)) allocate(std_dens_Z ( std_dens_N, myDim_elem2D)) - allocate(dens_flux(elem2D)) + allocate(dens_flux_e(elem2D)) allocate(aux (nl-1)) allocate(dens (nl)) allocate(el_depth(nl)) @@ -438,7 +438,7 @@ subroutine diag_densMOC(mode, mesh) std_dens_UVDZ=0. std_dens_w =0.! temporat thing for wiighting (ageraging) mean fields within a bin std_dens_flux=0. - dens_flux =0. + dens_flux_e =0. std_dens_VOL2=0. std_dens_DIV =0. std_dens_Z =0. @@ -450,11 +450,11 @@ subroutine diag_densMOC(mode, mesh) ! density flux on elements (although not related to binning it might be usefull for diagnostic and to verify the consistency) do jj=1,3 - dens_flux(elem)= dens_flux(elem) + (sw_alpha(ulevels_nod2D(elnodes(jj)),elnodes(jj)) * heat_flux_in(elnodes(jj)) / vcpw + & + dens_flux_e(elem)=dens_flux_e(elem) + (sw_alpha(ulevels_nod2D(elnodes(jj)),elnodes(jj)) * heat_flux_in(elnodes(jj)) / vcpw + & sw_beta(ulevels_nod2D(elnodes(jj)),elnodes(jj)) * (relax_salt (elnodes(jj)) + water_flux(elnodes(jj)) * & tr_arr(ulevels_nod2D(elnodes(jj)),elnodes(jj),2))) end do - dens_flux(elem) = dens_flux(elem)/3.0_WP + dens_flux_e(elem) =dens_flux_e(elem)/3.0_WP ! density_dmoc is the sigma_2 density given at nodes. it is computed in oce_ale_pressure_bv do nz=nzmin, nzmax-1 aux(nz)=sum(density_dmoc(nz, elnodes))/3.-1000. diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 2e8bd57fc..510f432b1 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -250,7 +250,12 @@ subroutine oce_fluxes(mesh) end if virtual_salt=virtual_salt-net/ocean_area end if - + + where (ulevels_nod2d == 1) + dens_flux=sw_alpha(1,:) * heat_flux_in / vcpw + sw_beta(1, :) * (relax_salt + water_flux * tr_arr(1,:,2)) + elsewhere + dens_flux=0.0_WP + end where !___________________________________________________________________________ ! balance SSS restoring to climatology if (use_cavity) then diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 23a827d8b..86c9fdf91 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -207,6 +207,8 @@ subroutine ini_mean_io(mesh) call def_stream(nod2D, myDim_nod2D, 'alpha', 'thermal expansion', 'none', sw_alpha(1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('beta ') call def_stream(nod2D, myDim_nod2D, 'beta', 'saline contraction', 'none', sw_beta (1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) +CASE ('dens_flux ') + call def_stream(nod2D, myDim_nod2D , 'dflux', 'density flux', 'kg/(m3*s)', dens_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('runoff ') sel_forcvar(10)= 1 call def_stream(nod2D, myDim_nod2D, 'runoff', 'river runoff', 'none', runoff(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) @@ -358,7 +360,7 @@ subroutine ini_mean_io(mesh) call def_stream((/std_dens_N, nod2D /), (/std_dens_N, myDim_nod2D/), 'std_dens_DIV', 'm3/s', 'm3/s' ,std_dens_DIV(:,:), 1, 'y', i_real4, mesh) call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_dens_Z', 'm', 'm' ,std_dens_Z(:,:), 1, 'y', i_real4, mesh) call def_stream((/nl-1, nod2D /), (/nl-1, myDim_nod2D /), 'density_dMOC', 'density' , 'm', density_dmoc(:,:), 1, 'y', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D , 'density_flux', 'density' , 'm', dens_flux(:), 1, 'y', i_real4, mesh) + call def_stream(elem2D, myDim_elem2D , 'density_flux_e', 'density flux at elems ', 'm', dens_flux_e(:), 1, 'y', i_real4, mesh) end if !___________________________________________________________________________________________________________________________________ CASE ('pgf_x ') diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 6ac8506bc..6597bcee2 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -272,6 +272,7 @@ MODULE o_ARRAYS real(kind=WP), allocatable,dimension(:,:,:) :: slope_tapered real(kind=WP), allocatable,dimension(:,:,:) :: sigma_xy real(kind=WP), allocatable,dimension(:,:) :: sw_beta, sw_alpha +real(kind=WP), allocatable,dimension(:) :: dens_flux !real(kind=WP), allocatable,dimension(:,:,:) :: tsh, tsv, tsh_nodes !real(kind=WP), allocatable,dimension(:,:) :: hd_flux,vd_flux !Isoneutral diffusivities (or xy diffusivities if Redi=.false) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 0d9dc083b..02aa93f91 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -344,8 +344,10 @@ SUBROUTINE array_setup(mesh) ! alpha and beta in the EoS allocate(sw_beta(nl-1, node_size), sw_alpha(nl-1, node_size)) -sw_beta=0.0_WP -sw_alpha=0.0_WP +allocate(dens_flux(node_size)) +sw_beta =0.0_WP +sw_alpha =0.0_WP +dens_flux=0.0_WP if (Fer_GM) then allocate(fer_c(node_size),fer_scal(node_size), fer_gamma(2, nl, node_size), fer_K(nl, node_size)) From 38b437b6387271c36a7cc523551437d86792e532 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 29 Apr 2021 12:26:25 +0200 Subject: [PATCH 108/909] fix bug of sea-ice height accumulation along the cavity-ocean edge by ensuring uice,vice == 0 boundary condition along the cavity edge --- src/ice_EVP.F90 | 57 ++++++++++++++++++++++---------- src/ice_fct.F90 | 11 ------- src/ice_maEVP.F90 | 74 ++++++++++++++++++++++++++++++++---------- src/ice_setup_step.F90 | 15 +++++++++ src/ice_thermo_oce.F90 | 28 ++++++++-------- 5 files changed, 124 insertions(+), 61 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 2cf727698..fcfd8d224 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -58,7 +58,6 @@ subroutine stress_tensor(ice_strength, mesh) do el=1,myDim_elem2D !__________________________________________________________________________ ! if element contains cavity node skip it - !!PS if ( any(ulevels_nod2d(elem2D_nodes(:,el)) > 1) ) cycle if (ulevels(el) > 1) cycle ! ===== Check if there is ice on elem @@ -166,7 +165,6 @@ subroutine stress_tensor_no1(ice_strength, mesh) do el=1,myDim_elem2D !__________________________________________________________________________ ! if element contains cavity node skip it - !!PS if ( any(ulevels_nod2d(elem2D_nodes(:,el)) > 1) ) cycle if (ulevels(el) > 1) cycle ! ===== Check if there is ice on elem @@ -203,7 +201,10 @@ subroutine stress_tensor_no1(ice_strength, mesh) ! ===== if delta is too small or zero, viscosity will too large (unlimited) ! (limit delta_inv) - delta_inv = 1.0_WP/max(delta,delta_min) + delta_inv = 1.0_WP/max(delta,delta_min) + +!!PS delta_inv = delta/(delta+delta_min) + zeta = ice_strength(el)*delta_inv ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta ! is too large in some regions and CFL criterion is violated. @@ -218,7 +219,7 @@ subroutine stress_tensor_no1(ice_strength, mesh) !end if zeta = zeta*Tevp_inv - + r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*Tevp_inv r2 = zeta*(eps11(el)-eps22(el))*vale r3 = zeta*eps12(el)*vale @@ -247,6 +248,7 @@ subroutine stress2rhs_e(mesh) USE i_therm_param USE i_arrays USE g_PARSUP +use g_config, only: use_cavity IMPLICIT NONE @@ -266,7 +268,13 @@ subroutine stress2rhs_e(mesh) DO ed=1,myDim_edge2D ednodes=edges(:,ed) el=edge_tri(:,ed) - if(myList_edge2D(ed)>edge2D_in) cycle + if(myList_edge2D(ed)>edge2D_in) cycle + + ! stress boundary condition at ocean cavity boundary edge ==0 + if (use_cavity) then + if ( (ulevels(el(1))>1) .or. ( el(2)>0 .and. ulevels(el(2))>1) ) cycle + end if + ! elements on both sides uc = - sigma12(el(1))*edge_cross_dxdy(1,ed) + sigma11(el(1))*edge_cross_dxdy(2,ed) & + sigma12(el(2))*edge_cross_dxdy(3,ed) - sigma11(el(2))*edge_cross_dxdy(4,ed) @@ -345,7 +353,6 @@ subroutine stress2rhs(inv_areamass, ice_strength, mesh) ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE !____________________________________________________________________________ ! if element contains cavity node skip it - !!OS if ( any(ulevels_nod2d(elem2D_nodes(:,el)) > 1) ) cycle if (ulevels(el) > 1) cycle !____________________________________________________________________________ @@ -488,7 +495,6 @@ subroutine EVPdynamics(mesh) elnodes = elem2D_nodes(:,el) !_______________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(el) > 1) cycle !_______________________________________________________________________ @@ -542,7 +548,6 @@ subroutine EVPdynamics(mesh) elnodes = elem2D_nodes(:,el) !_______________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(el) > 1) cycle !_______________________________________________________________________ @@ -601,7 +606,7 @@ subroutine EVPdynamics(mesh) do n=1,myDim_nod2D !_________________________________________________________________________ - ! if cavity ndoe skip it + ! if cavity node skip it if ( ulevels_nod2d(n)>1 ) cycle !_________________________________________________________________________ @@ -631,15 +636,31 @@ subroutine EVPdynamics(mesh) end if end do - DO ed=1,myDim_edge2D - ! boundary conditions - if(myList_edge2D(ed) > edge2D_in) then - U_ice(edges(1:2,ed))=0.0_WP - V_ice(edges(1:2,ed))=0.0_WP - endif - end do - - call exchange_nod(U_ice,V_ice) + + !___________________________________________________________________________ + ! apply sea ice velocity boundary condition + DO ed=1,myDim_edge2D + !_______________________________________________________________________ + ! apply coastal sea ice velocity boundary conditions + if(myList_edge2D(ed) > edge2D_in) then + U_ice(edges(1:2,ed))=0.0_WP + V_ice(edges(1:2,ed))=0.0_WP + endif + + !_______________________________________________________________________ + ! apply sea ice velocity boundary conditions at cavity-ocean edge + if (use_cavity) then + if ( (ulevels(edge_tri(1,ed))>1) .or. & + ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then + U_ice(edges(1:2,ed))=0.0_WP + V_ice(edges(1:2,ed))=0.0_WP + end if + end if + + end do + + !___________________________________________________________________________ + call exchange_nod(U_ice,V_ice) END DO diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 501b9ac36..fb91b370c 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -66,8 +66,6 @@ subroutine ice_TG_rhs(mesh) !_______________________________________________________________________ ! if cavity element skip it if (ulevels(elem)>1) cycle - if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 - !derivatives dx=gradient_sca(1:3,elem) @@ -82,7 +80,6 @@ subroutine ice_TG_rhs(mesh) diff=ice_diff*sqrt(elem_area(elem)/scale_area) DO n=1,3 row=elnodes(n) -!!PS if (ulevels_nod2D(row)>1) cycle DO q = 1,3 !entries(q)= vol*dt*((dx(n)*um+dy(n)*vm)/3.0_WP - & ! diff*(dx(n)*dx(q)+ dy(n)*dy(q))- & @@ -375,7 +372,6 @@ subroutine ice_fem_fct(tr_array_id, mesh) !_______________________________________________________________________ ! if cavity cycle over - if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ @@ -483,7 +479,6 @@ subroutine ice_fem_fct(tr_array_id, mesh) !_______________________________________________________________________ ! if cavity cycle over - if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ @@ -532,7 +527,6 @@ subroutine ice_fem_fct(tr_array_id, mesh) !_______________________________________________________________________ ! if cavity cycle over - if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ @@ -559,7 +553,6 @@ subroutine ice_fem_fct(tr_array_id, mesh) !___________________________________________________________________ ! if cavity cycle over - if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 do q=1,3 @@ -579,7 +572,6 @@ subroutine ice_fem_fct(tr_array_id, mesh) !___________________________________________________________________ ! if cavity cycle over - if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 do q=1,3 @@ -599,7 +591,6 @@ subroutine ice_fem_fct(tr_array_id, mesh) !___________________________________________________________________ ! if cavity cycle over - if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 do q=1,3 @@ -619,7 +610,6 @@ subroutine ice_fem_fct(tr_array_id, mesh) elnodes=elem2D_nodes(:,elem) !___________________________________________________________________ ! if cavity cycle over - if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 do q=1,3 @@ -759,7 +749,6 @@ subroutine ice_TG_rhs_div(mesh) !___________________________________________________________________________ ! if cavity element skip it if (ulevels(elem)>1) cycle - if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 !derivatives dx=gradient_sca(1:3,elem) diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 365307566..f5aedca3d 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -66,7 +66,6 @@ subroutine stress_tensor_m(mesh) elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle msum=sum(m_ice(elnodes))*val3 @@ -162,7 +161,6 @@ subroutine ssh2rhs(mesh) elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle !_______________________________________________________________________ @@ -189,7 +187,6 @@ subroutine ssh2rhs(mesh) elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle vol=elem_area(elem) @@ -239,7 +236,6 @@ subroutine stress2rhs_m(mesh) elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle if(sum(a_ice(elnodes)) < 0.01_WP) cycle !DS @@ -355,7 +351,6 @@ subroutine EVPdynamics_m(mesh) !_______________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(el) > 1) cycle !_______________________________________________________________________ @@ -384,7 +379,6 @@ subroutine EVPdynamics_m(mesh) elnodes=elem2D_nodes(:,el) !_______________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(el) > 1) cycle vol=elem_area(el) @@ -431,7 +425,6 @@ subroutine EVPdynamics_m(mesh) !_______________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(el) > 1) cycle msum=sum(m_ice(elnodes))*val3 @@ -468,7 +461,7 @@ subroutine EVPdynamics_m(mesh) do el=1,myDim_elem2D !__________________________________________________________________________ if (ulevels(el)>1) cycle - if ( any(ulevels_nod2d(elnodes)>1) ) cycle + !__________________________________________________________________________ if(ice_el(el)) then @@ -538,7 +531,7 @@ subroutine EVPdynamics_m(mesh) (sigma12(el)*dx(3)+sigma22(el)*dy(3) - sigma11(el)*meancos) ! metrics end if end if - end do + end do ! --> do el=1,myDim_elem2D do i=1, myDim_nod2d !__________________________________________________________________________ @@ -568,20 +561,44 @@ subroutine EVPdynamics_m(mesh) u_ice_aux(i) = det*((1.0_WP+beta_evp+drag)*rhsu +rdt*coriolis_node(i)*rhsv) v_ice_aux(i) = det*((1.0_WP+beta_evp+drag)*rhsv -rdt*coriolis_node(i)*rhsu) end if - end do + end do ! --> do i=1, myDim_nod2d - call exchange_nod_begin(u_ice_aux, v_ice_aux) + !___________________________________________________________________________ + ! apply sea ice velocity boundary condition + do ed=1,myDim_edge2D + !_______________________________________________________________________ + ! apply coastal sea ice velocity boundary conditions + if(myList_edge2D(ed) > edge2D_in) then + u_ice_aux(edges(:,ed))=0.0_WP + v_ice_aux(edges(:,ed))=0.0_WP + end if + + !_______________________________________________________________________ + ! apply sea ice velocity boundary conditions at cavity-ocean edge + if (use_cavity) then + if ( (ulevels(edge_tri(1,ed))>1) .or. & + ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then + u_ice_aux(edges(1:2,ed))=0.0_WP + v_ice_aux(edges(1:2,ed))=0.0_WP + end if + end if + end do ! --> do ed=1,myDim_edge2D + + !___________________________________________________________________________ + call exchange_nod_begin(u_ice_aux, v_ice_aux) - do row=1, myDim_nod2d + do row=1, myDim_nod2d u_rhs_ice(row)=0.0_WP v_rhs_ice(row)=0.0_WP - end do + end do - call exchange_nod_end - end do + call exchange_nod_end + + end do ! --> do shortstep=1, steps u_ice=u_ice_aux v_ice=v_ice_aux + end subroutine EVPdynamics_m ! ! @@ -624,7 +641,6 @@ subroutine find_alpha_field_a(mesh) elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle msum=sum(m_ice(elnodes))*val3 @@ -700,7 +716,6 @@ subroutine stress_tensor_a(mesh) do elem=1,myDim_elem2D !__________________________________________________________________________ ! if element has any cavity node skip it - if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle !__________________________________________________________________________ @@ -782,6 +797,7 @@ subroutine EVPdynamics_a(mesh) use o_PARAM use i_therm_param use g_parsup +use g_config, only: use_cavity use g_comm_auto use ice_maEVP_interfaces @@ -830,7 +846,7 @@ subroutine EVPdynamics_a(mesh) rhsv=v_ice(i)+drag*v_w(i)+rdt*(inv_thickness*stress_atmice_y(i)+v_rhs_ice(i)) rhsu=beta_evp_array(i)*u_ice_aux(i)+rhsu - rhsv=beta_evp_array(i)*v_ice_aux(i)+rhsv + rhsv=beta_evp_array(i)*v_ice_aux(i)+rhsv !solve (Coriolis and water stress are treated implicitly) fc=rdt*coriolis_node(i) det=(1.0_WP+beta_evp_array(i)+drag)**2+fc**2 @@ -838,6 +854,28 @@ subroutine EVPdynamics_a(mesh) u_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsu+fc*rhsv) v_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsv-fc*rhsu) end do + + !___________________________________________________________________________ + ! apply sea ice velocity boundary condition + do ed=1,myDim_edge2D + !_______________________________________________________________________ + ! apply coastal sea ice velocity boundary conditions + if(myList_edge2D(ed) > edge2D_in) then + u_ice_aux(edges(:,ed))=0.0_WP + v_ice_aux(edges(:,ed))=0.0_WP + end if + + !_______________________________________________________________________ + ! apply sea ice velocity boundary conditions at cavity-ocean edge + if (use_cavity) then + if ( (ulevels(edge_tri(1,ed))>1) .or. & + ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then + u_ice_aux(edges(1:2,ed))=0.0_WP + v_ice_aux(edges(1:2,ed))=0.0_WP + end if + end if + end do ! --> do ed=1,myDim_edge2D + call exchange_nod(u_ice_aux, v_ice_aux) end do diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 1d56e957d..183b2ba5e 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -242,6 +242,21 @@ subroutine ice_timestep(step, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call thermodynamics...'//achar(27)//'[0m' call thermodynamics(mesh) #endif /* (__icepack) */ + + + do i=1,myDim_nod2D+eDim_nod2D + if ( ( U_ice(i)/=0.0_WP .and. mesh%ulevels_nod2d(i)>1) .or. (V_ice(i)/=0.0_WP .and. mesh%ulevels_nod2d(i)>1) ) then + write(*,*) " --> found cavity velocity /= 0.0_WP , ", mype + write(*,*) " ulevels_nod2d(n) = ", mesh%ulevels_nod2d(i) + write(*,*) " U_ice(n) = ", U_ice(i) + write(*,*) " V_ice(n) = ", V_ice(i) + write(*,*) + end if + end do + + + + t3=MPI_Wtime() rtime_ice = rtime_ice + (t3-t0) rtime_tot = rtime_tot + (t3-t0) diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index ab1e609ac..9e12224dc 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -36,20 +36,20 @@ subroutine cut_off(mesh) end where - if (use_cavity) then - ! upper cutoff SH: m_ice - where(m_ice>5.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)<0.0_WP) m_ice=5.0_WP - - ! upper cutoff NH: m_ice - where(m_ice>10.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)>0.0_WP) m_ice=10.0_WP - - ! upper cutoff: m_snow - where(m_snow>2.5_WP .and. ulevels_nod2d==1) m_snow=2.5_WP - - !___________________________________________________________________________ - ! lower cutoff: m_snow - !!PS where(m_snow<0.1e-8_WP) m_snow=0.0_WP - end if +!!PS if (use_cavity) then +!!PS ! upper cutoff SH: m_ice +!!PS where(m_ice>5.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)<0.0_WP) m_ice=5.0_WP +!!PS +!!PS ! upper cutoff NH: m_ice +!!PS where(m_ice>10.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)>0.0_WP) m_ice=10.0_WP +!!PS +!!PS ! upper cutoff: m_snow +!!PS where(m_snow>2.5_WP .and. ulevels_nod2d==1) m_snow=2.5_WP +!!PS +!!PS !___________________________________________________________________________ +!!PS ! lower cutoff: m_snow +!!PS !!PS where(m_snow<0.1e-8_WP) m_snow=0.0_WP +!!PS end if !___________________________________________________________________________ #if defined (__oifs) From 2fae97e183fef007eb711e206889db4bf5ee3898 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 29 Apr 2021 13:19:24 +0200 Subject: [PATCH 109/909] trim filename in src/gen_surface_forcing.F90 since it can be very long now --- src/gen_surface_forcing.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index 151e2c8ca..eb9b5eb5d 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -676,7 +676,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) delta_t = 1.0_wp if (mype==0) then write(*,*) 'WARNING: no temporal extrapolation into future (nearest neighbour is used): ', trim(var_name), ' !' - write(*,*) file_name + write(*,*) trim(file_name) write(*,*) nc_time(1), nc_time(nc_Ntime), now_date end if elseif (t_indx < 1) then ! NO extrapolation back in time @@ -685,7 +685,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) delta_t = 1.0_wp if (mype==0) then write(*,*) 'WARNING: no temporal extrapolation back in time (nearest neighbour is used): ', trim(var_name), ' !' - write(*,*) file_name + write(*,*) trim(file_name) write(*,*) nc_time(1), nc_time(nc_Ntime), now_date end if end if From e9fea1b30f51e372f2e4514ba0d6003a4054562b Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 29 Apr 2021 15:43:10 +0200 Subject: [PATCH 110/909] do elemreducelvl and elemfixlvl array as logicals instead of integer, should reduce memory demand for large meshes --- src/fvom_init.F90 | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 79f49530b..1b57fa45a 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -887,7 +887,7 @@ subroutine find_levels_cavity(mesh) real(kind=WP) :: dmean character(MAX_PATH) :: file_name integer, allocatable, dimension(:,:) :: numelemtonode, idxelemtonode - integer, allocatable, dimension(:) :: elemreducelvl, elemfixlvl + logical, allocatable, dimension(:) :: elemreducelvl, elemfixlvl type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" !___________________________________________________________________________ @@ -955,10 +955,10 @@ subroutine find_levels_cavity(mesh) ! outer iteration loop count_iter2 = 0 exit_flag2 = 0 - elemfixlvl = 0 + elemfixlvl = .false. do while((exit_flag2==0) .and. (count_iter2 in case make the levels of all sorounding ! triangles shallower - if ( (nlevels(elem)-(nz+1))>=3 .and. elemreducelvl(elem)==0 .and. elemfixlvl(elem)==0) then + if ( (nlevels(elem)-(nz+1))>=3 .and. elemreducelvl(elem)==.false. .and. elemfixlvl(elem)==.false.) then ulevels(elem)=nz+1 else ! --> can not increase depth anymore to eleminate isolated @@ -1021,7 +1021,7 @@ subroutine find_levels_cavity(mesh) ! to nz idx = minloc(ulevels(elems)-nz, 1, MASK=( (elems>0) .and. ((ulevels(elems)-nz)>0) ) ) ulevels(elems(idx)) = nz-1 - elemreducelvl(elems(idx)) = elemreducelvl(elems(idx))+1 + elemreducelvl(elems(idx)) = .true. end if !force recheck for all current ocean cells @@ -1132,7 +1132,7 @@ subroutine find_levels_cavity(mesh) elem=nod_in_elem2D(j,node) if (ulevels(elem)>nz) then ulevels(elem) = nz - elemfixlvl(elem) = elemfixlvl(elem)+1 + elemfixlvl(elem) = .true. end if end do end if @@ -1161,7 +1161,7 @@ subroutine find_levels_cavity(mesh) !_______________________________________________________________________ end do - deallocate(elemreducelvl) + deallocate(elemreducelvl,elemfixlvl) deallocate(numelemtonode,idxelemtonode) !___________________________________________________________________________ From 401a225dadbad80964d87ac5f06857880e02b9ce Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 29 Apr 2021 16:36:48 +0200 Subject: [PATCH 111/909] reduce size of numelemtonode idxelemtonode in fvom_init.F90 --- src/fvom_init.F90 | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 1b57fa45a..98eea6b45 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -886,7 +886,7 @@ subroutine find_levels_cavity(mesh) integer :: exit_flag1, count_iter, max_iter=1000, exit_flag2, count_iter2, max_iter2=10 real(kind=WP) :: dmean character(MAX_PATH) :: file_name - integer, allocatable, dimension(:,:) :: numelemtonode, idxelemtonode + integer, allocatable, dimension(:) :: numelemtonode, idxelemtonode logical, allocatable, dimension(:) :: elemreducelvl, elemfixlvl type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" @@ -949,7 +949,7 @@ subroutine find_levels_cavity(mesh) ! possible in FESOM2.0 ! loop over all cavity levels allocate(elemreducelvl(elem2d),elemfixlvl(elem2d)) - allocate(numelemtonode(nl,nod2D),idxelemtonode(nl,nod2D)) + allocate(numelemtonode(nl),idxelemtonode(nl)) !___________________________________________________________________________ ! outer iteration loop @@ -1093,35 +1093,35 @@ subroutine find_levels_cavity(mesh) !_______________________________________________________________________ ! compute how many triangle elements contribute to every vertice in every layer - numelemtonode=0 - idxelemtonode=0 + count_iter=0 do node=1, nod2D + !___________________________________________________________________ + numelemtonode=0 + idxelemtonode=0 + + !___________________________________________________________________ + ! compute how many triangle elements contribute to vertice in every layer do j=1,nod_in_elem2D_num(node) elem=nod_in_elem2D(j,node) do nz=ulevels(elem),nlevels(elem)-1 - numelemtonode(nz,node) = numelemtonode(nz,node) + 1 - idxelemtonode(nz,node) = elem + numelemtonode(nz) = numelemtonode(nz) + 1 + idxelemtonode(nz) = elem end do end do - end do ! --> do node=1, nod2D - - !_______________________________________________________________________ - ! check if every vertice in every layer should be connected to at least - ! two triangle elements ! - count_iter=0 - do node=1, nod2D !___________________________________________________________________ + ! check if every vertice in every layer should be connected to at least + ! two triangle elements ! do nz = ulevels_nod2D(node), nlevels_nod2D(node)-1 !_______________________________________________________________ ! nodes has zero neighbouring triangles and is completely isolated ! need to adapt ulevels by hand --> inflicts another outher ! iteration loop (exit_flag2=0) - if (numelemtonode(nz,node)==0) then + if (numelemtonode(nz)==0) then exit_flag2 = 0 count_iter = count_iter+1 - write(*,"( A, I1, A, I7, A, I3)") ' -[check]->: node has only ', numelemtonode(nz,node) ,' triangle: n=', node, ', nz=',nz + write(*,"( A, I1, A, I7, A, I3)") ' -[check]->: node has only ', numelemtonode(nz) ,' triangle: n=', node, ', nz=',nz !___________________________________________________________ ! if node has no neighboring triangle somewhere in the middle ! of the water column at nz (can happen but seldom) than set @@ -1140,10 +1140,10 @@ subroutine find_levels_cavity(mesh) !_______________________________________________________________ ! nodes has just one neighbouring triangle --> but needs two --> ! inflicts another outher iteration loop (exit_flag2=0) - if (numelemtonode(nz,node)==1) then + if (numelemtonode(nz)==1) then exit_flag2 = 0 count_iter = count_iter+1 - write(*,"( A, I1, A, I7, A, I3)") ' -[check]->: node has only ', numelemtonode(nz,node) ,' triangle: n=', node, ', nz=',nz + write(*,"( A, I1, A, I7, A, I3)") ' -[check]->: node has only ', numelemtonode(nz) ,' triangle: n=', node, ', nz=',nz end if end do ! --> do nz = ulevels_nod2D(node), nlevels_nod2D(node)-1 From 229b8d6fa3e67b2f52abfb365208dce6a019d891 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 29 Apr 2021 16:37:58 +0200 Subject: [PATCH 112/909] reduce size of numelemtonode in oce_mesh.F90: subroutine find_levels_cavity and ensure deallocation --- src/oce_mesh.F90 | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index fb91adc32..7d37a8372 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -916,7 +916,7 @@ subroutine find_levels_cavity(mesh) real(kind=WP) :: t0, t1 logical :: file_exist=.False. integer :: elem, elnodes(3), ule, uln(3), node, j, nz - integer, allocatable, dimension(:,:) :: numelemtonode + integer, allocatable, dimension(:) :: numelemtonode !NR Cannot include the pointers before the targets are allocated... !NR #include "associate_mesh.h" @@ -1297,27 +1297,29 @@ subroutine find_levels_cavity(mesh) !___________________________________________________________________________ - allocate(numelemtonode(mesh%nl,myDim_nod2d+eDim_nod2D)) - numelemtonode=0 + allocate(numelemtonode(mesh%nl)) do node=1, myDim_nod2D+eDim_nod2D + numelemtonode=0 + !_______________________________________________________________________ do j=1,mesh%nod_in_elem2D_num(node) elem=mesh%nod_in_elem2D(j,node) do nz=mesh%ulevels(elem),mesh%nlevels(elem)-1 - numelemtonode(nz,node) = numelemtonode(nz,node) + 1 + numelemtonode(nz) = numelemtonode(nz) + 1 end do end do - end do - - ! check how many triangle elements contribute to every vertice in every layer - ! every vertice in every layer should be connected to at least two triangle - ! elements ! - do node=1, myDim_nod2D+eDim_nod2D - do nz=1,mesh%nl - if (numelemtonode(nz,node)== 1) then + + !_______________________________________________________________________ + ! check how many triangle elements contribute to every vertice in every layer + ! every vertice in every layer should be connected to at least two triangle + ! elements ! + do nz=mesh%ulevels_nod2D(node),mesh%nlevels_nod2D(node)-1 + if (numelemtonode(nz)== 1) then write(*,*) 'ERROR A: found vertice with just one triangle:', mype, node, nz end if end do - end do + + end do + deallocate(numelemtonode) end subroutine find_levels_cavity ! From 03e7aacce2a059a3a507c7588735701b8af9448c Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 29 Apr 2021 16:44:38 +0200 Subject: [PATCH 113/909] exchange logical comparison == with .eqv. --- src/fvom_init.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 98eea6b45..14bc79742 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -1011,7 +1011,9 @@ subroutine find_levels_cavity(mesh) ! except when this levels would remain less than 3 valid ! bottom levels --> in case make the levels of all sorounding ! triangles shallower - if ( (nlevels(elem)-(nz+1))>=3 .and. elemreducelvl(elem)==.false. .and. elemfixlvl(elem)==.false.) then + if ( (nlevels(elem)-(nz+1))>=3 .and. & + elemreducelvl(elem) .eqv. .false. .and. & + elemfixlvl(elem) .eqv. .false.) then ulevels(elem)=nz+1 else ! --> can not increase depth anymore to eleminate isolated From 745949a89d7a879f8abd951782bef31dd972023c Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 30 Apr 2021 13:04:28 +0200 Subject: [PATCH 114/909] fixed a bug in PPM. Another thing is that due to the lagrangian nature of the scheme it shall not be used in combination with Adams-Bashforth. We shall hide this option in the namelist.oce for now but it hopefully will be supported in the next release. --- src/oce_adv_tra_ver.F90 | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index cd07d947f..06986e724 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -390,18 +390,16 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) nzmax=nlevels_nod2D(n) nzmin=ulevels_nod2D(n) - ! tracer at surface layer - tv(nzmin)=ttf(nzmin,n) - - ! tracer at surface+1 layer - ! tv(2)=-ttf(1,n)*min(sign(1.0, W(2,n)), 0._WP)+ttf(2,n)*max(sign(1.0, W(2,n)), 0._WP) - tv(nzmin+1)=0.5*(ttf(nzmin,n)+ttf(nzmin+1,n)) - - ! tacer at bottom-1 layer - !tv(nzmax-1)=-ttf(nzmax-2,n)*min(sign(1.0, W(nzmax-1,n)), 0._WP)+ttf(nzmax-1,n)*max(sign(1.0, W(nzmax-1,n)), 0._WP) - tv(nzmax-1)=0.5_WP*(ttf(nzmax-2,n)+ttf(nzmax-1,n)) - - ! tracer at bottom layer + ! tracer at surface level + tv(nzmin)=ttf(nzmin,n) + ! tracer at surface+1 level +! tv(2)=-ttf(1,n)*min(sign(1.0, W(2,n)), 0._WP)+ttf(2,n)*max(sign(1.0, W(2,n)), 0._WP) +! tv(3)=-ttf(2,n)*min(sign(1.0, W(3,n)), 0._WP)+ttf(3,n)*max(sign(1.0, W(3,n)), 0._WP) + tv(nzmin+1)=0.5*(ttf(nzmin, n)+ttf(nzmin+1,n)) + ! tacer at bottom-1 level + tv(nzmax-1)=-ttf(nzmax-2,n)*min(sign(1.0, W(nzmax-1,n)), 0._WP)+ttf(nzmax-1,n)*max(sign(1.0, W(nzmax-1,n)), 0._WP) +! tv(nzmax-1)=0.5_WP*(ttf(nzmax-2,n)+ttf(nzmax-1,n)) + ! tracer at bottom level tv(nzmax)=ttf(nzmax-1,n) !_______________________________________________________________________ @@ -409,7 +407,7 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) ! see Colella and Woodward, JCP, 1984, 174-201 --> equation (1.9) ! loop over layers (segments) !!PS do nz=3, nzmax-3 - do nz=nzmin+2, nzmax-2 + do nz=nzmin+1, nzmax-3 !___________________________________________________________________ ! for uniform spaced vertical grids --> piecewise parabolic method (ppm) ! equation (1.9) @@ -419,10 +417,10 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) ! for non-uniformity spaced vertical grids --> piecewise parabolic ! method (ppm) see see Colella and Woodward, JCP, 1984, 174-201 ! --> full equation (1.6), (1.7) and (1.8) - dzjm1 = hnode_new(nz-2,n) - dzj = hnode_new(nz-1,n) - dzjp1 = hnode_new(nz,n) - dzjp2 = hnode_new(nz+1,n) + dzjm1 = hnode_new(nz-1,n) + dzj = hnode_new(nz ,n) + dzjp1 = hnode_new(nz+1,n) + dzjp2 = hnode_new(nz+2,n) ! Be carefull here vertical operation have to be done on NEW vertical mesh !!! !___________________________________________________________________ From 0ba4c58b95d7f3ff6905ea6343fbb4e78876d124 Mon Sep 17 00:00:00 2001 From: patrickscholz Date: Mon, 3 May 2021 14:34:20 +0200 Subject: [PATCH 115/909] Update sub_climatology.py be sure that input for sw.ptmp(valueS, valueT, np.abs(depth3d)) gets positive depth value --- view_pscholz/sub_climatology.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/view_pscholz/sub_climatology.py b/view_pscholz/sub_climatology.py index aa868775a..dfe5a8aa6 100644 --- a/view_pscholz/sub_climatology.py +++ b/view_pscholz/sub_climatology.py @@ -92,7 +92,7 @@ def __init__(self,path,fname,var=[]): depth3d = np.zeros(valueS.shape) for di in range(0,self.depth.size): depth3d[di,:,:] = self.depth[di] - self.value = sw.ptmp(valueS, valueT, depth3d) + self.value = sw.ptmp(valueS, valueT, np.abs(depth3d)) elif var=='salt': self.value = valueS From 41f394b839f3ac3be44f62eae318d652d5e060f2 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 22 Apr 2021 16:35:09 +0200 Subject: [PATCH 116/909] - read options passed to the main executable - add --smoketest option --- src/command_line_options.F90 | 37 ++++++++++++++++++++++++++++++++++++ src/fvom_main.F90 | 6 ++++++ 2 files changed, 43 insertions(+) create mode 100644 src/command_line_options.F90 diff --git a/src/command_line_options.F90 b/src/command_line_options.F90 new file mode 100644 index 000000000..cd1a9ab98 --- /dev/null +++ b/src/command_line_options.F90 @@ -0,0 +1,37 @@ +module command_line_options_module +! synopsis: read options passed to the main executable and trigger corresponding actions + + implicit none + public command_line_options + private + + type :: command_line_options_type + contains + procedure, nopass :: parse + end type + type(command_line_options_type) command_line_options + +contains + + subroutine parse() + integer i + character(len=:), allocatable :: arg + integer arglength + + do i = 1, command_argument_count() + call get_command_argument(i, length=arglength) + allocate(character(arglength) :: arg) + call get_command_argument(i, value=arg) + select case (arg) + case('--smoketest') + print '(g0)', 'smoketest' + case default + print *, 'unknown option: ', arg + error stop + end select + deallocate(arg) + end do + + end subroutine + +end module diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 4c9f0fb97..7f0dd2725 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -23,6 +23,7 @@ program main use diagnostics use mo_tidal use fesom_version_info_module +use command_line_options_module ! Define icepack module #if defined (__icepack) @@ -45,6 +46,11 @@ program main type(t_mesh), target, save :: mesh + if(command_argument_count() > 0) then + call command_line_options%parse() + stop + end if + #ifndef __oifs !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS From 51dd7381b6368d329681aa0098fc122c4c5ccf34 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 23 Apr 2021 15:23:05 +0200 Subject: [PATCH 117/909] - add procedure to print the status of preprocessor definitions - add --info option to the FESOM executable --- src/command_line_options.F90 | 4 ++ src/info_module.F90 | 92 ++++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 src/info_module.F90 diff --git a/src/command_line_options.F90 b/src/command_line_options.F90 index cd1a9ab98..369150964 100644 --- a/src/command_line_options.F90 +++ b/src/command_line_options.F90 @@ -14,6 +14,7 @@ module command_line_options_module contains subroutine parse() + use info_module integer i character(len=:), allocatable :: arg integer arglength @@ -25,6 +26,9 @@ subroutine parse() select case (arg) case('--smoketest') print '(g0)', 'smoketest' + case('--info') + print '(g0)', '# Definitions' + call info%print_definitions() case default print *, 'unknown option: ', arg error stop diff --git a/src/info_module.F90 b/src/info_module.F90 new file mode 100644 index 000000000..592d55466 --- /dev/null +++ b/src/info_module.F90 @@ -0,0 +1,92 @@ +module info_module +! synopsis: query information from FESOM + + implicit none + public info + private + + type :: info_type + contains + procedure, nopass :: print_definitions + end type + type(info_type) info + +contains + + ! this is a list of preprocessor definitions from the FESOM Fortran source files + ! it will probably become outdated at some point and should be reviewed + ! the result will reflect the status of definitions as they are set when *this file* had been compiled + subroutine print_definitions() +#ifdef __icepack + print '(g0)', '__icepack is ON' +#else + print '(g0)', '__icepack is OFF' +#endif +#ifdef __oasis + print '(g0)', '__oasis is ON' +#else + print '(g0)', '__oasis is OFF' +#endif +#ifdef __oifs + print '(g0)', '__oifs is ON' +#else + print '(g0)', '__oifs is OFF' +#endif +#ifdef DEBUG + print '(g0)', 'DEBUG is ON' +#else + print '(g0)', 'DEBUG is OFF' +#endif +#ifdef DISABLE_MULTITHREADING + print '(g0)', 'DISABLE_MULTITHREADING is ON' +#else + print '(g0)', 'DISABLE_MULTITHREADING is OFF' +#endif +#ifdef false + print '(g0)', 'false is ON' +#else + print '(g0)', 'false is OFF' +#endif +#ifdef FVOM_INIT + print '(g0)', 'FVOM_INIT is ON' +#else + print '(g0)', 'FVOM_INIT is OFF' +#endif +#ifdef oifs + print '(g0)', 'oifs is ON' +#else + print '(g0)', 'oifs is OFF' +#endif +#ifdef OMP_MAX_THREADS + print '(g0)', 'OMP_MAX_THREADS is ON' +#else + print '(g0)', 'OMP_MAX_THREADS is OFF' +#endif +#ifdef PARMS + print '(g0)', 'PARMS is ON' +#else + print '(g0)', 'PARMS is OFF' +#endif +#ifdef PETSC + print '(g0)', 'PETSC is ON' +#else + print '(g0)', 'PETSC is OFF' +#endif +#ifdef use_cavity + print '(g0)', 'use_cavity is ON' +#else + print '(g0)', 'use_cavity is OFF' +#endif +#ifdef use_fullfreesurf + print '(g0)', 'use_fullfreesurf is ON' +#else + print '(g0)', 'use_fullfreesurf is OFF' +#endif +#ifdef VERBOSE + print '(g0)', 'VERBOSE is ON' +#else + print '(g0)', 'VERBOSE is OFF' +#endif + end subroutine + +end module From b308dc25b6fbf3f836b0e833ee61b541b3c81e63 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 5 May 2021 13:17:52 +0200 Subject: [PATCH 118/909] print information about the currently used MPI library at FESOM startup --- src/fvom_main.F90 | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 4c9f0fb97..e116e4c11 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -44,6 +44,9 @@ program main real(kind=real32) :: runtime_alltimesteps type(t_mesh), target, save :: mesh +character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt +integer mpi_version_len + #ifndef __oifs !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) @@ -61,6 +64,8 @@ program main if(mype==0) then write(*,*) print *,"FESOM2 git SHA: "//fesom_git_sha() + call MPI_Get_library_version(mpi_version_txt, mpi_version_len, MPIERR) + print *,"MPI library version: "//trim(mpi_version_txt) print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[7;32m'//' --> FESOM BUILDS UP MODEL CONFIGURATION '//achar(27)//'[0m' end if From bee2d85109d420685b00c839db9f03258c14c2a9 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Fri, 7 May 2021 19:02:35 +0200 Subject: [PATCH 119/909] add juwels temporary paths --- setups/paths.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/setups/paths.yml b/setups/paths.yml index b352c7473..e74aa8cb6 100644 --- a/setups/paths.yml +++ b/setups/paths.yml @@ -67,4 +67,25 @@ docker: opath: opath: ../results/ +juwels: + lnodename: + - 'jwlogin*' + meshes: + test_global: ./test/meshes/pi/ + test_souf: ./test/meshes/soufflet/ + pi: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/pi/ + core2: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/core2/ + mr: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/mr/ + hr: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/hr/ + orca25: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/orca25/ + farc: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/farc/ + forcing: + test_global: ./test/input/global/ + CORE2: /p/project/chhb19/hhb192/forcing/forcing/CORE2/ + JRA55: /p/project/chhb19/hhb192/forcing/forcing/JRA55-do-v1.4.0/ + clim: + test_global: ./test/input/global/ + phc: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/hydrography/phc3.0/ + opath: + opath: /p/scratch/chhb19/${USER}/ From 6aed0c23a0f700c07a3455fd01d5b34a9e6b9bc9 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Mon, 10 May 2021 16:41:11 +0200 Subject: [PATCH 120/909] change elem to elements for variable, otherwise xarray cant nadle it --- src/io_mesh_info.F90 | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/io_mesh_info.F90 b/src/io_mesh_info.F90 index ea58cb4b3..551c063e7 100644 --- a/src/io_mesh_info.F90 +++ b/src/io_mesh_info.F90 @@ -90,15 +90,15 @@ subroutine write_mesh_info(mesh) call my_def_var(ncid, 'zbar_e_bottom', NF_DOUBLE, 1, (/elem_n_id/), zbar_e_bot_id, 'element bottom depth') call my_def_var(ncid, 'zbar_n_bottom', NF_DOUBLE, 1, (/nod_n_id/) , zbar_n_bot_id, 'nodal bottom depth') ! 2D - call my_def_var(ncid, 'nod_area', NF_DOUBLE, 2, (/nod_n_id, nl_id/), nod_area_id, 'nodal areas' ) - call my_def_var(ncid, 'elem', NF_INT, 2, (/elem_n_id, id_3/), elem_id, 'elements' ) - call my_def_var(ncid, 'nodes', NF_DOUBLE, 2, (/nod_n_id, id_2/), nod_id, 'nodal geo. coordinates' ) - call my_def_var(ncid, 'nod_in_elem2D', NF_INT, 2, (/nod_n_id, id_N/), nod_in_elem2D_id, 'elements containing the node') - call my_def_var(ncid, 'edges', NF_INT, 2, (/edge_n_id, id_2/), edges_id, 'edges' ) - call my_def_var(ncid, 'edge_tri', NF_INT, 2, (/edge_n_id, id_2/), edge_tri_id, 'edge triangles' ) - call my_def_var(ncid, 'edge_cross_dxdy', NF_DOUBLE, 2, (/edge_n_id, id_4/), edge_cross_dxdy_id, 'edge cross distancess' ) - call my_def_var(ncid, 'gradient_sca_x', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_x_id, 'x component of a gradient at nodes of an element') - call my_def_var(ncid, 'gradient_sca_y', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_y_id, 'y component of a gradient at nodes of an element') + call my_def_var(ncid, 'nod_area', NF_DOUBLE, 2, (/nod_n_id, nl_id/), nod_area_id, 'nodal areas' ) + call my_def_var(ncid, 'elements', NF_INT, 2, (/elem_n_id, id_3/), elem_id, 'elements' ) + call my_def_var(ncid, 'nodes', NF_DOUBLE, 2, (/nod_n_id, id_2/), nod_id, 'nodal geo. coordinates' ) + call my_def_var(ncid, 'nod_in_elem2D', NF_INT, 2, (/nod_n_id, id_N/), nod_in_elem2D_id, 'elements containing the node') + call my_def_var(ncid, 'edges', NF_INT, 2, (/edge_n_id, id_2/), edges_id, 'edges' ) + call my_def_var(ncid, 'edge_tri', NF_INT, 2, (/edge_n_id, id_2/), edge_tri_id, 'edge triangles' ) + call my_def_var(ncid, 'edge_cross_dxdy', NF_DOUBLE, 2, (/edge_n_id, id_4/), edge_cross_dxdy_id, 'edge cross distancess' ) + call my_def_var(ncid, 'gradient_sca_x', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_x_id, 'x component of a gradient at nodes of an element') + call my_def_var(ncid, 'gradient_sca_y', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_y_id, 'y component of a gradient at nodes of an element') call my_nf_enddef(ncid) ! vercical levels/layers From 2633d249fc827b7105ed69c45b32ec37273e1f44 Mon Sep 17 00:00:00 2001 From: Natalja Rakowsky Date: Tue, 25 May 2021 08:50:10 +0200 Subject: [PATCH 121/909] Makefile updated --- src/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 7d93ff9c2..d4e453811 100755 --- a/src/Makefile +++ b/src/Makefile @@ -29,6 +29,8 @@ OBJ_INI = fvom_init.o \ # objects MODULES = oce_modules.o \ + info_module.o \ + command_line_options.o \ MOD_MESH.o \ ice_modules.o \ gen_modules_config.o \ @@ -78,7 +80,7 @@ MODULES = oce_modules.o \ gen_ic3d.o \ gen_surface_forcing.o \ gen_modules_gpot.o \ - toy_channel_soufflet.o + toy_channel_soufflet.o OBJECTS= fvom_main.o \ gen_comm.o \ From f811fc66e45bfee3083c677cf1c12f87b69c9509 Mon Sep 17 00:00:00 2001 From: Natalja Rakowsky Date: Tue, 25 May 2021 08:51:08 +0200 Subject: [PATCH 122/909] Minor bugfixes to prevent warnings from gfortran 9.3.0 --- src/cavity_param.F90 | 56 +++++++++++++++++++++++++---------------- src/oce_adv_tra_ver.F90 | 2 +- src/oce_ale_vel_rhs.F90 | 2 +- 3 files changed, 37 insertions(+), 23 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 68f1735e9..c43a8018b 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -458,11 +458,12 @@ end subroutine dist_on_earth ! [oC] (TIN) bezogen auf den in-situ Druck[dbar] (PRES) mit Hilfe ! eines Iterationsverfahrens aus. subroutine potit(salz,pt,pres,rfpres,tin) + use o_PARAM , only: WP integer iter - real salz,pt,pres,rfpres,tin - real epsi,tpmd,pt1,ptd,pttmpr + real(kind=WP) :: salz,pt,pres,rfpres,tin + real(kind=WP) :: epsi, pt1,ptd,pttmpr - data tpmd / 0.001 / + real(kind=WP), parameter :: tpmd=0.001_WP epsi = 0. do iter=1,100 @@ -489,15 +490,19 @@ end subroutine potit ! TEMP = 40.0 DegC ! PRES = 10000.000 dbar ! RFPRES = 0.000 dbar -real function pttmpr(salz,temp,pres,rfpres) - - data ct2 ,ct3 /0.29289322 , 1.707106781/ - data cq2a,cq2b /0.58578644 , 0.121320344/ - data cq3a,cq3b /3.414213562, -4.121320344/ +real(kind=WP) function pttmpr(salz,temp,pres,rfpres) + use o_PARAM , only: WP - real salz,temp,pres,rfpres - real p,t,dp,dt,q,ct2,ct3,cq2a,cq2b,cq3a,cq3b - real adlprt + real(kind=WP) :: salz,temp,pres,rfpres + real(kind=WP) :: p,t,dp,dt,q + real(kind=WP) :: adlprt + real(kind=WP), parameter :: ct2 = 0.29289322_WP + real(kind=WP), parameter :: ct3 = 1.707106781_WP + real(kind=WP), parameter :: cq2a = 0.58578644_WP + real(kind=WP), parameter :: cq2b = 0.121320344_WP + real(kind=WP), parameter :: cq3a = 3.414213562_WP + real(kind=WP), parameter :: cq3b = -4.121320344_WP + p = pres t = temp @@ -528,17 +533,26 @@ end function pttmpr ! fuer SALZ = 40.0 psu ! TEMP = 40.0 DegC ! PRES = 10000.000 dbar -real function adlprt(salz,temp,pres) +real(kind=WP) function adlprt(salz,temp,pres) - real salz,temp,pres - real s0,a0,a1,a2,a3,b0,b1,c0,c1,c2,c3,d0,d1,e0,e1,e2,ds - - data s0 /35.0/ - data a0,a1,a2,a3 /3.5803E-5, 8.5258E-6, -6.8360E-8, 6.6228E-10/ - data b0,b1 /1.8932E-6, -4.2393E-8/ - data c0,c1,c2,c3 /1.8741E-8, -6.7795E-10, 8.7330E-12, -5.4481E-14/ - data d0,d1 /-1.1351E-10, 2.7759E-12/ - data e0,e1,e2 /-4.6206E-13, 1.8676E-14, -2.1687E-16/ + use o_PARAM , only: WP + real(kind=WP) :: salz,temp,pres + real(kind=WP), parameter :: s0 = 35.0 + real(kind=WP), parameter :: a0 = 3.5803E-5 + real(kind=WP), parameter :: a1 = 8.5258E-6 + real(kind=WP), parameter :: a2 = -6.8360E-8 + real(kind=WP), parameter :: a3 = 6.6228E-10 + real(kind=WP), parameter :: b0 = 1.8932E-6 + real(kind=WP), parameter :: b1 = -4.2393E-8 + real(kind=WP), parameter :: c0 = 1.8741E-8 + real(kind=WP), parameter :: c1 = -6.7795E-10 + real(kind=WP), parameter :: c2 = 8.7330E-12 + real(kind=WP), parameter :: c3 = -5.4481E-14 + real(kind=WP), parameter :: d0 = -1.1351E-10 + real(kind=WP), parameter :: d1 = 2.7759E-12 + real(kind=WP), parameter :: e0 = -4.6206E-13 + real(kind=WP), parameter :: e1 = 1.8676E-14 + real(kind=WP), parameter :: e2 = -2.1687E-16 ds = salz-s0 adlprt = ( ( (e2*temp + e1)*temp + e0 )*pres & diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 5ced1642b..0dffb12ab 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -410,7 +410,7 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) ! tv(3)=-ttf(2,n)*min(sign(1.0, W(3,n)), 0._WP)+ttf(3,n)*max(sign(1.0, W(3,n)), 0._WP) tv(nzmin+1)=0.5*(ttf(nzmin, n)+ttf(nzmin+1,n)) ! tacer at bottom-1 level - tv(nzmax-1)=-ttf(nzmax-2,n)*min(sign(1.0, W(nzmax-1,n)), 0._WP)+ttf(nzmax-1,n)*max(sign(1.0, W(nzmax-1,n)), 0._WP) + tv(nzmax-1)=-ttf(nzmax-2,n)*min(sign(1.0_wp, W(nzmax-1,n)), 0._WP)+ttf(nzmax-1,n)*max(sign(1.0_wp, W(nzmax-1,n)), 0._WP) ! tv(nzmax-1)=0.5_WP*(ttf(nzmax-2,n)+ttf(nzmax-1,n)) ! tracer at bottom level tv(nzmax)=ttf(nzmax-1,n) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 0f5b0ac6e..d13945542 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -243,7 +243,7 @@ subroutine momentum_adv_scalar(mesh) ! ./ el1 \. ! ./ x \. ! ./ |-------\.-----------------edge_cross_dxdy(1:2,ed) --> (dx,dy) - ! / |->n_vec \ + ! / |->n_vec \ ! nod(1) o----------O----------o nod(2) ! \. |->n_vec ./ ! \. |------./------------------edge_cross_dxdy(3:4,ed) --> (dx,dy) From 1a5942807101c99ac4665d8e1456017ca0bd6a66 Mon Sep 17 00:00:00 2001 From: Natalja Rakowsky Date: Tue, 25 May 2021 08:55:19 +0200 Subject: [PATCH 123/909] Bug fix: variable ds was not initialised --- src/cavity_param.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index c43a8018b..0491ee4ed 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -536,7 +536,7 @@ end function pttmpr real(kind=WP) function adlprt(salz,temp,pres) use o_PARAM , only: WP - real(kind=WP) :: salz,temp,pres + real(kind=WP) :: salz,temp,pres, ds real(kind=WP), parameter :: s0 = 35.0 real(kind=WP), parameter :: a0 = 3.5803E-5 real(kind=WP), parameter :: a1 = 8.5258E-6 From 104231b24096118a8ef42caaa7b46fb49d9c6281 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 29 May 2021 17:16:37 +0200 Subject: [PATCH 124/909] fix bugs in the zstar volume conservation of the soufflet channel experiment --- src/io_meandata.F90 | 7 ++++- src/oce_ale.F90 | 8 +++--- src/oce_ale_tracer.F90 | 7 +++-- src/toy_channel_soufflet.F90 | 51 ++++++++++++++++++++++-------------- 4 files changed, 46 insertions(+), 27 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 86c9fdf91..3cd67d342 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -146,7 +146,12 @@ subroutine ini_mean_io(mesh) call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('vve_5 ') call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', Wvel(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) - + +CASE ('ssh_rhs ') + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) +CASE ('ssh_rhs_old ') + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + !___________________________________________________________________________________________________________________________________ ! output sea ice CASE ('uice ') diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 9dcd9c230..447929d75 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2555,10 +2555,10 @@ subroutine oce_timestep_ale(n, mesh) t0=MPI_Wtime() -!!PS water_flux = 0.0_WP -!!PS heat_flux = 0.0_WP -!!PS stress_surf= 0.0_WP -!!PS stress_node_surf= 0.0_WP +! water_flux = 0.0_WP +! heat_flux = 0.0_WP +! stress_surf= 0.0_WP +! stress_node_surf= 0.0_WP !___________________________________________________________________________ ! calculate equation of state, density, pressure and mixed layer depths diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index b0ce14032..7e3e39ebc 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -147,8 +147,11 @@ subroutine solve_tracers_ale(mesh) ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' - call relax_to_clim(tr_num, mesh) - if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_temp(mesh) + if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) then + call relax_zonal_temp(mesh) + else + call relax_to_clim(tr_num, mesh) + end if call exchange_nod(tr_arr(:,:,tr_num)) end do diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index 06e1d60ac..d7a27e79b 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -4,6 +4,7 @@ MODULE Toy_Channel_Soufflet USE o_PARAM USE g_PARSUP USE g_config + use g_comm_auto implicit none SAVE @@ -79,7 +80,7 @@ subroutine relax_zonal_temp(mesh) type(t_mesh), intent(in) , target :: mesh #include "associate_mesh.h" - do n=1, myDim_nod2D + do n=1, myDim_nod2D+eDim_nod2D yy=coord_nod2D(2,n)-lat0 a=0 if (yy Date: Mon, 31 May 2021 14:13:44 +0200 Subject: [PATCH 125/909] change index in tr_arr for Ssurf (not sure ssurf is realy needed for channel) --- src/toy_channel_soufflet.F90 | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index d7a27e79b..c354f7685 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -71,6 +71,8 @@ subroutine relax_zonal_vel(mesh) UV_rhs(1,nz,elem) = UV_rhs(1,nz,elem)+dt*tau_inv*(Uclim(nz,elem)-Uzon) END DO END DO + call exchange_elem(UV_rhs) + end subroutine relax_zonal_vel !========================================================================== subroutine relax_zonal_temp(mesh) @@ -232,7 +234,7 @@ subroutine initial_state_soufflet(mesh) stress_surf = 0.0_WP heat_flux = 0.0_WP tr_arr(:,:,2) = 35.0_WP - Ssurf = tr_arr(1,:,1) + Ssurf = tr_arr(1,:,2) water_flux = 0.0_WP relax2clim = 0.0_WP @@ -327,13 +329,16 @@ subroutine initial_state_soufflet(mesh) allocate(Uclim(nl-1,myDim_elem2D+eDim_elem2D)) Uclim=UV(1,:,:) -! UV=0.0_WP -! UV(1,:,:) = 0.01_WP -! ! tr_arr(:,:,1) = 16.0_WP -! ! tr_arr(:,:,2) = 35.0_WP -! -! Tclim=tr_arr(:,:,1) -! Uclim=UV(1,:,:) +!!PS tr_arr(:,:,1) = 16.0_WP +!!PS tr_arr(:,:,2) = 35.0_WP +!!PS Ssurf = tr_arr(1,:,2) +!!PS Tsurf = tr_arr(1,:,1) +!!PS Tclim = tr_arr(:,:,1) + +!!PS UV = 0.0_WP +!!PS UV(1,:,:) = 0.01_WP +!!PS Uclim = UV(1,:,:) + write(*,*) mype, 'Vel', maxval(UV(1,:,:)), minval(UV(1,:,:)) END subroutine initial_state_soufflet ! =============================================================================== From 84173ede6b3f8174ef5778584a89bf1d354236e4 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 31 May 2021 17:05:58 +0200 Subject: [PATCH 126/909] really weired bug where the logical comparison did not work --- src/fvom_init.F90 | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 14bc79742..c5396bdf7 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -955,10 +955,10 @@ subroutine find_levels_cavity(mesh) ! outer iteration loop count_iter2 = 0 exit_flag2 = 0 - elemfixlvl = .false. + elemfixlvl = .False. do while((exit_flag2==0) .and. (count_iter2 in case make the levels of all sorounding ! triangles shallower - if ( (nlevels(elem)-(nz+1))>=3 .and. & - elemreducelvl(elem) .eqv. .false. .and. & - elemfixlvl(elem) .eqv. .false.) then + if ( (nlevels(elem)-(nz+1))>=3 .and. & + (elemreducelvl(elem) .eqv. .False.) .and. & + (elemfixlvl( elem) .eqv. .False.) & + ) then ulevels(elem)=nz+1 else ! --> can not increase depth anymore to eleminate isolated @@ -1023,7 +1024,7 @@ subroutine find_levels_cavity(mesh) ! to nz idx = minloc(ulevels(elems)-nz, 1, MASK=( (elems>0) .and. ((ulevels(elems)-nz)>0) ) ) ulevels(elems(idx)) = nz-1 - elemreducelvl(elems(idx)) = .true. + elemreducelvl(elems(idx)) = .True. end if !force recheck for all current ocean cells @@ -1134,7 +1135,7 @@ subroutine find_levels_cavity(mesh) elem=nod_in_elem2D(j,node) if (ulevels(elem)>nz) then ulevels(elem) = nz - elemfixlvl(elem) = .true. + elemfixlvl(elem) = .True. end if end do end if From 67c6b8408ec43dda3c64bedfdf9d639a37a16a89 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 1 Jun 2021 13:49:13 +0200 Subject: [PATCH 127/909] forgot upper level index for cavity in SUBROUTINE diff_part_bh(ttf, mesh) --- src/oce_ale_tracer.F90 | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index b0ce14032..cd7d35522 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -974,7 +974,7 @@ SUBROUTINE diff_part_bh(ttf, mesh) type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(inout), target :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP) :: u1, v1, len, vi, tt, ww - integer :: nz, ed, el(2), en(2), k, elem, nl1 + integer :: nz, ed, el(2), en(2), k, elem, nl1, ul1 real(kind=WP), allocatable :: temporary_ttf(:,:) #include "associate_mesh.h" @@ -988,8 +988,9 @@ SUBROUTINE diff_part_bh(ttf, mesh) el=edge_tri(:,ed) en=edges(:,ed) len=sqrt(sum(elem_area(el))) + ul1=minval(ulevels_nod2D_max(en)) nl1=maxval(nlevels_nod2D_min(en))-1 - DO nz=1,nl1 + DO nz=ul1,nl1 u1=UV(1, nz,el(1))-UV(1, nz,el(2)) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 @@ -1011,8 +1012,9 @@ SUBROUTINE diff_part_bh(ttf, mesh) el=edge_tri(:,ed) en=edges(:,ed) len=sqrt(sum(elem_area(el))) + ul1=minval(ulevels_nod2D_max(en)) nl1=maxval(nlevels_nod2D_min(en))-1 - DO nz=1,nl1 + DO nz=ul1,nl1 u1=UV(1, nz,el(1))-UV(1, nz,el(2)) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 From 98e3c23d25e1d0f328fc8405008c2e8dd47ce532 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 2 Jun 2021 12:17:39 +0200 Subject: [PATCH 128/909] fix small indix bug in the scaling of GM when using cavity --- src/oce_fer_gm.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index 3f95cd951..a0cd0f863 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -311,7 +311,7 @@ subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi ! the surface template for the scaling !!PS do nz=2, nzmax do nz=nzmin+1, nzmax - fer_k(nz,n)=fer_k(1,n)*zscaling(nz) + fer_k(nz,n)=fer_k(nzmin,n)*zscaling(nz) end do ! after vertical Ferreira scaling is done also scale surface template !!PS fer_k(1,n)=fer_k(1,n)*zscaling(1) From 5b4600482f8a95386e75170082cf2e1b85e2fc68 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Wed, 2 Jun 2021 16:44:08 +0200 Subject: [PATCH 129/909] Update test values for channel --- setups/test_souf/setup.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setups/test_souf/setup.yml b/setups/test_souf/setup.yml index be91281af..f076d102b 100644 --- a/setups/test_souf/setup.yml +++ b/setups/test_souf/setup.yml @@ -81,8 +81,8 @@ namelist.io: fcheck: salt: 35.0 - temp: 14.329708493584528 - sst: 18.939699611886315 - u: 0.027431619139927817 - v: -0.0008870629489199685 + temp: 14.329708416524904 + sst: 18.939699613817496 + u: 0.0274316731683607 + v: -0.0008870790518593145 From c0977ca6c7486855d0231dfc69573c1877d823e5 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 3 Jun 2021 11:56:54 +0200 Subject: [PATCH 130/909] fix bug in tke+idemix when using cavity + fix bug in cvmix_TKE where to variables of different size are multiplied --- src/cvmix_tke.F90 | 3 +- src/gen_modules_cvmix_idemix.F90 | 71 ++++++++++++------------ src/gen_modules_cvmix_tke.F90 | 95 ++++++++++++++------------------ 3 files changed, 80 insertions(+), 89 deletions(-) diff --git a/src/cvmix_tke.F90 b/src/cvmix_tke.F90 index 1fa16f8db..5f6ecc72b 100644 --- a/src/cvmix_tke.F90 +++ b/src/cvmix_tke.F90 @@ -452,7 +452,8 @@ subroutine integrate_tke( & dzw ! ! IDEMIX variables, if run coupled iw_diss is added as forcing to TKE - real(cvmix_r8), dimension(max_nlev), intent(in), optional :: & +!!PS real(cvmix_r8), dimension(max_nlev), intent(in), optional :: & + real(cvmix_r8), dimension(nlev+1), intent(in), optional :: & E_iw ,& ! alpha_c ,& ! iw_diss ! diff --git a/src/gen_modules_cvmix_idemix.F90 b/src/gen_modules_cvmix_idemix.F90 index 222df66a2..01f0af4a9 100644 --- a/src/gen_modules_cvmix_idemix.F90 +++ b/src/gen_modules_cvmix_idemix.F90 @@ -291,16 +291,16 @@ subroutine calc_cvmix_idemix(mesh) !___________________________________________________________________ ! calculate for TKE square of Brünt-Väisälä frequency, be aware that ! bvfreq contains already the squared brünt Väisälä frequency ... - bvfreq2 = 0.0_WP + bvfreq2 = 0.0_WP bvfreq2(uln:nln) = bvfreq(uln:nln,node) !___________________________________________________________________ ! dz_trr distance between tracer points, surface and bottom dz_trr is half ! the layerthickness ... - dz_trr = 0.0_WP - dz_trr(uln+1:nln) = abs(Z_3d_n(uln:nln-1,node)-Z_3d_n(uln+1:nln,node)) + dz_trr = 0.0_WP + dz_trr(uln+1:nln)= abs(Z_3d_n(uln:nln-1,node)-Z_3d_n(uln+1:nln,node)) dz_trr(uln) = hnode(uln,node)/2.0_WP - dz_trr(nln+1) = hnode(nln,node)/2.0_WP + dz_trr(nln+1) = hnode(nln,node)/2.0_WP !___________________________________________________________________ ! main call to calculate idemix @@ -308,41 +308,42 @@ subroutine calc_cvmix_idemix(mesh) call cvmix_coeffs_idemix(& ! parameter - dzw = hnode(:,node), & - dzt = dz_trr(:), & - nlev = nln, & - max_nlev = nl-1, & - dtime = dt, & - coriolis = coriolis_node(node), & + dzw = hnode(uln:nln,node), & + dzt = dz_trr(uln:nln+1), & +! nlev = nln, & + nlev = nln-uln+1, & + max_nlev = nl-1, & + dtime = dt, & + coriolis = coriolis_node(node), & ! essentials - iwe_new = iwe(:,node), & ! out - iwe_old = iwe_old(:), & ! in - forc_iw_surface = forc_iw_surface_2D(node), & ! in - forc_iw_bottom = forc_iw_bottom_2D(node), & ! in + iwe_new = iwe(uln:nln+1,node), & ! out + iwe_old = iwe_old(uln:nln+1), & ! in + forc_iw_surface = forc_iw_surface_2D(node), & ! in + forc_iw_bottom = forc_iw_bottom_2D(node), & ! in ! FIXME: nils: better output IDEMIX Ri directly - alpha_c = iwe_alpha_c(:,node), & ! out (for Ri IMIX) + alpha_c = iwe_alpha_c(uln:nln+1,node), & ! out (for Ri IMIX) ! only for Osborn shortcut ! FIXME: nils: put this to cvmix_tke - KappaM_out = iwe_Av(:,node), & ! out - KappaH_out = iwe_Kv(:,node), & ! out - Nsqr = bvfreq2(:), & ! in + KappaM_out = iwe_Av( uln:nln+1,node), & ! out + KappaH_out = iwe_Kv( uln:nln+1,node), & ! out + Nsqr = bvfreq2( uln:nln+1), & ! in ! diagnostics - iwe_Ttot = iwe_Ttot(:,node), & - iwe_Tdif = iwe_Tdif(:,node), & - iwe_Thdi = iwe_Thdi(:,node), & - iwe_Tdis = iwe_Tdis(:,node), & - iwe_Tsur = iwe_Tsur(:,node), & - iwe_Tbot = iwe_Tbot(:,node), & - c0 = iwe_c0(:,node), & - v0 = iwe_v0(:,node), & + iwe_Ttot = iwe_Ttot(uln:nln+1,node), & + iwe_Tdif = iwe_Tdif(uln:nln+1,node), & + iwe_Thdi = iwe_Thdi(uln:nln+1,node), & + iwe_Tdis = iwe_Tdis(uln:nln+1,node), & + iwe_Tsur = iwe_Tsur(uln:nln+1,node), & + iwe_Tbot = iwe_Tbot(uln:nln+1,node), & + c0 = iwe_c0( uln:nln+1,node), & + v0 = iwe_v0( uln:nln+1,node), & ! debugging - debug = debug, & + debug = debug, & !i = i, & !j = j, & !tstep_count = tstep_count, & - cvmix_int_1 = cvmix_dummy_1(:,node), & - cvmix_int_2 = cvmix_dummy_2(:,node), & - cvmix_int_3 = cvmix_dummy_3(:,node) & + cvmix_int_1 = cvmix_dummy_1(uln:nln+1,node), & + cvmix_int_2 = cvmix_dummy_2(uln:nln+1,node), & + cvmix_int_3 = cvmix_dummy_3(uln:nln+1,node) & ) end do !-->do node = 1,node_size @@ -393,10 +394,10 @@ subroutine calc_cvmix_idemix(mesh) uln = ulevels_nod2D(node) ! thickness of mid-level to mid-level interface at node - dz_trr = 0.0_WP + dz_trr = 0.0_WP dz_trr(uln+1:nln) = Z_3d_n(uln:nln-1,node)-Z_3d_n(uln+1:nln,node) - dz_trr(uln) = hnode(uln,node)/2.0_WP - dz_trr(nln+1) = hnode(nln,node)/2.0_WP + dz_trr(uln) = hnode(uln,node)/2.0_WP + dz_trr(nln+1) = hnode(nln,node)/2.0_WP ! surface cell vol_wcelli(uln,node) = 1/(areasvol(uln,node)*dz_trr(uln)) @@ -459,7 +460,7 @@ subroutine calc_cvmix_idemix(mesh) ! thickness of mid-level to mid-level interface of element el(1) dz_trr = 0.0_WP - dz_trr(1) = helem(1,el(1))/2.0_WP + dz_trr(nu1) = helem(nu1,el(1))/2.0_WP !!PS do nz=2,nl1-1 do nz=nu1+1,nl1-1 dz_trr(nz) = helem(nz-1,el(1))/2.0_WP + helem(nz,el(1))/2.0_WP @@ -480,7 +481,7 @@ subroutine calc_cvmix_idemix(mesh) ! thickness of mid-level to mid-level interface of element el(2) dz_trr2 = 0.0_WP - dz_trr2(1) = helem(1,el(2))/2.0_WP + dz_trr2(nu2) = helem(nu2,el(2))/2.0_WP !!PS do nz=2,nl2-1 do nz=nu2+1,nl2-1 dz_trr2(nz) = helem(nz-1,el(2))/2.0_WP + helem(nz,el(2))/2.0_WP diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index 1f6434812..c809811b8 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -277,19 +277,14 @@ subroutine calc_cvmix_tke(mesh) ! calcualte for TKE surface momentum forcing --> norm of nodal ! surface wind stress --> tke_forc2d_normstress --> interpolate from elements ! to nodes - tvol = 0.0_WP - do nelem=1,nod_in_elem2D_num(node) - elem = nod_in_elem2D(nelem,node) - tvol = tvol + elem_area(elem) - tke_forc2d_normstress(node) = tke_forc2d_normstress(node) & - + sqrt(stress_surf(1,elem)**2 + stress_surf(2,elem)**2)*elem_area(elem)/density_0 - end do !--> do nelem=1,nod_in_elem2D_num(node) - tke_forc2d_normstress(node) = tke_forc2d_normstress(node)/tvol - + tke_forc2d_normstress(node) = sqrt( & + stress_node_surf(1,node)**2 + & + stress_node_surf(2,node)**2 & + )/density_0 + !___________________________________________________________________ ! calculate for TKE 3D vertical velocity shear vshear2=0.0_WP - !!PS do nz=2,nln do nz=nun+1,nln vshear2(nz)=(( Unode(1, nz-1, node) - Unode(1, nz, node))**2 + & ( Unode(2, nz-1, node) - Unode(2, nz, node))**2)/ & @@ -313,14 +308,10 @@ subroutine calc_cvmix_tke(mesh) !___________________________________________________________________ ! dz_trr distance between tracer points, surface and bottom dz_trr is half ! the layerthickness ... - !!PS dz_trr = 0.0_WP - !!PS dz_trr(2:nln) = abs(Z_3d_n(1:nln-1,node)-Z_3d_n(2:nln,node)) - !!PS dz_trr(1) = hnode(1,node)/2.0_WP - !!PS dz_trr(nln+1) = hnode(nln,node)/2.0_WP - dz_trr = 0.0_WP - dz_trr(nun+1:nln) = abs(Z_3d_n(nun:nln-1,node)-Z_3d_n(nun+1:nln,node)) - dz_trr(nun) = hnode(nun,node)/2.0_WP - dz_trr(nln+1) = hnode(nln,node)/2.0_WP + dz_trr = 0.0_WP + dz_trr(nun+1:nln) = abs(Z_3d_n(nun:nln-1,node)-Z_3d_n(nun+1:nln,node)) + dz_trr(nun) = hnode(nun,node)/2.0_WP + dz_trr(nln+1) = hnode(nln,node)/2.0_WP !___________________________________________________________________ ! main cvmix call to calculate tke @@ -330,44 +321,45 @@ subroutine calc_cvmix_tke(mesh) call cvmix_coeffs_tke(& ! parameter - dzw = hnode(:,node), & ! distance between layer interface --> hnode - dzt = dz_trr(:), & ! distnace between tracer points - nlev = nln, & + dzw = hnode(nun:nln,node), & ! distance between layer interface --> hnode + dzt = dz_trr(nun:nln+1), & ! distnace between tracer points +! nlev = nln, & + nlev = nln-nun+1, & max_nlev = nl-1, & dtime = dt, & rho_ref = density_0, & grav = g, & ! essentials - tke_new = tke(:,node), & ! out--> turbulent kinetic energy - KappaM_out = tke_Av(:,node), & ! out - KappaH_out = tke_Kv(:,node), & ! out - tke_old = tke_old(:), & ! in --> turbulent kinetic energy previous time step - old_KappaM = tke_Av_old(:), & ! in - old_KappaH = tke_Kv_old(:), & ! in - Ssqr = vshear2(:), & ! in --> square vert. vel. shear - Nsqr = bvfreq2(:), & ! in --> square brunt Väisälä freq - alpha_c = tke_in3d_iwealphac(:,node), & ! in for IDEMIX Ri - E_iw = tke_in3d_iwe(:,node), & ! in for IDEMIX Ri + tke_new = tke( nun:nln+1,node), & ! out--> turbulent kinetic energy + KappaM_out = tke_Av( nun:nln+1,node), & ! out + KappaH_out = tke_Kv( nun:nln+1,node), & ! out + tke_old = tke_old( nun:nln+1), & ! in --> turbulent kinetic energy previous time step + old_KappaM = tke_Av_old(nun:nln+1), & ! in + old_KappaH = tke_Kv_old(nun:nln+1), & ! in + Ssqr = vshear2( nun:nln+1), & ! in --> square vert. vel. shear + Nsqr = bvfreq2( nun:nln+1), & ! in --> square brunt Väisälä freq + alpha_c = tke_in3d_iwealphac(nun:nln+1,node), & ! in for IDEMIX Ri + E_iw = tke_in3d_iwe(nun:nln+1,node), & ! in for IDEMIX Ri ! forcing - forc_tke_surf= tke_forc2d_normstress(node), & ! in --> wind stress - forc_rho_surf= tke_forc2d_rhosurf(node), & ! in - bottom_fric = tke_forc2d_botfrict(node), & ! in - iw_diss = tke_in3d_iwdis(:,node), & ! in + forc_tke_surf= tke_forc2d_normstress( node), & ! in --> wind stress + forc_rho_surf= tke_forc2d_rhosurf( node), & ! in + bottom_fric = tke_forc2d_botfrict( node), & ! in + iw_diss = tke_in3d_iwdis(nun:nln+1,node), & ! in ! diagnostics - tke_Tbpr = tke_Tbpr(:,node), & ! buoyancy production - tke_Tspr = tke_Tspr(:,node), & ! shear production - tke_Tdif = tke_Tdif(:,node), & ! vertical diffusion d/dz(k d/dz)TKE - tke_Tdis = tke_Tdis(:,node), & ! dissipation - tke_Twin = tke_Twin(:,node), & ! wind forcing - tke_Tiwf = tke_Tiwf(:,node), & ! internal wave forcing when idemix is used - tke_Tbck = tke_Tbck(:,node), & ! background forcing only active if IDEMIX is not active, forcing that results from resetting TKE to minimum background TKE value - tke_Ttot = tke_Ttot(:,node), & ! sum of all terms - tke_Lmix = tke_Lmix(:,node), & ! mixing length scale of the TKE scheme - tke_Pr = tke_Pr(:,node), & ! Prantl number + tke_Tbpr = tke_Tbpr(nun:nln+1,node), & ! buoyancy production + tke_Tspr = tke_Tspr(nun:nln+1,node), & ! shear production + tke_Tdif = tke_Tdif(nun:nln+1,node), & ! vertical diffusion d/dz(k d/dz)TKE + tke_Tdis = tke_Tdis(nun:nln+1,node), & ! dissipation + tke_Twin = tke_Twin(nun:nln+1,node), & ! wind forcing + tke_Tiwf = tke_Tiwf(nun:nln+1,node), & ! internal wave forcing when idemix is used + tke_Tbck = tke_Tbck(nun:nln+1,node), & ! background forcing only active if IDEMIX is not active, forcing that results from resetting TKE to minimum background TKE value + tke_Ttot = tke_Ttot(nun:nln+1,node), & ! sum of all terms + tke_Lmix = tke_Lmix(nun:nln+1,node), & ! mixing length scale of the TKE scheme + tke_Pr = tke_Pr( nun:nln+1,node), & ! Prantl number ! debugging - cvmix_int_1 = cvmix_dummy_1(:,node), & ! - cvmix_int_2 = cvmix_dummy_2(:,node), & ! - cvmix_int_3 = cvmix_dummy_3(:,node), & ! + cvmix_int_1 = cvmix_dummy_1(nun:nln+1,node), & ! + cvmix_int_2 = cvmix_dummy_2(nun:nln+1,node), & ! + cvmix_int_3 = cvmix_dummy_3(nun:nln+1,node), & ! i = 1, & j = 1, & tstep_count = tstep_count & @@ -375,10 +367,8 @@ subroutine calc_cvmix_tke(mesh) tke_Av(nln+1,node)=0.0_WP tke_Kv(nln+1,node)=0.0_WP - !!PS tke_Av(1,node)=0.0_WP - !!PS tke_Kv(1,node)=0.0_WP - tke_Av(nun,node)=0.0_WP - tke_Kv(nun,node)=0.0_WP + tke_Av(nun ,node)=0.0_WP + tke_Kv(nun ,node)=0.0_WP end do !--> do node = 1,node_size @@ -393,7 +383,6 @@ subroutine calc_cvmix_tke(mesh) Av = 0.0_WP do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) - !!PS do nz=2,nlevels(elem)-1 do nz=ulevels(elem)+1,nlevels(elem)-1 Av(nz,elem) = sum(tke_Av(nz,elnodes))/3.0_WP ! (elementwise) end do From cefcad70da82664826bba0e02e571bd78b52c749 Mon Sep 17 00:00:00 2001 From: Jan Streffing Date: Mon, 14 Jun 2021 14:46:21 +0200 Subject: [PATCH 131/909] Update ice_thermo_cpl.F90 --- src/ice_thermo_cpl.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 57085894a..5aeedd104 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -488,7 +488,7 @@ subroutine ice_surftemp(h,hsn,a2ihf,t) zcprosn=rhosno*cpsno/dt ! Specific Energy required to change temperature of 1m snow on ice [J/(sm³K)] zcpdte=zcpdt+zcprosn*hsn ! Combined Energy required to change temperature of snow + 0.05m of upper ice t=(zcpdte*t+a2ihf+zicefl)/(zcpdte+con/zsniced) ! New sea ice surf temp [K] - t=min(TFrezs,t) ! Not warmer than freezing please! + t=min(273.15,t) ! Not warmer than freezing please! end subroutine ice_surftemp subroutine ice_albedo(h,hsn,t,alb) From 7a8f803391230d6e160c34b60180f766c849a6ee Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 19 Nov 2020 12:06:13 +0100 Subject: [PATCH 132/909] add stub of netcdf reader/writer implementation --- src/io_netcdf.F90 | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 src/io_netcdf.F90 diff --git a/src/io_netcdf.F90 b/src/io_netcdf.F90 new file mode 100644 index 000000000..a02909147 --- /dev/null +++ b/src/io_netcdf.F90 @@ -0,0 +1,40 @@ +module io_netcdf_module + implicit none + private + + type netcdf_variable_handle + private + character(:), allocatable :: filepath + character(:), allocatable :: varname + integer fileid + integer varid + integer, allocatable :: varshape(:) + contains + end type + + contains + + + subroutine assert_nc(status, line) + integer, intent(in) :: status + integer, intent(in) :: line + ! EO args + include "netcdf.inc" ! old netcdf fortran interface required? + if(status /= NF_NOERR) then + print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status)) + stop 1 + endif + end subroutine + + + subroutine assert(val, line) + logical, intent(in) :: val + integer, intent(in) :: line + ! EO args + if(.NOT. val) then + print *, "error in line ",line, __FILE__ + stop 1 + end if + end subroutine + +end module From 999344566f902f3c439a2f36d5936def0582cb34 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 23 Nov 2020 19:09:32 +0100 Subject: [PATCH 133/909] switch to Fortran 90 NetCDF interface --- src/io_netcdf.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/io_netcdf.F90 b/src/io_netcdf.F90 index a02909147..663972686 100644 --- a/src/io_netcdf.F90 +++ b/src/io_netcdf.F90 @@ -16,12 +16,12 @@ module io_netcdf_module subroutine assert_nc(status, line) + use netcdf integer, intent(in) :: status integer, intent(in) :: line ! EO args - include "netcdf.inc" ! old netcdf fortran interface required? - if(status /= NF_NOERR) then - print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status)) + if(status /= nf90_noerr) then + print *, "error in line ",line, __FILE__, ' ', trim(nf90_strerror(status)) stop 1 endif end subroutine From e5b773f05dc4bec0917744ac8eacf7157d5555d8 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 24 Nov 2020 16:22:02 +0100 Subject: [PATCH 134/909] rename file --- src/{io_netcdf.F90 => io_netcdf_module.F90} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{io_netcdf.F90 => io_netcdf_module.F90} (100%) diff --git a/src/io_netcdf.F90 b/src/io_netcdf_module.F90 similarity index 100% rename from src/io_netcdf.F90 rename to src/io_netcdf_module.F90 From 921461bb48bc61d0b9c659836fc8578aa00fbd3e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 24 Nov 2020 16:32:58 +0100 Subject: [PATCH 135/909] add subroutine to initialize the netcdf handle for reading --- src/io_netcdf_module.F90 | 43 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/src/io_netcdf_module.F90 b/src/io_netcdf_module.F90 index 663972686..76a509aee 100644 --- a/src/io_netcdf_module.F90 +++ b/src/io_netcdf_module.F90 @@ -1,5 +1,6 @@ module io_netcdf_module implicit none + public netcdf_variable_handle private type netcdf_variable_handle @@ -10,11 +11,53 @@ module io_netcdf_module integer varid integer, allocatable :: varshape(:) contains + procedure, public :: initialize + procedure open_netcdf_variable end type + contains + subroutine initialize(this, filepath, varname) + use netcdf + class(netcdf_variable_handle), intent(inout) :: this + character(len=*), intent(in) :: filepath + character(len=*), intent(in) :: varname + ! EO args + integer mode + + this%filepath = filepath + this%varname = varname + + ! assert varshape is not allocated, i.e. initialize has not been called + call assert(.not. allocated(this%varshape), __LINE__) + call this%open_netcdf_variable(NF90_NOWRITE) + end subroutine + + + subroutine open_netcdf_variable(this, mode) + use netcdf + class(netcdf_variable_handle), intent(inout) :: this + integer, intent(in) :: mode + ! EO args + integer var_dim_size + integer, allocatable, dimension(:) :: dimids + integer i + + call assert_nc( nf90_open(this%filepath, mode, this%fileid) , __LINE__) + call assert_nc( nf90_inq_varid(this%fileid, this%varname, this%varid) , __LINE__) + call assert_nc( nf90_inquire_variable(this%fileid, this%varid, ndims=var_dim_size) , __LINE__) + allocate(dimids(var_dim_size)) + call assert_nc( nf90_inquire_variable(this%fileid, this%varid, dimids=dimids) , __LINE__) + + allocate(this%varshape(var_dim_size)) + do i=1, var_dim_size + call assert_nc( nf90_inquire_dimension(this%fileid, dimids(i), len=this%varshape(i)) , __LINE__) + end do + end subroutine + + subroutine assert_nc(status, line) use netcdf integer, intent(in) :: status From 877e593966e8196e5c6ae22fdd98475c827ce1ed Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 24 Nov 2020 16:39:17 +0100 Subject: [PATCH 136/909] add initial unit test for io_netcdf_module including fixture --- test/fortran/CMakeLists.txt | 2 ++ .../fixtures/io_netcdf/columnwise.salt.nc | Bin 0 -> 720 bytes test/fortran/io_netcdf_module_tests.pf | 16 ++++++++++++++++ 3 files changed, 18 insertions(+) create mode 100644 test/fortran/fixtures/io_netcdf/columnwise.salt.nc create mode 100644 test/fortran/io_netcdf_module_tests.pf diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index 47bb252b1..632d46b62 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -12,6 +12,7 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/async_threads_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_lookahead_reader_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_module.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) @@ -25,6 +26,7 @@ add_pfunit_ctest (${PROJECT_NAME} TEST_SOURCES forcing_provider_module_tests.pf # must be a path relative to CMAKE_CURRENT_SOURCE_DIR, then the generated files will be placed in CMAKE_CURRENT_BINARY_DIR (see add_pfunit_ctest.cmake) forcing_provider_netcdf_module_tests.pf forcing_lookahead_reader_module_tests.pf + io_netcdf_module_tests.pf LINK_LIBRARIES ${LIB_TARGET} ) diff --git a/test/fortran/fixtures/io_netcdf/columnwise.salt.nc b/test/fortran/fixtures/io_netcdf/columnwise.salt.nc new file mode 100644 index 0000000000000000000000000000000000000000..770e4ce1e0c96b00f1320636eafbd7289e763506 GIT binary patch literal 720 zcmZuu&1%~~5MJA|aUBOt38cN4y_B3DA>YUr7?Y7c&FuW`e52DJXAQc_0&nJ; z0$Z{Nd%K2uk@SH8cr#1v^v1?@A>DJRbn1We*z;Mu&>5c_l_th|gjoC58t=$j&(xT& z?QWVlxHER5dHD77mye?mFCTrwgWGoI{8_o!!1*N8>PDGNPrONpPxT+rCetyx zO{+xW(?#eUI&krf7DYR+I75>VFY@gv?IxK?H|?sNE@}+cx Date: Tue, 24 Nov 2020 17:10:53 +0100 Subject: [PATCH 137/909] add procedure to be able close the file explicitly --- src/io_netcdf_module.F90 | 13 ++++++++++++- test/fortran/io_netcdf_module_tests.pf | 3 ++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/io_netcdf_module.F90 b/src/io_netcdf_module.F90 index 76a509aee..b20f944c3 100644 --- a/src/io_netcdf_module.F90 +++ b/src/io_netcdf_module.F90 @@ -11,7 +11,7 @@ module io_netcdf_module integer varid integer, allocatable :: varshape(:) contains - procedure, public :: initialize + procedure, public :: initialize, finalize procedure open_netcdf_variable end type @@ -58,6 +58,17 @@ subroutine open_netcdf_variable(this, mode) end subroutine + subroutine finalize(this) + ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same fileid + use netcdf + class(netcdf_variable_handle), intent(inout) :: this + ! EO args + if(allocated(this%varshape)) then + call assert_nc( nf90_close(this%fileid) , __LINE__) + end if + end subroutine + + subroutine assert_nc(status, line) use netcdf integer, intent(in) :: status diff --git a/test/fortran/io_netcdf_module_tests.pf b/test/fortran/io_netcdf_module_tests.pf index e931edfc2..bf5a496fa 100644 --- a/test/fortran/io_netcdf_module_tests.pf +++ b/test/fortran/io_netcdf_module_tests.pf @@ -9,7 +9,8 @@ contains subroutine test_can_initialize_netcdf_variable_for_existing_file() type(netcdf_variable_handle) handle - call handle%initialize("fixtures/io_netcdf/columnwise.salt.nc", "salt") + call handle%initialize("fixtures/io_netcdf/columnwise.salt.nc", "salt") + call handle%finalize() end subroutine From f6dac2e85860d25cb66339be0b26e8c84e24dcad Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 24 Nov 2020 17:26:25 +0100 Subject: [PATCH 138/909] - add procedure to get number of timesteps for a variable - add unit test --- src/io_netcdf_module.F90 | 12 +++++++++++- test/fortran/io_netcdf_module_tests.pf | 11 +++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/io_netcdf_module.F90 b/src/io_netcdf_module.F90 index b20f944c3..375b5f5d5 100644 --- a/src/io_netcdf_module.F90 +++ b/src/io_netcdf_module.F90 @@ -11,7 +11,7 @@ module io_netcdf_module integer varid integer, allocatable :: varshape(:) contains - procedure, public :: initialize, finalize + procedure, public :: initialize, finalize, number_of_timesteps procedure open_netcdf_variable end type @@ -69,6 +69,16 @@ subroutine finalize(this) end subroutine + function number_of_timesteps(this) result(t) + class(netcdf_variable_handle), intent(in) :: this + integer t + ! EO args + call assert(size(this%varshape) > 0, __LINE__) + ! assume the last dimension for this variable is the time dimension (i.e. first in ncdump) + t = this%varshape(size(this%varshape)) + end function + + subroutine assert_nc(status, line) use netcdf integer, intent(in) :: status diff --git a/test/fortran/io_netcdf_module_tests.pf b/test/fortran/io_netcdf_module_tests.pf index bf5a496fa..7000c8934 100644 --- a/test/fortran/io_netcdf_module_tests.pf +++ b/test/fortran/io_netcdf_module_tests.pf @@ -5,6 +5,17 @@ module io_netcdf_module_tests contains + @test + subroutine test_can_read_number_of_timesteps() + type(netcdf_variable_handle) handle + + call handle%initialize("fixtures/io_netcdf/columnwise.salt.nc", "salt") + @assertEqual(2,handle%number_of_timesteps()) + + call handle%finalize() + end subroutine + + @test subroutine test_can_initialize_netcdf_variable_for_existing_file() type(netcdf_variable_handle) handle From 38a9e2838764b7dc999e0ab08e0d832208220ce9 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 24 Nov 2020 17:49:05 +0100 Subject: [PATCH 139/909] make the index for the time dimension a type member and store it upon initialization --- src/io_netcdf_module.F90 | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/io_netcdf_module.F90 b/src/io_netcdf_module.F90 index 375b5f5d5..301d698d8 100644 --- a/src/io_netcdf_module.F90 +++ b/src/io_netcdf_module.F90 @@ -9,6 +9,7 @@ module io_netcdf_module character(:), allocatable :: varname integer fileid integer varid + integer timedim_index integer, allocatable :: varshape(:) contains procedure, public :: initialize, finalize, number_of_timesteps @@ -33,6 +34,10 @@ subroutine initialize(this, filepath, varname) ! assert varshape is not allocated, i.e. initialize has not been called call assert(.not. allocated(this%varshape), __LINE__) call this%open_netcdf_variable(NF90_NOWRITE) + + ! assume the last dimension for this variable is the time dimension (i.e. first in ncdump) + call assert(size(this%varshape) > 0, __LINE__) + this%timedim_index = size(this%varshape) end subroutine @@ -73,9 +78,7 @@ function number_of_timesteps(this) result(t) class(netcdf_variable_handle), intent(in) :: this integer t ! EO args - call assert(size(this%varshape) > 0, __LINE__) - ! assume the last dimension for this variable is the time dimension (i.e. first in ncdump) - t = this%varshape(size(this%varshape)) + t = this%varshape(this%timedim_index) end function From 9f83c122e3fbe9279c103cd66dcac8ce6ac54854 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 25 Nov 2020 18:06:32 +0100 Subject: [PATCH 140/909] add 3D data reader and unit test --- src/io_netcdf_module.F90 | 31 +++++++++++++++++++++++++- test/fortran/io_netcdf_module_tests.pf | 21 +++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/src/io_netcdf_module.F90 b/src/io_netcdf_module.F90 index 301d698d8..8fae86d8b 100644 --- a/src/io_netcdf_module.F90 +++ b/src/io_netcdf_module.F90 @@ -12,7 +12,7 @@ module io_netcdf_module integer timedim_index integer, allocatable :: varshape(:) contains - procedure, public :: initialize, finalize, number_of_timesteps + procedure, public :: initialize, finalize, number_of_timesteps, read_values procedure open_netcdf_variable end type @@ -82,6 +82,35 @@ function number_of_timesteps(this) result(t) end function + subroutine read_values(this, timeindex, values) + use netcdf + class(netcdf_variable_handle), intent(in) :: this + integer, intent(in) :: timeindex + real(4), allocatable, intent(inout) :: values(:,:) ! must be inout or the allocation is screwed + ! EO args + integer, allocatable, dimension(:) :: starts, sizes + integer i + + call assert(allocated(this%varshape), __LINE__) + + ! todo: check if variable datatype is single precision (f77 real) + + allocate(starts(size(this%varshape))) + allocate(sizes(size(this%varshape))) + + call assert(0 < timeindex, __LINE__) + call assert(timeindex <= this%number_of_timesteps(), __LINE__) + + starts = 1 + sizes = this%varshape + starts(this%timedim_index) = timeindex + sizes(this%timedim_index) = 1 !timeindex_last-timeindex_first+1 + + call assert(product(sizes) == product(shape(values)), __LINE__) + call assert_nc(nf90_get_var(this%fileid, this%varid, values, start=starts, count=sizes), __LINE__) + end subroutine + + subroutine assert_nc(status, line) use netcdf integer, intent(in) :: status diff --git a/test/fortran/io_netcdf_module_tests.pf b/test/fortran/io_netcdf_module_tests.pf index 7000c8934..181b39104 100644 --- a/test/fortran/io_netcdf_module_tests.pf +++ b/test/fortran/io_netcdf_module_tests.pf @@ -5,6 +5,27 @@ module io_netcdf_module_tests contains + @test + subroutine test_can_read_data() + type(netcdf_variable_handle) handle + real(4), allocatable :: values(:,:) + + call handle%initialize("fixtures/io_netcdf/columnwise.salt.nc", "salt") + allocate(values(3,5)) + + call handle%read_values(1, values) + @assertEqual(1.001, values(1,1), tolerance=1.e-6) + @assertEqual(2.001, values(2,1), tolerance=1.e-6) + @assertEqual(3.001, values(3,1), tolerance=1.e-6) + + @assertEqual(1.005, values(1,5), tolerance=1.e-6) + @assertEqual(2.005, values(2,5), tolerance=1.e-6) + @assertEqual(3.005, values(3,5), tolerance=1.e-6) + + call handle%finalize() + end subroutine + + @test subroutine test_can_read_number_of_timesteps() type(netcdf_variable_handle) handle From a2b19dd50d53f27263ac4bf5b106956c88479b3d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 25 Nov 2020 18:38:13 +0100 Subject: [PATCH 141/909] - check for whole level data instead of node-column in unit test - rename fixture file --- ...lumnwise.salt.nc => columnwise_3d_salt.nc} | Bin test/fortran/io_netcdf_module_tests.pf | 23 ++++++++++++------ 2 files changed, 15 insertions(+), 8 deletions(-) rename test/fortran/fixtures/io_netcdf/{columnwise.salt.nc => columnwise_3d_salt.nc} (100%) diff --git a/test/fortran/fixtures/io_netcdf/columnwise.salt.nc b/test/fortran/fixtures/io_netcdf/columnwise_3d_salt.nc similarity index 100% rename from test/fortran/fixtures/io_netcdf/columnwise.salt.nc rename to test/fortran/fixtures/io_netcdf/columnwise_3d_salt.nc diff --git a/test/fortran/io_netcdf_module_tests.pf b/test/fortran/io_netcdf_module_tests.pf index 181b39104..cd84d42dd 100644 --- a/test/fortran/io_netcdf_module_tests.pf +++ b/test/fortran/io_netcdf_module_tests.pf @@ -6,21 +6,28 @@ contains @test - subroutine test_can_read_data() + subroutine test_can_read_3d_variable() type(netcdf_variable_handle) handle real(4), allocatable :: values(:,:) - call handle%initialize("fixtures/io_netcdf/columnwise.salt.nc", "salt") + call handle%initialize("fixtures/io_netcdf/columnwise_3d_salt.nc", "salt") + allocate(values(3,5)) call handle%read_values(1, values) + ! check level 1 values @assertEqual(1.001, values(1,1), tolerance=1.e-6) - @assertEqual(2.001, values(2,1), tolerance=1.e-6) - @assertEqual(3.001, values(3,1), tolerance=1.e-6) - + @assertEqual(1.002, values(1,2), tolerance=1.e-6) + @assertEqual(1.003, values(1,3), tolerance=1.e-6) + @assertEqual(1.004, values(1,4), tolerance=1.e-6) @assertEqual(1.005, values(1,5), tolerance=1.e-6) + + ! check level 2 values + @assertEqual(2.001, values(2,1), tolerance=1.e-6) + @assertEqual(2.002, values(2,2), tolerance=1.e-6) + @assertEqual(2.003, values(2,3), tolerance=1.e-6) + @assertEqual(2.004, values(2,4), tolerance=1.e-6) @assertEqual(2.005, values(2,5), tolerance=1.e-6) - @assertEqual(3.005, values(3,5), tolerance=1.e-6) call handle%finalize() end subroutine @@ -30,7 +37,7 @@ contains subroutine test_can_read_number_of_timesteps() type(netcdf_variable_handle) handle - call handle%initialize("fixtures/io_netcdf/columnwise.salt.nc", "salt") + call handle%initialize("fixtures/io_netcdf/columnwise_3d_salt.nc", "salt") @assertEqual(2,handle%number_of_timesteps()) call handle%finalize() @@ -41,7 +48,7 @@ contains subroutine test_can_initialize_netcdf_variable_for_existing_file() type(netcdf_variable_handle) handle - call handle%initialize("fixtures/io_netcdf/columnwise.salt.nc", "salt") + call handle%initialize("fixtures/io_netcdf/columnwise_3d_salt.nc", "salt") call handle%finalize() end subroutine From 4d7515fdadfd99ae5d8e967a8870660a83ab98ab Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 27 Nov 2020 21:21:30 +0100 Subject: [PATCH 142/909] go back to using the NetCDF Fortran 77 API as the Fortran 90 API seems to waste time on checking and creating the optional parameters --- src/io_netcdf_module.F90 | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/io_netcdf_module.F90 b/src/io_netcdf_module.F90 index 8fae86d8b..a835356c6 100644 --- a/src/io_netcdf_module.F90 +++ b/src/io_netcdf_module.F90 @@ -21,11 +21,11 @@ module io_netcdf_module subroutine initialize(this, filepath, varname) - use netcdf class(netcdf_variable_handle), intent(inout) :: this character(len=*), intent(in) :: filepath character(len=*), intent(in) :: varname ! EO args + include "netcdf.inc" integer mode this%filepath = filepath @@ -33,7 +33,7 @@ subroutine initialize(this, filepath, varname) ! assert varshape is not allocated, i.e. initialize has not been called call assert(.not. allocated(this%varshape), __LINE__) - call this%open_netcdf_variable(NF90_NOWRITE) + call this%open_netcdf_variable(NF_NOWRITE) ! assume the last dimension for this variable is the time dimension (i.e. first in ncdump) call assert(size(this%varshape) > 0, __LINE__) @@ -42,34 +42,34 @@ subroutine initialize(this, filepath, varname) subroutine open_netcdf_variable(this, mode) - use netcdf class(netcdf_variable_handle), intent(inout) :: this integer, intent(in) :: mode ! EO args + include "netcdf.inc" integer var_dim_size integer, allocatable, dimension(:) :: dimids integer i - call assert_nc( nf90_open(this%filepath, mode, this%fileid) , __LINE__) - call assert_nc( nf90_inq_varid(this%fileid, this%varname, this%varid) , __LINE__) - call assert_nc( nf90_inquire_variable(this%fileid, this%varid, ndims=var_dim_size) , __LINE__) + call assert_nc( nf_open(this%filepath, mode, this%fileid) , __LINE__) + call assert_nc( nf_inq_varid(this%fileid, this%varname, this%varid) , __LINE__) + call assert_nc( nf_inq_varndims(this%fileid, this%varid, var_dim_size) , __LINE__) allocate(dimids(var_dim_size)) - call assert_nc( nf90_inquire_variable(this%fileid, this%varid, dimids=dimids) , __LINE__) + call assert_nc( nf_inq_vardimid(this%fileid, this%varid, dimids) , __LINE__) allocate(this%varshape(var_dim_size)) do i=1, var_dim_size - call assert_nc( nf90_inquire_dimension(this%fileid, dimids(i), len=this%varshape(i)) , __LINE__) + call assert_nc( nf_inq_dimlen(this%fileid, dimids(i), this%varshape(i)) , __LINE__) end do end subroutine subroutine finalize(this) ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same fileid - use netcdf class(netcdf_variable_handle), intent(inout) :: this ! EO args + include "netcdf.inc" if(allocated(this%varshape)) then - call assert_nc( nf90_close(this%fileid) , __LINE__) + call assert_nc( nf_close(this%fileid) , __LINE__) end if end subroutine @@ -83,10 +83,10 @@ function number_of_timesteps(this) result(t) subroutine read_values(this, timeindex, values) - use netcdf class(netcdf_variable_handle), intent(in) :: this integer, intent(in) :: timeindex real(4), allocatable, intent(inout) :: values(:,:) ! must be inout or the allocation is screwed + include "netcdf.inc" ! EO args integer, allocatable, dimension(:) :: starts, sizes integer i @@ -107,17 +107,17 @@ subroutine read_values(this, timeindex, values) sizes(this%timedim_index) = 1 !timeindex_last-timeindex_first+1 call assert(product(sizes) == product(shape(values)), __LINE__) - call assert_nc(nf90_get_var(this%fileid, this%varid, values, start=starts, count=sizes), __LINE__) + call assert_nc(nf_get_vara_real(this%fileid, this%varid, starts, sizes, values), __LINE__) end subroutine subroutine assert_nc(status, line) - use netcdf integer, intent(in) :: status integer, intent(in) :: line ! EO args - if(status /= nf90_noerr) then - print *, "error in line ",line, __FILE__, ' ', trim(nf90_strerror(status)) + include "netcdf.inc" + if(status /= nf_noerr) then + print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status)) stop 1 endif end subroutine From 72b6984a856da63117762d5b8aa30971d3119594 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Dec 2020 13:45:43 +0100 Subject: [PATCH 143/909] - add subroutine to read any-dimension real8 data - add subroutine to read any-dimension real4 data - add interface to have a single API call - add interface for netcdf functions - add unit tests and 2D input file --- src/io_netcdf_module.F90 | 56 ++++++++++++++---- src/io_netcdf_nf_interface.F90 | 24 ++++++++ test/fortran/CMakeLists.txt | 2 +- .../fixtures/io_netcdf/columnwise_2d_sss.nc | Bin 0 -> 472 bytes test/fortran/io_netcdf_module_tests.pf | 44 +++++++++++++- 5 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 src/io_netcdf_nf_interface.F90 create mode 100644 test/fortran/fixtures/io_netcdf/columnwise_2d_sss.nc diff --git a/src/io_netcdf_module.F90 b/src/io_netcdf_module.F90 index a835356c6..30e87b2e3 100644 --- a/src/io_netcdf_module.F90 +++ b/src/io_netcdf_module.F90 @@ -12,8 +12,9 @@ module io_netcdf_module integer timedim_index integer, allocatable :: varshape(:) contains - procedure, public :: initialize, finalize, number_of_timesteps, read_values - procedure open_netcdf_variable + generic, public :: read_values => read_values_r4,read_values_r8 + procedure, public :: initialize, finalize, number_of_timesteps + procedure, private :: open_netcdf_variable, read_values_r4, read_values_r8 end type @@ -82,19 +83,53 @@ function number_of_timesteps(this) result(t) end function - subroutine read_values(this, timeindex, values) + subroutine read_values_r8(this, timeindex, values) + use io_netcdf_nf_interface + use, intrinsic :: ISO_C_BINDING class(netcdf_variable_handle), intent(in) :: this integer, intent(in) :: timeindex - real(4), allocatable, intent(inout) :: values(:,:) ! must be inout or the allocation is screwed - include "netcdf.inc" + real(8), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed ! EO args + real(8), pointer :: values_ptr(:) integer, allocatable, dimension(:) :: starts, sizes - integer i - call assert(allocated(this%varshape), __LINE__) - - ! todo: check if variable datatype is single precision (f77 real) + call read_values_preflight(this, timeindex, starts, sizes) + + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_get_vara_x(this%fileid, this%varid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + subroutine read_values_r4(this, timeindex, values) + use io_netcdf_nf_interface + use, intrinsic :: ISO_C_BINDING + class(netcdf_variable_handle), intent(in) :: this + integer, intent(in) :: timeindex + real(4), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed + ! EO args + real(4), pointer :: values_ptr(:) + integer, allocatable, dimension(:) :: starts, sizes + call read_values_preflight(this, timeindex, starts, sizes) + + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_get_vara_x(this%fileid, this%varid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + subroutine read_values_preflight(this, timeindex, starts, sizes) + class(netcdf_variable_handle), intent(in) :: this + integer, intent(in) :: timeindex + ! EO args + + integer, allocatable, dimension(:) :: starts, sizes + + call assert(allocated(this%varshape), __LINE__) + allocate(starts(size(this%varshape))) allocate(sizes(size(this%varshape))) @@ -105,9 +140,6 @@ subroutine read_values(this, timeindex, values) sizes = this%varshape starts(this%timedim_index) = timeindex sizes(this%timedim_index) = 1 !timeindex_last-timeindex_first+1 - - call assert(product(sizes) == product(shape(values)), __LINE__) - call assert_nc(nf_get_vara_real(this%fileid, this%varid, starts, sizes, values), __LINE__) end subroutine diff --git a/src/io_netcdf_nf_interface.F90 b/src/io_netcdf_nf_interface.F90 new file mode 100644 index 000000000..af791a8df --- /dev/null +++ b/src/io_netcdf_nf_interface.F90 @@ -0,0 +1,24 @@ +module io_netcdf_nf_interface +implicit none + + interface + function nf_get_vara_double(ncid, varid, start, counts, dvals) RESULT(status) + integer, intent(in) :: ncid, varid + integer, intent(in) :: start(*), counts(*) + real(8), intent(out) :: dvals(*) + integer status + end function + + function nf_get_vara_real(ncid, varid, start, counts, dvals) RESULT(status) + integer, intent(in) :: ncid, varid + integer, intent(in) :: start(*), counts(*) + real(4), intent(out) :: dvals(*) + integer status + end function + end interface + + + interface nf_get_vara_x + procedure nf_get_vara_real, nf_get_vara_double + end interface +end module diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index 632d46b62..5925e18e6 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -12,7 +12,7 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/async_threads_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_lookahead_reader_module.F90 - ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_nf_interface.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) diff --git a/test/fortran/fixtures/io_netcdf/columnwise_2d_sss.nc b/test/fortran/fixtures/io_netcdf/columnwise_2d_sss.nc new file mode 100644 index 0000000000000000000000000000000000000000..33e9f0d2a8fff79a149778b875ee84140e8f9e5f GIT binary patch literal 472 zcmZ`#%}N6?5Ki5#)V4yAJ=BZj1%$2$qL(yOJb3Wry`-C10yQa_EGS+Ao_q_zOJAgq zAij>1bPGjnc4sHq?`LK*xxH%gcu8sD9Cb**R3{+)}1Pa>{S z03|5Y?nz*0AQl>1e?{(p^Y7#Soi|%T9ZSc1EU>TDcs-0~!UA2hz&}`+J+Sb8!ot_j X_ Date: Fri, 4 Dec 2020 14:27:16 +0100 Subject: [PATCH 144/909] - add procedure to get number of dimensions - add according unit test --- src/io_netcdf_module.F90 | 10 +++++++++- test/fortran/io_netcdf_module_tests.pf | 11 +++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/io_netcdf_module.F90 b/src/io_netcdf_module.F90 index 30e87b2e3..f70accea2 100644 --- a/src/io_netcdf_module.F90 +++ b/src/io_netcdf_module.F90 @@ -12,8 +12,8 @@ module io_netcdf_module integer timedim_index integer, allocatable :: varshape(:) contains + procedure, public :: initialize, finalize, number_of_timesteps, number_of_dims generic, public :: read_values => read_values_r4,read_values_r8 - procedure, public :: initialize, finalize, number_of_timesteps procedure, private :: open_netcdf_variable, read_values_r4, read_values_r8 end type @@ -82,7 +82,15 @@ function number_of_timesteps(this) result(t) t = this%varshape(this%timedim_index) end function + + function number_of_dims(this) result(d) + class(netcdf_variable_handle), intent(in) :: this + integer d + ! EO args + d = size(this%varshape) + end function + subroutine read_values_r8(this, timeindex, values) use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING diff --git a/test/fortran/io_netcdf_module_tests.pf b/test/fortran/io_netcdf_module_tests.pf index 6b41e9d0b..275d732e7 100644 --- a/test/fortran/io_netcdf_module_tests.pf +++ b/test/fortran/io_netcdf_module_tests.pf @@ -86,6 +86,17 @@ contains end subroutine + @test + subroutine test_can_read_number_of_dimensions() + type(netcdf_variable_handle) handle + + call handle%initialize("fixtures/io_netcdf/columnwise_3d_salt.nc", "salt") + @assertEqual(3,handle%number_of_dims()) + + call handle%finalize() + end subroutine + + @test subroutine test_can_initialize_netcdf_variable_for_existing_file() type(netcdf_variable_handle) handle From 185a67e644d7dc5ab31d6e4c83fcfda96df6c425 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Dec 2020 14:34:43 +0100 Subject: [PATCH 145/909] - add procedure to get size of the dimension at a given index - add according unit test --- src/io_netcdf_module.F90 | 12 +++++++++++- test/fortran/io_netcdf_module_tests.pf | 13 +++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/io_netcdf_module.F90 b/src/io_netcdf_module.F90 index f70accea2..1fe6f7cd9 100644 --- a/src/io_netcdf_module.F90 +++ b/src/io_netcdf_module.F90 @@ -12,7 +12,7 @@ module io_netcdf_module integer timedim_index integer, allocatable :: varshape(:) contains - procedure, public :: initialize, finalize, number_of_timesteps, number_of_dims + procedure, public :: initialize, finalize, number_of_timesteps, number_of_dims, dimsize_at generic, public :: read_values => read_values_r4,read_values_r8 procedure, private :: open_netcdf_variable, read_values_r4, read_values_r8 end type @@ -91,6 +91,16 @@ function number_of_dims(this) result(d) end function + function dimsize_at(this,index) result(s) + class(netcdf_variable_handle), intent(in) :: this + integer, intent(in) :: index + integer s + ! EO args + call assert(index <= size(this%varshape), __LINE__) + s = this%varshape(index) + end function + + subroutine read_values_r8(this, timeindex, values) use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING diff --git a/test/fortran/io_netcdf_module_tests.pf b/test/fortran/io_netcdf_module_tests.pf index 275d732e7..c70d9f1c2 100644 --- a/test/fortran/io_netcdf_module_tests.pf +++ b/test/fortran/io_netcdf_module_tests.pf @@ -97,6 +97,19 @@ contains end subroutine + @test + subroutine test_can_read_dimension_size_at_index() + type(netcdf_variable_handle) handle + + call handle%initialize("fixtures/io_netcdf/columnwise_3d_salt.nc", "salt") + @assertEqual(3,handle%dimsize_at(1)) + @assertEqual(5,handle%dimsize_at(2)) + @assertEqual(2,handle%dimsize_at(3)) + + call handle%finalize() + end subroutine + + @test subroutine test_can_initialize_netcdf_variable_for_existing_file() type(netcdf_variable_handle) handle From 2e585cd3e68875cb910ff02a1276015c722a48fd Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 9 Dec 2020 17:44:08 +0100 Subject: [PATCH 146/909] rename netcdf id variables for dims and vars to be less ambiguous --- src/io_restart.F90 | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index d65cb87f0..509ded47b 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -38,7 +38,7 @@ MODULE io_RESTART type(nc_dims), allocatable, dimension(:) :: dim type(nc_vars), allocatable, dimension(:) :: var integer :: ndim=0, nvar=0 - integer :: rec, Tid, Iid + integer :: rec_dimid, time_varid, iter_varid integer :: ncid integer :: rec_count=0 integer :: error_status(250), error_count @@ -204,11 +204,11 @@ subroutine restart(istep, l_write, l_read, mesh) ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then - call ini_ocean_io(yearnew, mesh) - if (use_ice) call ini_ice_io (yearnew, mesh) + call ini_ocean_io(yearnew, mesh) + if (use_ice) call ini_ice_io (yearnew, mesh) else - call ini_ocean_io(yearold, mesh) - if (use_ice) call ini_ice_io (yearold, mesh) + call ini_ocean_io(yearold, mesh) + if (use_ice) call ini_ice_io (yearold, mesh) end if if (l_read) then @@ -286,19 +286,19 @@ subroutine create_new_file(id) end do !___Create time related dimentions__________________________________________ - id%error_status(c) = nf_def_dim(id%ncid, 'time', NF_UNLIMITED, id%rec); c=c+1 + id%error_status(c) = nf_def_dim(id%ncid, 'time', NF_UNLIMITED, id%rec_dimid); c=c+1 !___Define the time and iteration variables_________________________________ - id%error_status(c) = nf_def_var(id%ncid, 'time', NF_DOUBLE, 1, id%rec, id%tID); c=c+1 - id%error_status(c) = nf_def_var(id%ncid, 'iter', NF_INT, 1, id%rec, id%iID); c=c+1 + id%error_status(c) = nf_def_var(id%ncid, 'time', NF_DOUBLE, 1, id%rec_dimid, id%time_varid); c=c+1 + id%error_status(c) = nf_def_var(id%ncid, 'iter', NF_INT, 1, id%rec_dimid, id%iter_varid); c=c+1 att_text='time' - id%error_status(c) = nf_put_att_text(id%ncid, id%tID, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 + id%error_status(c) = nf_put_att_text(id%ncid, id%time_varid, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)') 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0' - id%error_status(c) = nf_put_att_text(id%ncid, id%tID, 'units', len_trim(att_text), trim(att_text)); c=c+1 + id%error_status(c) = nf_put_att_text(id%ncid, id%time_varid, 'units', len_trim(att_text), trim(att_text)); c=c+1 att_text='iteration_count' - id%error_status(c) = nf_put_att_text(id%ncid, id%iID, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 + id%error_status(c) = nf_put_att_text(id%ncid, id%iter_varid, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 do j=1, id%nvar !___associate physical dimension with the netcdf IDs________________________ @@ -311,7 +311,7 @@ subroutine create_new_file(id) end do !write(*,*) "j",j,kdim, ' -> ', dimid(k) end do - id%error_status(c) = nf_def_var(id%ncid, trim(id%var(j)%name), NF_DOUBLE, id%var(j)%ndim+1, (/dimid(1:n), id%rec/), id%var(j)%code); c=c+1 + id%error_status(c) = nf_def_var(id%ncid, trim(id%var(j)%name), NF_DOUBLE, id%var(j)%ndim+1, (/dimid(1:n), id%rec_dimid/), id%var(j)%code); c=c+1 !if (n==1) then ! id%error_status(c)=nf_def_var_chunking(id%ncid, id%var(j)%code, NF_CHUNKED, (/1/)); c=c+1 if (n==2) then @@ -446,8 +446,8 @@ subroutine write_restart(id, istep, mesh) !id%rec_count=id%rec_count+1 write(*,*) 'writing restart record ', id%rec_count id%error_status(c)=nf_open(id%filename, nf_write, id%ncid); c=c+1 - id%error_status(c)=nf_put_vara_double(id%ncid, id%tID, id%rec_count, 1, ctime, 1); c=c+1 - id%error_status(c)=nf_put_vara_int(id%ncid, id%iID, id%rec_count, 1, globalstep+istep, 1); c=c+1 + id%error_status(c)=nf_put_vara_double(id%ncid, id%time_varid, id%rec_count, 1, ctime, 1); c=c+1 + id%error_status(c)=nf_put_vara_int(id%ncid, id%iter_varid, id%rec_count, 1, globalstep+istep, 1); c=c+1 end if call was_error(id); c=1 @@ -538,8 +538,8 @@ subroutine read_restart(id, mesh, arg) if (file_exist) then write(*,*) ' reading restart file: ', trim(id%filename) id%error_status(c)=nf_open(id%filename, nf_nowrite, id%ncid); c=c+1 - id%error_status(c)=nf_get_vara_int(id%ncid, id%iID, id%rec_count, 1, globalstep, 1); c=c+1 - id%error_status(c)=nf_get_vara_double(id%ncid, id%tID, id%rec_count, 1, rtime, 1); c=c+1 + id%error_status(c)=nf_get_vara_int(id%ncid, id%iter_varid, id%rec_count, 1, globalstep, 1); c=c+1 + id%error_status(c)=nf_get_vara_double(id%ncid, id%time_varid, id%rec_count, 1, rtime, 1); c=c+1 else write(*,*) print *, achar(27)//'[33m' @@ -650,15 +650,15 @@ subroutine assoc_ids(id) id%error_status(c) = nf_inq_dimid(id%ncid, id%dim(j)%name, id%dim(j)%code); c=c+1 end do !___Associate time related dimentions_______________________________________ - id%error_status(c) = nf_inq_dimid (id%ncid, 'time', id%rec); c=c+1 - id%error_status(c) = nf_inq_dimlen(id%ncid, id%rec, id%rec_count); c=c+1 + id%error_status(c) = nf_inq_dimid (id%ncid, 'time', id%rec_dimid); c=c+1 + id%error_status(c) = nf_inq_dimlen(id%ncid, id%rec_dimid, id%rec_count); c=c+1 !___Associate the time and iteration variables______________________________ - id%error_status(c) = nf_inq_varid(id%ncid, 'time', id%tID); c=c+1 - id%error_status(c) = nf_inq_varid(id%ncid, 'iter', id%iID); c=c+1 + id%error_status(c) = nf_inq_varid(id%ncid, 'time', id%time_varid); c=c+1 + id%error_status(c) = nf_inq_varid(id%ncid, 'iter', id%iter_varid); c=c+1 !___if the time rtime at the rec_count does not equal ctime we look for the closest record with the ! timestamp less than ctime do k=id%rec_count, 1, -1 - id%error_status(c)=nf_get_vara_double(id%ncid, id%tID, k, 1, rtime, 1); + id%error_status(c)=nf_get_vara_double(id%ncid, id%time_varid, k, 1, rtime, 1); if (ctime > rtime) then id%rec_count=k+1 exit ! a proper rec_count detected, ready for writing restart, exit the loop From 27e3bbe81f633d06362b02766dfc83bf542768b6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 9 Dec 2020 17:48:32 +0100 Subject: [PATCH 147/909] rename variables for ocean and ice files so it is clear they are variables for files --- src/io_restart.F90 | 92 +++++++++++++++++++++++----------------------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 509ded47b..fab198a3f 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -53,12 +53,12 @@ MODULE io_RESTART ! !-------------------------------------------------------------------------------------------- ! id will keep the IDs of all required dimentions and variables - type(nc_file), save :: oid, iid + type(nc_file), save :: ocean_file, ice_file integer, save :: globalstep=0 real(kind=WP) :: ctime !current time in seconds from the beginning of the year PRIVATE - PUBLIC :: restart, oid, iid + PUBLIC :: restart, ocean_file, ice_file ! !-------------------------------------------------------------------------------------------- ! generic interface was required to associate variables of unknown rank with the pointers of the same rank @@ -72,7 +72,7 @@ MODULE io_RESTART contains ! !-------------------------------------------------------------------------------------------- -! ini_ocean_io initializes oid datatype which contains information of all variables need to be written into +! ini_ocean_io initializes ocean_file datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! subroutine ini_ocean_io(year, mesh) implicit none @@ -90,38 +90,38 @@ subroutine ini_ocean_io(year, mesh) write(cyear,'(i4)') year ! create an ocean restart file; serial output implemented so far - oid%filename=trim(ResultPath)//trim(runid)//'.'//cyear//'.oce.restart.nc' - if (oid%is_in_use) return - oid%is_in_use=.true. - call def_dim(oid, 'node', nod2d) - call def_dim(oid, 'elem', elem2d) - call def_dim(oid, 'nz_1', nl-1) - call def_dim(oid, 'nz', nl) + ocean_file%filename=trim(ResultPath)//trim(runid)//'.'//cyear//'.oce.restart.nc' + if (ocean_file%is_in_use) return + ocean_file%is_in_use=.true. + call def_dim(ocean_file, 'node', nod2d) + call def_dim(ocean_file, 'elem', elem2d) + call def_dim(ocean_file, 'nz_1', nl-1) + call def_dim(ocean_file, 'nz', nl) !=========================================================================== !===================== Definition part ===================================== !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(oid, 'ssh', (/nod2D/), 'sea surface elevation', 'm', eta_n); + call def_variable(ocean_file, 'ssh', (/nod2D/), 'sea surface elevation', 'm', eta_n); !___ALE related fields______________________________________________________ - call def_variable(oid, 'hbar', (/nod2D/), 'ALE surface elevation', 'm', hbar); -!!PS call def_variable(oid, 'ssh_rhs', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs); - call def_variable(oid, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs_old); - call def_variable(oid, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); + call def_variable(ocean_file, 'hbar', (/nod2D/), 'ALE surface elevation', 'm', hbar); +!!PS call def_variable(ocean_file, 'ssh_rhs', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs); + call def_variable(ocean_file, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs_old); + call def_variable(ocean_file, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); !___Define the netCDF variables for 3D fields_______________________________ - call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', UV(1,:,:)); - call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', UV(2,:,:)); - call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); - call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); + call def_variable(ocean_file, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', UV(1,:,:)); + call def_variable(ocean_file, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', UV(2,:,:)); + call def_variable(ocean_file, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); + call def_variable(ocean_file, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); !___Save restart variables for TKE and IDEMIX_________________________________ if (trim(mix_scheme)=='cvmix_TKE' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then - call def_variable(oid, 'tke', (/nl, nod2d/), 'Turbulent Kinetic Energy', 'm2/s2', tke(:,:)); + call def_variable(ocean_file, 'tke', (/nl, nod2d/), 'Turbulent Kinetic Energy', 'm2/s2', tke(:,:)); endif if (trim(mix_scheme)=='cvmix_IDEMIX' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then - call def_variable(oid, 'iwe', (/nl, nod2d/), 'Internal Wave eneryy', 'm2/s2', tke(:,:)); + call def_variable(ocean_file, 'iwe', (/nl, nod2d/), 'Internal Wave eneryy', 'm2/s2', tke(:,:)); endif do j=1,num_tracers @@ -139,17 +139,17 @@ subroutine ini_ocean_io(year, mesh) write(longname,'(A15,i1)') 'passive tracer ', j units='none' END SELECT - call def_variable(oid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tr_arr(:,:,j)); + call def_variable(ocean_file, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tr_arr(:,:,j)); longname=trim(longname)//', Adams–Bashforth' - call def_variable(oid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tr_arr_old(:,:,j)); + call def_variable(ocean_file, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tr_arr_old(:,:,j)); end do - call def_variable(oid, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel); - call def_variable(oid, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e); - call def_variable(oid, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_i); + call def_variable(ocean_file, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel); + call def_variable(ocean_file, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e); + call def_variable(ocean_file, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_i); end subroutine ini_ocean_io ! !-------------------------------------------------------------------------------------------- -! ini_ice_io initializes iid datatype which contains information of all variables need to be written into +! ini_ice_io initializes ice_file datatype which contains information of all variables need to be written into ! the ice restart file. This is the only place need to be modified if a new variable is added! subroutine ini_ice_io(year, mesh) implicit none @@ -167,23 +167,23 @@ subroutine ini_ice_io(year, mesh) write(cyear,'(i4)') year ! create an ocean restart file; serial output implemented so far - iid%filename=trim(ResultPath)//trim(runid)//'.'//cyear//'.ice.restart.nc' - if (iid%is_in_use) return - iid%is_in_use=.true. - call def_dim(iid, 'node', nod2d) + ice_file%filename=trim(ResultPath)//trim(runid)//'.'//cyear//'.ice.restart.nc' + if (ice_file%is_in_use) return + ice_file%is_in_use=.true. + call def_dim(ice_file, 'node', nod2d) !=========================================================================== !===================== Definition part ===================================== !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ - call def_variable(iid, 'area', (/nod2D/), 'ice concentration [0 to 1]', '%', a_ice); - call def_variable(iid, 'hice', (/nod2D/), 'effective ice thickness', 'm', m_ice); - call def_variable(iid, 'hsnow', (/nod2D/), 'effective snow thickness', 'm', m_snow); - call def_variable(iid, 'uice', (/nod2D/), 'zonal velocity', 'm/s', u_ice); - call def_variable(iid, 'vice', (/nod2D/), 'meridional velocity', 'm', v_ice); + call def_variable(ice_file, 'area', (/nod2D/), 'ice concentration [0 to 1]', '%', a_ice); + call def_variable(ice_file, 'hice', (/nod2D/), 'effective ice thickness', 'm', m_ice); + call def_variable(ice_file, 'hsnow', (/nod2D/), 'effective snow thickness', 'm', m_snow); + call def_variable(ice_file, 'uice', (/nod2D/), 'zonal velocity', 'm/s', u_ice); + call def_variable(ice_file, 'vice', (/nod2D/), 'meridional velocity', 'm', v_ice); #if defined (__oifs) - call def_variable(iid, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice_alb); - call def_variable(iid, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice_temp); + call def_variable(ice_file, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice_alb); + call def_variable(ice_file, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice_temp); #endif /* (__oifs) */ end subroutine ini_ice_io @@ -212,11 +212,11 @@ subroutine restart(istep, l_write, l_read, mesh) end if if (l_read) then - call assoc_ids(oid); call was_error(oid) - call read_restart(oid, mesh); call was_error(oid) + call assoc_ids(ocean_file); call was_error(ocean_file) + call read_restart(ocean_file, mesh); call was_error(ocean_file) if (use_ice) then - call assoc_ids(iid); call was_error(iid) - call read_restart(iid, mesh); call was_error(iid) + call assoc_ids(ice_file); call was_error(ice_file) + call read_restart(ice_file, mesh); call was_error(ice_file) end if end if @@ -248,11 +248,11 @@ subroutine restart(istep, l_write, l_read, mesh) ! write restart if(mype==0) write(*,*)'Do output (netCDF, restart) ...' - call assoc_ids(oid); call was_error(oid) - call write_restart(oid, istep, mesh); call was_error(oid) + call assoc_ids(ocean_file); call was_error(ocean_file) + call write_restart(ocean_file, istep, mesh); call was_error(ocean_file) if (use_ice) then - call assoc_ids(iid); call was_error(iid) - call write_restart(iid, istep, mesh); call was_error(iid) + call assoc_ids(ice_file); call was_error(ice_file) + call write_restart(ice_file, istep, mesh); call was_error(ice_file) end if ! actualize clock file to latest restart point From c9295c1d3109e407a3346c9a452be5942fb79361 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 9 Dec 2020 17:49:49 +0100 Subject: [PATCH 148/909] remove unused derived type --- src/io_restart.F90 | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index fab198a3f..84fdde46d 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -44,15 +44,8 @@ MODULE io_RESTART integer :: error_status(250), error_count logical :: is_in_use=.false. end type nc_file -! -!-------------------------------------------------------------------------------------------- -! - type type_id - integer :: nd, el, nz, nz1, T, rec, iter - end type type_id -! -!-------------------------------------------------------------------------------------------- -! id will keep the IDs of all required dimentions and variables + + type(nc_file), save :: ocean_file, ice_file integer, save :: globalstep=0 real(kind=WP) :: ctime !current time in seconds from the beginning of the year From c7163e0481533caeb47d6ba5f2fc625fd4dd01c7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 9 Dec 2020 17:58:41 +0100 Subject: [PATCH 149/909] rename file variable so it is clear it is a file (not a netcdf id) --- src/io_restart.F90 | 52 +++++++++++++++++++++++----------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 84fdde46d..de6420057 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -616,65 +616,65 @@ end subroutine read_restart ! !-------------------------------------------------------------------------------------------- ! -subroutine assoc_ids(id) +subroutine assoc_ids(file) implicit none - type(nc_file), intent(inout) :: id + type(nc_file), intent(inout) :: file character(500) :: longname integer :: c, j, k real(kind=WP) :: rtime !timestamp of the record ! Serial output implemented so far if (mype/=0) return c=1 - id%error_status=0 + file%error_status=0 ! open existing netcdf file - write(*,*) 'associating restart file ', trim(id%filename) + write(*,*) 'associating restart file ', trim(file%filename) - id%error_status(c) = nf_open(id%filename, nf_nowrite, id%ncid) + file%error_status(c) = nf_open(file%filename, nf_nowrite, file%ncid) !if the file does not exist it will be created! - if (id%error_status(c) .ne. nf_noerr) then - call create_new_file(id) ! error status counter will be reset - c=id%error_count+1 - id%error_status(c) = nf_open(id%filename, nf_nowrite, id%ncid); c=c+1 + if (file%error_status(c) .ne. nf_noerr) then + call create_new_file(file) ! error status counter will be reset + c=file%error_count+1 + file%error_status(c) = nf_open(file%filename, nf_nowrite, file%ncid); c=c+1 end if - do j=1, id%ndim + do j=1, file%ndim !___Associate mesh related dimentions_______________________________________ - id%error_status(c) = nf_inq_dimid(id%ncid, id%dim(j)%name, id%dim(j)%code); c=c+1 + file%error_status(c) = nf_inq_dimid(file%ncid, file%dim(j)%name, file%dim(j)%code); c=c+1 end do !___Associate time related dimentions_______________________________________ - id%error_status(c) = nf_inq_dimid (id%ncid, 'time', id%rec_dimid); c=c+1 - id%error_status(c) = nf_inq_dimlen(id%ncid, id%rec_dimid, id%rec_count); c=c+1 + file%error_status(c) = nf_inq_dimid (file%ncid, 'time', file%rec_dimid); c=c+1 + file%error_status(c) = nf_inq_dimlen(file%ncid, file%rec_dimid, file%rec_count); c=c+1 !___Associate the time and iteration variables______________________________ - id%error_status(c) = nf_inq_varid(id%ncid, 'time', id%time_varid); c=c+1 - id%error_status(c) = nf_inq_varid(id%ncid, 'iter', id%iter_varid); c=c+1 + file%error_status(c) = nf_inq_varid(file%ncid, 'time', file%time_varid); c=c+1 + file%error_status(c) = nf_inq_varid(file%ncid, 'iter', file%iter_varid); c=c+1 !___if the time rtime at the rec_count does not equal ctime we look for the closest record with the ! timestamp less than ctime - do k=id%rec_count, 1, -1 - id%error_status(c)=nf_get_vara_double(id%ncid, id%time_varid, k, 1, rtime, 1); + do k=file%rec_count, 1, -1 + file%error_status(c)=nf_get_vara_double(file%ncid, file%time_varid, k, 1, rtime, 1); if (ctime > rtime) then - id%rec_count=k+1 + file%rec_count=k+1 exit ! a proper rec_count detected, ready for writing restart, exit the loop elseif (ctime == rtime) then - id%rec_count=k + file%rec_count=k exit ! a proper rec_count detected, ready for reading restart, exit the loop end if if (k==1) then if (mype==0) write(*,*) 'WARNING: all dates in restart file are after the current date' if (mype==0) write(*,*) 'reading restart will not be possible !' if (mype==0) write(*,*) 'the model attempted to start with the time stamp = ', int(ctime) - id%error_status(c)=-310; + file%error_status(c)=-310; end if end do c=c+1 ! check will be made only for the last nf_get_vara_double - id%rec_count=max(id%rec_count, 1) + file%rec_count=max(file%rec_count, 1) !___Associate physical variables____________________________________________ - do j=1, id%nvar - id%error_status(c) = nf_inq_varid(id%ncid, id%var(j)%name, id%var(j)%code); c=c+1 + do j=1, file%nvar + file%error_status(c) = nf_inq_varid(file%ncid, file%var(j)%name, file%var(j)%code); c=c+1 end do - id%error_status(c)=nf_close(id%ncid); c=c+1 - id%error_count=c-1 - write(*,*) 'current restart counter = ', id%rec_count + file%error_status(c)=nf_close(file%ncid); c=c+1 + file%error_count=c-1 + write(*,*) 'current restart counter = ', file%rec_count end subroutine assoc_ids ! !-------------------------------------------------------------------------------------------- From 59d1a2507cc08be6a5eb0bbc55936aa75bb0e5b2 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 9 Dec 2020 18:00:49 +0100 Subject: [PATCH 150/909] rename file variable so it is clear it is a file (not a netcdf id) --- src/io_restart.F90 | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index de6420057..0f0d3ff32 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -317,38 +317,36 @@ subroutine create_new_file(id) id%error_status(c)=nf_close(id%ncid); c=c+1 id%error_count=c-1 end subroutine create_new_file -! -!-------------------------------------------------------------------------------------------- -! -subroutine def_dim(id, name, ndim) + + +subroutine def_dim(file, name, ndim) implicit none - type(nc_file), intent(inout) :: id + type(nc_file), intent(inout) :: file character(len=*), intent(in) :: name integer, intent(in) :: ndim type(nc_dims), allocatable, dimension(:) :: temp - if (id%ndim > 0) then + if (file%ndim > 0) then ! create temporal dimension - allocate(temp(id%ndim)); temp=id%dim + allocate(temp(file%ndim)); temp=file%dim ! deallocate the input data array - deallocate(id%dim) + deallocate(file%dim) ! then reallocate - id%ndim=id%ndim+1 - allocate(id%dim(id%ndim)) + file%ndim=file%ndim+1 + allocate(file%dim(file%ndim)) ! restore the original data - id%dim(1:id%ndim-1)=temp + file%dim(1:file%ndim-1)=temp deallocate(temp) else ! first dimension in a file - id%ndim=1 - allocate(id%dim(id%ndim)) + file%ndim=1 + allocate(file%dim(file%ndim)) end if - id%dim(id%ndim)%name=trim(name) - id%dim(id%ndim)%size=ndim + file%dim(file%ndim)%name=trim(name) + file%dim(file%ndim)%size=ndim end subroutine def_dim -! -!-------------------------------------------------------------------------------------------- -! + + subroutine def_variable_1d(id, name, dims, longname, units, data) implicit none type(nc_file), intent(inout) :: id From 7c47e785cd149c15d3a8dcd76c997ea8d4fe835b Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 9 Dec 2020 18:02:05 +0100 Subject: [PATCH 151/909] rename file variable so it is clear it is a file (not a netcdf id) --- src/io_restart.F90 | 74 ++++++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 38 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 0f0d3ff32..9bebaf5c0 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -347,9 +347,9 @@ subroutine def_dim(file, name, ndim) end subroutine def_dim -subroutine def_variable_1d(id, name, dims, longname, units, data) +subroutine def_variable_1d(file, name, dims, longname, units, data) implicit none - type(nc_file), intent(inout) :: id + type(nc_file), intent(inout) :: file character(len=*), intent(in) :: name integer, intent(in) :: dims(1) character(len=*), intent(in), optional :: units, longname @@ -357,35 +357,34 @@ subroutine def_variable_1d(id, name, dims, longname, units, data) integer :: c type(nc_vars), allocatable, dimension(:) :: temp - if (id%nvar > 0) then + if (file%nvar > 0) then ! create temporal dimension - allocate(temp(id%nvar)); temp=id%var + allocate(temp(file%nvar)); temp=file%var ! deallocate the input data array - deallocate(id%var) + deallocate(file%var) ! then reallocate - id%nvar=id%nvar+1 - allocate(id%var(id%nvar)) + file%nvar=file%nvar+1 + allocate(file%var(file%nvar)) ! restore the original data - id%var(1:id%nvar-1)=temp + file%var(1:file%nvar-1)=temp deallocate(temp) else ! first dimension in a file - id%nvar=1 - allocate(id%var(id%nvar)) + file%nvar=1 + allocate(file%var(file%nvar)) end if - id%var(id%nvar)%name=trim(name) - id%var(id%nvar)%longname=trim(longname) - id%var(id%nvar)%units=trim(units) - id%var(id%nvar)%ndim=1 - id%var(id%nvar)%dims(1)=dims(1) - id%var(id%nvar)%pt1=>data + file%var(file%nvar)%name=trim(name) + file%var(file%nvar)%longname=trim(longname) + file%var(file%nvar)%units=trim(units) + file%var(file%nvar)%ndim=1 + file%var(file%nvar)%dims(1)=dims(1) + file%var(file%nvar)%pt1=>data end subroutine def_variable_1d -! -!-------------------------------------------------------------------------------------------- -! -subroutine def_variable_2d(id, name, dims, longname, units, data) + + +subroutine def_variable_2d(file, name, dims, longname, units, data) implicit none - type(nc_file), intent(inout) :: id + type(nc_file), intent(inout) :: file character(len=*), intent(in) :: name integer, intent(in) :: dims(2) character(len=*), intent(in), optional :: units, longname @@ -393,32 +392,31 @@ subroutine def_variable_2d(id, name, dims, longname, units, data) integer :: c type(nc_vars), allocatable, dimension(:) :: temp - if (id%nvar > 0) then + if (file%nvar > 0) then ! create temporal dimension - allocate(temp(id%nvar)); temp=id%var + allocate(temp(file%nvar)); temp=file%var ! deallocate the input data array - deallocate(id%var) + deallocate(file%var) ! then reallocate - id%nvar=id%nvar+1 - allocate(id%var(id%nvar)) + file%nvar=file%nvar+1 + allocate(file%var(file%nvar)) ! restore the original data - id%var(1:id%nvar-1)=temp + file%var(1:file%nvar-1)=temp deallocate(temp) else ! first dimension in a file - id%nvar=1 - allocate(id%var(id%nvar)) + file%nvar=1 + allocate(file%var(file%nvar)) end if - id%var(id%nvar)%name=trim(name) - id%var(id%nvar)%longname=trim(longname) - id%var(id%nvar)%units=trim(units) - id%var(id%nvar)%ndim=2 - id%var(id%nvar)%dims(1:2)=dims - id%var(id%nvar)%pt2=>data + file%var(file%nvar)%name=trim(name) + file%var(file%nvar)%longname=trim(longname) + file%var(file%nvar)%units=trim(units) + file%var(file%nvar)%ndim=2 + file%var(file%nvar)%dims(1:2)=dims + file%var(file%nvar)%pt2=>data end subroutine def_variable_2d -! -!-------------------------------------------------------------------------------------------- -! + + subroutine write_restart(id, istep, mesh) implicit none type(nc_file), intent(inout) :: id From 7e17f340eee9c41047e19332252eda99c96e7a83 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 9 Dec 2020 18:03:57 +0100 Subject: [PATCH 152/909] rename file variable so it is clear it is a file (not a netcdf id) --- src/io_restart.F90 | 55 +++++++++++++++++++++++----------------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 9bebaf5c0..0d0bf0496 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -255,67 +255,66 @@ subroutine restart(istep, l_write, l_read, mesh) end if end subroutine restart -! -!-------------------------------------------------------------------------------------------- -! -subroutine create_new_file(id) + + +subroutine create_new_file(file) implicit none - type(nc_file), intent(inout) :: id + type(nc_file), intent(inout) :: file integer :: c, j integer :: n, k, l, kdim, dimid(4) character(2000) :: att_text ! Serial output implemented so far if (mype/=0) return c=1 - id%error_status=0 + file%error_status=0 ! create an ocean output file - write(*,*) 'initializing restart file ', trim(id%filename) - id%error_status(c) = nf_create(id%filename, IOR(NF_NOCLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), id%ncid); c=c+1 + write(*,*) 'initializing restart file ', trim(file%filename) + file%error_status(c) = nf_create(file%filename, IOR(NF_NOCLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), file%ncid); c=c+1 - do j=1, id%ndim + do j=1, file%ndim !___Create mesh related dimentions__________________________________________ - id%error_status(c) = nf_def_dim(id%ncid, id%dim(j)%name, id%dim(j)%size, id%dim(j)%code ); c=c+1 + file%error_status(c) = nf_def_dim(file%ncid, file%dim(j)%name, file%dim(j)%size, file%dim(j)%code ); c=c+1 end do !___Create time related dimentions__________________________________________ - id%error_status(c) = nf_def_dim(id%ncid, 'time', NF_UNLIMITED, id%rec_dimid); c=c+1 + file%error_status(c) = nf_def_dim(file%ncid, 'time', NF_UNLIMITED, file%rec_dimid); c=c+1 !___Define the time and iteration variables_________________________________ - id%error_status(c) = nf_def_var(id%ncid, 'time', NF_DOUBLE, 1, id%rec_dimid, id%time_varid); c=c+1 - id%error_status(c) = nf_def_var(id%ncid, 'iter', NF_INT, 1, id%rec_dimid, id%iter_varid); c=c+1 + file%error_status(c) = nf_def_var(file%ncid, 'time', NF_DOUBLE, 1, file%rec_dimid, file%time_varid); c=c+1 + file%error_status(c) = nf_def_var(file%ncid, 'iter', NF_INT, 1, file%rec_dimid, file%iter_varid); c=c+1 att_text='time' - id%error_status(c) = nf_put_att_text(id%ncid, id%time_varid, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 + file%error_status(c) = nf_put_att_text(file%ncid, file%time_varid, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)') 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0' - id%error_status(c) = nf_put_att_text(id%ncid, id%time_varid, 'units', len_trim(att_text), trim(att_text)); c=c+1 + file%error_status(c) = nf_put_att_text(file%ncid, file%time_varid, 'units', len_trim(att_text), trim(att_text)); c=c+1 att_text='iteration_count' - id%error_status(c) = nf_put_att_text(id%ncid, id%iter_varid, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 + file%error_status(c) = nf_put_att_text(file%ncid, file%iter_varid, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 - do j=1, id%nvar + do j=1, file%nvar !___associate physical dimension with the netcdf IDs________________________ - n=id%var(j)%ndim ! shape size of the variable (exluding time) + n=file%var(j)%ndim ! shape size of the variable (exluding time) do k=1, n !k_th dimension of the variable - kdim=id%var(j)%dims(k) - do l=1, id%ndim ! list all defined dimensions - if (kdim==id%dim(l)%size) dimid(k)=id%dim(l)%code + kdim=file%var(j)%dims(k) + do l=1, file%ndim ! list all defined dimensions + if (kdim==file%dim(l)%size) dimid(k)=file%dim(l)%code end do !write(*,*) "j",j,kdim, ' -> ', dimid(k) end do - id%error_status(c) = nf_def_var(id%ncid, trim(id%var(j)%name), NF_DOUBLE, id%var(j)%ndim+1, (/dimid(1:n), id%rec_dimid/), id%var(j)%code); c=c+1 + file%error_status(c) = nf_def_var(file%ncid, trim(file%var(j)%name), NF_DOUBLE, file%var(j)%ndim+1, (/dimid(1:n), file%rec_dimid/), file%var(j)%code); c=c+1 !if (n==1) then - ! id%error_status(c)=nf_def_var_chunking(id%ncid, id%var(j)%code, NF_CHUNKED, (/1/)); c=c+1 + ! file%error_status(c)=nf_def_var_chunking(file%ncid, file%var(j)%code, NF_CHUNKED, (/1/)); c=c+1 if (n==2) then - id%error_status(c)=nf_def_var_chunking(id%ncid, id%var(j)%code, NF_CHUNKED, (/1, id%dim(1)%size/)); ! c=c+1 + file%error_status(c)=nf_def_var_chunking(file%ncid, file%var(j)%code, NF_CHUNKED, (/1, file%dim(1)%size/)); ! c=c+1 end if - id%error_status(c)=nf_put_att_text(id%ncid, id%var(j)%code, 'description', len_trim(id%var(j)%longname), id%var(j)%longname); c=c+1 - id%error_status(c)=nf_put_att_text(id%ncid, id%var(j)%code, 'units', len_trim(id%var(j)%units), id%var(j)%units); c=c+1 + file%error_status(c)=nf_put_att_text(file%ncid, file%var(j)%code, 'description', len_trim(file%var(j)%longname), file%var(j)%longname); c=c+1 + file%error_status(c)=nf_put_att_text(file%ncid, file%var(j)%code, 'units', len_trim(file%var(j)%units), file%var(j)%units); c=c+1 end do - id%error_status(c)=nf_close(id%ncid); c=c+1 - id%error_count=c-1 + file%error_status(c)=nf_close(file%ncid); c=c+1 + file%error_count=c-1 end subroutine create_new_file From 0dd6ca4e7d7c5fe0c7ed9163bcd508d98f0f96b0 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 9 Dec 2020 18:06:57 +0100 Subject: [PATCH 153/909] rename file variable so it is clear it is a file (not a netcdf id) --- src/io_restart.F90 | 57 +++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 0d0bf0496..500d5447e 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -416,9 +416,9 @@ subroutine def_variable_2d(file, name, dims, longname, units, data) end subroutine def_variable_2d -subroutine write_restart(id, istep, mesh) +subroutine write_restart(file, istep, mesh) implicit none - type(nc_file), intent(inout) :: id + type(nc_file), intent(inout) :: file integer, intent(in) :: istep type(t_mesh), intent(in) , target :: mesh real(kind=WP), allocatable :: aux(:), laux(:) @@ -431,27 +431,27 @@ subroutine write_restart(id, istep, mesh) ! Serial output implemented so far if (mype==0) then c=1 - !id%rec_count=id%rec_count+1 - write(*,*) 'writing restart record ', id%rec_count - id%error_status(c)=nf_open(id%filename, nf_write, id%ncid); c=c+1 - id%error_status(c)=nf_put_vara_double(id%ncid, id%time_varid, id%rec_count, 1, ctime, 1); c=c+1 - id%error_status(c)=nf_put_vara_int(id%ncid, id%iter_varid, id%rec_count, 1, globalstep+istep, 1); c=c+1 + !file%rec_count=file%rec_count+1 + write(*,*) 'writing restart record ', file%rec_count + file%error_status(c)=nf_open(file%filename, nf_write, file%ncid); c=c+1 + file%error_status(c)=nf_put_vara_double(file%ncid, file%time_varid, file%rec_count, 1, ctime, 1); c=c+1 + file%error_status(c)=nf_put_vara_int(file%ncid, file%iter_varid, file%rec_count, 1, globalstep+istep, 1); c=c+1 end if - call was_error(id); c=1 + call was_error(file); c=1 - do i=1, id%nvar - shape=id%var(i)%ndim + do i=1, file%nvar + shape=file%var(i)%ndim !_______writing 2D fields________________________________________________ if (shape==1) then - size1=id%var(i)%dims(1) + size1=file%var(i)%dims(1) if (mype==0) allocate(aux(size1)) t0=MPI_Wtime() - if (size1==nod2D) call gather_nod (id%var(i)%pt1, aux) - if (size1==elem2D) call gather_elem(id%var(i)%pt1, aux) + if (size1==nod2D) call gather_nod (file%var(i)%pt1, aux) + if (size1==elem2D) call gather_elem(file%var(i)%pt1, aux) t1=MPI_Wtime() if (mype==0) then - id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/1, id%rec_count/), (/size1, 1/), aux, 1); c=c+1 + file%error_status(c)=nf_put_vara_double(file%ncid, file%var(i)%code, (/1, file%rec_count/), (/size1, 1/), aux, 1); c=c+1 end if t2=MPI_Wtime() #ifdef DEBUG @@ -462,21 +462,21 @@ subroutine write_restart(id, istep, mesh) if (mype==0) deallocate(aux) !_______writing 3D fields________________________________________________ elseif (shape==2) then - size1=id%var(i)%dims(1) - size2=id%var(i)%dims(2) + size1=file%var(i)%dims(1) + size2=file%var(i)%dims(2) if (mype==0) allocate(aux (size2)) if (size2==nod2D) allocate(laux(myDim_nod2D +eDim_nod2D )) if (size2==elem2D) allocate(laux(myDim_elem2D+eDim_elem2D)) do lev=1, size1 - laux=id%var(i)%pt2(lev,:) -! if (size1==nod2D .or. size2==nod2D) call gather_nod (id%var(i)%pt2(lev,:), aux) -! if (size1==elem2D .or. size2==elem2D) call gather_elem(id%var(i)%pt2(lev,:), aux) + laux=file%var(i)%pt2(lev,:) +! if (size1==nod2D .or. size2==nod2D) call gather_nod (file%var(i)%pt2(lev,:), aux) +! if (size1==elem2D .or. size2==elem2D) call gather_elem(file%var(i)%pt2(lev,:), aux) t0=MPI_Wtime() if (size1==nod2D .or. size2==nod2D) call gather_nod (laux, aux) if (size1==elem2D .or. size2==elem2D) call gather_elem(laux, aux) t1=MPI_Wtime() if (mype==0) then - id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/lev, 1, id%rec_count/), (/1, size2, 1/), aux, 1); c=c+1 + file%error_status(c)=nf_put_vara_double(file%ncid, file%var(i)%code, (/lev, 1, file%rec_count/), (/1, size2, 1/), aux, 1); c=c+1 end if t2=MPI_Wtime() #ifdef DEBUG @@ -492,18 +492,17 @@ subroutine write_restart(id, istep, mesh) call par_ex stop end if - call was_error(id); c=1 + call was_error(file); c=1 end do - if (mype==0) id%error_count=c-1 - call was_error(id) - if (mype==0) id%error_status(1)=nf_close(id%ncid); - id%error_count=1 - call was_error(id) + if (mype==0) file%error_count=c-1 + call was_error(file) + if (mype==0) file%error_status(1)=nf_close(file%ncid); + file%error_count=1 + call was_error(file) end subroutine write_restart -! -!-------------------------------------------------------------------------------------------- -! + + subroutine read_restart(id, mesh, arg) implicit none type(nc_file), intent(inout) :: id From 1c0b830c2ff99a543b89ef581d9f20745dcd8fed Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 9 Dec 2020 18:07:56 +0100 Subject: [PATCH 154/909] rename file variable so it is clear it is a file (not a netcdf id) --- src/io_restart.F90 | 67 +++++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 500d5447e..5db4ab024 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -503,9 +503,9 @@ subroutine write_restart(file, istep, mesh) end subroutine write_restart -subroutine read_restart(id, mesh, arg) +subroutine read_restart(file, mesh, arg) implicit none - type(nc_file), intent(inout) :: id + type(nc_file), intent(inout) :: file integer, optional, intent(in) :: arg real(kind=WP), allocatable :: aux(:), laux(:) integer :: i, lev, size1, size2, shape @@ -521,17 +521,17 @@ subroutine read_restart(id, mesh, arg) c=1 if (mype==0) then file_exist=.False. - inquire(file=id%filename,exist=file_exist) + inquire(file=file%filename,exist=file_exist) if (file_exist) then - write(*,*) ' reading restart file: ', trim(id%filename) - id%error_status(c)=nf_open(id%filename, nf_nowrite, id%ncid); c=c+1 - id%error_status(c)=nf_get_vara_int(id%ncid, id%iter_varid, id%rec_count, 1, globalstep, 1); c=c+1 - id%error_status(c)=nf_get_vara_double(id%ncid, id%time_varid, id%rec_count, 1, rtime, 1); c=c+1 + write(*,*) ' reading restart file: ', trim(file%filename) + file%error_status(c)=nf_open(file%filename, nf_nowrite, file%ncid); c=c+1 + file%error_status(c)=nf_get_vara_int(file%ncid, file%iter_varid, file%rec_count, 1, globalstep, 1); c=c+1 + file%error_status(c)=nf_get_vara_double(file%ncid, file%time_varid, file%rec_count, 1, rtime, 1); c=c+1 else write(*,*) print *, achar(27)//'[33m' write(*,*) '____________________________________________________________________' - write(*,*) ' ERROR: could not find restart_file:',trim(id%filename),'!' + write(*,*) ' ERROR: could not find restart_file:',trim(file%filename),'!' write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) @@ -539,58 +539,58 @@ subroutine read_restart(id, mesh, arg) end if if (.not. present(arg)) then - rec2read=id%rec_count + rec2read=file%rec_count else rec2read=arg end if - write(*,*) 'restart from record ', rec2read, ' of ', id%rec_count + write(*,*) 'restart from record ', rec2read, ' of ', file%rec_count if (int(ctime)/=int(rtime)) then write(*,*) 'Reading restart: timestamps in restart and in clock files do not match' write(*,*) 'restart/ times are:', ctime, rtime write(*,*) 'the model will stop!' - id%error_status(c)=-310; c=c+1 + file%error_status(c)=-310; c=c+1 end if end if - call was_error(id); c=1 + call was_error(file); c=1 - do i=1, id%nvar - shape=id%var(i)%ndim - if (mype==0) write(*,*) 'reading restart for ', trim(id%var(i)%name) + do i=1, file%nvar + shape=file%var(i)%ndim + if (mype==0) write(*,*) 'reading restart for ', trim(file%var(i)%name) !_______writing 2D fields________________________________________________ if (shape==1) then - size1=id%var(i)%dims(1) + size1=file%var(i)%dims(1) if (mype==0) then allocate(aux(size1)) - id%error_status(c)=nf_get_vara_double(id%ncid, id%var(i)%code, (/1, id%rec_count/), (/size1, 1/), aux, 1); c=c+1 + file%error_status(c)=nf_get_vara_double(file%ncid, file%var(i)%code, (/1, file%rec_count/), (/size1, 1/), aux, 1); c=c+1 ! write(*,*) 'min/max 2D =', minval(aux), maxval(aux) end if - if (size1==nod2D) call broadcast_nod (id%var(i)%pt1, aux) - if (size1==elem2D) call broadcast_elem(id%var(i)%pt1, aux) + if (size1==nod2D) call broadcast_nod (file%var(i)%pt1, aux) + if (size1==elem2D) call broadcast_elem(file%var(i)%pt1, aux) if (mype==0) deallocate(aux) !_______writing 3D fields________________________________________________ elseif (shape==2) then - size1=id%var(i)%dims(1) - size2=id%var(i)%dims(2) + size1=file%var(i)%dims(1) + size2=file%var(i)%dims(2) if (mype==0) allocate(aux (size2)) if (size2==nod2D) allocate(laux(myDim_nod2D +eDim_nod2D )) if (size2==elem2D) allocate(laux(myDim_elem2D+eDim_elem2D)) do lev=1, size1 if (mype==0) then - id%error_status(c)=nf_get_vara_double(id%ncid, id%var(i)%code, (/lev, 1, id%rec_count/), (/1, size2, 1/), aux, 1); c=c+1 + file%error_status(c)=nf_get_vara_double(file%ncid, file%var(i)%code, (/lev, 1, file%rec_count/), (/1, size2, 1/), aux, 1); c=c+1 ! write(*,*) 'min/max 3D ', lev,'=', minval(aux), maxval(aux) end if - id%var(i)%pt2(lev,:)=0. -! if (size1==nod2D .or. size2==nod2D) call broadcast_nod (id%var(i)%pt2(lev,:), aux) -! if (size1==elem2D .or. size2==elem2D) call broadcast_elem(id%var(i)%pt2(lev,:), aux) + file%var(i)%pt2(lev,:)=0. +! if (size1==nod2D .or. size2==nod2D) call broadcast_nod (file%var(i)%pt2(lev,:), aux) +! if (size1==elem2D .or. size2==elem2D) call broadcast_elem(file%var(i)%pt2(lev,:), aux) if (size2==nod2D) then call broadcast_nod (laux, aux) - id%var(i)%pt2(lev,:)=laux(1:myDim_nod2D+eDim_nod2D) + file%var(i)%pt2(lev,:)=laux(1:myDim_nod2D+eDim_nod2D) end if if (size2==elem2D) then call broadcast_elem(laux, aux) - id%var(i)%pt2(lev,:)=laux(1:myDim_elem2D+eDim_elem2D) + file%var(i)%pt2(lev,:)=laux(1:myDim_elem2D+eDim_elem2D) end if end do deallocate(laux) @@ -600,16 +600,15 @@ subroutine read_restart(id, mesh, arg) call par_ex stop end if - call was_error(id); c=1 + call was_error(file); c=1 end do - if (mype==0) id%error_status(1)=nf_close(id%ncid); - id%error_count=1 - call was_error(id) + if (mype==0) file%error_status(1)=nf_close(file%ncid); + file%error_count=1 + call was_error(file) end subroutine read_restart -! -!-------------------------------------------------------------------------------------------- -! + + subroutine assoc_ids(file) implicit none From dce0a4715c6d0f92c758f03e1c3e075e4c773057 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 10 Dec 2020 13:02:12 +0100 Subject: [PATCH 155/909] rename parameters to make it clearer what they represent --- src/io_restart.F90 | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 5db4ab024..c20c5d825 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -346,13 +346,13 @@ subroutine def_dim(file, name, ndim) end subroutine def_dim -subroutine def_variable_1d(file, name, dims, longname, units, data) +subroutine def_variable_1d(file, name, global_shape, longname, units, local_data) implicit none type(nc_file), intent(inout) :: file character(len=*), intent(in) :: name - integer, intent(in) :: dims(1) + integer, intent(in) :: global_shape(1) character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: data(:) + real(kind=WP),target, intent(inout) :: local_data(:) integer :: c type(nc_vars), allocatable, dimension(:) :: temp @@ -376,18 +376,18 @@ subroutine def_variable_1d(file, name, dims, longname, units, data) file%var(file%nvar)%longname=trim(longname) file%var(file%nvar)%units=trim(units) file%var(file%nvar)%ndim=1 - file%var(file%nvar)%dims(1)=dims(1) - file%var(file%nvar)%pt1=>data + file%var(file%nvar)%dims(1)=global_shape(1) + file%var(file%nvar)%pt1=>local_data end subroutine def_variable_1d -subroutine def_variable_2d(file, name, dims, longname, units, data) +subroutine def_variable_2d(file, name, global_shape, longname, units, local_data) implicit none type(nc_file), intent(inout) :: file character(len=*), intent(in) :: name - integer, intent(in) :: dims(2) + integer, intent(in) :: global_shape(2) character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: data(:,:) + real(kind=WP),target, intent(inout) :: local_data(:,:) integer :: c type(nc_vars), allocatable, dimension(:) :: temp @@ -411,8 +411,8 @@ subroutine def_variable_2d(file, name, dims, longname, units, data) file%var(file%nvar)%longname=trim(longname) file%var(file%nvar)%units=trim(units) file%var(file%nvar)%ndim=2 - file%var(file%nvar)%dims(1:2)=dims - file%var(file%nvar)%pt2=>data + file%var(file%nvar)%dims(1:2)=global_shape + file%var(file%nvar)%pt2=>local_data end subroutine def_variable_2d From ffd1c48751f0197b754926016d21cc5d79cb353e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 10 Dec 2020 16:44:42 +0100 Subject: [PATCH 156/909] use all pfunit files in the test directory via file glob when building unit tests --- test/fortran/CMakeLists.txt | 6 ++---- test/fortran_parallel/CMakeLists.txt | 3 ++- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index 5925e18e6..1d4c195c6 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -22,11 +22,9 @@ target_link_libraries(${LIB_TARGET} ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRA target_link_libraries(${LIB_TARGET} async_threads_cpp) set_target_properties(${LIB_TARGET} PROPERTIES Fortran_MODULE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) +file(GLOB sources_pfunit RELATIVE ${CMAKE_CURRENT_LIST_DIR} ${CMAKE_CURRENT_LIST_DIR}/*.pf) add_pfunit_ctest (${PROJECT_NAME} - TEST_SOURCES forcing_provider_module_tests.pf # must be a path relative to CMAKE_CURRENT_SOURCE_DIR, then the generated files will be placed in CMAKE_CURRENT_BINARY_DIR (see add_pfunit_ctest.cmake) - forcing_provider_netcdf_module_tests.pf - forcing_lookahead_reader_module_tests.pf - io_netcdf_module_tests.pf + TEST_SOURCES ${sources_pfunit} # must be a path relative to CMAKE_CURRENT_SOURCE_DIR, then the generated files will be placed in CMAKE_CURRENT_BINARY_DIR (see add_pfunit_ctest.cmake) LINK_LIBRARIES ${LIB_TARGET} ) diff --git a/test/fortran_parallel/CMakeLists.txt b/test/fortran_parallel/CMakeLists.txt index 88cc7cd34..95dfb08f6 100644 --- a/test/fortran_parallel/CMakeLists.txt +++ b/test/fortran_parallel/CMakeLists.txt @@ -11,8 +11,9 @@ target_include_directories(${LIB_TARGET} PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ${C target_link_libraries(${LIB_TARGET} ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRARIES}) set_target_properties(${LIB_TARGET} PROPERTIES Fortran_MODULE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) +file(GLOB sources_pfunit RELATIVE ${CMAKE_CURRENT_LIST_DIR} ${CMAKE_CURRENT_LIST_DIR}/*.pf) add_pfunit_ctest (${PROJECT_NAME} - TEST_SOURCES mpi_topology_module_tests.pf # must be a path relative to CMAKE_CURRENT_SOURCE_DIR, then the generated files will be placed in CMAKE_CURRENT_BINARY_DIR (see add_pfunit_ctest.cmake) + TEST_SOURCES ${sources_pfunit} # must be a path relative to CMAKE_CURRENT_SOURCE_DIR, then the generated files will be placed in CMAKE_CURRENT_BINARY_DIR (see add_pfunit_ctest.cmake) LINK_LIBRARIES ${LIB_TARGET} MAX_PES 6 ) From 651c0e322a587f9379d4a39877ba79a61ae3c2ff Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 10 Dec 2020 16:50:58 +0100 Subject: [PATCH 157/909] - add module to represent a fesom file - add initial unit test --- src/io_fesom_file_module.F90 | 21 +++++++++++++++++++++ test/fortran/CMakeLists.txt | 1 + test/fortran/io_fesom_file_module_tests.pf | 15 +++++++++++++++ 3 files changed, 37 insertions(+) create mode 100644 src/io_fesom_file_module.F90 create mode 100644 test/fortran/io_fesom_file_module_tests.pf diff --git a/src/io_fesom_file_module.F90 b/src/io_fesom_file_module.F90 new file mode 100644 index 000000000..4aa1c5d3b --- /dev/null +++ b/src/io_fesom_file_module.F90 @@ -0,0 +1,21 @@ +module io_fesom_file_module + implicit none + public fesom_file + private + + + type fesom_file + contains + procedure, public :: initialize + end type + + +contains + + + subroutine initialize(this) + class(fesom_file), intent(inout) :: this + end subroutine + + +end module diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index 1d4c195c6..c87562f5c 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -13,6 +13,7 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_lookahead_reader_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_nf_interface.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_fesom_file_module.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_fesom_file_module_tests.pf new file mode 100644 index 000000000..eace2c958 --- /dev/null +++ b/test/fortran/io_fesom_file_module_tests.pf @@ -0,0 +1,15 @@ +module io_fesom_file_module_tests + use io_fesom_file_module + use funit; implicit none + +contains + + + @test + subroutine test_can_initialize_without_filepath() + type(fesom_file) f + call f%initialize() + end subroutine + + +end module From 2af8a0fb71dc60e6630b8c92ca1af95213a40f25 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 10 Dec 2020 17:42:30 +0100 Subject: [PATCH 158/909] be able to add dimensions to the fesom file type --- src/io_fesom_file_module.F90 | 40 +++++++++++++++++++--- test/fortran/io_fesom_file_module_tests.pf | 16 ++++++++- 2 files changed, 50 insertions(+), 6 deletions(-) diff --git a/src/io_fesom_file_module.F90 b/src/io_fesom_file_module.F90 index 4aa1c5d3b..5b4d97be6 100644 --- a/src/io_fesom_file_module.F90 +++ b/src/io_fesom_file_module.F90 @@ -1,12 +1,20 @@ module io_fesom_file_module implicit none - public fesom_file + public fesom_file_type private - type fesom_file + type fesom_file_type + private + type(dim_type), allocatable :: dims(:) contains - procedure, public :: initialize + procedure, public :: initialize, add_dim + end type + + + type dim_type + character(:), allocatable :: name + integer len end type @@ -14,8 +22,30 @@ module io_fesom_file_module subroutine initialize(this) - class(fesom_file), intent(inout) :: this - end subroutine + class(fesom_file_type), intent(inout) :: this + end subroutine + + + function add_dim(this, name, len) result(dimindex) + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: len + integer dimindex + ! EO parameters + type(dim_type), allocatable :: tmparr(:) + + if( .not. allocated(this%dims)) then + allocate(this%dims(1)) + else + allocate( tmparr(size(this%dims)+1) ) + tmparr(1:size(this%dims)) = this%dims + deallocate(this%dims) + call move_alloc(tmparr, this%dims) + end if + dimindex = size(this%dims) + this%dims(dimindex) = dim_type(name=name, len=len) + end function + end module diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_fesom_file_module_tests.pf index eace2c958..df6384e74 100644 --- a/test/fortran/io_fesom_file_module_tests.pf +++ b/test/fortran/io_fesom_file_module_tests.pf @@ -7,9 +7,23 @@ contains @test subroutine test_can_initialize_without_filepath() - type(fesom_file) f + type(fesom_file_type) f + call f%initialize() end subroutine + @test + subroutine test_can_add_dims() + type(fesom_file_type) f + integer nz_dimidx, node_dimidx + + call f%initialize() + nz_dimidx = f%add_dim("nz", 47) + @assertEqual(1, nz_dimidx) + node_dimidx = f%add_dim("node", 47) + @assertEqual(2, node_dimidx) + end subroutine + + end module From 3f205fe8d67f146212268866f8dd98042ac4af4a Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 10 Dec 2020 21:26:07 +0100 Subject: [PATCH 159/909] add assertion procedures --- src/io_fesom_file_module.F90 | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/io_fesom_file_module.F90 b/src/io_fesom_file_module.F90 index 5b4d97be6..8de8a2857 100644 --- a/src/io_fesom_file_module.F90 +++ b/src/io_fesom_file_module.F90 @@ -48,4 +48,28 @@ function add_dim(this, name, len) result(dimindex) end function + + + subroutine assert(val, line) + logical, intent(in) :: val + integer, intent(in) :: line + ! EO parameters + if(.not. val) then + print *, "error in line ",line, __FILE__ + stop 1 + end if + end subroutine + + + subroutine assert_nc(status, line) + integer, intent(in) :: status + integer, intent(in) :: line + ! EO parameters + include "netcdf.inc" + if(status /= nf_noerr) then + print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status)) + stop 1 + endif + end subroutine + end module From 319a0fbd36295e01d049450ab18083eb73eaa94e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 10 Dec 2020 21:27:25 +0100 Subject: [PATCH 160/909] be able to open a netcdf file in read mode and attach our dims --- src/io_fesom_file_module.F90 | 41 ++++++++++++++++++++-- test/fortran/io_fesom_file_module_tests.pf | 14 ++++++++ 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/src/io_fesom_file_module.F90 b/src/io_fesom_file_module.F90 index 8de8a2857..e99c803c9 100644 --- a/src/io_fesom_file_module.F90 +++ b/src/io_fesom_file_module.F90 @@ -7,14 +7,20 @@ module io_fesom_file_module type fesom_file_type private type(dim_type), allocatable :: dims(:) + + character(:), allocatable :: filepath + integer mode + integer ncid contains - procedure, public :: initialize, add_dim + procedure, public :: initialize, add_dim, open_readmode, close_file end type type dim_type character(:), allocatable :: name integer len + + integer ncid end type @@ -23,6 +29,8 @@ module io_fesom_file_module subroutine initialize(this) class(fesom_file_type), intent(inout) :: this + + this%filepath = "" end subroutine @@ -44,10 +52,39 @@ function add_dim(this, name, len) result(dimindex) end if dimindex = size(this%dims) - this%dims(dimindex) = dim_type(name=name, len=len) + this%dims(dimindex) = dim_type(name=name, len=len, ncid=-1) end function + subroutine open_readmode(this, filepath) + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: filepath + ! EO parameters + include "netcdf.inc" + integer i + integer actual_len + + this%mode = nf_nowrite + this%filepath = filepath + + call assert_nc( nf_open(this%filepath, this%mode, this%ncid) , __LINE__) + + ! attach our dims to their counterparts in the file + do i=1, size(this%dims) + call assert_nc( nf_inq_dimid(this%ncid, this%dims(i)%name, this%dims(i)%ncid) , __LINE__) + call assert_nc( nf_inq_dimlen(this%ncid, this%dims(i)%ncid, actual_len) , __LINE__) + call assert(this%dims(i)%len == actual_len, __LINE__) + end do + end subroutine + + + subroutine close_file(this) + ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same ncid + class(fesom_file_type), intent(inout) :: this + ! EO parameters + include "netcdf.inc" + call assert_nc( nf_close(this%ncid) , __LINE__) + end subroutine subroutine assert(val, line) diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_fesom_file_module_tests.pf index df6384e74..9638c116e 100644 --- a/test/fortran/io_fesom_file_module_tests.pf +++ b/test/fortran/io_fesom_file_module_tests.pf @@ -26,4 +26,18 @@ contains end subroutine + @test + subroutine test_can_open_file_in_readmode() + type(fesom_file_type) f + integer z_dimidx + + call f%initialize() + z_dimidx = f%add_dim("nz1", 3) + + call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") + + call f%close_file() + end subroutine + + end module From d3b9cf61c185d6945274fd469f47748094437f82 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 10 Dec 2020 21:28:17 +0100 Subject: [PATCH 161/909] enable inactive test --- test/fortran/forcing_provider_netcdf_module_tests.pf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/fortran/forcing_provider_netcdf_module_tests.pf b/test/fortran/forcing_provider_netcdf_module_tests.pf index 21b1eaa65..109083bd5 100644 --- a/test/fortran/forcing_provider_netcdf_module_tests.pf +++ b/test/fortran/forcing_provider_netcdf_module_tests.pf @@ -53,7 +53,7 @@ contains @assertEqual(0.0000, values(1,1), tolerance=1.e-6) @assertEqual(0.0001, values(2,1), tolerance=1.e-6) @assertEqual(0.0007, values(2,3), tolerance=1.e-6) - + call handle%finalize() end subroutine From cd3407b6183e6758cc706932f7d653e739b0a0eb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 11 Dec 2020 09:46:14 +0100 Subject: [PATCH 162/909] be able to add unlimited dim --- src/io_fesom_file_module.F90 | 15 +++++++++++-- test/fortran/io_fesom_file_module_tests.pf | 25 ++++++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/src/io_fesom_file_module.F90 b/src/io_fesom_file_module.F90 index e99c803c9..028ede221 100644 --- a/src/io_fesom_file_module.F90 +++ b/src/io_fesom_file_module.F90 @@ -12,7 +12,7 @@ module io_fesom_file_module integer mode integer ncid contains - procedure, public :: initialize, add_dim, open_readmode, close_file + procedure, public :: initialize, add_dim, add_dim_unlimited, open_readmode, close_file end type @@ -33,6 +33,17 @@ subroutine initialize(this) this%filepath = "" end subroutine + + function add_dim_unlimited(this, name) result(dimindex) + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer dimindex + ! EO parameters + include "netcdf.inc" + + dimindex = this%add_dim(name, nf_unlimited) + end function + function add_dim(this, name, len) result(dimindex) class(fesom_file_type), intent(inout) :: this @@ -73,7 +84,7 @@ subroutine open_readmode(this, filepath) do i=1, size(this%dims) call assert_nc( nf_inq_dimid(this%ncid, this%dims(i)%name, this%dims(i)%ncid) , __LINE__) call assert_nc( nf_inq_dimlen(this%ncid, this%dims(i)%ncid, actual_len) , __LINE__) - call assert(this%dims(i)%len == actual_len, __LINE__) + if(this%dims(i)%len .ne. nf_unlimited) call assert(this%dims(i)%len == actual_len, __LINE__) end do end subroutine diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_fesom_file_module_tests.pf index 9638c116e..3d7e4d73e 100644 --- a/test/fortran/io_fesom_file_module_tests.pf +++ b/test/fortran/io_fesom_file_module_tests.pf @@ -26,6 +26,17 @@ contains end subroutine + @test + subroutine test_can_add_unlimited_dim() + type(fesom_file_type) f + integer dimidx + + call f%initialize() + dimidx = f%add_dim_unlimited("time") + @assertEqual(1, dimidx) + end subroutine + + @test subroutine test_can_open_file_in_readmode() type(fesom_file_type) f @@ -40,4 +51,18 @@ contains end subroutine + @test + subroutine test_can_open_file_with_unlimited_dim() + type(fesom_file_type) f + integer dimidx + + call f%initialize() + dimidx = f%add_dim_unlimited("time") + + call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") + + call f%close_file() + end subroutine + + end module From dee025dce4a25d6d31ee0847f0957a6efe1ab08f Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 11 Dec 2020 10:22:57 +0100 Subject: [PATCH 163/909] be able to add a simple variable --- src/io_fesom_file_module.F90 | 38 +++++++++++++++++++++- test/fortran/io_fesom_file_module_tests.pf | 16 +++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/src/io_fesom_file_module.F90 b/src/io_fesom_file_module.F90 index 028ede221..4bc1b9d69 100644 --- a/src/io_fesom_file_module.F90 +++ b/src/io_fesom_file_module.F90 @@ -7,12 +7,13 @@ module io_fesom_file_module type fesom_file_type private type(dim_type), allocatable :: dims(:) + type(var_type), allocatable :: vars(:) character(:), allocatable :: filepath integer mode integer ncid contains - procedure, public :: initialize, add_dim, add_dim_unlimited, open_readmode, close_file + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var, open_readmode, close_file end type @@ -24,6 +25,16 @@ module io_fesom_file_module end type + type var_type ! todo: use variable type from io_netcdf_module here + character(:), allocatable :: name + + character(:), allocatable :: units_txt + character(:), allocatable :: longname_txt + + integer ncid + end type + + contains @@ -67,6 +78,31 @@ function add_dim(this, name, len) result(dimindex) end function + ! the sizes of the dims define the global shape of the var + function add_var(this, name, dim_indices, units_txt, longname) result(varindex) + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: dim_indices(:) + character(len=*), intent(in) :: units_txt + character(len=*), intent(in) :: longname + integer varindex + ! EO parameters + type(var_type), allocatable :: tmparr(:) + + if( .not. allocated(this%vars)) then + allocate(this%vars(1)) + else + allocate( tmparr(size(this%vars)+1) ) + tmparr(1:size(this%vars)) = this%vars + deallocate(this%vars) + call move_alloc(tmparr, this%vars) + end if + + varindex = size(this%vars) + this%vars(varindex) = var_type(name, units_txt, longname, ncid=-1) + end function + + subroutine open_readmode(this, filepath) class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: filepath diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_fesom_file_module_tests.pf index 3d7e4d73e..003fb787f 100644 --- a/test/fortran/io_fesom_file_module_tests.pf +++ b/test/fortran/io_fesom_file_module_tests.pf @@ -37,6 +37,22 @@ contains end subroutine + @test + subroutine test_can_add_vars() + type(fesom_file_type) f + integer nz_dimidx, node_dimidx + integer salt_varid + + call f%initialize() + nz_dimidx = f%add_dim("nz", 47) + @assertEqual(1, nz_dimidx) + node_dimidx = f%add_dim("node", 47) + @assertEqual(2, node_dimidx) + + salt_varid = f%add_var("salt", [1,2], "psu", "salinity") + end subroutine + + @test subroutine test_can_open_file_in_readmode() type(fesom_file_type) f From 370fc961803bf836ea8a654dd89b2ebc695e1cdb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 11 Dec 2020 12:52:28 +0100 Subject: [PATCH 164/909] - store dimindices with variable - check dims of all variables when loading a file - add unit test --- src/io_fesom_file_module.F90 | 22 +++++++++++++++++++--- test/fortran/io_fesom_file_module_tests.pf | 20 ++++++++++++++++++++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/src/io_fesom_file_module.F90 b/src/io_fesom_file_module.F90 index 4bc1b9d69..6bcec78ac 100644 --- a/src/io_fesom_file_module.F90 +++ b/src/io_fesom_file_module.F90 @@ -27,6 +27,7 @@ module io_fesom_file_module type var_type ! todo: use variable type from io_netcdf_module here character(:), allocatable :: name + integer, allocatable :: dim_indices(:) character(:), allocatable :: units_txt character(:), allocatable :: longname_txt @@ -99,7 +100,7 @@ function add_var(this, name, dim_indices, units_txt, longname) result(varindex) end if varindex = size(this%vars) - this%vars(varindex) = var_type(name, units_txt, longname, ncid=-1) + this%vars(varindex) = var_type(name, dim_indices, units_txt, longname, ncid=-1) end function @@ -108,20 +109,35 @@ subroutine open_readmode(this, filepath) character(len=*), intent(in) :: filepath ! EO parameters include "netcdf.inc" - integer i + integer i, ii integer actual_len + integer actual_dimcount + integer, allocatable :: actual_dimids(:) + integer exp_dimid, act_dimid this%mode = nf_nowrite this%filepath = filepath call assert_nc( nf_open(this%filepath, this%mode, this%ncid) , __LINE__) - ! attach our dims to their counterparts in the file + ! attach our dims and vars to their counterparts in the file do i=1, size(this%dims) call assert_nc( nf_inq_dimid(this%ncid, this%dims(i)%name, this%dims(i)%ncid) , __LINE__) call assert_nc( nf_inq_dimlen(this%ncid, this%dims(i)%ncid, actual_len) , __LINE__) if(this%dims(i)%len .ne. nf_unlimited) call assert(this%dims(i)%len == actual_len, __LINE__) end do + do i=1, size(this%vars) + call assert_nc( nf_inq_varid(this%ncid, this%vars(i)%name, this%vars(i)%ncid) , __LINE__) + ! see if this var has the expected dims + call assert_nc( nf_inq_varndims(this%ncid, this%vars(i)%ncid, actual_dimcount) , __LINE__) + call assert(size(this%vars(i)%dim_indices) == actual_dimcount, __LINE__) + allocate(actual_dimids(actual_dimcount)) + call assert_nc( nf_inq_vardimid(this%ncid, this%vars(i)%ncid, actual_dimids) , __LINE__) + do ii=1, actual_dimcount + exp_dimid = this%dims( this%vars(i)%dim_indices(ii) )%ncid + call assert(exp_dimid == actual_dimids(ii), __LINE__) + end do + end do end subroutine diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_fesom_file_module_tests.pf index 003fb787f..a02e4d689 100644 --- a/test/fortran/io_fesom_file_module_tests.pf +++ b/test/fortran/io_fesom_file_module_tests.pf @@ -81,4 +81,24 @@ contains end subroutine + @test + subroutine test_can_open_file_with_variable() + type(fesom_file_type) f + integer nz_dimidx, node_dimidx, time_dimidx + integer salt_varid + call f%initialize() + nz_dimidx = f%add_dim("nz1", 3) + @assertEqual(1, nz_dimidx) + node_dimidx = f%add_dim("nod2", 5) + @assertEqual(2, node_dimidx) + time_dimidx = f%add_dim_unlimited("time") + @assertEqual(3, time_dimidx) + + salt_varid = f%add_var("salt", [nz_dimidx,node_dimidx,time_dimidx], "psu", "salinity") + call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") + + call f%close_file() + end subroutine + + end module From 1e590b61618ce6afeee8f6d66bfb82dbb807d472 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 14 Dec 2020 13:33:25 +0100 Subject: [PATCH 165/909] rename fesom netcdf file module as it is not FESOM specific at this point and could be used for general netcdf files --- src/{io_fesom_file_module.F90 => io_netcdf_file_module.F90} | 2 +- test/fortran/CMakeLists.txt | 2 +- ...om_file_module_tests.pf => io_netcdf_file_module_tests.pf} | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) rename src/{io_fesom_file_module.F90 => io_netcdf_file_module.F90} (99%) rename test/fortran/{io_fesom_file_module_tests.pf => io_netcdf_file_module_tests.pf} (97%) diff --git a/src/io_fesom_file_module.F90 b/src/io_netcdf_file_module.F90 similarity index 99% rename from src/io_fesom_file_module.F90 rename to src/io_netcdf_file_module.F90 index 6bcec78ac..02116a505 100644 --- a/src/io_fesom_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -1,4 +1,4 @@ -module io_fesom_file_module +module io_netcdf_file_module implicit none public fesom_file_type private diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index c87562f5c..09eea721c 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -13,7 +13,7 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_lookahead_reader_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_nf_interface.F90 - ${CMAKE_CURRENT_LIST_DIR}/../../src/io_fesom_file_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_file_module.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf similarity index 97% rename from test/fortran/io_fesom_file_module_tests.pf rename to test/fortran/io_netcdf_file_module_tests.pf index a02e4d689..ae2ff8638 100644 --- a/test/fortran/io_fesom_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -1,5 +1,5 @@ -module io_fesom_file_module_tests - use io_fesom_file_module +module io_netcdf_file_module_tests + use io_netcdf_file_module use funit; implicit none contains From 563f496ee3644c860a4a1060659bf7e8cb372e32 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 15 Dec 2020 12:56:44 +0100 Subject: [PATCH 166/909] - be able to read real 4 and real 8 variable data of any rank - add unit tests to test reading of 2D data --- src/io_netcdf_file_module.F90 | 40 +++++++++++++++ test/fortran/io_netcdf_file_module_tests.pf | 54 +++++++++++++++++++++ 2 files changed, 94 insertions(+) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 02116a505..6886430ff 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -14,6 +14,8 @@ module io_netcdf_file_module integer ncid contains procedure, public :: initialize, add_dim, add_dim_unlimited, add_var, open_readmode, close_file + generic, public :: read_var => read_var_r4, read_var_r8 + procedure, private :: read_var_r4, read_var_r8 end type @@ -141,6 +143,44 @@ subroutine open_readmode(this, filepath) end subroutine + ! values array is not required to have the same shape as the variable but must fit the product of all items of the sizes array + ! this way we can retrieve e.g. data from a 3D variable to a 2D array with one size set to 1 (e.g. to get a single timestep) + subroutine read_var_r8(this, varindex, starts, sizes, values) + use io_netcdf_nf_interface + use, intrinsic :: ISO_C_BINDING + class(fesom_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + real(8), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed + ! EO parameters + real(8), pointer :: values_ptr(:) + + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_get_vara_x(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + ! values array is not required to have the same shape as the variable but must fit the product of all items of the sizes array + ! this way we can retrieve e.g. data from a 3D variable to a 2D array with one size set to 1 (e.g. to get a single timestep) + subroutine read_var_r4(this, varindex, starts, sizes, values) + use io_netcdf_nf_interface + use, intrinsic :: ISO_C_BINDING + class(fesom_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + real(4), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed + ! EO parameters + real(4), pointer :: values_ptr(:) + + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_get_vara_x(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + subroutine close_file(this) ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same ncid class(fesom_file_type), intent(inout) :: this diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index ae2ff8638..22e2b9a28 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -101,4 +101,58 @@ contains end subroutine + @test + subroutine test_can_read_2d_variable_real4() + type(fesom_file_type) f + real(4), allocatable :: values(:) + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var("sss", [node_dimidx,time_dimidx], "psu", "sea surface salinity") + call f%open_readmode("fixtures/io_netcdf/columnwise_2d_sss.nc") + + allocate(values(5)) + call f%read_var(sss_varindex, [1,1], [5,1], values) + ! check level 1 values + @assertEqual(1.001, values(1), tolerance=1.e-6) + @assertEqual(1.002, values(2), tolerance=1.e-6) + @assertEqual(1.003, values(3), tolerance=1.e-6) + @assertEqual(1.004, values(4), tolerance=1.e-6) + @assertEqual(1.005, values(5), tolerance=1.e-6) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_2d_variable_real8() + type(fesom_file_type) f + real(8), allocatable :: values(:) + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var("sss", [node_dimidx,time_dimidx], "psu", "sea surface salinity") + call f%open_readmode("fixtures/io_netcdf/columnwise_2d_sss.nc") + + allocate(values(5)) + call f%read_var(sss_varindex, [1,1], [5,1], values) + ! check level 1 values + @assertEqual(1.001, values(1), tolerance=1.e-6) + @assertEqual(1.002, values(2), tolerance=1.e-6) + @assertEqual(1.003, values(3), tolerance=1.e-6) + @assertEqual(1.004, values(4), tolerance=1.e-6) + @assertEqual(1.005, values(5), tolerance=1.e-6) + + call f%close_file() + end subroutine + + end module From dc934b5d93e02eec2d26c3675cadb9c756292168 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 15 Dec 2020 13:40:41 +0100 Subject: [PATCH 167/909] be able to add any number of variable attributes with any name --- src/io_netcdf_file_module.F90 | 40 ++++++++++++++++----- test/fortran/io_netcdf_file_module_tests.pf | 13 ++++--- 2 files changed, 41 insertions(+), 12 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 6886430ff..1eaf9fc64 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -13,7 +13,7 @@ module io_netcdf_file_module integer mode integer ncid contains - procedure, public :: initialize, add_dim, add_dim_unlimited, add_var, open_readmode, close_file + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var, add_var_att, open_readmode, close_file generic, public :: read_var => read_var_r4, read_var_r8 procedure, private :: read_var_r4, read_var_r8 end type @@ -30,12 +30,17 @@ module io_netcdf_file_module type var_type ! todo: use variable type from io_netcdf_module here character(:), allocatable :: name integer, allocatable :: dim_indices(:) - - character(:), allocatable :: units_txt - character(:), allocatable :: longname_txt + type(att_type), allocatable :: atts(:) integer ncid end type + + + type att_type + character(:), allocatable :: name + character(:), allocatable :: text + ! todo: make this work for other data types like int + end type contains @@ -82,12 +87,10 @@ function add_dim(this, name, len) result(dimindex) ! the sizes of the dims define the global shape of the var - function add_var(this, name, dim_indices, units_txt, longname) result(varindex) + function add_var(this, name, dim_indices) result(varindex) class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name integer, intent(in) :: dim_indices(:) - character(len=*), intent(in) :: units_txt - character(len=*), intent(in) :: longname integer varindex ! EO parameters type(var_type), allocatable :: tmparr(:) @@ -102,10 +105,31 @@ function add_var(this, name, dim_indices, units_txt, longname) result(varindex) end if varindex = size(this%vars) - this%vars(varindex) = var_type(name, dim_indices, units_txt, longname, ncid=-1) + this%vars(varindex) = var_type(name, dim_indices, ncid=-1) end function + subroutine add_var_att(this, varindex, att_name, att_text) + class(fesom_file_type), intent(inout) :: this + integer, intent(in) :: varindex + character(len=*), intent(in) :: att_name + character(len=*), intent(in) :: att_text + ! EO parameters + type(att_type), allocatable :: tmparr(:) + + if( .not. allocated(this%vars(varindex)%atts)) then + allocate(this%vars(varindex)%atts(1)) + else + allocate( tmparr(size(this%vars(varindex)%atts)+1) ) + tmparr(1:size(this%vars(varindex)%atts)) = this%vars(varindex)%atts + deallocate(this%vars(varindex)%atts) + call move_alloc(tmparr, this%vars(varindex)%atts) + end if + + this%vars(varindex)%atts( size(this%vars(varindex)%atts) ) = att_type(name=att_name, text=att_text) + end subroutine + + subroutine open_readmode(this, filepath) class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: filepath diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 22e2b9a28..ae0e15ecb 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -49,7 +49,9 @@ contains node_dimidx = f%add_dim("node", 47) @assertEqual(2, node_dimidx) - salt_varid = f%add_var("salt", [1,2], "psu", "salinity") + salt_varid = f%add_var("salt", [1,2]) + call f%add_var_att(salt_varid, "units", "psu") + call f%add_var_att(salt_varid, "long_name", "salinity") end subroutine @@ -94,7 +96,10 @@ contains time_dimidx = f%add_dim_unlimited("time") @assertEqual(3, time_dimidx) - salt_varid = f%add_var("salt", [nz_dimidx,node_dimidx,time_dimidx], "psu", "salinity") + salt_varid = f%add_var("salt", [nz_dimidx,node_dimidx,time_dimidx]) + call f%add_var_att(salt_varid, "units", "psu") + call f%add_var_att(salt_varid, "long_name", "salinity") + call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") call f%close_file() @@ -112,7 +117,7 @@ contains node_dimidx = f%add_dim("nod2", 5) time_dimidx = f%add_dim_unlimited("time") - sss_varindex = f%add_var("sss", [node_dimidx,time_dimidx], "psu", "sea surface salinity") + sss_varindex = f%add_var("sss", [node_dimidx,time_dimidx]) call f%open_readmode("fixtures/io_netcdf/columnwise_2d_sss.nc") allocate(values(5)) @@ -139,7 +144,7 @@ contains node_dimidx = f%add_dim("nod2", 5) time_dimidx = f%add_dim_unlimited("time") - sss_varindex = f%add_var("sss", [node_dimidx,time_dimidx], "psu", "sea surface salinity") + sss_varindex = f%add_var("sss", [node_dimidx,time_dimidx]) call f%open_readmode("fixtures/io_netcdf/columnwise_2d_sss.nc") allocate(values(5)) From 8fc25d4f19a7659be401d3106776ce8414898d12 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 15 Dec 2020 15:04:12 +0100 Subject: [PATCH 168/909] add unit tests to test reading of 3D data --- test/fortran/io_netcdf_file_module_tests.pf | 72 +++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index ae0e15ecb..cfd712c7e 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -160,4 +160,76 @@ contains end subroutine + @test + subroutine test_can_read_3d_variable_real4() + type(fesom_file_type) f + real(4), allocatable :: values(:,:) + + integer node_dimidx, time_dimidx, z_dimidx + integer varindex + + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + z_dimidx = f%add_dim("nz1", 3) + time_dimidx = f%add_dim_unlimited("time") + + varindex = f%add_var("salt", [z_dimidx, node_dimidx,time_dimidx]) + call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") + + allocate(values(3,5)) + call f%read_var(varindex, [1,1,1], [3,5,1], values) + ! check level 1 values + @assertEqual(1.001, values(1,1), tolerance=1.e-6) + @assertEqual(1.002, values(1,2), tolerance=1.e-6) + @assertEqual(1.003, values(1,3), tolerance=1.e-6) + @assertEqual(1.004, values(1,4), tolerance=1.e-6) + @assertEqual(1.005, values(1,5), tolerance=1.e-6) + + ! check level 2 values + @assertEqual(2.001, values(2,1), tolerance=1.e-6) + @assertEqual(2.002, values(2,2), tolerance=1.e-6) + @assertEqual(2.003, values(2,3), tolerance=1.e-6) + @assertEqual(2.004, values(2,4), tolerance=1.e-6) + @assertEqual(2.005, values(2,5), tolerance=1.e-6) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_3d_variable_real8() + type(fesom_file_type) f + real(8), allocatable :: values(:,:) + + integer node_dimidx, time_dimidx, z_dimidx + integer varindex + + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + z_dimidx = f%add_dim("nz1", 3) + time_dimidx = f%add_dim_unlimited("time") + + varindex = f%add_var("salt", [z_dimidx, node_dimidx,time_dimidx]) + call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") + + allocate(values(3,5)) + call f%read_var(varindex, [1,1,1], [3,5,1], values) + ! check level 1 values + @assertEqual(1.001, values(1,1), tolerance=1.e-6) + @assertEqual(1.002, values(1,2), tolerance=1.e-6) + @assertEqual(1.003, values(1,3), tolerance=1.e-6) + @assertEqual(1.004, values(1,4), tolerance=1.e-6) + @assertEqual(1.005, values(1,5), tolerance=1.e-6) + + ! check level 2 values + @assertEqual(2.001, values(2,1), tolerance=1.e-6) + @assertEqual(2.002, values(2,2), tolerance=1.e-6) + @assertEqual(2.003, values(2,3), tolerance=1.e-6) + @assertEqual(2.004, values(2,4), tolerance=1.e-6) + @assertEqual(2.005, values(2,5), tolerance=1.e-6) + + call f%close_file() + end subroutine + + end module From a42b71ddb3d55239e103403f71750293dd095cfc Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 15 Dec 2020 15:20:43 +0100 Subject: [PATCH 169/909] bail out if starts+sizes arrays do not match the dimension count when reading a variable --- src/io_netcdf_file_module.F90 | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 1eaf9fc64..2902481e8 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -169,6 +169,7 @@ subroutine open_readmode(this, filepath) ! values array is not required to have the same shape as the variable but must fit the product of all items of the sizes array ! this way we can retrieve e.g. data from a 3D variable to a 2D array with one size set to 1 (e.g. to get a single timestep) + ! starts and sizes must have the same rank as the variable has dimensions subroutine read_var_r8(this, varindex, starts, sizes, values) use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING @@ -179,6 +180,8 @@ subroutine read_var_r8(this, varindex, starts, sizes, values) ! EO parameters real(8), pointer :: values_ptr(:) + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%dims), __LINE__) call assert(product(sizes) == product(shape(values)), __LINE__) call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) @@ -186,8 +189,7 @@ subroutine read_var_r8(this, varindex, starts, sizes, values) end subroutine - ! values array is not required to have the same shape as the variable but must fit the product of all items of the sizes array - ! this way we can retrieve e.g. data from a 3D variable to a 2D array with one size set to 1 (e.g. to get a single timestep) + ! see read_var_r8 for usage comment subroutine read_var_r4(this, varindex, starts, sizes, values) use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING @@ -198,6 +200,8 @@ subroutine read_var_r4(this, varindex, starts, sizes, values) ! EO parameters real(4), pointer :: values_ptr(:) + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%dims), __LINE__) call assert(product(sizes) == product(shape(values)), __LINE__) call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) From 1b4bbc8f87f18f2709e656becdd5b875e547a690 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 15 Dec 2020 18:18:58 +0100 Subject: [PATCH 170/909] do not store file access mode as modes for reading and creating a file seem to have overlapping bitmasks --- src/io_netcdf_file_module.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 2902481e8..49e02a52e 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -10,7 +10,6 @@ module io_netcdf_file_module type(var_type), allocatable :: vars(:) character(:), allocatable :: filepath - integer mode integer ncid contains procedure, public :: initialize, add_dim, add_dim_unlimited, add_var, add_var_att, open_readmode, close_file @@ -140,11 +139,12 @@ subroutine open_readmode(this, filepath) integer actual_dimcount integer, allocatable :: actual_dimids(:) integer exp_dimid, act_dimid + integer mode - this%mode = nf_nowrite + mode = nf_nowrite this%filepath = filepath - call assert_nc( nf_open(this%filepath, this%mode, this%ncid) , __LINE__) + call assert_nc( nf_open(this%filepath, mode, this%ncid) , __LINE__) ! attach our dims and vars to their counterparts in the file do i=1, size(this%dims) From 13fd237f074eba8b14ca4953aca3deb91b486745 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 15 Dec 2020 18:57:23 +0100 Subject: [PATCH 171/909] rename file_type#open_readmode procedure --- src/io_netcdf_file_module.F90 | 4 ++-- test/fortran/io_netcdf_file_module_tests.pf | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 49e02a52e..7bac826b9 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -12,7 +12,7 @@ module io_netcdf_file_module character(:), allocatable :: filepath integer ncid contains - procedure, public :: initialize, add_dim, add_dim_unlimited, add_var, add_var_att, open_readmode, close_file + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var, add_var_att, open_read, close_file generic, public :: read_var => read_var_r4, read_var_r8 procedure, private :: read_var_r4, read_var_r8 end type @@ -129,7 +129,7 @@ subroutine add_var_att(this, varindex, att_name, att_text) end subroutine - subroutine open_readmode(this, filepath) + subroutine open_read(this, filepath) class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: filepath ! EO parameters diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index cfd712c7e..900e5335e 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -2,7 +2,7 @@ module io_netcdf_file_module_tests use io_netcdf_file_module use funit; implicit none -contains +contains @test @@ -63,7 +63,7 @@ contains call f%initialize() z_dimidx = f%add_dim("nz1", 3) - call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") + call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") call f%close_file() end subroutine @@ -77,7 +77,7 @@ contains call f%initialize() dimidx = f%add_dim_unlimited("time") - call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") + call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") call f%close_file() end subroutine @@ -100,7 +100,7 @@ contains call f%add_var_att(salt_varid, "units", "psu") call f%add_var_att(salt_varid, "long_name", "salinity") - call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") + call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") call f%close_file() end subroutine @@ -118,7 +118,7 @@ contains time_dimidx = f%add_dim_unlimited("time") sss_varindex = f%add_var("sss", [node_dimidx,time_dimidx]) - call f%open_readmode("fixtures/io_netcdf/columnwise_2d_sss.nc") + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") allocate(values(5)) call f%read_var(sss_varindex, [1,1], [5,1], values) @@ -145,7 +145,7 @@ contains time_dimidx = f%add_dim_unlimited("time") sss_varindex = f%add_var("sss", [node_dimidx,time_dimidx]) - call f%open_readmode("fixtures/io_netcdf/columnwise_2d_sss.nc") + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") allocate(values(5)) call f%read_var(sss_varindex, [1,1], [5,1], values) @@ -174,7 +174,7 @@ contains time_dimidx = f%add_dim_unlimited("time") varindex = f%add_var("salt", [z_dimidx, node_dimidx,time_dimidx]) - call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") + call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") allocate(values(3,5)) call f%read_var(varindex, [1,1,1], [3,5,1], values) @@ -210,7 +210,7 @@ contains time_dimidx = f%add_dim_unlimited("time") varindex = f%add_var("salt", [z_dimidx, node_dimidx,time_dimidx]) - call f%open_readmode("fixtures/io_netcdf/columnwise_3d_salt.nc") + call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") allocate(values(3,5)) call f%read_var(varindex, [1,1,1], [3,5,1], values) From 4e02af852eb8f214ccbd20b843a524ce38c4d059 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 16 Dec 2020 09:50:17 +0100 Subject: [PATCH 172/909] move attaching dimensions and variables to separate procedure --- src/io_netcdf_file_module.F90 | 57 +++++++++++++++++++++-------------- 1 file changed, 34 insertions(+), 23 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 7bac826b9..af720f8dd 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -14,7 +14,7 @@ module io_netcdf_file_module contains procedure, public :: initialize, add_dim, add_dim_unlimited, add_var, add_var_att, open_read, close_file generic, public :: read_var => read_var_r4, read_var_r8 - procedure, private :: read_var_r4, read_var_r8 + procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file end type @@ -134,11 +134,6 @@ subroutine open_read(this, filepath) character(len=*), intent(in) :: filepath ! EO parameters include "netcdf.inc" - integer i, ii - integer actual_len - integer actual_dimcount - integer, allocatable :: actual_dimids(:) - integer exp_dimid, act_dimid integer mode mode = nf_nowrite @@ -147,23 +142,7 @@ subroutine open_read(this, filepath) call assert_nc( nf_open(this%filepath, mode, this%ncid) , __LINE__) ! attach our dims and vars to their counterparts in the file - do i=1, size(this%dims) - call assert_nc( nf_inq_dimid(this%ncid, this%dims(i)%name, this%dims(i)%ncid) , __LINE__) - call assert_nc( nf_inq_dimlen(this%ncid, this%dims(i)%ncid, actual_len) , __LINE__) - if(this%dims(i)%len .ne. nf_unlimited) call assert(this%dims(i)%len == actual_len, __LINE__) - end do - do i=1, size(this%vars) - call assert_nc( nf_inq_varid(this%ncid, this%vars(i)%name, this%vars(i)%ncid) , __LINE__) - ! see if this var has the expected dims - call assert_nc( nf_inq_varndims(this%ncid, this%vars(i)%ncid, actual_dimcount) , __LINE__) - call assert(size(this%vars(i)%dim_indices) == actual_dimcount, __LINE__) - allocate(actual_dimids(actual_dimcount)) - call assert_nc( nf_inq_vardimid(this%ncid, this%vars(i)%ncid, actual_dimids) , __LINE__) - do ii=1, actual_dimcount - exp_dimid = this%dims( this%vars(i)%dim_indices(ii) )%ncid - call assert(exp_dimid == actual_dimids(ii), __LINE__) - end do - end do + call this%attach_dims_vars_to_file() end subroutine @@ -218,6 +197,38 @@ subroutine close_file(this) end subroutine + ! connect our dims and vars to their counterparts in the NetCDF file, bail out if they do not match + ! ignore any additional dims and vars the file might contain + subroutine attach_dims_vars_to_file(this) + class(fesom_file_type), intent(inout) :: this + ! EO parameters + include "netcdf.inc" + integer i, ii + integer actual_len + integer actual_dimcount + integer, allocatable :: actual_dimids(:) + integer exp_dimid, act_dimid + + do i=1, size(this%dims) + call assert_nc( nf_inq_dimid(this%ncid, this%dims(i)%name, this%dims(i)%ncid) , __LINE__) + call assert_nc( nf_inq_dimlen(this%ncid, this%dims(i)%ncid, actual_len) , __LINE__) + if(this%dims(i)%len .ne. nf_unlimited) call assert(this%dims(i)%len == actual_len, __LINE__) + end do + do i=1, size(this%vars) + call assert_nc( nf_inq_varid(this%ncid, this%vars(i)%name, this%vars(i)%ncid) , __LINE__) + ! see if this var has the expected dims + call assert_nc( nf_inq_varndims(this%ncid, this%vars(i)%ncid, actual_dimcount) , __LINE__) + call assert(size(this%vars(i)%dim_indices) == actual_dimcount, __LINE__) + allocate(actual_dimids(actual_dimcount)) + call assert_nc( nf_inq_vardimid(this%ncid, this%vars(i)%ncid, actual_dimids) , __LINE__) + do ii=1, actual_dimcount + exp_dimid = this%dims( this%vars(i)%dim_indices(ii) )%ncid + call assert(exp_dimid == actual_dimids(ii), __LINE__) + end do + end do + end subroutine + + subroutine assert(val, line) logical, intent(in) :: val integer, intent(in) :: line From 506160865a3577e8b3a2bb8520a371235162ef70 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 16 Dec 2020 14:08:42 +0100 Subject: [PATCH 173/909] - be able to explicitly choose the NetCDF data type when creating a variable - check if the available data type matches the expected one when attaching to a NetCDF file --- src/io_netcdf_file_module.F90 | 40 ++++++++++++++++++--- test/fortran/io_netcdf_file_module_tests.pf | 12 +++---- 2 files changed, 42 insertions(+), 10 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index af720f8dd..9f4968fda 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -12,9 +12,9 @@ module io_netcdf_file_module character(:), allocatable :: filepath integer ncid contains - procedure, public :: initialize, add_dim, add_dim_unlimited, add_var, add_var_att, open_read, close_file + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_att, open_read, close_file generic, public :: read_var => read_var_r4, read_var_r8 - procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file + procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x end type @@ -29,6 +29,7 @@ module io_netcdf_file_module type var_type ! todo: use variable type from io_netcdf_module here character(:), allocatable :: name integer, allocatable :: dim_indices(:) + integer datatype type(att_type), allocatable :: atts(:) integer ncid @@ -86,12 +87,39 @@ function add_dim(this, name, len) result(dimindex) ! the sizes of the dims define the global shape of the var - function add_var(this, name, dim_indices) result(varindex) + function add_var_double(this, name, dim_indices) result(varindex) class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name integer, intent(in) :: dim_indices(:) integer varindex ! EO parameters + include "netcdf.inc" + + varindex = this%add_var_x(name, dim_indices, nf_double) + end function + + + ! the sizes of the dims define the global shape of the var + function add_var_real(this, name, dim_indices) result(varindex) + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: dim_indices(:) + integer varindex + ! EO parameters + include "netcdf.inc" + + varindex = this%add_var_x(name, dim_indices, nf_real) + end function + + + function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: dim_indices(:) + integer netcdf_datatype + integer varindex + ! EO parameters + include "netcdf.inc" type(var_type), allocatable :: tmparr(:) if( .not. allocated(this%vars)) then @@ -104,7 +132,7 @@ function add_var(this, name, dim_indices) result(varindex) end if varindex = size(this%vars) - this%vars(varindex) = var_type(name, dim_indices, ncid=-1) + this%vars(varindex) = var_type(name, dim_indices, netcdf_datatype, ncid=-1) end function @@ -208,6 +236,7 @@ subroutine attach_dims_vars_to_file(this) integer actual_dimcount integer, allocatable :: actual_dimids(:) integer exp_dimid, act_dimid + integer actual_datatype do i=1, size(this%dims) call assert_nc( nf_inq_dimid(this%ncid, this%dims(i)%name, this%dims(i)%ncid) , __LINE__) @@ -216,6 +245,9 @@ subroutine attach_dims_vars_to_file(this) end do do i=1, size(this%vars) call assert_nc( nf_inq_varid(this%ncid, this%vars(i)%name, this%vars(i)%ncid) , __LINE__) + ! see if this var has the expected datatype + call assert_nc( nf_inq_vartype(this%ncid, this%vars(i)%ncid, actual_datatype) , __LINE__) + call assert(this%vars(i)%datatype == actual_datatype, __LINE__) ! see if this var has the expected dims call assert_nc( nf_inq_varndims(this%ncid, this%vars(i)%ncid, actual_dimcount) , __LINE__) call assert(size(this%vars(i)%dim_indices) == actual_dimcount, __LINE__) diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 900e5335e..addab05f9 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -49,7 +49,7 @@ contains node_dimidx = f%add_dim("node", 47) @assertEqual(2, node_dimidx) - salt_varid = f%add_var("salt", [1,2]) + salt_varid = f%add_var_real("salt", [1,2]) call f%add_var_att(salt_varid, "units", "psu") call f%add_var_att(salt_varid, "long_name", "salinity") end subroutine @@ -96,7 +96,7 @@ contains time_dimidx = f%add_dim_unlimited("time") @assertEqual(3, time_dimidx) - salt_varid = f%add_var("salt", [nz_dimidx,node_dimidx,time_dimidx]) + salt_varid = f%add_var_real("salt", [nz_dimidx,node_dimidx,time_dimidx]) call f%add_var_att(salt_varid, "units", "psu") call f%add_var_att(salt_varid, "long_name", "salinity") @@ -117,7 +117,7 @@ contains node_dimidx = f%add_dim("nod2", 5) time_dimidx = f%add_dim_unlimited("time") - sss_varindex = f%add_var("sss", [node_dimidx,time_dimidx]) + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") allocate(values(5)) @@ -144,7 +144,7 @@ contains node_dimidx = f%add_dim("nod2", 5) time_dimidx = f%add_dim_unlimited("time") - sss_varindex = f%add_var("sss", [node_dimidx,time_dimidx]) + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") allocate(values(5)) @@ -173,7 +173,7 @@ contains z_dimidx = f%add_dim("nz1", 3) time_dimidx = f%add_dim_unlimited("time") - varindex = f%add_var("salt", [z_dimidx, node_dimidx,time_dimidx]) + varindex = f%add_var_real("salt", [z_dimidx, node_dimidx,time_dimidx]) call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") allocate(values(3,5)) @@ -209,7 +209,7 @@ contains z_dimidx = f%add_dim("nz1", 3) time_dimidx = f%add_dim_unlimited("time") - varindex = f%add_var("salt", [z_dimidx, node_dimidx,time_dimidx]) + varindex = f%add_var_real("salt", [z_dimidx, node_dimidx,time_dimidx]) call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") allocate(values(3,5)) From c727331705ca1be78c0e5caa08a46e59dc12f179 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 16 Dec 2020 15:08:29 +0100 Subject: [PATCH 174/909] - allocate dims and vars arrays upon initialization - change unit test to test if we can use empty dims and vars --- src/io_netcdf_file_module.F90 | 12 ++++-------- test/fortran/io_netcdf_file_module_tests.pf | 6 +----- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 9f4968fda..f4fa20ff4 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -50,6 +50,8 @@ subroutine initialize(this) class(fesom_file_type), intent(inout) :: this this%filepath = "" + allocate(this%dims(0)) + allocate(this%vars(0)) end subroutine @@ -72,14 +74,11 @@ function add_dim(this, name, len) result(dimindex) ! EO parameters type(dim_type), allocatable :: tmparr(:) - if( .not. allocated(this%dims)) then - allocate(this%dims(1)) - else + ! assume the dims array is allocated allocate( tmparr(size(this%dims)+1) ) tmparr(1:size(this%dims)) = this%dims deallocate(this%dims) call move_alloc(tmparr, this%dims) - end if dimindex = size(this%dims) this%dims(dimindex) = dim_type(name=name, len=len, ncid=-1) @@ -122,14 +121,11 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) include "netcdf.inc" type(var_type), allocatable :: tmparr(:) - if( .not. allocated(this%vars)) then - allocate(this%vars(1)) - else + ! assume the vars array is allocated allocate( tmparr(size(this%vars)+1) ) tmparr(1:size(this%vars)) = this%vars deallocate(this%vars) call move_alloc(tmparr, this%vars) - end if varindex = size(this%vars) this%vars(varindex) = var_type(name, dim_indices, netcdf_datatype, ncid=-1) diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index addab05f9..9d1b13603 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -56,15 +56,11 @@ contains @test - subroutine test_can_open_file_in_readmode() + subroutine test_can_open_file_in_readmode_without_expecting_dims_and_vars() type(fesom_file_type) f - integer z_dimidx call f%initialize() - z_dimidx = f%add_dim("nz1", 3) - call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") - call f%close_file() end subroutine From 845d2743a0edfa3ccfc8549dbc98368ec28886d0 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 16 Dec 2020 15:20:28 +0100 Subject: [PATCH 175/909] - be able to create a NetCDF file with predefined dims and vars - add unit tests for this --- src/io_netcdf_file_module.F90 | 53 +++++++++++++++++---- test/fortran/io_netcdf_file_module_tests.pf | 43 +++++++++++++++++ 2 files changed, 87 insertions(+), 9 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index f4fa20ff4..2060e29e1 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -12,7 +12,7 @@ module io_netcdf_file_module character(:), allocatable :: filepath integer ncid contains - procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_att, open_read, close_file + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_att, open_read, close_file, open_create generic, public :: read_var => read_var_r4, read_var_r8 procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x end type @@ -75,10 +75,10 @@ function add_dim(this, name, len) result(dimindex) type(dim_type), allocatable :: tmparr(:) ! assume the dims array is allocated - allocate( tmparr(size(this%dims)+1) ) - tmparr(1:size(this%dims)) = this%dims - deallocate(this%dims) - call move_alloc(tmparr, this%dims) + allocate( tmparr(size(this%dims)+1) ) + tmparr(1:size(this%dims)) = this%dims + deallocate(this%dims) + call move_alloc(tmparr, this%dims) dimindex = size(this%dims) this%dims(dimindex) = dim_type(name=name, len=len, ncid=-1) @@ -122,10 +122,10 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) type(var_type), allocatable :: tmparr(:) ! assume the vars array is allocated - allocate( tmparr(size(this%vars)+1) ) - tmparr(1:size(this%vars)) = this%vars - deallocate(this%vars) - call move_alloc(tmparr, this%vars) + allocate( tmparr(size(this%vars)+1) ) + tmparr(1:size(this%vars)) = this%vars + deallocate(this%vars) + call move_alloc(tmparr, this%vars) varindex = size(this%vars) this%vars(varindex) = var_type(name, dim_indices, netcdf_datatype, ncid=-1) @@ -212,6 +212,41 @@ subroutine read_var_r4(this, varindex, starts, sizes, values) end subroutine + subroutine open_create(this, filepath) + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: filepath + ! EO parameters + include "netcdf.inc" + integer cmode + integer i, ii + integer var_ndims + integer, allocatable :: var_dimids(:) + + this%filepath = filepath + + cmode = ior(nf_noclobber, ior(nf_netcdf4, nf_classic_model)) + call assert_nc( nf_create(filepath, cmode, this%ncid) , __LINE__) + + ! create our dims in the file + do i=1, size(this%dims) + call assert_nc( nf_def_dim(this%ncid, this%dims(i)%name, this%dims(i)%len, this%dims(i)%ncid) , __LINE__) + end do + + ! create our vars in the file + do i=1, size(this%vars) + var_ndims = size(this%vars(i)%dim_indices) + if(allocated(var_dimids)) deallocate(var_dimids) + allocate(var_dimids(var_ndims)) + do ii=1, var_ndims + var_dimids(ii) = this%dims( this%vars(i)%dim_indices(ii) )%ncid + end do + call assert_nc( nf_def_var(this%ncid, this%vars(i)%name, this%vars(i)%datatype, var_ndims, var_dimids, this%dims(i)%ncid) , __LINE__) + end do + + call assert_nc( nf_enddef(this%ncid), __LINE__ ) + end subroutine + + subroutine close_file(this) ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same ncid class(fesom_file_type), intent(inout) :: this diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 9d1b13603..0a8e5164c 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -2,6 +2,8 @@ module io_netcdf_file_module_tests use io_netcdf_file_module use funit; implicit none + character(len=*), parameter :: TMPPATHPREFIX = "./io_netcdf_file_module_tests_DAEA1C34_F042_4243_AA88_273E4AA9D4A6__" + contains @@ -228,4 +230,45 @@ contains end subroutine + @test + subroutine test_can_create_empty_file() + type(fesom_file_type) f + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_empty_file.nc" + + call execute_command_line("rm -f "//filepath) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + call f%open_create(filepath) + call f%close_file() + ! todo: actually test if the file has bee written correctly + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_create_file_with_dims_and_vars() + type(fesom_file_type) f + integer z_dimidx, time_dimidx + integer varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_with_dims_and_vars.nc" + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + z_dimidx = f%add_dim("nz1", 3) + time_dimidx = f%add_dim_unlimited("time") + varindex = f%add_var_real("salt", [z_dimidx, time_dimidx]) + + call f%open_create(filepath) + call f%close_file() + ! todo: actually test if the file has bee written correctly + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + end module From a1e84fadd630a1b19ca7ab0df24a15f5462aaa43 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 16 Dec 2020 18:50:22 +0100 Subject: [PATCH 176/909] - be able to open an existing file in write mode - be able to append variable data to a file - add corresponding unit tests --- src/io_netcdf_file_module.F90 | 61 +++++++++++++++- src/io_netcdf_nf_interface.F90 | 27 ++++++- test/fortran/io_netcdf_file_module_tests.pf | 80 +++++++++++++++++++++ 3 files changed, 164 insertions(+), 4 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 2060e29e1..6a7f26f51 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -12,9 +12,10 @@ module io_netcdf_file_module character(:), allocatable :: filepath integer ncid contains - procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_att, open_read, close_file, open_create + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_att, open_read, close_file, open_create, open_write_append generic, public :: read_var => read_var_r4, read_var_r8 - procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x + generic, public :: write_var => write_var_r4, write_var_r8 + procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8 end type @@ -247,6 +248,62 @@ subroutine open_create(this, filepath) end subroutine + ! open an existing file and prepare to write data to it + subroutine open_write_append(this, filepath) + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: filepath + ! EO parameters + include "netcdf.inc" + integer cmode + + this%filepath = filepath + + cmode = nf_write + call assert_nc( nf_open(filepath, cmode, this%ncid) , __LINE__) + + ! make sure that all our dims and vars exist in this file and get hold of them + call this%attach_dims_vars_to_file() + end subroutine + + + subroutine write_var_r8(this, varindex, starts, sizes, values) + use io_netcdf_nf_interface + use, intrinsic :: ISO_C_BINDING + class(fesom_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + real(8), intent(in), target :: values(..) ! must be inout or the allocation might be screwed + ! EO parameters + real(8), pointer :: values_ptr(:) + + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%dims), __LINE__) + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_put_vara_x(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + subroutine write_var_r4(this, varindex, starts, sizes, values) + use io_netcdf_nf_interface + use, intrinsic :: ISO_C_BINDING + class(fesom_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + real(4), intent(in), target :: values(..) ! must be inout or the allocation might be screwed + ! EO parameters + real(4), pointer :: values_ptr(:) + + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%dims), __LINE__) + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_put_vara_x(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + subroutine close_file(this) ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same ncid class(fesom_file_type), intent(inout) :: this diff --git a/src/io_netcdf_nf_interface.F90 b/src/io_netcdf_nf_interface.F90 index af791a8df..2c65de35d 100644 --- a/src/io_netcdf_nf_interface.F90 +++ b/src/io_netcdf_nf_interface.F90 @@ -2,14 +2,14 @@ module io_netcdf_nf_interface implicit none interface - function nf_get_vara_double(ncid, varid, start, counts, dvals) RESULT(status) + function nf_get_vara_double(ncid, varid, start, counts, dvals) result(status) integer, intent(in) :: ncid, varid integer, intent(in) :: start(*), counts(*) real(8), intent(out) :: dvals(*) integer status end function - function nf_get_vara_real(ncid, varid, start, counts, dvals) RESULT(status) + function nf_get_vara_real(ncid, varid, start, counts, dvals) result(status) integer, intent(in) :: ncid, varid integer, intent(in) :: start(*), counts(*) real(4), intent(out) :: dvals(*) @@ -21,4 +21,27 @@ function nf_get_vara_real(ncid, varid, start, counts, dvals) RESULT(status) interface nf_get_vara_x procedure nf_get_vara_real, nf_get_vara_double end interface + + + interface + function nf_put_vara_double(ncid, varid, start, counts, dvals) result(status) + integer, intent(in) :: ncid, varid + integer, intent(in) :: start(*), counts(*) + real(8), intent(in) :: dvals(*) + integer status + end function + + function nf_put_vara_real(ncid, varid, start, counts, dvals) result(status) + integer, intent(in) :: ncid, varid + integer, intent(in) :: start(*), counts(*) + real(4), intent(in) :: dvals(*) + integer status + end function + end interface + + + interface nf_put_vara_x + procedure nf_put_vara_real, nf_put_vara_double + end interface + end module diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 0a8e5164c..04f60ab0e 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -271,4 +271,84 @@ contains if(exitstat .ne. 0) stop 1 end subroutine + + @test + subroutine test_can_append_to_existing_file_real4() + type(fesom_file_type) f + real(4), allocatable :: values(:) + + integer node_dimidx, time_dimidx + integer varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_append_to_existing_file_real4.nc" + + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run + + call execute_command_line("cp fixtures/io_netcdf/columnwise_2d_sss.nc "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_write_append(filepath) + + allocate(values(5)) + values(1) = 100.001 + values(2) = 100.002 + values(3) = 100.003 + values(4) = 100.004 + values(5) = 100.005 + + ! the file has 2 timesteps, we append a 3rd one + call f%write_var(varindex, [1,3], [5,1], values) + + call f%close_file() + ! todo: actually test if the file has bee written correctly + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_append_to_existing_file_real8() + type(fesom_file_type) f + real(8), allocatable :: values(:) + + integer node_dimidx, time_dimidx + integer varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_append_to_existing_file_real8.nc" + + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run + + call execute_command_line("cp fixtures/io_netcdf/columnwise_2d_sss.nc "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_write_append(filepath) + + allocate(values(5)) + values(1) = 100.001_8 + values(2) = 100.002_8 + values(3) = 100.003_8 + values(4) = 100.004_8 + values(5) = 100.005_8 + + ! the file has 2 timesteps, we append a 3rd one + call f%write_var(varindex, [1,3], [5,1], values) + + call f%close_file() + ! todo: actually test if the file has bee written correctly + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + end module From 8a058e75ae42ce0b7c3cbd5afb4b37d90af44c1c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 16 Dec 2020 18:54:01 +0100 Subject: [PATCH 177/909] rename procedure for creating a new NetCDF file --- src/io_netcdf_file_module.F90 | 4 ++-- test/fortran/io_netcdf_file_module_tests.pf | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 6a7f26f51..ced6300da 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -12,7 +12,7 @@ module io_netcdf_file_module character(:), allocatable :: filepath integer ncid contains - procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_att, open_read, close_file, open_create, open_write_append + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_att, open_read, close_file, open_write_create, open_write_append generic, public :: read_var => read_var_r4, read_var_r8 generic, public :: write_var => write_var_r4, write_var_r8 procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8 @@ -213,7 +213,7 @@ subroutine read_var_r4(this, varindex, starts, sizes, values) end subroutine - subroutine open_create(this, filepath) + subroutine open_write_create(this, filepath) class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: filepath ! EO parameters diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 04f60ab0e..7c07df2ce 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -239,7 +239,7 @@ contains call execute_command_line("rm -f "//filepath) ! silently remove the file if it exists from an aborted previous run call f%initialize() - call f%open_create(filepath) + call f%open_write_create(filepath) call f%close_file() ! todo: actually test if the file has bee written correctly @@ -263,7 +263,7 @@ contains time_dimidx = f%add_dim_unlimited("time") varindex = f%add_var_real("salt", [z_dimidx, time_dimidx]) - call f%open_create(filepath) + call f%open_write_create(filepath) call f%close_file() ! todo: actually test if the file has bee written correctly From 0b4952242403ce9d9ae3c57bcef2dd51123f37c2 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 17 Dec 2020 12:51:42 +0100 Subject: [PATCH 178/909] allocate variable attributes array when creating the variable instance --- src/io_netcdf_file_module.F90 | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index ced6300da..49d0be2e6 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -121,6 +121,7 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) ! EO parameters include "netcdf.inc" type(var_type), allocatable :: tmparr(:) + type(att_type) empty_atts(0) ! assume the vars array is allocated allocate( tmparr(size(this%vars)+1) ) @@ -129,7 +130,7 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) call move_alloc(tmparr, this%vars) varindex = size(this%vars) - this%vars(varindex) = var_type(name, dim_indices, netcdf_datatype, ncid=-1) + this%vars(varindex) = var_type(name=name, dim_indices=dim_indices, datatype=netcdf_datatype, atts=empty_atts, ncid=-1) end function @@ -141,14 +142,10 @@ subroutine add_var_att(this, varindex, att_name, att_text) ! EO parameters type(att_type), allocatable :: tmparr(:) - if( .not. allocated(this%vars(varindex)%atts)) then - allocate(this%vars(varindex)%atts(1)) - else - allocate( tmparr(size(this%vars(varindex)%atts)+1) ) - tmparr(1:size(this%vars(varindex)%atts)) = this%vars(varindex)%atts - deallocate(this%vars(varindex)%atts) - call move_alloc(tmparr, this%vars(varindex)%atts) - end if + allocate( tmparr(size(this%vars(varindex)%atts)+1) ) + tmparr(1:size(this%vars(varindex)%atts)) = this%vars(varindex)%atts + deallocate(this%vars(varindex)%atts) + call move_alloc(tmparr, this%vars(varindex)%atts) this%vars(varindex)%atts( size(this%vars(varindex)%atts) ) = att_type(name=att_name, text=att_text) end subroutine From a2767f18598840bbe0d925d418a8bcfc90c24f28 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 17 Dec 2020 13:25:47 +0100 Subject: [PATCH 179/909] fix storing the variable id when creating a NetCDF variable --- src/io_netcdf_file_module.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 49d0be2e6..fe1018df5 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -238,7 +238,7 @@ subroutine open_write_create(this, filepath) do ii=1, var_ndims var_dimids(ii) = this%dims( this%vars(i)%dim_indices(ii) )%ncid end do - call assert_nc( nf_def_var(this%ncid, this%vars(i)%name, this%vars(i)%datatype, var_ndims, var_dimids, this%dims(i)%ncid) , __LINE__) + call assert_nc( nf_def_var(this%ncid, this%vars(i)%name, this%vars(i)%datatype, var_ndims, var_dimids, this%vars(i)%ncid) , __LINE__) end do call assert_nc( nf_enddef(this%ncid), __LINE__ ) From cfa24879287beedbca7a7a9cf596b745808b7a51 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 17 Dec 2020 13:44:31 +0100 Subject: [PATCH 180/909] - be able to write text attributes to a NetCDF variable - add corresponding unit test --- src/io_netcdf_file_module.F90 | 10 +++++++- test/fortran/io_netcdf_file_module_tests.pf | 26 +++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index fe1018df5..5e0a7189d 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -211,7 +211,7 @@ subroutine read_var_r4(this, varindex, starts, sizes, values) subroutine open_write_create(this, filepath) - class(fesom_file_type), intent(inout) :: this + class(fesom_file_type), target, intent(inout) :: this character(len=*), intent(in) :: filepath ! EO parameters include "netcdf.inc" @@ -219,6 +219,8 @@ subroutine open_write_create(this, filepath) integer i, ii integer var_ndims integer, allocatable :: var_dimids(:) + character(:), pointer :: att_name + character(:), pointer :: att_text this%filepath = filepath @@ -239,6 +241,12 @@ subroutine open_write_create(this, filepath) var_dimids(ii) = this%dims( this%vars(i)%dim_indices(ii) )%ncid end do call assert_nc( nf_def_var(this%ncid, this%vars(i)%name, this%vars(i)%datatype, var_ndims, var_dimids, this%vars(i)%ncid) , __LINE__) + + do ii=1, size(this%vars(i)%atts) + att_name => this%vars(i)%atts(ii)%name + att_text => this%vars(i)%atts(ii)%text + call assert_nc( nf_put_att_text(this%ncid, this%vars(i)%ncid, att_name, len(att_text), att_text) , __LINE__) + end do end do call assert_nc( nf_enddef(this%ncid), __LINE__ ) diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 7c07df2ce..32805c83a 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -272,6 +272,32 @@ contains end subroutine + @test + subroutine test_can_create_file_and_var_text_attributes() + type(fesom_file_type) f + integer z_dimidx, time_dimidx + integer varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_and_var_attributes.nc" + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + z_dimidx = f%add_dim("nz1", 3) + time_dimidx = f%add_dim_unlimited("time") + varindex = f%add_var_real("salt", [z_dimidx, time_dimidx]) + call f%add_var_att(varindex, "units", "psu") + call f%add_var_att(varindex, "long_name", "sea surface salinity") + + call f%open_write_create(filepath) + call f%close_file() + ! todo: actually test if the file has bee written correctly + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + @test subroutine test_can_append_to_existing_file_real4() type(fesom_file_type) f From 3a82be88334b1769848ff91419a9d120d389575c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Dec 2020 09:18:21 +0100 Subject: [PATCH 181/909] move attribute type to separate module to reduce upcoming clutter due to its planned type extensions --- src/io_netcdf_attribute_module.F90 | 17 +++++++++++++++++ src/io_netcdf_file_module.F90 | 8 +------- test/fortran/CMakeLists.txt | 1 + 3 files changed, 19 insertions(+), 7 deletions(-) create mode 100644 src/io_netcdf_attribute_module.F90 diff --git a/src/io_netcdf_attribute_module.F90 b/src/io_netcdf_attribute_module.F90 new file mode 100644 index 000000000..86102d6d9 --- /dev/null +++ b/src/io_netcdf_attribute_module.F90 @@ -0,0 +1,17 @@ +module io_netcdf_attribute_module + implicit none + public att_type + private + + + type att_type + character(:), allocatable :: name + character(:), allocatable :: text + ! todo: make this work for other data types like int + end type + + +contains + + +end module diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 5e0a7189d..a0a75d393 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -1,4 +1,5 @@ module io_netcdf_file_module + use io_netcdf_attribute_module implicit none public fesom_file_type private @@ -37,13 +38,6 @@ module io_netcdf_file_module end type - type att_type - character(:), allocatable :: name - character(:), allocatable :: text - ! todo: make this work for other data types like int - end type - - contains diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index 09eea721c..475c8cd87 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -14,6 +14,7 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_lookahead_reader_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_nf_interface.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_file_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_attribute_module.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) From 4f509947a9c021eb13fc9d6778d6de0752b60153 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Dec 2020 09:28:54 +0100 Subject: [PATCH 182/909] give the NetCDF file type a more general name --- src/io_netcdf_file_module.F90 | 36 ++++++++++----------- test/fortran/io_netcdf_file_module_tests.pf | 32 +++++++++--------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index a0a75d393..0cb8fa8cc 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -1,11 +1,11 @@ module io_netcdf_file_module use io_netcdf_attribute_module implicit none - public fesom_file_type + public netcdf_file_type private - type fesom_file_type + type netcdf_file_type private type(dim_type), allocatable :: dims(:) type(var_type), allocatable :: vars(:) @@ -42,7 +42,7 @@ module io_netcdf_file_module subroutine initialize(this) - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this this%filepath = "" allocate(this%dims(0)) @@ -51,7 +51,7 @@ subroutine initialize(this) function add_dim_unlimited(this, name) result(dimindex) - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this character(len=*), intent(in) :: name integer dimindex ! EO parameters @@ -62,7 +62,7 @@ function add_dim_unlimited(this, name) result(dimindex) function add_dim(this, name, len) result(dimindex) - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this character(len=*), intent(in) :: name integer, intent(in) :: len integer dimindex @@ -82,7 +82,7 @@ function add_dim(this, name, len) result(dimindex) ! the sizes of the dims define the global shape of the var function add_var_double(this, name, dim_indices) result(varindex) - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this character(len=*), intent(in) :: name integer, intent(in) :: dim_indices(:) integer varindex @@ -95,7 +95,7 @@ function add_var_double(this, name, dim_indices) result(varindex) ! the sizes of the dims define the global shape of the var function add_var_real(this, name, dim_indices) result(varindex) - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this character(len=*), intent(in) :: name integer, intent(in) :: dim_indices(:) integer varindex @@ -107,7 +107,7 @@ function add_var_real(this, name, dim_indices) result(varindex) function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this character(len=*), intent(in) :: name integer, intent(in) :: dim_indices(:) integer netcdf_datatype @@ -129,7 +129,7 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) subroutine add_var_att(this, varindex, att_name, att_text) - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this integer, intent(in) :: varindex character(len=*), intent(in) :: att_name character(len=*), intent(in) :: att_text @@ -146,7 +146,7 @@ subroutine add_var_att(this, varindex, att_name, att_text) subroutine open_read(this, filepath) - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this character(len=*), intent(in) :: filepath ! EO parameters include "netcdf.inc" @@ -168,7 +168,7 @@ subroutine open_read(this, filepath) subroutine read_var_r8(this, varindex, starts, sizes, values) use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING - class(fesom_file_type), intent(in) :: this + class(netcdf_file_type), intent(in) :: this integer, intent(in) :: varindex integer, dimension(:) :: starts, sizes real(8), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed @@ -188,7 +188,7 @@ subroutine read_var_r8(this, varindex, starts, sizes, values) subroutine read_var_r4(this, varindex, starts, sizes, values) use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING - class(fesom_file_type), intent(in) :: this + class(netcdf_file_type), intent(in) :: this integer, intent(in) :: varindex integer, dimension(:) :: starts, sizes real(4), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed @@ -205,7 +205,7 @@ subroutine read_var_r4(this, varindex, starts, sizes, values) subroutine open_write_create(this, filepath) - class(fesom_file_type), target, intent(inout) :: this + class(netcdf_file_type), target, intent(inout) :: this character(len=*), intent(in) :: filepath ! EO parameters include "netcdf.inc" @@ -249,7 +249,7 @@ subroutine open_write_create(this, filepath) ! open an existing file and prepare to write data to it subroutine open_write_append(this, filepath) - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this character(len=*), intent(in) :: filepath ! EO parameters include "netcdf.inc" @@ -268,7 +268,7 @@ subroutine open_write_append(this, filepath) subroutine write_var_r8(this, varindex, starts, sizes, values) use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING - class(fesom_file_type), intent(in) :: this + class(netcdf_file_type), intent(in) :: this integer, intent(in) :: varindex integer, dimension(:) :: starts, sizes real(8), intent(in), target :: values(..) ! must be inout or the allocation might be screwed @@ -287,7 +287,7 @@ subroutine write_var_r8(this, varindex, starts, sizes, values) subroutine write_var_r4(this, varindex, starts, sizes, values) use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING - class(fesom_file_type), intent(in) :: this + class(netcdf_file_type), intent(in) :: this integer, intent(in) :: varindex integer, dimension(:) :: starts, sizes real(4), intent(in), target :: values(..) ! must be inout or the allocation might be screwed @@ -305,7 +305,7 @@ subroutine write_var_r4(this, varindex, starts, sizes, values) subroutine close_file(this) ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same ncid - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this ! EO parameters include "netcdf.inc" call assert_nc( nf_close(this%ncid) , __LINE__) @@ -315,7 +315,7 @@ subroutine close_file(this) ! connect our dims and vars to their counterparts in the NetCDF file, bail out if they do not match ! ignore any additional dims and vars the file might contain subroutine attach_dims_vars_to_file(this) - class(fesom_file_type), intent(inout) :: this + class(netcdf_file_type), intent(inout) :: this ! EO parameters include "netcdf.inc" integer i, ii diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 32805c83a..8a6cab426 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -9,7 +9,7 @@ contains @test subroutine test_can_initialize_without_filepath() - type(fesom_file_type) f + type(netcdf_file_type) f call f%initialize() end subroutine @@ -17,7 +17,7 @@ contains @test subroutine test_can_add_dims() - type(fesom_file_type) f + type(netcdf_file_type) f integer nz_dimidx, node_dimidx call f%initialize() @@ -30,7 +30,7 @@ contains @test subroutine test_can_add_unlimited_dim() - type(fesom_file_type) f + type(netcdf_file_type) f integer dimidx call f%initialize() @@ -41,7 +41,7 @@ contains @test subroutine test_can_add_vars() - type(fesom_file_type) f + type(netcdf_file_type) f integer nz_dimidx, node_dimidx integer salt_varid @@ -59,7 +59,7 @@ contains @test subroutine test_can_open_file_in_readmode_without_expecting_dims_and_vars() - type(fesom_file_type) f + type(netcdf_file_type) f call f%initialize() call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") @@ -69,7 +69,7 @@ contains @test subroutine test_can_open_file_with_unlimited_dim() - type(fesom_file_type) f + type(netcdf_file_type) f integer dimidx call f%initialize() @@ -83,7 +83,7 @@ contains @test subroutine test_can_open_file_with_variable() - type(fesom_file_type) f + type(netcdf_file_type) f integer nz_dimidx, node_dimidx, time_dimidx integer salt_varid call f%initialize() @@ -106,7 +106,7 @@ contains @test subroutine test_can_read_2d_variable_real4() - type(fesom_file_type) f + type(netcdf_file_type) f real(4), allocatable :: values(:) integer node_dimidx, time_dimidx @@ -133,7 +133,7 @@ contains @test subroutine test_can_read_2d_variable_real8() - type(fesom_file_type) f + type(netcdf_file_type) f real(8), allocatable :: values(:) integer node_dimidx, time_dimidx @@ -160,7 +160,7 @@ contains @test subroutine test_can_read_3d_variable_real4() - type(fesom_file_type) f + type(netcdf_file_type) f real(4), allocatable :: values(:,:) integer node_dimidx, time_dimidx, z_dimidx @@ -196,7 +196,7 @@ contains @test subroutine test_can_read_3d_variable_real8() - type(fesom_file_type) f + type(netcdf_file_type) f real(8), allocatable :: values(:,:) integer node_dimidx, time_dimidx, z_dimidx @@ -232,7 +232,7 @@ contains @test subroutine test_can_create_empty_file() - type(fesom_file_type) f + type(netcdf_file_type) f integer exitstat character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_empty_file.nc" @@ -250,7 +250,7 @@ contains @test subroutine test_can_create_file_with_dims_and_vars() - type(fesom_file_type) f + type(netcdf_file_type) f integer z_dimidx, time_dimidx integer varindex integer exitstat @@ -274,7 +274,7 @@ contains @test subroutine test_can_create_file_and_var_text_attributes() - type(fesom_file_type) f + type(netcdf_file_type) f integer z_dimidx, time_dimidx integer varindex integer exitstat @@ -300,7 +300,7 @@ contains @test subroutine test_can_append_to_existing_file_real4() - type(fesom_file_type) f + type(netcdf_file_type) f real(4), allocatable :: values(:) integer node_dimidx, time_dimidx @@ -340,7 +340,7 @@ contains @test subroutine test_can_append_to_existing_file_real8() - type(fesom_file_type) f + type(netcdf_file_type) f real(8), allocatable :: values(:) integer node_dimidx, time_dimidx From 69be3eceb901531c9d394854889b5daaa0c0f08d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Dec 2020 11:17:05 +0100 Subject: [PATCH 183/909] use an abstract base type to represent different types of attributes --- src/io_netcdf_attribute_module.F90 | 45 ++++++++++++++++++++++++++++-- src/io_netcdf_file_module.F90 | 27 ++++++++++-------- 2 files changed, 57 insertions(+), 15 deletions(-) diff --git a/src/io_netcdf_attribute_module.F90 b/src/io_netcdf_attribute_module.F90 index 86102d6d9..9c05f8823 100644 --- a/src/io_netcdf_attribute_module.F90 +++ b/src/io_netcdf_attribute_module.F90 @@ -1,17 +1,56 @@ module io_netcdf_attribute_module implicit none - public att_type + public att_type, att_type_text private - type att_type + type, abstract :: att_type character(:), allocatable :: name + contains + procedure(define_in_var), deferred :: define_in_var + end type + + + interface + subroutine define_in_var(this, fileid, varid) + import att_type + class(att_type), intent(inout) :: this + integer, intent(in) :: fileid + integer, intent(in) :: varid + end subroutine + end interface + + + type, extends(att_type) :: att_type_text character(:), allocatable :: text - ! todo: make this work for other data types like int + contains + procedure :: define_in_var => define_in_var_text end type contains + subroutine define_in_var_text(this, fileid, varid) + class(att_type_text), intent(inout) :: this + integer, intent(in) :: fileid + integer, intent(in) :: varid + ! EO parameters + include "netcdf.inc" + + call assert_nc( nf_put_att_text(fileid, varid, this%name, len(this%text), this%text) , __LINE__) + end subroutine + + + subroutine assert_nc(status, line) + integer, intent(in) :: status + integer, intent(in) :: line + ! EO parameters + include "netcdf.inc" + if(status /= nf_noerr) then + print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status)) + stop 1 + endif + end subroutine + end module diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 0cb8fa8cc..88b9c9a11 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -13,10 +13,11 @@ module io_netcdf_file_module character(:), allocatable :: filepath integer ncid contains - procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_att, open_read, close_file, open_write_create, open_write_append + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, open_read, close_file, open_write_create, open_write_append generic, public :: read_var => read_var_r4, read_var_r8 generic, public :: write_var => write_var_r4, write_var_r8 - procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8 + generic, public :: add_var_att => add_var_att_text + procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8, add_var_att_text end type @@ -32,12 +33,16 @@ module io_netcdf_file_module character(:), allocatable :: name integer, allocatable :: dim_indices(:) integer datatype - type(att_type), allocatable :: atts(:) + type(att_type_wrapper), allocatable :: atts(:) integer ncid end type - - + + + type att_type_wrapper ! work around Fortran not being able to have polymorphic types in the same array + class(att_type), allocatable :: it + end type + contains @@ -115,7 +120,7 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) ! EO parameters include "netcdf.inc" type(var_type), allocatable :: tmparr(:) - type(att_type) empty_atts(0) + type(att_type_wrapper) empty_atts(0) ! assume the vars array is allocated allocate( tmparr(size(this%vars)+1) ) @@ -128,20 +133,20 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) end function - subroutine add_var_att(this, varindex, att_name, att_text) + subroutine add_var_att_text(this, varindex, att_name, att_text) class(netcdf_file_type), intent(inout) :: this integer, intent(in) :: varindex character(len=*), intent(in) :: att_name character(len=*), intent(in) :: att_text ! EO parameters - type(att_type), allocatable :: tmparr(:) + type(att_type_wrapper), allocatable :: tmparr(:) allocate( tmparr(size(this%vars(varindex)%atts)+1) ) tmparr(1:size(this%vars(varindex)%atts)) = this%vars(varindex)%atts deallocate(this%vars(varindex)%atts) call move_alloc(tmparr, this%vars(varindex)%atts) - this%vars(varindex)%atts( size(this%vars(varindex)%atts) ) = att_type(name=att_name, text=att_text) + this%vars(varindex)%atts( size(this%vars(varindex)%atts) )%it = att_type_text(name=att_name, text=att_text) end subroutine @@ -237,9 +242,7 @@ subroutine open_write_create(this, filepath) call assert_nc( nf_def_var(this%ncid, this%vars(i)%name, this%vars(i)%datatype, var_ndims, var_dimids, this%vars(i)%ncid) , __LINE__) do ii=1, size(this%vars(i)%atts) - att_name => this%vars(i)%atts(ii)%name - att_text => this%vars(i)%atts(ii)%text - call assert_nc( nf_put_att_text(this%ncid, this%vars(i)%ncid, att_name, len(att_text), att_text) , __LINE__) + call this%vars(i)%atts(ii)%it%define_in_var(this%ncid, this%vars(i)%ncid) end do end do From f1b45db4f9fdcc818a07f4c80396ddd159a3e62d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Dec 2020 11:24:10 +0100 Subject: [PATCH 184/909] fix typo in comment --- test/fortran/io_netcdf_file_module_tests.pf | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 8a6cab426..c6bfc0be1 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -241,7 +241,7 @@ contains call f%initialize() call f%open_write_create(filepath) call f%close_file() - ! todo: actually test if the file has bee written correctly + ! todo: actually test if the file has been written correctly call execute_command_line("rm "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 @@ -265,7 +265,7 @@ contains call f%open_write_create(filepath) call f%close_file() - ! todo: actually test if the file has bee written correctly + ! todo: actually test if the file has been written correctly call execute_command_line("rm "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 @@ -291,7 +291,7 @@ contains call f%open_write_create(filepath) call f%close_file() - ! todo: actually test if the file has bee written correctly + ! todo: actually test if the file has been written correctly call execute_command_line("rm "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 @@ -331,7 +331,7 @@ contains call f%write_var(varindex, [1,3], [5,1], values) call f%close_file() - ! todo: actually test if the file has bee written correctly + ! todo: actually test if the file has been written correctly call execute_command_line("rm "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 @@ -371,7 +371,7 @@ contains call f%write_var(varindex, [1,3], [5,1], values) call f%close_file() - ! todo: actually test if the file has bee written correctly + ! todo: actually test if the file has been written correctly call execute_command_line("rm "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 From 0cdac4d5a28e6911964fef5102387fed80434481 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Dec 2020 12:53:35 +0100 Subject: [PATCH 185/909] use grep to check the NetCDF files created via unit tests --- test/fortran/io_netcdf_file_module_tests.pf | 54 ++++++++++++++++++--- 1 file changed, 48 insertions(+), 6 deletions(-) diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index c6bfc0be1..def382254 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -6,6 +6,31 @@ module io_netcdf_file_module_tests contains + ! utility procedure to grep a NetCDF file for a string + function is_in_file_header(filepath, searchtext) result(is_in) + character(len=*), intent(in) :: filepath + character(len=*), intent(in) :: searchtext + logical is_in + ! EO parameters + integer exitstat + + call execute_command_line("ncdump -h -p9,17 "//filepath//" | grep -q '"//searchtext//"'", exitstat=exitstat) + is_in = (exitstat == 0) + end function + + + ! utility procedure to grep a NetCDF file for a string + function is_in_file(filepath, searchtext) result(is_in) + character(len=*), intent(in) :: filepath + character(len=*), intent(in) :: searchtext + logical is_in + ! EO parameters + integer exitstat + + call execute_command_line("ncdump -p9,17 "//filepath//" | grep -q '"//searchtext//"'", exitstat=exitstat) + is_in = (exitstat == 0) + end function + @test subroutine test_can_initialize_without_filepath() @@ -241,7 +266,9 @@ contains call f%initialize() call f%open_write_create(filepath) call f%close_file() - ! todo: actually test if the file has been written correctly + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, '}') ) call execute_command_line("rm "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 @@ -265,7 +292,11 @@ contains call f%open_write_create(filepath) call f%close_file() - ! todo: actually test if the file has been written correctly + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, 'nz1 = 3 ;') ) + @assertTrue( is_in_file_header(filepath, 'time = UNLIMITED') ) + @assertTrue( is_in_file_header(filepath, 'float salt(time, nz1) ;') ) call execute_command_line("rm "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 @@ -279,7 +310,7 @@ contains integer varindex integer exitstat character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_and_var_attributes.nc" - + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run call f%initialize() @@ -291,7 +322,10 @@ contains call f%open_write_create(filepath) call f%close_file() - ! todo: actually test if the file has been written correctly + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, 'salt:units = "psu"') ) + @assertTrue( is_in_file_header(filepath, 'salt:long_name = "sea surface salinity"') ) call execute_command_line("rm "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 @@ -331,7 +365,11 @@ contains call f%write_var(varindex, [1,3], [5,1], values) call f%close_file() - ! todo: actually test if the file has been written correctly + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, 'time = UNLIMITED ; // (3 currently)') ) + ! todo: check if the values have been written correctly + @assertTrue( is_in_file(filepath, '100.00') ) call execute_command_line("rm "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 @@ -371,7 +409,11 @@ contains call f%write_var(varindex, [1,3], [5,1], values) call f%close_file() - ! todo: actually test if the file has been written correctly + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, 'time = UNLIMITED ; // (3 currently)') ) + ! todo: check if the values have been written correctly + @assertTrue( is_in_file(filepath, '100.00') ) call execute_command_line("rm "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 From 56835dc14bb49ebe818dca36dbda3c0d38e92ecd Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Dec 2020 13:45:11 +0100 Subject: [PATCH 186/909] - compare with the number of dimensions of the variable (instead of the number of dimensions of the file) when reading or writing - avoid duplicate allocation to allow attaching multiple variables to a file - capture the above errors with unit tests --- src/io_netcdf_file_module.F90 | 9 ++--- test/fortran/io_netcdf_file_module_tests.pf | 37 +++++++++++++++++++-- 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 88b9c9a11..15f2f049e 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -181,7 +181,7 @@ subroutine read_var_r8(this, varindex, starts, sizes, values) real(8), pointer :: values_ptr(:) call assert(size(sizes) == size(starts), __LINE__) - call assert(size(starts) == size(this%dims), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) call assert(product(sizes) == product(shape(values)), __LINE__) call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) @@ -201,7 +201,7 @@ subroutine read_var_r4(this, varindex, starts, sizes, values) real(4), pointer :: values_ptr(:) call assert(size(sizes) == size(starts), __LINE__) - call assert(size(starts) == size(this%dims), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) call assert(product(sizes) == product(shape(values)), __LINE__) call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) @@ -279,7 +279,7 @@ subroutine write_var_r8(this, varindex, starts, sizes, values) real(8), pointer :: values_ptr(:) call assert(size(sizes) == size(starts), __LINE__) - call assert(size(starts) == size(this%dims), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) call assert(product(sizes) == product(shape(values)), __LINE__) call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) @@ -298,7 +298,7 @@ subroutine write_var_r4(this, varindex, starts, sizes, values) real(4), pointer :: values_ptr(:) call assert(size(sizes) == size(starts), __LINE__) - call assert(size(starts) == size(this%dims), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) call assert(product(sizes) == product(shape(values)), __LINE__) call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) @@ -341,6 +341,7 @@ subroutine attach_dims_vars_to_file(this) ! see if this var has the expected dims call assert_nc( nf_inq_varndims(this%ncid, this%vars(i)%ncid, actual_dimcount) , __LINE__) call assert(size(this%vars(i)%dim_indices) == actual_dimcount, __LINE__) + if(allocated(actual_dimids)) deallocate(actual_dimids) allocate(actual_dimids(actual_dimcount)) call assert_nc( nf_inq_vardimid(this%ncid, this%vars(i)%ncid, actual_dimids) , __LINE__) do ii=1, actual_dimcount diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index def382254..18e0e8945 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -255,6 +255,29 @@ contains end subroutine + @test + subroutine test_can_read_variable_with_less_dims_than_in_file() + type(netcdf_file_type) f + real(8) value + + integer node_dimidx, time_dimidx, unused_dimidx + integer varindex + call f%initialize() + ! 2 dims in file + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + ! read var with 1 dim + varindex = f%add_var_double("time", [time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var(varindex, [1], [1], value) + @assertEqual(10800.0_8, value, tolerance=1.e-6) + + call f%close_file() + end subroutine + + @test subroutine test_can_create_empty_file() type(netcdf_file_type) f @@ -279,7 +302,7 @@ contains subroutine test_can_create_file_with_dims_and_vars() type(netcdf_file_type) f integer z_dimidx, time_dimidx - integer varindex + integer varindex, time_varindex integer exitstat character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_with_dims_and_vars.nc" @@ -288,6 +311,7 @@ contains call f%initialize() z_dimidx = f%add_dim("nz1", 3) time_dimidx = f%add_dim_unlimited("time") + time_varindex = f%add_var_double("time", [time_dimidx]) varindex = f%add_var_real("salt", [z_dimidx, time_dimidx]) call f%open_write_create(filepath) @@ -296,6 +320,7 @@ contains ! test if the file has been written correctly @assertTrue( is_in_file_header(filepath, 'nz1 = 3 ;') ) @assertTrue( is_in_file_header(filepath, 'time = UNLIMITED') ) + @assertTrue( is_in_file_header(filepath, 'double time(time) ;') ) @assertTrue( is_in_file_header(filepath, 'float salt(time, nz1) ;') ) call execute_command_line("rm "//filepath, exitstat=exitstat) @@ -338,7 +363,7 @@ contains real(4), allocatable :: values(:) integer node_dimidx, time_dimidx - integer varindex + integer varindex, time_varindex integer exitstat character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_append_to_existing_file_real4.nc" @@ -351,6 +376,7 @@ contains call execute_command_line("cp fixtures/io_netcdf/columnwise_2d_sss.nc "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 + time_varindex = f%add_var_double("time", [time_dimidx]) varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) call f%open_write_append(filepath) @@ -362,6 +388,7 @@ contains values(5) = 100.005 ! the file has 2 timesteps, we append a 3rd one + call f%write_var(time_varindex, [3], [1], [32400.0_8]) call f%write_var(varindex, [1,3], [5,1], values) call f%close_file() @@ -369,6 +396,7 @@ contains ! test if the file has been written correctly @assertTrue( is_in_file_header(filepath, 'time = UNLIMITED ; // (3 currently)') ) ! todo: check if the values have been written correctly + @assertTrue( is_in_file(filepath, '32400') ) @assertTrue( is_in_file(filepath, '100.00') ) call execute_command_line("rm "//filepath, exitstat=exitstat) @@ -382,7 +410,7 @@ contains real(8), allocatable :: values(:) integer node_dimidx, time_dimidx - integer varindex + integer varindex, time_varindex integer exitstat character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_append_to_existing_file_real8.nc" @@ -395,6 +423,7 @@ contains call execute_command_line("cp fixtures/io_netcdf/columnwise_2d_sss.nc "//filepath, exitstat=exitstat) if(exitstat .ne. 0) stop 1 + time_varindex = f%add_var_double("time", [time_dimidx]) varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) call f%open_write_append(filepath) @@ -406,6 +435,7 @@ contains values(5) = 100.005_8 ! the file has 2 timesteps, we append a 3rd one + call f%write_var(time_varindex, [3], [1], [32400.0_8]) call f%write_var(varindex, [1,3], [5,1], values) call f%close_file() @@ -413,6 +443,7 @@ contains ! test if the file has been written correctly @assertTrue( is_in_file_header(filepath, 'time = UNLIMITED ; // (3 currently)') ) ! todo: check if the values have been written correctly + @assertTrue( is_in_file(filepath, '32400') ) @assertTrue( is_in_file(filepath, '100.00') ) call execute_command_line("rm "//filepath, exitstat=exitstat) From 23038a4902db7b0efbc3147f8d78f2a003dc81fb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Dec 2020 16:09:33 +0100 Subject: [PATCH 187/909] - add int attribute type and procedures to add int attributes to a variable - add unit test --- src/io_netcdf_attribute_module.F90 | 20 +++++++++++++- src/io_netcdf_file_module.F90 | 21 +++++++++++++-- test/fortran/io_netcdf_file_module_tests.pf | 29 ++++++++++++++++++++- 3 files changed, 66 insertions(+), 4 deletions(-) diff --git a/src/io_netcdf_attribute_module.F90 b/src/io_netcdf_attribute_module.F90 index 9c05f8823..a04409b13 100644 --- a/src/io_netcdf_attribute_module.F90 +++ b/src/io_netcdf_attribute_module.F90 @@ -1,6 +1,6 @@ module io_netcdf_attribute_module implicit none - public att_type, att_type_text + public att_type, att_type_text, att_type_int private @@ -28,6 +28,13 @@ subroutine define_in_var(this, fileid, varid) end type + type, extends(att_type) :: att_type_int + integer :: val + contains + procedure :: define_in_var => define_in_var_int + end type + + contains @@ -42,6 +49,17 @@ subroutine define_in_var_text(this, fileid, varid) end subroutine + subroutine define_in_var_int(this, fileid, varid) + class(att_type_int), intent(inout) :: this + integer, intent(in) :: fileid + integer, intent(in) :: varid + ! EO parameters + include "netcdf.inc" + + call assert_nc( nf_put_att_int(fileid, varid, this%name, nf_int, 1, this%val) , __LINE__) + end subroutine + + subroutine assert_nc(status, line) integer, intent(in) :: status integer, intent(in) :: line diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 15f2f049e..dc587eb5d 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -16,8 +16,8 @@ module io_netcdf_file_module procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, open_read, close_file, open_write_create, open_write_append generic, public :: read_var => read_var_r4, read_var_r8 generic, public :: write_var => write_var_r4, write_var_r8 - generic, public :: add_var_att => add_var_att_text - procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8, add_var_att_text + generic, public :: add_var_att => add_var_att_text, add_var_att_int + procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8, add_var_att_text, add_var_att_int end type @@ -150,6 +150,23 @@ subroutine add_var_att_text(this, varindex, att_name, att_text) end subroutine + subroutine add_var_att_int(this, varindex, att_name, att_val) + class(netcdf_file_type), intent(inout) :: this + integer, intent(in) :: varindex + character(len=*), intent(in) :: att_name + integer, intent(in) :: att_val + ! EO parameters + type(att_type_wrapper), allocatable :: tmparr(:) + + allocate( tmparr(size(this%vars(varindex)%atts)+1) ) + tmparr(1:size(this%vars(varindex)%atts)) = this%vars(varindex)%atts + deallocate(this%vars(varindex)%atts) + call move_alloc(tmparr, this%vars(varindex)%atts) + + this%vars(varindex)%atts( size(this%vars(varindex)%atts) )%it = att_type_int(name=att_name, val=att_val) + end subroutine + + subroutine open_read(this, filepath) class(netcdf_file_type), intent(inout) :: this character(len=*), intent(in) :: filepath diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 18e0e8945..1ef42b556 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -334,7 +334,7 @@ contains integer z_dimidx, time_dimidx integer varindex integer exitstat - character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_and_var_attributes.nc" + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_and_var_text_attributes.nc" call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run @@ -357,6 +357,33 @@ contains end subroutine + @test + subroutine test_can_create_file_and_var_int_attributes() + type(netcdf_file_type) f + integer z_dimidx, time_dimidx + integer varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_and_var_int_attributes.nc" + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + z_dimidx = f%add_dim("nz1", 3) + time_dimidx = f%add_dim_unlimited("time") + varindex = f%add_var_real("salt", [z_dimidx, time_dimidx]) + call f%add_var_att(varindex, "number", 42) + + call f%open_write_create(filepath) + call f%close_file() + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, 'salt:number = 42 ;') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + @test subroutine test_can_append_to_existing_file_real4() type(netcdf_file_type) f From 8023a1252cc947158b8a4107c0a70b976d79d411 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Dec 2020 16:16:18 +0100 Subject: [PATCH 188/909] be able to add global attributes --- src/io_netcdf_file_module.F90 | 36 +++++++++++++++++++++ test/fortran/io_netcdf_file_module_tests.pf | 20 +++++++++++- 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index dc587eb5d..c136d0430 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -9,6 +9,7 @@ module io_netcdf_file_module private type(dim_type), allocatable :: dims(:) type(var_type), allocatable :: vars(:) + type(att_type_wrapper), allocatable :: gatts(:) character(:), allocatable :: filepath integer ncid @@ -17,7 +18,9 @@ module io_netcdf_file_module generic, public :: read_var => read_var_r4, read_var_r8 generic, public :: write_var => write_var_r4, write_var_r8 generic, public :: add_var_att => add_var_att_text, add_var_att_int + generic, public :: add_global_att => add_global_att_text, add_global_att_int procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8, add_var_att_text, add_var_att_int + procedure, private :: add_global_att_text, add_global_att_int end type @@ -52,6 +55,7 @@ subroutine initialize(this) this%filepath = "" allocate(this%dims(0)) allocate(this%vars(0)) + allocate(this%gatts(0)) end subroutine @@ -133,6 +137,38 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) end function + subroutine add_global_att_text(this, att_name, att_text) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: att_name + character(len=*), intent(in) :: att_text + ! EO parameters + type(att_type_wrapper), allocatable :: tmparr(:) + + allocate( tmparr(size(this%gatts)+1) ) + tmparr(1:size(this%gatts)) = this%gatts + deallocate(this%gatts) + call move_alloc(tmparr, this%gatts) + + this%gatts( size(this%gatts) )%it = att_type_text(name=att_name, text=att_text) + end subroutine + + + subroutine add_global_att_int(this, att_name, att_val) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: att_name + integer, intent(in) :: att_val + ! EO parameters + type(att_type_wrapper), allocatable :: tmparr(:) + + allocate( tmparr(size(this%gatts)+1) ) + tmparr(1:size(this%gatts)) = this%gatts + deallocate(this%gatts) + call move_alloc(tmparr, this%gatts) + + this%gatts( size(this%gatts) )%it = att_type_int(name=att_name, val=att_val) + end subroutine + + subroutine add_var_att_text(this, varindex, att_name, att_text) class(netcdf_file_type), intent(inout) :: this integer, intent(in) :: varindex diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 1ef42b556..0bd04a233 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -65,7 +65,7 @@ contains @test - subroutine test_can_add_vars() + subroutine test_can_add_vars_with_attributes() type(netcdf_file_type) f integer nz_dimidx, node_dimidx integer salt_varid @@ -82,6 +82,24 @@ contains end subroutine + @test + subroutine test_can_add_global_attribute_text() + type(netcdf_file_type) f + + call f%initialize() + call f%add_global_att("FESOM_model", "FESOM2") + end subroutine + + + @test + subroutine test_can_add_global_attribute_int() + type(netcdf_file_type) f + + call f%initialize() + call f%add_global_att("FESOM_force_rotation", 0) + end subroutine + + @test subroutine test_can_open_file_in_readmode_without_expecting_dims_and_vars() type(netcdf_file_type) f From 030d5a9dc36a73f4765d57a2f60bfbbdf8e23620 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 21 Dec 2020 12:07:12 +0100 Subject: [PATCH 189/909] - be able to write global attributes to a NetCDf file - add according unit tests --- src/io_netcdf_file_module.F90 | 4 ++ test/fortran/io_netcdf_file_module_tests.pf | 42 +++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index c136d0430..e76278e3d 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -283,6 +283,10 @@ subroutine open_write_create(this, filepath) do i=1, size(this%dims) call assert_nc( nf_def_dim(this%ncid, this%dims(i)%name, this%dims(i)%len, this%dims(i)%ncid) , __LINE__) end do + + do i=1, size(this%gatts) + call this%gatts(i)%it%define_in_var(this%ncid, nf_global) + end do ! create our vars in the file do i=1, size(this%vars) diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 0bd04a233..b8b1a19d1 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -316,6 +316,48 @@ contains end subroutine + @test + subroutine test_can_create_file_with_global_attributes_text() + type(netcdf_file_type) f + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_with_global_attributes_text.nc" + + call execute_command_line("rm -f "//filepath) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + call f%add_global_att("FESOM_model", "FESOM2") + call f%open_write_create(filepath) + call f%close_file() + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, ':FESOM_model = "FESOM2" ;') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_create_file_with_global_attributes_int() + type(netcdf_file_type) f + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_with_global_attributes_int.nc" + + call execute_command_line("rm -f "//filepath) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + call f%add_global_att("FESOM_force_rotation", 0) + call f%open_write_create(filepath) + call f%close_file() + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, ':FESOM_force_rotation = 0 ;') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + @test subroutine test_can_create_file_with_dims_and_vars() type(netcdf_file_type) f From 2fbc4c17a0823e48f4f11922a80b50c732b9d0d2 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 22 Dec 2020 10:57:17 +0100 Subject: [PATCH 190/909] do not use generic interface to NetCDF get_vara and put_vara procedures to make it simpler to add new put/get types --- src/io_netcdf_file_module.F90 | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index e76278e3d..1627bc049 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -224,13 +224,13 @@ subroutine open_read(this, filepath) ! this way we can retrieve e.g. data from a 3D variable to a 2D array with one size set to 1 (e.g. to get a single timestep) ! starts and sizes must have the same rank as the variable has dimensions subroutine read_var_r8(this, varindex, starts, sizes, values) - use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING class(netcdf_file_type), intent(in) :: this integer, intent(in) :: varindex integer, dimension(:) :: starts, sizes real(8), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed ! EO parameters + include "netcdf.inc" real(8), pointer :: values_ptr(:) call assert(size(sizes) == size(starts), __LINE__) @@ -238,19 +238,19 @@ subroutine read_var_r8(this, varindex, starts, sizes, values) call assert(product(sizes) == product(shape(values)), __LINE__) call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) - call assert_nc(nf_get_vara_x(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + call assert_nc(nf_get_vara_double(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) end subroutine ! see read_var_r8 for usage comment subroutine read_var_r4(this, varindex, starts, sizes, values) - use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING class(netcdf_file_type), intent(in) :: this integer, intent(in) :: varindex integer, dimension(:) :: starts, sizes real(4), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed ! EO parameters + include "netcdf.inc" real(4), pointer :: values_ptr(:) call assert(size(sizes) == size(starts), __LINE__) @@ -258,7 +258,7 @@ subroutine read_var_r4(this, varindex, starts, sizes, values) call assert(product(sizes) == product(shape(values)), __LINE__) call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) - call assert_nc(nf_get_vara_x(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + call assert_nc(nf_get_vara_real(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) end subroutine @@ -326,13 +326,13 @@ subroutine open_write_append(this, filepath) subroutine write_var_r8(this, varindex, starts, sizes, values) - use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING class(netcdf_file_type), intent(in) :: this integer, intent(in) :: varindex integer, dimension(:) :: starts, sizes real(8), intent(in), target :: values(..) ! must be inout or the allocation might be screwed ! EO parameters + include "netcdf.inc" real(8), pointer :: values_ptr(:) call assert(size(sizes) == size(starts), __LINE__) @@ -340,18 +340,18 @@ subroutine write_var_r8(this, varindex, starts, sizes, values) call assert(product(sizes) == product(shape(values)), __LINE__) call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) - call assert_nc(nf_put_vara_x(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + call assert_nc(nf_put_vara_double(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) end subroutine subroutine write_var_r4(this, varindex, starts, sizes, values) - use io_netcdf_nf_interface use, intrinsic :: ISO_C_BINDING class(netcdf_file_type), intent(in) :: this integer, intent(in) :: varindex integer, dimension(:) :: starts, sizes real(4), intent(in), target :: values(..) ! must be inout or the allocation might be screwed ! EO parameters + include "netcdf.inc" real(4), pointer :: values_ptr(:) call assert(size(sizes) == size(starts), __LINE__) @@ -359,7 +359,7 @@ subroutine write_var_r4(this, varindex, starts, sizes, values) call assert(product(sizes) == product(shape(values)), __LINE__) call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) - call assert_nc(nf_put_vara_x(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + call assert_nc(nf_put_vara_real(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) end subroutine From d52d35978c68ed1293accef35ba068d57af13f12 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 22 Dec 2020 10:59:10 +0100 Subject: [PATCH 191/909] - be able to add and write integer variables - add unit test --- src/io_netcdf_file_module.F90 | 38 +++++++++++++++++++-- test/fortran/io_netcdf_file_module_tests.pf | 30 ++++++++++++++++ 2 files changed, 65 insertions(+), 3 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 1627bc049..d4c9dcded 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -14,12 +14,12 @@ module io_netcdf_file_module character(:), allocatable :: filepath integer ncid contains - procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, open_read, close_file, open_write_create, open_write_append + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_int, open_read, close_file, open_write_create, open_write_append generic, public :: read_var => read_var_r4, read_var_r8 - generic, public :: write_var => write_var_r4, write_var_r8 + generic, public :: write_var => write_var_r4, write_var_r8, write_var_integer generic, public :: add_var_att => add_var_att_text, add_var_att_int generic, public :: add_global_att => add_global_att_text, add_global_att_int - procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8, add_var_att_text, add_var_att_int + procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8, write_var_integer, add_var_att_text, add_var_att_int procedure, private :: add_global_att_text, add_global_att_int end type @@ -115,6 +115,19 @@ function add_var_real(this, name, dim_indices) result(varindex) end function + ! the sizes of the dims define the global shape of the var + function add_var_int(this, name, dim_indices) result(varindex) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: dim_indices(:) + integer varindex + ! EO parameters + include "netcdf.inc" + + varindex = this%add_var_x(name, dim_indices, nf_int) + end function + + function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) class(netcdf_file_type), intent(inout) :: this character(len=*), intent(in) :: name @@ -363,6 +376,25 @@ subroutine write_var_r4(this, varindex, starts, sizes, values) end subroutine + subroutine write_var_integer(this, varindex, starts, sizes, values) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + integer, intent(in), target :: values(..) ! must be inout or the allocation might be screwed + ! EO parameters + include "netcdf.inc" + integer, pointer :: values_ptr(:) + + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_put_vara_int(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + subroutine close_file(this) ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same ncid class(netcdf_file_type), intent(inout) :: this diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index b8b1a19d1..eb13611f4 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -444,6 +444,36 @@ contains end subroutine + @test + subroutine test_can_write_to_new_file_int() + type(netcdf_file_type) f + + integer time_dimidx + integer varindex, time_varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_write_to_new_file_int.nc" + + call f%initialize() + time_dimidx = f%add_dim_unlimited("time") + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run + + time_varindex = f%add_var_double("time", [time_dimidx]) + varindex = f%add_var_int("iter", [time_dimidx]) + call f%open_write_create(filepath) + + call f%write_var(time_varindex, [1], [1], [10800.0_8]) + call f%write_var(varindex, [1], [1], [42]) + + call f%close_file() + + @assertTrue( is_in_file(filepath, 'iter = 42 ;') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + @test subroutine test_can_append_to_existing_file_real4() type(netcdf_file_type) f From 8e6698f07bae0d05cdfc7f44c3b2e6be6185d45f Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 22 Dec 2020 11:06:06 +0100 Subject: [PATCH 192/909] - be able to read integer variables - add unit test --- src/io_netcdf_file_module.F90 | 24 ++++++++++++++++-- test/fortran/io_netcdf_file_module_tests.pf | 27 +++++++++++++++++++++ 2 files changed, 49 insertions(+), 2 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index d4c9dcded..af87d7ef8 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -15,11 +15,11 @@ module io_netcdf_file_module integer ncid contains procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_int, open_read, close_file, open_write_create, open_write_append - generic, public :: read_var => read_var_r4, read_var_r8 + generic, public :: read_var => read_var_r4, read_var_r8, read_var_integer generic, public :: write_var => write_var_r4, write_var_r8, write_var_integer generic, public :: add_var_att => add_var_att_text, add_var_att_int generic, public :: add_global_att => add_global_att_text, add_global_att_int - procedure, private :: read_var_r4, read_var_r8, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8, write_var_integer, add_var_att_text, add_var_att_int + procedure, private :: read_var_r4, read_var_r8, read_var_integer, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8, write_var_integer, add_var_att_text, add_var_att_int procedure, private :: add_global_att_text, add_global_att_int end type @@ -275,6 +275,26 @@ subroutine read_var_r4(this, varindex, starts, sizes, values) end subroutine + ! see read_var_r8 for usage comment + subroutine read_var_integer(this, varindex, starts, sizes, values) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + integer, intent(inout), target :: values(..) ! must be inout or the allocation might be screwed + ! EO parameters + include "netcdf.inc" + integer, pointer :: values_ptr(:) + + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_get_vara_int(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + subroutine open_write_create(this, filepath) class(netcdf_file_type), target, intent(inout) :: this character(len=*), intent(in) :: filepath diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index eb13611f4..0fc3903b1 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -273,6 +273,33 @@ contains end subroutine + @test + subroutine test_can_read_2d_variable_integer() + type(netcdf_file_type) f + integer, allocatable :: values(:) + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + allocate(values(5)) + call f%read_var(sss_varindex, [1,2], [5,1], values) + ! check level 1 values + @assertEqual(10, values(1)) + @assertEqual(10, values(2)) + @assertEqual(10, values(3)) + @assertEqual(10, values(4)) + @assertEqual(10, values(5)) + + call f%close_file() + end subroutine + + @test subroutine test_can_read_variable_with_less_dims_than_in_file() type(netcdf_file_type) f From e08b012fe7892e261d1dc5ba6c1e8017977e12da Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 23 Dec 2020 12:51:42 +0100 Subject: [PATCH 193/909] - be able to query the number of specified dimensions - add unit test --- src/io_netcdf_file_module.F90 | 11 +++++++++++ test/fortran/io_netcdf_file_module_tests.pf | 14 ++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index af87d7ef8..76e2a851a 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -15,6 +15,7 @@ module io_netcdf_file_module integer ncid contains procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_int, open_read, close_file, open_write_create, open_write_append + procedure, public :: ndims generic, public :: read_var => read_var_r4, read_var_r8, read_var_integer generic, public :: write_var => write_var_r4, write_var_r8, write_var_integer generic, public :: add_var_att => add_var_att_text, add_var_att_int @@ -87,6 +88,16 @@ function add_dim(this, name, len) result(dimindex) dimindex = size(this%dims) this%dims(dimindex) = dim_type(name=name, len=len, ncid=-1) end function + + + ! return number of specified dimensions (which might be less dimensions than an attached file has) + function ndims(this) + class(netcdf_file_type), intent(inout) :: this + integer ndims + ! EO parameters + + ndims = size(this%dims) + end function ! the sizes of the dims define the global shape of the var diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 0fc3903b1..0a548e9fd 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -64,6 +64,20 @@ contains end subroutine + @test + subroutine test_can_query_ndims() + type(netcdf_file_type) f + integer nz_dimidx, node_dimidx + + call f%initialize() + nz_dimidx = f%add_dim("nz", 48) + node_dimidx = f%add_dim("nz_1", 47) + + @assertEqual(2, f%ndims()) + + end subroutine + + @test subroutine test_can_add_vars_with_attributes() type(netcdf_file_type) f From fdda7123e7fe6866e31d61a6ed222581005d9f2c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 6 Jan 2021 11:46:27 +0100 Subject: [PATCH 194/909] add (work in progress) generic implementation to asynchronously read/write FESOM mesh variable(s) with distributed cell or element data in 2D or 3D to/from a NetCDF file --- src/io_fesom_file.F90 | 198 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 198 insertions(+) create mode 100644 src/io_fesom_file.F90 diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 new file mode 100644 index 000000000..bc9560814 --- /dev/null +++ b/src/io_fesom_file.F90 @@ -0,0 +1,198 @@ + ! synopsis: generic implementation to asynchronously read/write FESOM mesh variable(s) with distributed cell or element data in 2D or 3D to/from a NetCDF file +module io_fesom_file_module + use mod_mesh + use io_netcdf_file_module + implicit none + public fesom_file_type + private + + + type var_info + integer var_index + real(kind=8), pointer :: local_data_ptr3(:,:) => null() + real(kind=8), allocatable :: global_level_data(:) + integer :: global_level_data_size = 0 + end type + + + type dim_info + integer idx + integer len ! better query the len from the netcdf_file_type ? + end type + + + type, extends(netcdf_file_type) :: fesom_file_type + private + integer time_dimidx + integer time_varidx + type(var_info) var_infos(20); integer :: nvar_infos = 0 ! todo: allow dynamically allocated size without messing with shallow copied pointers + type(dim_info), allocatable :: dim_infos(:) + integer :: rec_cnt = 0 + integer :: iorank = 0 + contains + procedure, public :: gather_and_write, init, specify_node_var + end type + + + type(t_mesh), save :: mesh + + +contains + + + subroutine init(f, mesh_) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures + use mod_mesh + use o_arrays + class(fesom_file_type), intent(inout) :: f + type(t_mesh), intent(in) :: mesh_ + ! EO parameters + + mesh = mesh_ ! get hold of our mesh for later use (assume the mesh instance will never change) + + f%rec_cnt = 0 + + call f%netcdf_file_type%initialize() + + ! add the dimensions we intend to use to the file spec and also store here so we can use them when creating the variables + ! todo: store in a separate "dim pool" without calling f%add_dim and add only if a variable requires it + allocate(f%dim_infos(4)) + f%dim_infos(1) = dim_info( idx=f%add_dim('node', mesh%nod2d), len=mesh%nod2d) + f%dim_infos(2) = dim_info( idx=f%add_dim('elem', mesh%elem2d), len=mesh%elem2d) + f%dim_infos(3) = dim_info( idx=f%add_dim('nz_1', mesh%nl-1), len=mesh%nl-1) + f%dim_infos(4) = dim_info( idx=f%add_dim('nz', mesh%nl), len=mesh%nl) + + f%time_dimidx = f%add_dim_unlimited('time') + + f%time_varidx = f%add_var_double('time', [f%time_dimidx]) + end subroutine + + + subroutine gather_and_write(f) + use g_PARSUP + use io_gather_module + class(fesom_file_type), intent(inout) :: f + ! EO parameters + integer i,lvl, nlvl, nodes_per_lvl + logical is_2d + + f%rec_cnt = f%rec_cnt+1 + + do i=1, f%nvar_infos + +call assert(associated(f%var_infos(i)%local_data_ptr3), __LINE__) + + nlvl = size(f%var_infos(i)%local_data_ptr3,dim=1) + nodes_per_lvl = f%var_infos(i)%global_level_data_size + is_2d = (nlvl == 1) + + if(mype == f%iorank) then + ! todo: choose how many levels we write at once + if(.not. allocated(f%var_infos(i)%global_level_data)) allocate(f%var_infos(i)%global_level_data(nodes_per_lvl)) + end if + + lvl=1 ! todo: loop lvls + call gather_nod2D(f%var_infos(i)%local_data_ptr3(lvl,:), f%var_infos(i)%global_level_data, f%iorank, 42, MPI_comm_fesom) + if(mype == f%iorank) then + if(is_2d) then + call f%write_var(f%var_infos(i)%var_index, [1,f%rec_cnt], [size(f%var_infos(i)%global_level_data),1], f%var_infos(i)%global_level_data) + else + ! z,nod,time + call f%write_var(f%var_infos(i)%var_index, [lvl,1,f%rec_cnt], [1,size(f%var_infos(i)%global_level_data),1], f%var_infos(i)%global_level_data) + end if + end if + end do + end subroutine + + + subroutine specify_node_var(f, name, local_data, longname, units) + use, intrinsic :: ISO_C_BINDING + use g_PARSUP + class(fesom_file_type), intent(inout) :: f + character(len=*), intent(in) :: name + real(kind=8), target, intent(inout) :: local_data(..) ! todo: be able to set precision + character(len=*), intent(in) :: units, longname + ! EO parameters + real(8), pointer :: local_data_ptr3(:,:) + type(dim_info) level_diminfo, depth_diminfo + + level_diminfo = find_diminfo(f, mesh%nod2d) + + if(size(shape(local_data)) == 1) then ! 1D data + call c_f_pointer(c_loc(local_data), local_data_ptr3, [1,size(local_data)]) + call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, longname, units) + + else if(size(shape(local_data)) == 2) then ! 2D data + depth_diminfo = find_diminfo(f, size(local_data, dim=1)) + call c_f_pointer(c_loc(local_data), local_data_ptr3, [size(local_data, dim=1),size(local_data, dim=2)]) + call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, longname, units) + end if + end subroutine + + + function find_diminfo(f, len) result(info) + type(fesom_file_type), intent(inout) :: f + type(dim_info) info + integer len + ! EO parameters + integer i + + do i=1, size(f%dim_infos) + if(f%dim_infos(i)%len == len) then + info = f%dim_infos(i) + return + end if + end do + + print *, "error in line ",__LINE__, __FILE__," can not find dimension with size",len + stop 1 + end function + + + subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_data, longname, units) + use g_PARSUP + type(fesom_file_type), intent(inout) :: f + character(len=*), intent(in) :: name +! integer, intent(in) :: global_shape(:) + integer, intent(in) :: dim_indices(:) + integer global_level_data_size + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision? + character(len=*), intent(in) :: units, longname + ! EO parameters + integer var_index + + var_index = f%add_var_double(name, dim_indices) + call f%add_var_att(var_index, "units", units) + call f%add_var_att(var_index, "long_name", longname) + + call assert(f%nvar_infos < size(f%var_infos), __LINE__) + f%nvar_infos = f%nvar_infos+1 + f%var_infos(f%nvar_infos)%var_index = var_index + f%var_infos(f%nvar_infos)%local_data_ptr3 => local_data + f%var_infos(f%nvar_infos)%global_level_data_size = global_level_data_size + end subroutine + + + + subroutine assert(val, line) + logical, intent(in) :: val + integer, intent(in) :: line + ! EO parameters + if(.not. val) then + print *, "error in line ",line, __FILE__ + stop 1 + end if + end subroutine + + + subroutine assert_nc(status, line) + integer, intent(in) :: status + integer, intent(in) :: line + ! EO parameters + include "netcdf.inc" + if(status /= nf_noerr) then + print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status)) + stop 1 + endif + end subroutine + +end module From ce470d2b24de46f30d1074f92117207a8405ca6e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 6 Jan 2021 12:21:09 +0100 Subject: [PATCH 195/909] - be able to query rec_count, time_varindex, time_dimindex - be able to query if the current MPI rank is assigned to do the I/O for this file --- src/io_fesom_file.F90 | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index bc9560814..31655ba1f 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -30,7 +30,7 @@ module io_fesom_file_module integer :: rec_cnt = 0 integer :: iorank = 0 contains - procedure, public :: gather_and_write, init, specify_node_var + procedure, public :: gather_and_write, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex end type @@ -40,6 +40,38 @@ module io_fesom_file_module contains + function is_iorank(this) result(x) + use g_PARSUP + class(fesom_file_type), intent(in) :: this + logical x + x = (mype == this%iorank) + end function + + + function rec_count(this) result(x) + use g_PARSUP + class(fesom_file_type), intent(in) :: this + integer x + x = this%rec_cnt + end function + + + function time_varindex(this) result(x) + use g_PARSUP + class(fesom_file_type), intent(in) :: this + integer x + x = this%time_varidx + end function + + + function time_dimindex(this) result(x) + use g_PARSUP + class(fesom_file_type), intent(in) :: this + integer x + x = this%time_dimidx + end function + + subroutine init(f, mesh_) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures use mod_mesh use o_arrays From 3f83ef24b9c87e4c572f8bdf337c33b11606a692 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 7 Jan 2021 11:15:19 +0100 Subject: [PATCH 196/909] - be able to read variable shape with unlimited dimension from a file - add according unit test --- src/io_netcdf_file_module.F90 | 32 +++++++++++++++++++++ test/fortran/io_netcdf_file_module_tests.pf | 22 ++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 76e2a851a..ce0d36a42 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -15,6 +15,7 @@ module io_netcdf_file_module integer ncid contains procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_int, open_read, close_file, open_write_create, open_write_append + procedure, public :: read_var_shape procedure, public :: ndims generic, public :: read_var => read_var_r4, read_var_r8, read_var_integer generic, public :: write_var => write_var_r4, write_var_r8, write_var_integer @@ -244,6 +245,37 @@ subroutine open_read(this, filepath) end subroutine + ! return an array with the dimension sizes for all dimensions of the given variable + ! this currently only makes sense for variables with unlimited dimensions, + ! as all other dimensions must be known when adding the variable to the file specification, e.g before reading the file + subroutine read_var_shape(this, varindex, varshape) + class(netcdf_file_type), target, intent(inout) :: this + integer, intent(in) :: varindex + integer, allocatable, intent(out) :: varshape(:) + ! EO parameters + include "netcdf.inc" + type(var_type), pointer :: var + integer var_ndims + integer i + + var => this%vars(varindex) + var_ndims = size(var%dim_indices) + + if(allocated(varshape)) deallocate(varshape) + allocate(varshape(var_ndims)) + + do i=1, var_ndims + if(this%dims(i)%len == nf_unlimited) then + ! actually read from the file + call assert_nc( nf_inq_dimlen(this%ncid, this%dims(i)%ncid, varshape(i)) , __LINE__) + else + ! use the dim size which has been set without the file and is thus known anyway to the user + varshape(i) = this%dims( var%dim_indices(i) )%len + end if + end do + end subroutine + + ! values array is not required to have the same shape as the variable but must fit the product of all items of the sizes array ! this way we can retrieve e.g. data from a 3D variable to a 2D array with one size set to 1 (e.g. to get a single timestep) ! starts and sizes must have the same rank as the variable has dimensions diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 0a548e9fd..ece4b6d48 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -608,4 +608,26 @@ contains if(exitstat .ne. 0) stop 1 end subroutine + + @test + subroutine test_can_read_var_shape_from_file() + type(netcdf_file_type) f + integer, allocatable :: varshape(:) + + integer node_dimidx, time_dimidx + integer varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var_shape(varindex, varshape) + + @assertEqual([5,2], varshape) + + call f%close_file() + end subroutine + end module From a11fea48b908144f1dcac208072f44fb87f947f5 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 7 Jan 2021 12:34:30 +0100 Subject: [PATCH 197/909] - use correct dim index when determining shape of a variable - add unit test to cover this issue --- src/io_netcdf_file_module.F90 | 4 ++-- test/fortran/io_netcdf_file_module_tests.pf | 22 +++++++++++++++++++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index ce0d36a42..605f1004f 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -265,9 +265,9 @@ subroutine read_var_shape(this, varindex, varshape) allocate(varshape(var_ndims)) do i=1, var_ndims - if(this%dims(i)%len == nf_unlimited) then + if(this%dims( var%dim_indices(i) )%len == nf_unlimited) then ! actually read from the file - call assert_nc( nf_inq_dimlen(this%ncid, this%dims(i)%ncid, varshape(i)) , __LINE__) + call assert_nc( nf_inq_dimlen(this%ncid, this%dims( var%dim_indices(i) )%ncid, varshape(i)) , __LINE__) else ! use the dim size which has been set without the file and is thus known anyway to the user varshape(i) = this%dims( var%dim_indices(i) )%len diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index ece4b6d48..6208fd323 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -613,9 +613,9 @@ contains subroutine test_can_read_var_shape_from_file() type(netcdf_file_type) f integer, allocatable :: varshape(:) - integer node_dimidx, time_dimidx integer varindex + call f%initialize() node_dimidx = f%add_dim("nod2", 5) time_dimidx = f%add_dim_unlimited("time") @@ -624,9 +624,27 @@ contains call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") call f%read_var_shape(varindex, varshape) - @assertEqual([5,2], varshape) + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_var_shape_from_file_with_reverse_dim_index_order() + type(netcdf_file_type) f + integer, allocatable :: varshape(:) + integer node_dimidx, time_dimidx + integer varindex + + call f%initialize() + time_dimidx = f%add_dim_unlimited("time") + node_dimidx = f%add_dim("nod2", 5) + + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + call f%read_var_shape(varindex, varshape) + @assertEqual([5,2], varshape) call f%close_file() end subroutine From df85da8677d2e4c80b1159a23a45f9fd25f7b0a5 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 7 Jan 2021 13:33:01 +0100 Subject: [PATCH 198/909] - be able to query if a file has been opened - add unit tests --- src/io_netcdf_file_module.F90 | 15 ++++++++-- test/fortran/io_netcdf_file_module_tests.pf | 32 ++++++++++++++++++++- 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 605f1004f..c41a7a451 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -15,7 +15,7 @@ module io_netcdf_file_module integer ncid contains procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_int, open_read, close_file, open_write_create, open_write_append - procedure, public :: read_var_shape + procedure, public :: is_attached, read_var_shape procedure, public :: ndims generic, public :: read_var => read_var_r4, read_var_r8, read_var_integer generic, public :: write_var => write_var_r4, write_var_r8, write_var_integer @@ -201,7 +201,7 @@ subroutine add_var_att_text(this, varindex, att_name, att_text) character(len=*), intent(in) :: att_text ! EO parameters type(att_type_wrapper), allocatable :: tmparr(:) - + allocate( tmparr(size(this%vars(varindex)%atts)+1) ) tmparr(1:size(this%vars(varindex)%atts)) = this%vars(varindex)%atts deallocate(this%vars(varindex)%atts) @@ -226,6 +226,15 @@ subroutine add_var_att_int(this, varindex, att_name, att_val) this%vars(varindex)%atts( size(this%vars(varindex)%atts) )%it = att_type_int(name=att_name, val=att_val) end subroutine + + + function is_attached(this) result(x) + class(netcdf_file_type), intent(in) :: this + logical x + ! EO parameters + + x = (this%filepath .ne. "") + end function subroutine open_read(this, filepath) @@ -464,6 +473,8 @@ subroutine close_file(this) ! EO parameters include "netcdf.inc" call assert_nc( nf_close(this%ncid) , __LINE__) + + this%filepath = "" end subroutine diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index 6208fd323..bcf6a5591 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -642,10 +642,40 @@ contains varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) - call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") call f%read_var_shape(varindex, varshape) @assertEqual([5,2], varshape) call f%close_file() end subroutine + + @test + subroutine test_file_is_attached_is_false_after_initializing() + type(netcdf_file_type) f + + call f%initialize() + @assertFalse(f%is_attached()) + end subroutine + + + @test + subroutine test_file_is_attached_is_true_after_opening_a_file() + type(netcdf_file_type) f + integer, allocatable :: varshape(:) + integer node_dimidx, time_dimidx + integer varindex + + call f%initialize() + time_dimidx = f%add_dim_unlimited("time") + node_dimidx = f%add_dim("nod2", 5) + + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var_shape(varindex, varshape) + @assertTrue(f%is_attached()) + call f%close_file() + end subroutine + end module From 0658d948228c828b97efb0b4f6beaede78d2cd85 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 7 Jan 2021 13:48:57 +0100 Subject: [PATCH 199/909] improve accessibility intent for dummy parameter --- src/io_netcdf_file_module.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index c41a7a451..c46a2e073 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -258,7 +258,7 @@ subroutine open_read(this, filepath) ! this currently only makes sense for variables with unlimited dimensions, ! as all other dimensions must be known when adding the variable to the file specification, e.g before reading the file subroutine read_var_shape(this, varindex, varshape) - class(netcdf_file_type), target, intent(inout) :: this + class(netcdf_file_type), target, intent(in) :: this integer, intent(in) :: varindex integer, allocatable, intent(out) :: varshape(:) ! EO parameters From 500fb1c2c161d391a2f811896713a192dd892723 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 7 Jan 2021 19:18:39 +0100 Subject: [PATCH 200/909] - be able to read a single value from a NetCDF variable - add unit tests --- src/io_netcdf_file_module.F90 | 50 ++++++++++++++++ test/fortran/io_netcdf_file_module_tests.pf | 66 +++++++++++++++++++++ 2 files changed, 116 insertions(+) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index c46a2e073..d58e2327f 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -19,9 +19,11 @@ module io_netcdf_file_module procedure, public :: ndims generic, public :: read_var => read_var_r4, read_var_r8, read_var_integer generic, public :: write_var => write_var_r4, write_var_r8, write_var_integer + generic, public :: read_var1 => read_var1_r4, read_var1_r8, read_var1_integer generic, public :: add_var_att => add_var_att_text, add_var_att_int generic, public :: add_global_att => add_global_att_text, add_global_att_int procedure, private :: read_var_r4, read_var_r8, read_var_integer, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8, write_var_integer, add_var_att_text, add_var_att_int + procedure, private :: read_var1_r4, read_var1_r8, read_var1_integer procedure, private :: add_global_att_text, add_global_att_int end type @@ -347,6 +349,54 @@ subroutine read_var_integer(this, varindex, starts, sizes, values) end subroutine + ! retrieve a single value specified via the indices array + subroutine read_var1_r8(this, varindex, indices, value) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: indices + real(8), intent(out) :: value + ! EO parameters + include "netcdf.inc" + + call assert(size(indices) == size(this%vars(varindex)%dim_indices), __LINE__) + + call assert_nc(nf_get_var1_double(this%ncid, this%vars(varindex)%ncid, indices, value), __LINE__) + end subroutine + + + ! see read_var1_r8 for usage comment + subroutine read_var1_r4(this, varindex, indices, value) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: indices + real(4), intent(out) :: value + ! EO parameters + include "netcdf.inc" + + call assert(size(indices) == size(this%vars(varindex)%dim_indices), __LINE__) + + call assert_nc(nf_get_var1_real(this%ncid, this%vars(varindex)%ncid, indices, value), __LINE__) + end subroutine + + + ! see read_var1_r8 for usage comment + subroutine read_var1_integer(this, varindex, indices, value) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: indices + integer, intent(out) :: value + ! EO parameters + include "netcdf.inc" + + call assert(size(indices) == size(this%vars(varindex)%dim_indices), __LINE__) + + call assert_nc(nf_get_var1_int(this%ncid, this%vars(varindex)%ncid, indices, value), __LINE__) + end subroutine + + subroutine open_write_create(this, filepath) class(netcdf_file_type), target, intent(inout) :: this character(len=*), intent(in) :: filepath diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf index bcf6a5591..8a1f80492 100644 --- a/test/fortran/io_netcdf_file_module_tests.pf +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -314,6 +314,72 @@ contains end subroutine + @test + subroutine test_can_read_single_variable_integer() + type(netcdf_file_type) f + integer value + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var1(sss_varindex, [1,2], value) + ! check level 1 values + @assertEqual(10, value) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_single_variable_real4() + type(netcdf_file_type) f + real(4) value + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var1(sss_varindex, [1,2], value) + ! check level 1 values + @assertEqual(10.001, value, tolerance=1.e-6) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_single_variable_real8() + type(netcdf_file_type) f + real(8) value + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var1(sss_varindex, [1,2], value) + ! check level 1 values + @assertEqual(10.001_8, value, tolerance=1.e-6) + + call f%close_file() + end subroutine + + @test subroutine test_can_read_variable_with_less_dims_than_in_file() type(netcdf_file_type) f From cceefa3b04d5ede9e9ad47da8e380eadac39c51a Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 8 Jan 2021 19:07:55 +0100 Subject: [PATCH 201/909] remove dependency to the mod_mesh module so we can unit test it --- src/io_fesom_file.F90 | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 31655ba1f..688d51734 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -1,6 +1,5 @@ ! synopsis: generic implementation to asynchronously read/write FESOM mesh variable(s) with distributed cell or element data in 2D or 3D to/from a NetCDF file module io_fesom_file_module - use mod_mesh use io_netcdf_file_module implicit none public fesom_file_type @@ -34,7 +33,9 @@ module io_fesom_file_module end type - type(t_mesh), save :: mesh + integer, save :: m_nod2d + integer, save :: m_elem2d + integer, save :: m_nl contains @@ -70,35 +71,35 @@ function time_dimindex(this) result(x) integer x x = this%time_dimidx end function - - - subroutine init(f, mesh_) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures - use mod_mesh - use o_arrays + + + subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures class(fesom_file_type), intent(inout) :: f - type(t_mesh), intent(in) :: mesh_ + integer mesh_nod2d + integer mesh_elem2d + integer mesh_nl ! EO parameters - mesh = mesh_ ! get hold of our mesh for later use (assume the mesh instance will never change) - - f%rec_cnt = 0 - + ! get hold of our mesh data for later use (assume the mesh instance will not change) + m_nod2d = mesh_nod2d + m_elem2d = mesh_elem2d + m_nl = mesh_nl call f%netcdf_file_type%initialize() ! add the dimensions we intend to use to the file spec and also store here so we can use them when creating the variables ! todo: store in a separate "dim pool" without calling f%add_dim and add only if a variable requires it allocate(f%dim_infos(4)) - f%dim_infos(1) = dim_info( idx=f%add_dim('node', mesh%nod2d), len=mesh%nod2d) - f%dim_infos(2) = dim_info( idx=f%add_dim('elem', mesh%elem2d), len=mesh%elem2d) - f%dim_infos(3) = dim_info( idx=f%add_dim('nz_1', mesh%nl-1), len=mesh%nl-1) - f%dim_infos(4) = dim_info( idx=f%add_dim('nz', mesh%nl), len=mesh%nl) + f%dim_infos(1) = dim_info( idx=f%add_dim('node', m_nod2d), len=m_nod2d) + f%dim_infos(2) = dim_info( idx=f%add_dim('elem', m_elem2d), len=m_elem2d) + f%dim_infos(3) = dim_info( idx=f%add_dim('nz_1', m_nl-1), len=m_nl-1) + f%dim_infos(4) = dim_info( idx=f%add_dim('nz', m_nl), len=m_nl) f%time_dimidx = f%add_dim_unlimited('time') f%time_varidx = f%add_var_double('time', [f%time_dimidx]) end subroutine - - + + subroutine gather_and_write(f) use g_PARSUP use io_gather_module @@ -106,7 +107,7 @@ subroutine gather_and_write(f) ! EO parameters integer i,lvl, nlvl, nodes_per_lvl logical is_2d - + f%rec_cnt = f%rec_cnt+1 do i=1, f%nvar_infos @@ -131,7 +132,7 @@ subroutine gather_and_write(f) ! z,nod,time call f%write_var(f%var_infos(i)%var_index, [lvl,1,f%rec_cnt], [1,size(f%var_infos(i)%global_level_data),1], f%var_infos(i)%global_level_data) end if - end if + end if end do end subroutine @@ -147,7 +148,7 @@ subroutine specify_node_var(f, name, local_data, longname, units) real(8), pointer :: local_data_ptr3(:,:) type(dim_info) level_diminfo, depth_diminfo - level_diminfo = find_diminfo(f, mesh%nod2d) + level_diminfo = find_diminfo(f, m_nod2d) if(size(shape(local_data)) == 1) then ! 1D data call c_f_pointer(c_loc(local_data), local_data_ptr3, [1,size(local_data)]) From 454a6b886084833a22a6b8b6f7301720efb8a590 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 8 Jan 2021 19:23:00 +0100 Subject: [PATCH 202/909] add unit test file for the io_fesom_file_module --- test/fortran/CMakeLists.txt | 5 +++++ test/fortran/io_fesom_file_module_tests.pf | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 test/fortran/io_fesom_file_module_tests.pf diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index 475c8cd87..688b8b22b 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -15,6 +15,7 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_nf_interface.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_file_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_attribute_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_fesom_file.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/gen_modules_partitioning.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_gather.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) @@ -36,6 +37,10 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU) target_compile_options(${LIB_TARGET} PRIVATE -cpp -ffree-line-length-none) target_compile_options(${PROJECT_NAME} PRIVATE -cpp -ffree-line-length-none) + if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) + target_compile_options(${LIB_TARGET} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + endif() endif() add_custom_command( diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_fesom_file_module_tests.pf new file mode 100644 index 000000000..a54ac859a --- /dev/null +++ b/test/fortran/io_fesom_file_module_tests.pf @@ -0,0 +1,18 @@ +module io_fesom_file_module_tests + use io_fesom_file_module + use funit; implicit none + +contains + + + @test + subroutine can_be_initialized() + type(fesom_file_type) f + integer, parameter :: mesh_nod2d = 3140 + integer, parameter :: mesh_elem2d = 5839 + integer, parameter :: mesh_nl = 48 + + call f%init(mesh_nod2d, mesh_elem2d, mesh_nl) + end subroutine + +end module From 19dc34f248b26df1a32f277f451676ef5b4e93a2 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 8 Jan 2021 20:06:38 +0100 Subject: [PATCH 203/909] - automatically use the rec count from the file if a file has been attached - add unit tests for this --- src/io_fesom_file.F90 | 16 +++++++-- test/fortran/io_fesom_file_module_tests.pf | 38 ++++++++++++++++++++++ 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 688d51734..9152f7233 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -26,7 +26,7 @@ module io_fesom_file_module integer time_varidx type(var_info) var_infos(20); integer :: nvar_infos = 0 ! todo: allow dynamically allocated size without messing with shallow copied pointers type(dim_info), allocatable :: dim_infos(:) - integer :: rec_cnt = 0 + integer :: rec_cnt = -1 integer :: iorank = 0 contains procedure, public :: gather_and_write, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex @@ -49,10 +49,20 @@ function is_iorank(this) result(x) end function + ! return the number of timesteps of the file if a file is attached or return the default value of -1 function rec_count(this) result(x) use g_PARSUP - class(fesom_file_type), intent(in) :: this + class(fesom_file_type), intent(inout) :: this integer x + ! EO parameters + integer, allocatable :: time_shape(:) + + if(this%rec_cnt == -1 .and. this%is_attached()) then + ! update from file if rec_cnt has never been used before + call this%read_var_shape(this%time_varidx, time_shape) + this%rec_cnt = time_shape(1) + end if + x = this%rec_cnt end function @@ -108,7 +118,7 @@ subroutine gather_and_write(f) integer i,lvl, nlvl, nodes_per_lvl logical is_2d - f%rec_cnt = f%rec_cnt+1 + if(f%is_iorank()) f%rec_cnt = f%rec_count()+1 do i=1, f%nvar_infos diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_fesom_file_module_tests.pf index a54ac859a..6db26b1a0 100644 --- a/test/fortran/io_fesom_file_module_tests.pf +++ b/test/fortran/io_fesom_file_module_tests.pf @@ -2,6 +2,8 @@ module io_fesom_file_module_tests use io_fesom_file_module use funit; implicit none + character(len=*), parameter :: TMPPATHPREFIX = "./io_fesom_file_module_tests_DAEA1C34_F042_4243_AA88_273E4AA9D4A6__" + contains @@ -15,4 +17,40 @@ contains call f%init(mesh_nod2d, mesh_elem2d, mesh_nl) end subroutine + + @test + subroutine rec_count_returns_neg1_for_an_unattached_file() + type(fesom_file_type) f + integer, parameter :: mesh_nod2d = 3140 + integer, parameter :: mesh_elem2d = 5839 + integer, parameter :: mesh_nl = 48 + + call f%init(mesh_nod2d, mesh_elem2d, mesh_nl) + + @assertEqual(-1, f%rec_count()) + end subroutine + + + @test + subroutine rec_count_returns_0_for_a_newly_created_file() + character(len=*), parameter :: filepath = TMPPATHPREFIX//"rec_count_returns_0_for_a_newly_created_file.nc" + integer exitstat + type(fesom_file_type) f + integer, parameter :: mesh_nod2d = 3140 + integer, parameter :: mesh_elem2d = 5839 + integer, parameter :: mesh_nl = 48 + + call execute_command_line("rm -f "//filepath) ! silently remove the file if it exists from an aborted previous run + + call f%init(mesh_nod2d, mesh_elem2d, mesh_nl) + call f%open_write_create(filepath) + + @assertEqual(0, f%rec_count()) + + call f%close_file() + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + end module From 5de2e374d5a4a65d785b6ab01f3efe5085891ae7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 8 Jan 2021 21:42:22 +0100 Subject: [PATCH 204/909] - do not add all possible dims to a fesom file as there might be only a subset in existing files, e.g. output files - add dims required for mesh based variables only for specified variables - add unit test which requires a file specification with only one mesh dim --- src/io_fesom_file.F90 | 47 ++++++++++++++-------- test/fortran/io_fesom_file_module_tests.pf | 25 ++++++++++++ 2 files changed, 56 insertions(+), 16 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 9152f7233..196c8174d 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -25,7 +25,7 @@ module io_fesom_file_module integer time_dimidx integer time_varidx type(var_info) var_infos(20); integer :: nvar_infos = 0 ! todo: allow dynamically allocated size without messing with shallow copied pointers - type(dim_info), allocatable :: dim_infos(:) + type(dim_info), allocatable :: used_mesh_dims(:) ! the dims we add for our variables, we need to identify them when adding our mesh related variables integer :: rec_cnt = -1 integer :: iorank = 0 contains @@ -58,6 +58,7 @@ function rec_count(this) result(x) integer, allocatable :: time_shape(:) if(this%rec_cnt == -1 .and. this%is_attached()) then +call assert(this%is_iorank(),__LINE__) ! update from file if rec_cnt has never been used before call this%read_var_shape(this%time_varidx, time_shape) this%rec_cnt = time_shape(1) @@ -96,13 +97,7 @@ subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call m_nl = mesh_nl call f%netcdf_file_type%initialize() - ! add the dimensions we intend to use to the file spec and also store here so we can use them when creating the variables - ! todo: store in a separate "dim pool" without calling f%add_dim and add only if a variable requires it - allocate(f%dim_infos(4)) - f%dim_infos(1) = dim_info( idx=f%add_dim('node', m_nod2d), len=m_nod2d) - f%dim_infos(2) = dim_info( idx=f%add_dim('elem', m_elem2d), len=m_elem2d) - f%dim_infos(3) = dim_info( idx=f%add_dim('nz_1', m_nl-1), len=m_nl-1) - f%dim_infos(4) = dim_info( idx=f%add_dim('nz', m_nl), len=m_nl) + allocate(f%used_mesh_dims(0)) f%time_dimidx = f%add_dim_unlimited('time') @@ -158,36 +153,56 @@ subroutine specify_node_var(f, name, local_data, longname, units) real(8), pointer :: local_data_ptr3(:,:) type(dim_info) level_diminfo, depth_diminfo - level_diminfo = find_diminfo(f, m_nod2d) + level_diminfo = obtain_diminfo(f, m_nod2d) if(size(shape(local_data)) == 1) then ! 1D data call c_f_pointer(c_loc(local_data), local_data_ptr3, [1,size(local_data)]) call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, longname, units) else if(size(shape(local_data)) == 2) then ! 2D data - depth_diminfo = find_diminfo(f, size(local_data, dim=1)) + depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) call c_f_pointer(c_loc(local_data), local_data_ptr3, [size(local_data, dim=1),size(local_data, dim=2)]) call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, longname, units) end if end subroutine - function find_diminfo(f, len) result(info) + function obtain_diminfo(f, len) result(info) type(fesom_file_type), intent(inout) :: f type(dim_info) info integer len ! EO parameters integer i + type(dim_info), allocatable :: tmparr(:) - do i=1, size(f%dim_infos) - if(f%dim_infos(i)%len == len) then - info = f%dim_infos(i) + do i=1, size(f%used_mesh_dims) + if(f%used_mesh_dims(i)%len == len) then + info = f%used_mesh_dims(i) return end if end do - print *, "error in line ",__LINE__, __FILE__," can not find dimension with size",len - stop 1 + ! the dim has not been added yet, see if it is one of our allowed mesh related dims + if(len == m_nod2d) then + info = dim_info( idx=f%add_dim('node', len), len=len) + else if(len == m_elem2d) then + info = dim_info( idx=f%add_dim('elem', len), len=len) + else if(len == m_nl-1) then + info = dim_info( idx=f%add_dim('nz_1', len), len=len) + else if(len == m_nl) then + info = dim_info( idx=f%add_dim('nz', len), len=len) + else + print *, "error in line ",__LINE__, __FILE__," can not find dimension with size",len + stop 1 + end if + + ! append the new dim to our list of used dims, i.e. the dims we use for the mesh based variables created via #specify_variable + ! assume the used_mesh_dims array is allocated + allocate( tmparr(size(f%used_mesh_dims)+1) ) + tmparr(1:size(f%used_mesh_dims)) = f%used_mesh_dims + deallocate(f%used_mesh_dims) + call move_alloc(tmparr, f%used_mesh_dims) + f%used_mesh_dims( size(f%used_mesh_dims) ) = info end function diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_fesom_file_module_tests.pf index 6db26b1a0..54eee817f 100644 --- a/test/fortran/io_fesom_file_module_tests.pf +++ b/test/fortran/io_fesom_file_module_tests.pf @@ -53,4 +53,29 @@ contains if(exitstat .ne. 0) stop 1 end subroutine + + @test + subroutine rec_count_returns_2_for_existing_file_with_2_timesteps() + type(fesom_file_type) f + integer, parameter :: mesh_nod2d = 3140 + integer, parameter :: mesh_elem2d = 5839 + integer, parameter :: mesh_nl = 48 + + integer, allocatable :: varshape(:) + integer node_dimidx, time_dimidx + integer varindex + + call f%init(mesh_nod2d, mesh_elem2d, mesh_nl) + + time_dimidx = f%add_dim_unlimited("time") + node_dimidx = f%add_dim("nod2", 5) + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + call f%read_var_shape(varindex, varshape) + + @assertEqual(2, f%rec_count()) + + call f%close_file() + end subroutine + end module From 677fe4505f6c02a1f4af29d5ac3ca9eaca303c79 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 11 Jan 2021 18:04:21 +0100 Subject: [PATCH 205/909] add module to distribute node data to all mesh partitions from any rank --- src/io_scatter.F90 | 69 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 src/io_scatter.F90 diff --git a/src/io_scatter.F90 b/src/io_scatter.F90 new file mode 100644 index 000000000..ab90b089e --- /dev/null +++ b/src/io_scatter.F90 @@ -0,0 +1,69 @@ +module io_scatter_module + implicit none + public scatter_nod2D + private + +contains + + + ! thread-safe procedure + subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) + use g_PARSUP + use o_mesh + use, intrinsic :: iso_fortran_env, only: real64 + real(real64), intent(in) :: arr2D_global(:) + real(real64), intent(out) :: arr2D_local(:) + integer, intent(in) :: root_rank ! rank of sending process + integer, intent(in) :: comm + ! EO args + integer :: tag = 0 + integer :: mpi_precision = MPI_DOUBLE_PRECISION + integer status(MPI_STATUS_SIZE) + integer :: n, sender_rank + integer, allocatable :: remote_list_nod2d(:) + real(real64), allocatable :: sendbuf(:) + integer node_size + + call assert(size(arr2D_local) == size(mylist_nod2d), __LINE__) ! == mydim_nod2d+edim_nod2d, i.e. partition nodes + halo nodes + + if(mype == root_rank) then + arr2D_local = arr2D_global(mylist_nod2d) + do n = 1, npes-1 + ! receive remote partition 2D size + call mpi_recv(node_size, 1, mpi_integer, MPI_ANY_SOURCE, tag+0, comm, status, mpierr) + sender_rank = status(mpi_source) + + ! receive remote mylist_nod2d + allocate(remote_list_nod2d(node_size)) + call mpi_recv(remote_list_nod2d(1), node_size, mpi_integer, sender_rank, tag+1, comm, status, mpierr) + + allocate(sendbuf(node_size)) + sendbuf = arr2D_global(remote_list_nod2d) + deallocate(remote_list_nod2d) + + call mpi_send(sendbuf(1), node_size, mpi_double_precision, sender_rank, tag+2, comm, mpierr) + deallocate(sendbuf) + end do + + else + node_size = size(mylist_nod2d) + call mpi_send(node_size, 1, mpi_integer, root_rank, tag+0, comm, mpierr) + call mpi_send(mylist_nod2d(1), node_size, mpi_integer, root_rank, tag+1, comm, mpierr) + + call mpi_recv(arr2D_local(1), node_size, mpi_precision, root_rank, tag+2, comm, status, mpierr) + end if + end subroutine + + + subroutine assert(val, line) + logical, intent(in) :: val + integer, intent(in) :: line + ! EO parameters + if(.not. val) then + print *, "error in line ",line, __FILE__ + stop 1 + end if + end subroutine + +end module + From 945fbf878d943489116a52cf15e80293e10c1eeb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 11 Jan 2021 18:12:35 +0100 Subject: [PATCH 206/909] use a pointer alias to increase readability --- src/io_fesom_file.F90 | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 196c8174d..38759d2f2 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -108,34 +108,35 @@ subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call subroutine gather_and_write(f) use g_PARSUP use io_gather_module - class(fesom_file_type), intent(inout) :: f + class(fesom_file_type), target :: f ! EO parameters integer i,lvl, nlvl, nodes_per_lvl logical is_2d + type(var_info), pointer :: var if(f%is_iorank()) f%rec_cnt = f%rec_count()+1 do i=1, f%nvar_infos + var => f%var_infos(i) -call assert(associated(f%var_infos(i)%local_data_ptr3), __LINE__) - nlvl = size(f%var_infos(i)%local_data_ptr3,dim=1) - nodes_per_lvl = f%var_infos(i)%global_level_data_size + nlvl = size(var%local_data_ptr3,dim=1) + nodes_per_lvl = var%global_level_data_size is_2d = (nlvl == 1) if(mype == f%iorank) then ! todo: choose how many levels we write at once - if(.not. allocated(f%var_infos(i)%global_level_data)) allocate(f%var_infos(i)%global_level_data(nodes_per_lvl)) + if(.not. allocated(var%global_level_data)) allocate(var%global_level_data(nodes_per_lvl)) end if lvl=1 ! todo: loop lvls - call gather_nod2D(f%var_infos(i)%local_data_ptr3(lvl,:), f%var_infos(i)%global_level_data, f%iorank, 42, MPI_comm_fesom) + call gather_nod2D(var%local_data_ptr3(lvl,:), var%global_level_data, f%iorank, 42, MPI_comm_fesom) if(mype == f%iorank) then if(is_2d) then - call f%write_var(f%var_infos(i)%var_index, [1,f%rec_cnt], [size(f%var_infos(i)%global_level_data),1], f%var_infos(i)%global_level_data) + call f%write_var(var%var_index, [1,f%rec_cnt], [size(var%global_level_data),1], var%global_level_data) else ! z,nod,time - call f%write_var(f%var_infos(i)%var_index, [lvl,1,f%rec_cnt], [1,size(f%var_infos(i)%global_level_data),1], f%var_infos(i)%global_level_data) + call f%write_var(var%var_index, [lvl,1,f%rec_cnt], [1,size(var%global_level_data),1], var%global_level_data) end if end if end do From 8c624085994ae0f3096a89c56329522b737eaaf6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 11 Jan 2021 18:19:33 +0100 Subject: [PATCH 207/909] remove superfluous sanity assertion --- src/io_fesom_file.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 38759d2f2..698875c5c 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -58,7 +58,6 @@ function rec_count(this) result(x) integer, allocatable :: time_shape(:) if(this%rec_cnt == -1 .and. this%is_attached()) then -call assert(this%is_iorank(),__LINE__) ! update from file if rec_cnt has never been used before call this%read_var_shape(this%time_varidx, time_shape) this%rec_cnt = time_shape(1) From f1119dfa07976fbc4097032aecd683a257077aa7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 11 Jan 2021 18:23:09 +0100 Subject: [PATCH 208/909] - add initial implementation of procedure to read and distribute all variables - change procedure name to better state its purpose --- src/io_fesom_file.F90 | 42 +++++++++++++++++++++++++++++++++++-- test/fortran/CMakeLists.txt | 2 +- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 698875c5c..d758d8f90 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -29,7 +29,7 @@ module io_fesom_file_module integer :: rec_cnt = -1 integer :: iorank = 0 contains - procedure, public :: gather_and_write, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex + procedure, public :: read_and_scatter_variables, gather_and_write_variables, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex end type @@ -104,7 +104,45 @@ subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call end subroutine - subroutine gather_and_write(f) + subroutine read_and_scatter_variables(f) + use g_PARSUP + use io_scatter_module + class(fesom_file_type), target :: f + ! EO parameters + integer i,lvl, nlvl, nodes_per_lvl + logical is_2d + integer last_rec_idx + type(var_info), pointer :: var + + last_rec_idx = f%rec_count() + + do i=1, f%nvar_infos + var => f%var_infos(i) + + nlvl = size(var%local_data_ptr3,dim=1) + nodes_per_lvl = var%global_level_data_size + is_2d = (nlvl == 1) + + if(mype == f%iorank) then + ! todo: choose how many levels we read at once + if(.not. allocated(var%global_level_data)) allocate(var%global_level_data(nodes_per_lvl)) + end if + + lvl=1 ! todo: loop lvls + if(mype == f%iorank) then + if(is_2d) then + call f%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) + else + ! z,nod,time + call f%read_var(var%var_index, [lvl,1,last_rec_idx], [1,size(var%global_level_data),1], var%global_level_data) + end if + end if + call scatter_nod2D(var%global_level_data, var%local_data_ptr3(lvl,:), f%iorank, MPI_comm_fesom) + end do + end subroutine + + + subroutine gather_and_write_variables(f) use g_PARSUP use io_gather_module class(fesom_file_type), target :: f diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index 688b8b22b..76c3646f1 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -15,7 +15,7 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_nf_interface.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_file_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_attribute_module.F90 - ${CMAKE_CURRENT_LIST_DIR}/../../src/io_fesom_file.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/gen_modules_partitioning.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_gather.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_fesom_file.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/gen_modules_partitioning.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_gather.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_scatter.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) From 7b1d185a4455e2dfce1d1cadc68586084e6c7b4b Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 11 Jan 2021 18:45:30 +0100 Subject: [PATCH 209/909] loop all levels of a variable when reading or writing it --- src/io_fesom_file.F90 | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index d758d8f90..13aff29ff 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -128,16 +128,18 @@ subroutine read_and_scatter_variables(f) if(.not. allocated(var%global_level_data)) allocate(var%global_level_data(nodes_per_lvl)) end if - lvl=1 ! todo: loop lvls - if(mype == f%iorank) then - if(is_2d) then - call f%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) - else - ! z,nod,time - call f%read_var(var%var_index, [lvl,1,last_rec_idx], [1,size(var%global_level_data),1], var%global_level_data) + do lvl=1, nlvl + if(mype == f%iorank) then + if(is_2d) then + call f%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) + else + ! z,nod,time + call f%read_var(var%var_index, [lvl,1,last_rec_idx], [1,size(var%global_level_data),1], var%global_level_data) + end if end if - end if - call scatter_nod2D(var%global_level_data, var%local_data_ptr3(lvl,:), f%iorank, MPI_comm_fesom) + + call scatter_nod2D(var%global_level_data, var%local_data_ptr3(lvl,:), f%iorank, MPI_comm_fesom) + end do end do end subroutine @@ -166,16 +168,18 @@ subroutine gather_and_write_variables(f) if(.not. allocated(var%global_level_data)) allocate(var%global_level_data(nodes_per_lvl)) end if - lvl=1 ! todo: loop lvls - call gather_nod2D(var%local_data_ptr3(lvl,:), var%global_level_data, f%iorank, 42, MPI_comm_fesom) - if(mype == f%iorank) then - if(is_2d) then - call f%write_var(var%var_index, [1,f%rec_cnt], [size(var%global_level_data),1], var%global_level_data) - else - ! z,nod,time - call f%write_var(var%var_index, [lvl,1,f%rec_cnt], [1,size(var%global_level_data),1], var%global_level_data) + do lvl=1, nlvl + call gather_nod2D(var%local_data_ptr3(lvl,:), var%global_level_data, f%iorank, 42, MPI_comm_fesom) + + if(mype == f%iorank) then + if(is_2d) then + call f%write_var(var%var_index, [1,f%rec_cnt], [size(var%global_level_data),1], var%global_level_data) + else + ! z,nod,time + call f%write_var(var%var_index, [lvl,1,f%rec_cnt], [1,size(var%global_level_data),1], var%global_level_data) + end if end if - end if + end do end do end subroutine From 0883ac7312b8c88c11c3de8506a86482a41147fa Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 12 Jan 2021 14:08:33 +0100 Subject: [PATCH 210/909] reset record counter then changing file paths --- src/io_fesom_file.F90 | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 13aff29ff..b9a661ffb 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -20,7 +20,7 @@ module io_fesom_file_module end type - type, extends(netcdf_file_type) :: fesom_file_type + type, extends(netcdf_file_type) :: fesom_file_type ! todo maybe: do not inherit but use composition to have different implementations for the iorank and non-io ranks private integer time_dimidx integer time_varidx @@ -30,6 +30,7 @@ module io_fesom_file_module integer :: iorank = 0 contains procedure, public :: read_and_scatter_variables, gather_and_write_variables, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex + procedure, public :: close_file ! inherited procedures we overwrite end type @@ -271,6 +272,13 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ f%var_infos(f%nvar_infos)%global_level_data_size = global_level_data_size end subroutine + + subroutine close_file(this) + class(fesom_file_type), intent(inout) :: this + + this%rec_cnt = -1 ! reset state (should probably be done in all the open_ procedures, not here) + call this%netcdf_file_type%close_file() + end subroutine subroutine assert(val, line) From 67bb76416559aa8280b5410ab48bd7744bb31ab1 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 12 Jan 2021 17:23:42 +0100 Subject: [PATCH 211/909] store whether a variable is nod or elem based --- src/io_fesom_file.F90 | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index b9a661ffb..f6e5928d1 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -11,6 +11,7 @@ module io_fesom_file_module real(kind=8), pointer :: local_data_ptr3(:,:) => null() real(kind=8), allocatable :: global_level_data(:) integer :: global_level_data_size = 0 + logical is_elem_based end type @@ -139,7 +140,11 @@ subroutine read_and_scatter_variables(f) end if end if - call scatter_nod2D(var%global_level_data, var%local_data_ptr3(lvl,:), f%iorank, MPI_comm_fesom) + if(var%is_elem_based) then + ; + else + call scatter_nod2D(var%global_level_data, var%local_data_ptr3(lvl,:), f%iorank, MPI_comm_fesom) + end if end do end do end subroutine @@ -170,7 +175,11 @@ subroutine gather_and_write_variables(f) end if do lvl=1, nlvl - call gather_nod2D(var%local_data_ptr3(lvl,:), var%global_level_data, f%iorank, 42, MPI_comm_fesom) + if(var%is_elem_based) then + ; + else + call gather_nod2D (var%local_data_ptr3(lvl,:), var%global_level_data, f%iorank, 42, MPI_comm_fesom) + end if if(mype == f%iorank) then if(is_2d) then @@ -200,12 +209,12 @@ subroutine specify_node_var(f, name, local_data, longname, units) if(size(shape(local_data)) == 1) then ! 1D data call c_f_pointer(c_loc(local_data), local_data_ptr3, [1,size(local_data)]) - call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, longname, units) + call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .false., longname, units) else if(size(shape(local_data)) == 2) then ! 2D data depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) call c_f_pointer(c_loc(local_data), local_data_ptr3, [size(local_data, dim=1),size(local_data, dim=2)]) - call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, longname, units) + call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .false., longname, units) end if end subroutine @@ -249,7 +258,7 @@ function obtain_diminfo(f, len) result(info) end function - subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_data, longname, units) + subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_data, is_elem_based, longname, units) use g_PARSUP type(fesom_file_type), intent(inout) :: f character(len=*), intent(in) :: name @@ -257,6 +266,7 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ integer, intent(in) :: dim_indices(:) integer global_level_data_size real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision? + logical, intent(in) :: is_elem_based character(len=*), intent(in) :: units, longname ! EO parameters integer var_index @@ -270,6 +280,7 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ f%var_infos(f%nvar_infos)%var_index = var_index f%var_infos(f%nvar_infos)%local_data_ptr3 => local_data f%var_infos(f%nvar_infos)%global_level_data_size = global_level_data_size + f%var_infos(f%nvar_infos)%is_elem_based = is_elem_based end subroutine From 9b128c5ca0f77e82e6394b5d44d4f857b4b7c798 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 12 Jan 2021 17:24:02 +0100 Subject: [PATCH 212/909] add procedure to distribute elem data to all mesh partitions from any rank --- src/io_scatter.F90 | 51 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/src/io_scatter.F90 b/src/io_scatter.F90 index ab90b089e..ebd6f73ff 100644 --- a/src/io_scatter.F90 +++ b/src/io_scatter.F90 @@ -1,6 +1,6 @@ module io_scatter_module implicit none - public scatter_nod2D + public scatter_nod2D, scatter_elem2D private contains @@ -55,6 +55,55 @@ subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) end subroutine + ! thread-safe procedure + subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) + use g_PARSUP + use o_mesh + use, intrinsic :: iso_fortran_env, only: real64 + real(real64), intent(in) :: arr2D_global(:) + real(real64), intent(out) :: arr2D_local(:) + integer, intent(in) :: root_rank ! rank of sending process + integer, intent(in) :: comm + ! EO args + integer :: tag = 0 + integer :: mpi_precision = MPI_DOUBLE_PRECISION + integer status(MPI_STATUS_SIZE) + integer :: n, sender_rank + integer, allocatable :: remote_list_elem2d(:) + real(real64), allocatable :: sendbuf(:) + integer elem_size + + call assert(size(arr2D_local) == size(mylist_elem2d), __LINE__) ! == mydim_elem2d+edim_elem2d, i.e. partition elems + halo elems + + if(mype == root_rank) then + arr2D_local = arr2D_global(mylist_elem2d) + do n = 1, npes-1 + ! receive remote partition 2D size + call mpi_recv(elem_size, 1, mpi_integer, MPI_ANY_SOURCE, tag+0, comm, status, mpierr) + sender_rank = status(mpi_source) + + ! receive remote mylist_elem2d + allocate(remote_list_elem2d(elem_size)) + call mpi_recv(remote_list_elem2d(1), elem_size, mpi_integer, sender_rank, tag+1, comm, status, mpierr) + + allocate(sendbuf(elem_size)) + sendbuf = arr2D_global(remote_list_elem2d) + deallocate(remote_list_elem2d) + + call mpi_send(sendbuf(1), elem_size, mpi_double_precision, sender_rank, tag+2, comm, mpierr) + deallocate(sendbuf) + end do + + else + elem_size = size(mylist_elem2d) + call mpi_send(elem_size, 1, mpi_integer, root_rank, tag+0, comm, mpierr) + call mpi_send(mylist_elem2d(1), elem_size, mpi_integer, root_rank, tag+1, comm, mpierr) + + call mpi_recv(arr2D_local(1), elem_size, mpi_precision, root_rank, tag+2, comm, status, mpierr) + end if + end subroutine + + subroutine assert(val, line) logical, intent(in) :: val integer, intent(in) :: line From 38729b5782c2ce2c055fdca99b07c51dc5b1fe26 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 12 Jan 2021 18:11:41 +0100 Subject: [PATCH 213/909] be able to specify elem based variables --- src/io_fesom_file.F90 | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index f6e5928d1..eb2447875 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -30,7 +30,7 @@ module io_fesom_file_module integer :: rec_cnt = -1 integer :: iorank = 0 contains - procedure, public :: read_and_scatter_variables, gather_and_write_variables, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex + procedure, public :: read_and_scatter_variables, gather_and_write_variables, init, specify_node_var, specify_elem_var, is_iorank, rec_count, time_varindex, time_dimindex procedure, public :: close_file ! inherited procedures we overwrite end type @@ -141,7 +141,7 @@ subroutine read_and_scatter_variables(f) end if if(var%is_elem_based) then - ; + call scatter_elem2D(var%global_level_data, var%local_data_ptr3(lvl,:), f%iorank, MPI_comm_fesom) else call scatter_nod2D(var%global_level_data, var%local_data_ptr3(lvl,:), f%iorank, MPI_comm_fesom) end if @@ -176,7 +176,7 @@ subroutine gather_and_write_variables(f) do lvl=1, nlvl if(var%is_elem_based) then - ; + call gather_elem2D(var%local_data_ptr3(lvl,:), var%global_level_data, f%iorank, 42, MPI_comm_fesom) else call gather_nod2D (var%local_data_ptr3(lvl,:), var%global_level_data, f%iorank, 42, MPI_comm_fesom) end if @@ -217,6 +217,31 @@ subroutine specify_node_var(f, name, local_data, longname, units) call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .false., longname, units) end if end subroutine + + + subroutine specify_elem_var(f, name, local_data, longname, units) + use, intrinsic :: ISO_C_BINDING + use g_PARSUP + class(fesom_file_type), intent(inout) :: f + character(len=*), intent(in) :: name + real(kind=8), target, intent(inout) :: local_data(..) ! todo: be able to set precision + character(len=*), intent(in) :: units, longname + ! EO parameters + real(8), pointer :: local_data_ptr3(:,:) + type(dim_info) level_diminfo, depth_diminfo + + level_diminfo = obtain_diminfo(f, m_elem2d) + + if(size(shape(local_data)) == 1) then ! 1D data + call c_f_pointer(c_loc(local_data), local_data_ptr3, [1,size(local_data)]) + call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .true., longname, units) + + else if(size(shape(local_data)) == 2) then ! 2D data + depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) + call c_f_pointer(c_loc(local_data), local_data_ptr3, [size(local_data, dim=1),size(local_data, dim=2)]) + call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .true., longname, units) + end if + end subroutine function obtain_diminfo(f, len) result(info) From 32d8b7ca4da1dc13d20190a17ec274110f49bb75 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 12 Jan 2021 18:35:55 +0100 Subject: [PATCH 214/909] do not use the size of mylist_elem2d for local data size comparison as it is larger --- src/io_scatter.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/io_scatter.F90 b/src/io_scatter.F90 index ebd6f73ff..95048e864 100644 --- a/src/io_scatter.F90 +++ b/src/io_scatter.F90 @@ -73,10 +73,11 @@ subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) real(real64), allocatable :: sendbuf(:) integer elem_size - call assert(size(arr2D_local) == size(mylist_elem2d), __LINE__) ! == mydim_elem2d+edim_elem2d, i.e. partition elems + halo elems + elem_size = size(arr2D_local) + call assert(elem_size == mydim_elem2d+edim_elem2d, __LINE__) ! mylist_elem2d is larger and can not be used for comparison here if(mype == root_rank) then - arr2D_local = arr2D_global(mylist_elem2d) + arr2D_local = arr2D_global(1:elem_size) do n = 1, npes-1 ! receive remote partition 2D size call mpi_recv(elem_size, 1, mpi_integer, MPI_ANY_SOURCE, tag+0, comm, status, mpierr) @@ -95,7 +96,6 @@ subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) end do else - elem_size = size(mylist_elem2d) call mpi_send(elem_size, 1, mpi_integer, root_rank, tag+0, comm, mpierr) call mpi_send(mylist_elem2d(1), elem_size, mpi_integer, root_rank, tag+1, comm, mpierr) From 1cd2a7e691d06f951125bdaa53271aa57fe5318b Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 12 Jan 2021 18:41:57 +0100 Subject: [PATCH 215/909] change argument ordering when specifying variables so it is more similar to the original ordering in def_variable of the io_RESTART module --- src/io_fesom_file.F90 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index eb2447875..992631713 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -194,13 +194,13 @@ subroutine gather_and_write_variables(f) end subroutine - subroutine specify_node_var(f, name, local_data, longname, units) + subroutine specify_node_var(f, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING use g_PARSUP class(fesom_file_type), intent(inout) :: f character(len=*), intent(in) :: name - real(kind=8), target, intent(inout) :: local_data(..) ! todo: be able to set precision character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(..) ! todo: be able to set precision ! EO parameters real(8), pointer :: local_data_ptr3(:,:) type(dim_info) level_diminfo, depth_diminfo @@ -219,13 +219,13 @@ subroutine specify_node_var(f, name, local_data, longname, units) end subroutine - subroutine specify_elem_var(f, name, local_data, longname, units) + subroutine specify_elem_var(f, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING use g_PARSUP class(fesom_file_type), intent(inout) :: f character(len=*), intent(in) :: name - real(kind=8), target, intent(inout) :: local_data(..) ! todo: be able to set precision character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(..) ! todo: be able to set precision ! EO parameters real(8), pointer :: local_data_ptr3(:,:) type(dim_info) level_diminfo, depth_diminfo From 6eaffc19832ba681b7b2492597ee00c5c0006ccf Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 13 Jan 2021 16:58:22 +0100 Subject: [PATCH 216/909] use our provided mpi data type variable for precision --- src/io_scatter.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_scatter.F90 b/src/io_scatter.F90 index 95048e864..ee5a0d6c6 100644 --- a/src/io_scatter.F90 +++ b/src/io_scatter.F90 @@ -41,7 +41,7 @@ subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) sendbuf = arr2D_global(remote_list_nod2d) deallocate(remote_list_nod2d) - call mpi_send(sendbuf(1), node_size, mpi_double_precision, sender_rank, tag+2, comm, mpierr) + call mpi_send(sendbuf(1), node_size, mpi_precision, sender_rank, tag+2, comm, mpierr) deallocate(sendbuf) end do @@ -91,7 +91,7 @@ subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) sendbuf = arr2D_global(remote_list_elem2d) deallocate(remote_list_elem2d) - call mpi_send(sendbuf(1), elem_size, mpi_double_precision, sender_rank, tag+2, comm, mpierr) + call mpi_send(sendbuf(1), elem_size, mpi_precision, sender_rank, tag+2, comm, mpierr) deallocate(sendbuf) end do From 059a371499ab3f61057930b359b7b976c42eb2f6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 13 Jan 2021 17:24:44 +0100 Subject: [PATCH 217/909] introduce an mpi barrier when distributing variables to avoid overlapping receive results --- src/io_scatter.F90 | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/io_scatter.F90 b/src/io_scatter.F90 index ee5a0d6c6..20279b9e5 100644 --- a/src/io_scatter.F90 +++ b/src/io_scatter.F90 @@ -52,6 +52,10 @@ subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) call mpi_recv(arr2D_local(1), node_size, mpi_precision, root_rank, tag+2, comm, status, mpierr) end if + + ! without a barrier, we get wrong results in arr2D_local + ! todo: not sure why this happens (probably because the 3D levels have the same send/recv signature), get rid of the barrier if possible + call mpi_barrier(comm, mpierr) end subroutine @@ -101,6 +105,10 @@ subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) call mpi_recv(arr2D_local(1), elem_size, mpi_precision, root_rank, tag+2, comm, status, mpierr) end if + + ! without a barrier, we get wrong results in arr2D_local + ! todo: not sure why this happens (probably because the 3D levels have the same send/recv signature), get rid of the barrier if possible + call mpi_barrier(comm, mpierr) end subroutine From 9ff1cfc7d160782bb24a43d68e9b07f0007eb5d4 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 14 Jan 2021 16:33:47 +0100 Subject: [PATCH 218/909] use actual element indices for the local data copy --- src/io_scatter.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_scatter.F90 b/src/io_scatter.F90 index 20279b9e5..663f8a4aa 100644 --- a/src/io_scatter.F90 +++ b/src/io_scatter.F90 @@ -81,7 +81,7 @@ subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) call assert(elem_size == mydim_elem2d+edim_elem2d, __LINE__) ! mylist_elem2d is larger and can not be used for comparison here if(mype == root_rank) then - arr2D_local = arr2D_global(1:elem_size) + arr2D_local = arr2D_global(myList_elem2D(1:elem_size)) do n = 1, npes-1 ! receive remote partition 2D size call mpi_recv(elem_size, 1, mpi_integer, MPI_ANY_SOURCE, tag+0, comm, status, mpierr) From 8d2ff1442e7105a2f3177b05f4a01127670f13a6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 14 Jan 2021 17:27:15 +0100 Subject: [PATCH 219/909] - remove variable with ambiguous name - allocate unused buffers to zero size --- src/io_fesom_file.F90 | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 992631713..ffdbaf8aa 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -111,7 +111,7 @@ subroutine read_and_scatter_variables(f) use io_scatter_module class(fesom_file_type), target :: f ! EO parameters - integer i,lvl, nlvl, nodes_per_lvl + integer i,lvl, nlvl logical is_2d integer last_rec_idx type(var_info), pointer :: var @@ -122,12 +122,13 @@ subroutine read_and_scatter_variables(f) var => f%var_infos(i) nlvl = size(var%local_data_ptr3,dim=1) - nodes_per_lvl = var%global_level_data_size is_2d = (nlvl == 1) if(mype == f%iorank) then ! todo: choose how many levels we read at once - if(.not. allocated(var%global_level_data)) allocate(var%global_level_data(nodes_per_lvl)) + if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( var%global_level_data_size )) + else + if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( 0 )) end if do lvl=1, nlvl @@ -155,7 +156,7 @@ subroutine gather_and_write_variables(f) use io_gather_module class(fesom_file_type), target :: f ! EO parameters - integer i,lvl, nlvl, nodes_per_lvl + integer i,lvl, nlvl logical is_2d type(var_info), pointer :: var @@ -166,12 +167,13 @@ subroutine gather_and_write_variables(f) nlvl = size(var%local_data_ptr3,dim=1) - nodes_per_lvl = var%global_level_data_size is_2d = (nlvl == 1) if(mype == f%iorank) then ! todo: choose how many levels we write at once - if(.not. allocated(var%global_level_data)) allocate(var%global_level_data(nodes_per_lvl)) + if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( var%global_level_data_size )) + else + if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( 0 )) end if do lvl=1, nlvl From e500cad365c953b2d6c9aadbac1304af544f1beb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 14 Jan 2021 17:56:31 +0100 Subject: [PATCH 220/909] use an intermediate buffer to gather/scatter our variable data as the 3D data it is not stored contiguously --- src/io_fesom_file.F90 | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index ffdbaf8aa..3231e7274 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -115,6 +115,7 @@ subroutine read_and_scatter_variables(f) logical is_2d integer last_rec_idx type(var_info), pointer :: var + real(kind=8), allocatable :: laux(:) last_rec_idx = f%rec_count() @@ -123,6 +124,7 @@ subroutine read_and_scatter_variables(f) nlvl = size(var%local_data_ptr3,dim=1) is_2d = (nlvl == 1) + allocate(laux( size(var%local_data_ptr3,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D if(mype == f%iorank) then ! todo: choose how many levels we read at once @@ -142,11 +144,14 @@ subroutine read_and_scatter_variables(f) end if if(var%is_elem_based) then - call scatter_elem2D(var%global_level_data, var%local_data_ptr3(lvl,:), f%iorank, MPI_comm_fesom) + call scatter_elem2D(var%global_level_data, laux, f%iorank, MPI_comm_fesom) else - call scatter_nod2D(var%global_level_data, var%local_data_ptr3(lvl,:), f%iorank, MPI_comm_fesom) + call scatter_nod2D(var%global_level_data, laux, f%iorank, MPI_comm_fesom) end if + ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI + var%local_data_ptr3(lvl,:) = laux ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) end do + deallocate(laux) end do end subroutine @@ -158,16 +163,17 @@ subroutine gather_and_write_variables(f) ! EO parameters integer i,lvl, nlvl logical is_2d + real(kind=8), allocatable :: laux(:) type(var_info), pointer :: var if(f%is_iorank()) f%rec_cnt = f%rec_count()+1 do i=1, f%nvar_infos var => f%var_infos(i) - nlvl = size(var%local_data_ptr3,dim=1) is_2d = (nlvl == 1) + allocate(laux( size(var%local_data_ptr3,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D if(mype == f%iorank) then ! todo: choose how many levels we write at once @@ -177,10 +183,13 @@ subroutine gather_and_write_variables(f) end if do lvl=1, nlvl + ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI + laux = var%local_data_ptr3(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) + if(var%is_elem_based) then - call gather_elem2D(var%local_data_ptr3(lvl,:), var%global_level_data, f%iorank, 42, MPI_comm_fesom) + call gather_elem2D(laux, var%global_level_data, f%iorank, 42, MPI_comm_fesom) else - call gather_nod2D (var%local_data_ptr3(lvl,:), var%global_level_data, f%iorank, 42, MPI_comm_fesom) + call gather_nod2D (laux, var%global_level_data, f%iorank, 42, MPI_comm_fesom) end if if(mype == f%iorank) then @@ -192,6 +201,7 @@ subroutine gather_and_write_variables(f) end if end if end do + deallocate(laux) end do end subroutine @@ -233,13 +243,13 @@ subroutine specify_elem_var(f, name, longname, units, local_data) type(dim_info) level_diminfo, depth_diminfo level_diminfo = obtain_diminfo(f, m_elem2d) - + if(size(shape(local_data)) == 1) then ! 1D data call c_f_pointer(c_loc(local_data), local_data_ptr3, [1,size(local_data)]) - call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .true., longname, units) - + call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .true., longname, units) + else if(size(shape(local_data)) == 2) then ! 2D data - depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) + depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) call c_f_pointer(c_loc(local_data), local_data_ptr3, [size(local_data, dim=1),size(local_data, dim=2)]) call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .true., longname, units) end if From 99bf8422ffc20fcff9e0acbe17d8d55c16e146eb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 15 Jan 2021 10:53:51 +0100 Subject: [PATCH 221/909] remove reshaping the array pointer via a c pointer as e.g. the tracer arrays we put in are not contiguous in memory --- src/io_fesom_file.F90 | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 3231e7274..37027dead 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -30,8 +30,10 @@ module io_fesom_file_module integer :: rec_cnt = -1 integer :: iorank = 0 contains - procedure, public :: read_and_scatter_variables, gather_and_write_variables, init, specify_node_var, specify_elem_var, is_iorank, rec_count, time_varindex, time_dimindex + procedure, public :: read_and_scatter_variables, gather_and_write_variables, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex procedure, public :: close_file ! inherited procedures we overwrite + generic, public :: specify_elem_var => specify_elem_var_2d, specify_elem_var_3d + procedure, private :: specify_elem_var_2d, specify_elem_var_3d end type @@ -231,28 +233,38 @@ subroutine specify_node_var(f, name, longname, units, local_data) end subroutine - subroutine specify_elem_var(f, name, longname, units, local_data) + subroutine specify_elem_var_2d(f, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING use g_PARSUP class(fesom_file_type), intent(inout) :: f character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname - real(kind=8), target, intent(inout) :: local_data(..) ! todo: be able to set precision + real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision ! EO parameters real(8), pointer :: local_data_ptr3(:,:) - type(dim_info) level_diminfo, depth_diminfo + type(dim_info) level_diminfo level_diminfo = obtain_diminfo(f, m_elem2d) - if(size(shape(local_data)) == 1) then ! 1D data - call c_f_pointer(c_loc(local_data), local_data_ptr3, [1,size(local_data)]) + local_data_ptr3(1:1,1:size(local_data)) => local_data call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .true., longname, units) + end subroutine - else if(size(shape(local_data)) == 2) then ! 2D data + + subroutine specify_elem_var_3d(f, name, longname, units, local_data) + use, intrinsic :: ISO_C_BINDING + use g_PARSUP + class(fesom_file_type), intent(inout) :: f + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + ! EO parameters + type(dim_info) level_diminfo, depth_diminfo + + level_diminfo = obtain_diminfo(f, m_elem2d) depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) - call c_f_pointer(c_loc(local_data), local_data_ptr3, [size(local_data, dim=1),size(local_data, dim=2)]) - call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .true., longname, units) - end if + + call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data, .true., longname, units) end subroutine From 4b2262434d7e2fb687ee13264c82a0b09be43094 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 15 Jan 2021 10:55:07 +0100 Subject: [PATCH 222/909] remove unused commented parameter --- src/io_fesom_file.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 37027dead..90011299d 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -311,7 +311,6 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ use g_PARSUP type(fesom_file_type), intent(inout) :: f character(len=*), intent(in) :: name -! integer, intent(in) :: global_shape(:) integer, intent(in) :: dim_indices(:) integer global_level_data_size real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision? From 77d848d015dda7f5d525a34b1947e6c45caeb65e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 15 Jan 2021 15:48:17 +0100 Subject: [PATCH 223/909] maintain a private array with all fesom_file_type instances to be able to identify them from a thread via an index --- src/io_fesom_file.F90 | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 90011299d..7ba9dd8a7 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -29,6 +29,7 @@ module io_fesom_file_module type(dim_info), allocatable :: used_mesh_dims(:) ! the dims we add for our variables, we need to identify them when adding our mesh related variables integer :: rec_cnt = -1 integer :: iorank = 0 + integer :: fesom_file_index contains procedure, public :: read_and_scatter_variables, gather_and_write_variables, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex procedure, public :: close_file ! inherited procedures we overwrite @@ -40,6 +41,12 @@ module io_fesom_file_module integer, save :: m_nod2d integer, save :: m_elem2d integer, save :: m_nl + + + type fesom_file_type_ptr + class(fesom_file_type), pointer :: ptr + end type + type(fesom_file_type_ptr), allocatable, save :: all_fesom_files(:) contains @@ -88,11 +95,12 @@ function time_dimindex(this) result(x) subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures - class(fesom_file_type), intent(inout) :: f + class(fesom_file_type), target, intent(inout) :: f integer mesh_nod2d integer mesh_elem2d integer mesh_nl ! EO parameters + type(fesom_file_type_ptr), allocatable :: tmparr(:) ! get hold of our mesh data for later use (assume the mesh instance will not change) m_nod2d = mesh_nod2d @@ -105,6 +113,19 @@ subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call f%time_dimidx = f%add_dim_unlimited('time') f%time_varidx = f%add_var_double('time', [f%time_dimidx]) + + ! add this instance to global array + ! the array is being used to identify the instance in an async call + if( .not. allocated(all_fesom_files)) then + allocate(all_fesom_files(1)) + else + allocate( tmparr(size(all_fesom_files)+1) ) + tmparr(1:size(all_fesom_files)) = all_fesom_files + deallocate(all_fesom_files) + call move_alloc(tmparr, all_fesom_files) + end if + all_fesom_files(size(all_fesom_files))%ptr => f + f%fesom_file_index = size(all_fesom_files) end subroutine @@ -334,7 +355,7 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ subroutine close_file(this) class(fesom_file_type), intent(inout) :: this - + this%rec_cnt = -1 ! reset state (should probably be done in all the open_ procedures, not here) call this%netcdf_file_type%close_file() end subroutine From d7ec819f0d067f0b4f015073d0de70f1e902234d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 15 Jan 2021 15:53:57 +0100 Subject: [PATCH 224/909] add functionality to use async threads but do not use them yet --- src/io_fesom_file.F90 | 59 +++++++++++++++++++++++++++++++++++++ test/fortran/CMakeLists.txt | 2 +- 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 7ba9dd8a7..446927a33 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -1,6 +1,7 @@ ! synopsis: generic implementation to asynchronously read/write FESOM mesh variable(s) with distributed cell or element data in 2D or 3D to/from a NetCDF file module io_fesom_file_module use io_netcdf_file_module + use async_threads_module implicit none public fesom_file_type private @@ -30,6 +31,10 @@ module io_fesom_file_module integer :: rec_cnt = -1 integer :: iorank = 0 integer :: fesom_file_index + type(thread_type) thread + logical :: thread_running = .false. + integer :: comm + integer :: mype_workaround contains procedure, public :: read_and_scatter_variables, gather_and_write_variables, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex procedure, public :: close_file ! inherited procedures we overwrite @@ -229,6 +234,36 @@ subroutine gather_and_write_variables(f) end subroutine + subroutine join(f) + class(fesom_file_type) f + ! EO parameters + + if(f%thread_running) call f%thread%join() + f%thread_running = .false. + end subroutine + + + subroutine async_gather_and_write_variables(f) + class(fesom_file_type) f + ! EO parameters + + call assert(.not. f%thread_running, __LINE__) + + call f%thread%run() + f%thread_running = .true. + end subroutine + + + subroutine async_worker(fesom_file_index) + integer, intent(in) :: fesom_file_index + ! EO parameters + type(fesom_file_type), pointer :: f + + f => all_fesom_files(fesom_file_index)%ptr +! mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) + end subroutine + + subroutine specify_node_var(f, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING use g_PARSUP @@ -330,6 +365,7 @@ function obtain_diminfo(f, len) result(info) subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_data, is_elem_based, longname, units) use g_PARSUP + use io_netcdf_workaround_module type(fesom_file_type), intent(inout) :: f character(len=*), intent(in) :: name integer, intent(in) :: dim_indices(:) @@ -339,6 +375,9 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ character(len=*), intent(in) :: units, longname ! EO parameters integer var_index + logical async_netcdf_allowed + integer err + integer provided_mpi_thread_support_level var_index = f%add_var_double(name, dim_indices) call f%add_var_att(var_index, "units", units) @@ -350,11 +389,31 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ f%var_infos(f%nvar_infos)%local_data_ptr3 => local_data f%var_infos(f%nvar_infos)%global_level_data_size = global_level_data_size f%var_infos(f%nvar_infos)%is_elem_based = is_elem_based + + ! set up async output + + f%iorank = next_io_rank(MPI_COMM_FESOM, async_netcdf_allowed) + + call MPI_Comm_dup(MPI_COMM_FESOM, f%comm, err) + + call f%thread%initialize(async_worker, f%fesom_file_index) + if(.not. async_netcdf_allowed) call f%thread%disable_async() + + ! check if we have multi thread support available in the MPI library + ! tough MPI_THREAD_FUNNELED should be enough here, at least on cray-mpich 7.5.3 async mpi calls fail if we do not have support level 'MPI_THREAD_MULTIPLE' + ! on cray-mpich we only get level 'MPI_THREAD_MULTIPLE' if 'MPICH_MAX_THREAD_SAFETY=multiple' is set in the environment + call MPI_Query_thread(provided_mpi_thread_support_level, err) + if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call f%thread%disable_async() + + f%mype_workaround = mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads end subroutine subroutine close_file(this) class(fesom_file_type), intent(inout) :: this + + if(this%thread_running) call this%thread%join() + this%thread_running = .false. this%rec_cnt = -1 ! reset state (should probably be done in all the open_ procedures, not here) call this%netcdf_file_type%close_file() diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index 76c3646f1..ae726f23d 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -15,7 +15,7 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_nf_interface.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_file_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_attribute_module.F90 - ${CMAKE_CURRENT_LIST_DIR}/../../src/io_fesom_file.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/gen_modules_partitioning.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_gather.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_scatter.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_fesom_file.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/gen_modules_partitioning.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_gather.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_scatter.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_workaround_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/mpi_topology_module.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) From f0e1375696d5673732e904add648d5063c011560 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 15 Jan 2021 16:12:07 +0100 Subject: [PATCH 225/909] ba able to flush the file to disk --- src/io_netcdf_file_module.F90 | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index d58e2327f..9e5605e5d 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -14,7 +14,7 @@ module io_netcdf_file_module character(:), allocatable :: filepath integer ncid contains - procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_int, open_read, close_file, open_write_create, open_write_append + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_int, open_read, flush_file, close_file, open_write_create, open_write_append procedure, public :: is_attached, read_var_shape procedure, public :: ndims generic, public :: read_var => read_var_r4, read_var_r8, read_var_integer @@ -517,6 +517,15 @@ subroutine write_var_integer(this, varindex, starts, sizes, values) end subroutine + subroutine flush_file(this) + class(netcdf_file_type), intent(inout) :: this + ! EO parameters + include "netcdf.inc" + + call assert_nc( nf_sync(this%ncid), __LINE__ ) ! flush the file to disk + end subroutine + + subroutine close_file(this) ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same ncid class(netcdf_file_type), intent(inout) :: this From afb27b4418e09e9864c5fe415e63c41831ad883e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 19 Jan 2021 10:16:01 +0100 Subject: [PATCH 226/909] use only one thread per file (not one per variable) to simplify things when writing --- src/io_fesom_file.F90 | 44 +++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 446927a33..ff9c6bf1b 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -100,12 +100,17 @@ function time_dimindex(this) result(x) subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures + use g_PARSUP + use io_netcdf_workaround_module class(fesom_file_type), target, intent(inout) :: f integer mesh_nod2d integer mesh_elem2d integer mesh_nl ! EO parameters type(fesom_file_type_ptr), allocatable :: tmparr(:) + logical async_netcdf_allowed + integer err + integer provided_mpi_thread_support_level ! get hold of our mesh data for later use (assume the mesh instance will not change) m_nod2d = mesh_nod2d @@ -131,6 +136,23 @@ subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call end if all_fesom_files(size(all_fesom_files))%ptr => f f%fesom_file_index = size(all_fesom_files) + + ! set up async output + + f%iorank = next_io_rank(MPI_COMM_FESOM, async_netcdf_allowed) + + call MPI_Comm_dup(MPI_COMM_FESOM, f%comm, err) + + call f%thread%initialize(async_worker, f%fesom_file_index) + if(.not. async_netcdf_allowed) call f%thread%disable_async() + + ! check if we have multi thread support available in the MPI library + ! tough MPI_THREAD_FUNNELED should be enough here, at least on cray-mpich 7.5.3 async mpi calls fail if we do not have support level 'MPI_THREAD_MULTIPLE' + ! on cray-mpich we only get level 'MPI_THREAD_MULTIPLE' if 'MPICH_MAX_THREAD_SAFETY=multiple' is set in the environment + call MPI_Query_thread(provided_mpi_thread_support_level, err) + if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call f%thread%disable_async() + + f%mype_workaround = mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads end subroutine @@ -364,8 +386,6 @@ function obtain_diminfo(f, len) result(info) subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_data, is_elem_based, longname, units) - use g_PARSUP - use io_netcdf_workaround_module type(fesom_file_type), intent(inout) :: f character(len=*), intent(in) :: name integer, intent(in) :: dim_indices(:) @@ -375,9 +395,6 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ character(len=*), intent(in) :: units, longname ! EO parameters integer var_index - logical async_netcdf_allowed - integer err - integer provided_mpi_thread_support_level var_index = f%add_var_double(name, dim_indices) call f%add_var_att(var_index, "units", units) @@ -389,23 +406,6 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ f%var_infos(f%nvar_infos)%local_data_ptr3 => local_data f%var_infos(f%nvar_infos)%global_level_data_size = global_level_data_size f%var_infos(f%nvar_infos)%is_elem_based = is_elem_based - - ! set up async output - - f%iorank = next_io_rank(MPI_COMM_FESOM, async_netcdf_allowed) - - call MPI_Comm_dup(MPI_COMM_FESOM, f%comm, err) - - call f%thread%initialize(async_worker, f%fesom_file_index) - if(.not. async_netcdf_allowed) call f%thread%disable_async() - - ! check if we have multi thread support available in the MPI library - ! tough MPI_THREAD_FUNNELED should be enough here, at least on cray-mpich 7.5.3 async mpi calls fail if we do not have support level 'MPI_THREAD_MULTIPLE' - ! on cray-mpich we only get level 'MPI_THREAD_MULTIPLE' if 'MPICH_MAX_THREAD_SAFETY=multiple' is set in the environment - call MPI_Query_thread(provided_mpi_thread_support_level, err) - if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call f%thread%disable_async() - - f%mype_workaround = mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads end subroutine From e57abe549157635ebe3eb89f13ebe3f794928a7b Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 19 Jan 2021 14:17:33 +0100 Subject: [PATCH 227/909] flush the file to disk after each write --- src/io_fesom_file.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index ff9c6bf1b..0f1e352fa 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -216,7 +216,7 @@ subroutine gather_and_write_variables(f) real(kind=8), allocatable :: laux(:) type(var_info), pointer :: var - if(f%is_iorank()) f%rec_cnt = f%rec_count()+1 + if(mype == f%iorank) f%rec_cnt = f%rec_count()+1 do i=1, f%nvar_infos var => f%var_infos(i) @@ -253,6 +253,8 @@ subroutine gather_and_write_variables(f) end do deallocate(laux) end do + + if(mype == f%iorank) call f%flush_file() ! flush the file to disk after each write end subroutine From bacd1c4a8b94c8a9afd224c7850048c94b41289f Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 19 Jan 2021 14:30:01 +0100 Subject: [PATCH 228/909] rename pointer to external data to make clear it is not a pointer to our internal buffer copy --- src/io_fesom_file.F90 | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 0f1e352fa..f5d804163 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -9,7 +9,7 @@ module io_fesom_file_module type var_info integer var_index - real(kind=8), pointer :: local_data_ptr3(:,:) => null() + real(kind=8), pointer :: external_local_data_ptr(:,:) => null() real(kind=8), allocatable :: global_level_data(:) integer :: global_level_data_size = 0 logical is_elem_based @@ -172,9 +172,9 @@ subroutine read_and_scatter_variables(f) do i=1, f%nvar_infos var => f%var_infos(i) - nlvl = size(var%local_data_ptr3,dim=1) + nlvl = size(var%external_local_data_ptr,dim=1) is_2d = (nlvl == 1) - allocate(laux( size(var%local_data_ptr3,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D + allocate(laux( size(var%external_local_data_ptr,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D if(mype == f%iorank) then ! todo: choose how many levels we read at once @@ -199,7 +199,7 @@ subroutine read_and_scatter_variables(f) call scatter_nod2D(var%global_level_data, laux, f%iorank, MPI_comm_fesom) end if ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI - var%local_data_ptr3(lvl,:) = laux ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) + var%external_local_data_ptr(lvl,:) = laux ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) end do deallocate(laux) end do @@ -221,9 +221,9 @@ subroutine gather_and_write_variables(f) do i=1, f%nvar_infos var => f%var_infos(i) - nlvl = size(var%local_data_ptr3,dim=1) + nlvl = size(var%external_local_data_ptr,dim=1) is_2d = (nlvl == 1) - allocate(laux( size(var%local_data_ptr3,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D + allocate(laux( size(var%external_local_data_ptr,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D if(mype == f%iorank) then ! todo: choose how many levels we write at once @@ -234,7 +234,7 @@ subroutine gather_and_write_variables(f) do lvl=1, nlvl ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI - laux = var%local_data_ptr3(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) + laux = var%external_local_data_ptr(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) if(var%is_elem_based) then call gather_elem2D(laux, var%global_level_data, f%iorank, 42, MPI_comm_fesom) @@ -296,19 +296,19 @@ subroutine specify_node_var(f, name, longname, units, local_data) character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(..) ! todo: be able to set precision ! EO parameters - real(8), pointer :: local_data_ptr3(:,:) + real(8), pointer :: external_local_data_ptr(:,:) type(dim_info) level_diminfo, depth_diminfo level_diminfo = obtain_diminfo(f, m_nod2d) if(size(shape(local_data)) == 1) then ! 1D data - call c_f_pointer(c_loc(local_data), local_data_ptr3, [1,size(local_data)]) - call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .false., longname, units) + call c_f_pointer(c_loc(local_data), external_local_data_ptr, [1,size(local_data)]) + call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, external_local_data_ptr, .false., longname, units) else if(size(shape(local_data)) == 2) then ! 2D data depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) - call c_f_pointer(c_loc(local_data), local_data_ptr3, [size(local_data, dim=1),size(local_data, dim=2)]) - call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .false., longname, units) + call c_f_pointer(c_loc(local_data), external_local_data_ptr, [size(local_data, dim=1),size(local_data, dim=2)]) + call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, external_local_data_ptr, .false., longname, units) end if end subroutine @@ -321,13 +321,13 @@ subroutine specify_elem_var_2d(f, name, longname, units, local_data) character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision ! EO parameters - real(8), pointer :: local_data_ptr3(:,:) + real(8), pointer :: external_local_data_ptr(:,:) type(dim_info) level_diminfo level_diminfo = obtain_diminfo(f, m_elem2d) - local_data_ptr3(1:1,1:size(local_data)) => local_data - call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data_ptr3, .true., longname, units) + external_local_data_ptr(1:1,1:size(local_data)) => local_data + call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, external_local_data_ptr, .true., longname, units) end subroutine @@ -405,7 +405,7 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ call assert(f%nvar_infos < size(f%var_infos), __LINE__) f%nvar_infos = f%nvar_infos+1 f%var_infos(f%nvar_infos)%var_index = var_index - f%var_infos(f%nvar_infos)%local_data_ptr3 => local_data + f%var_infos(f%nvar_infos)%external_local_data_ptr => local_data f%var_infos(f%nvar_infos)%global_level_data_size = global_level_data_size f%var_infos(f%nvar_infos)%is_elem_based = is_elem_based end subroutine From 082ec5d50c3716aaae883b0c8408a3c230da3310 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 19 Jan 2021 14:44:46 +0100 Subject: [PATCH 229/909] - add subroutines to trigger asynchronous read or write - use a copy of the data then writing to allow changes to the external buffer while writing --- src/io_fesom_file.F90 | 45 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index f5d804163..556ed85d7 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -10,6 +10,7 @@ module io_fesom_file_module type var_info integer var_index real(kind=8), pointer :: external_local_data_ptr(:,:) => null() + real(kind=8), allocatable, dimension(:,:) :: local_data_copy real(kind=8), allocatable :: global_level_data(:) integer :: global_level_data_size = 0 logical is_elem_based @@ -34,12 +35,14 @@ module io_fesom_file_module type(thread_type) thread logical :: thread_running = .false. integer :: comm + logical gather_and_write integer :: mype_workaround contains - procedure, public :: read_and_scatter_variables, gather_and_write_variables, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex + procedure, public :: async_read_and_scatter_variables, async_gather_and_write_variables, join, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex procedure, public :: close_file ! inherited procedures we overwrite generic, public :: specify_elem_var => specify_elem_var_2d, specify_elem_var_3d procedure, private :: specify_elem_var_2d, specify_elem_var_3d + procedure, private :: read_and_scatter_variables, gather_and_write_variables end type @@ -221,9 +224,9 @@ subroutine gather_and_write_variables(f) do i=1, f%nvar_infos var => f%var_infos(i) - nlvl = size(var%external_local_data_ptr,dim=1) + nlvl = size(var%local_data_copy,dim=1) is_2d = (nlvl == 1) - allocate(laux( size(var%external_local_data_ptr,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D + allocate(laux( size(var%local_data_copy,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D if(mype == f%iorank) then ! todo: choose how many levels we write at once @@ -234,7 +237,7 @@ subroutine gather_and_write_variables(f) do lvl=1, nlvl ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI - laux = var%external_local_data_ptr(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) + laux = var%local_data_copy(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) if(var%is_elem_based) then call gather_elem2D(laux, var%global_level_data, f%iorank, 42, MPI_comm_fesom) @@ -263,28 +266,56 @@ subroutine join(f) ! EO parameters if(f%thread_running) call f%thread%join() - f%thread_running = .false. + f%thread_running = .false. + end subroutine + + + subroutine async_read_and_scatter_variables(f) + class(fesom_file_type), target :: f + + call assert(.not. f%thread_running, __LINE__) + + f%gather_and_write = .false. + call f%thread%run() + f%thread_running = .true. end subroutine subroutine async_gather_and_write_variables(f) - class(fesom_file_type) f + class(fesom_file_type), target :: f ! EO parameters + integer i + type(var_info), pointer :: var call assert(.not. f%thread_running, __LINE__) + ! copy data so we can write the current values asynchonously + do i=1, f%nvar_infos + var => f%var_infos(i) + if(.not. allocated(var%local_data_copy)) allocate( var%local_data_copy(size(var%external_local_data_ptr,dim=1), size(var%external_local_data_ptr,dim=2)) ) + var%local_data_copy = var%external_local_data_ptr + end do + + f%gather_and_write = .true. call f%thread%run() f%thread_running = .true. end subroutine subroutine async_worker(fesom_file_index) + use g_PARSUP integer, intent(in) :: fesom_file_index ! EO parameters type(fesom_file_type), pointer :: f f => all_fesom_files(fesom_file_index)%ptr -! mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) + mype = f%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) + + if(f%gather_and_write) then + call f%gather_and_write_variables() + else + call f%read_and_scatter_variables() + end if end subroutine From 7da461882b7ac8b9021a6d0372384d549e635311 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 19 Jan 2021 17:25:46 +0100 Subject: [PATCH 230/909] remove reshaping the array pointer via a c pointer for node based data to be able to use arrays which are not contiguous in memory --- src/io_fesom_file.F90 | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 556ed85d7..c90bfab45 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -38,9 +38,11 @@ module io_fesom_file_module logical gather_and_write integer :: mype_workaround contains - procedure, public :: async_read_and_scatter_variables, async_gather_and_write_variables, join, init, specify_node_var, is_iorank, rec_count, time_varindex, time_dimindex + procedure, public :: async_read_and_scatter_variables, async_gather_and_write_variables, join, init, is_iorank, rec_count, time_varindex, time_dimindex procedure, public :: close_file ! inherited procedures we overwrite + generic, public :: specify_node_var => specify_node_var_2d, specify_node_var_3d generic, public :: specify_elem_var => specify_elem_var_2d, specify_elem_var_3d + procedure, private :: specify_node_var_2d, specify_node_var_3d procedure, private :: specify_elem_var_2d, specify_elem_var_3d procedure, private :: read_and_scatter_variables, gather_and_write_variables end type @@ -319,28 +321,38 @@ subroutine async_worker(fesom_file_index) end subroutine - subroutine specify_node_var(f, name, longname, units, local_data) + subroutine specify_node_var_2d(f, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING use g_PARSUP class(fesom_file_type), intent(inout) :: f character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname - real(kind=8), target, intent(inout) :: local_data(..) ! todo: be able to set precision + real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision ! EO parameters real(8), pointer :: external_local_data_ptr(:,:) - type(dim_info) level_diminfo, depth_diminfo + type(dim_info) level_diminfo level_diminfo = obtain_diminfo(f, m_nod2d) - if(size(shape(local_data)) == 1) then ! 1D data - call c_f_pointer(c_loc(local_data), external_local_data_ptr, [1,size(local_data)]) - call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, external_local_data_ptr, .false., longname, units) - - else if(size(shape(local_data)) == 2) then ! 2D data - depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) - call c_f_pointer(c_loc(local_data), external_local_data_ptr, [size(local_data, dim=1),size(local_data, dim=2)]) - call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, external_local_data_ptr, .false., longname, units) - end if + external_local_data_ptr(1:1,1:size(local_data)) => local_data + call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, external_local_data_ptr, .false., longname, units) + end subroutine + + + subroutine specify_node_var_3d(f, name, longname, units, local_data) + use, intrinsic :: ISO_C_BINDING + use g_PARSUP + class(fesom_file_type), intent(inout) :: f + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + ! EO parameters + type(dim_info) level_diminfo, depth_diminfo + + level_diminfo = obtain_diminfo(f, m_nod2d) + depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) + + call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data, .false., longname, units) end subroutine From bcfe82158815f762e3afc6fd250c9f9215d484f4 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 21 Jan 2021 13:00:58 +0100 Subject: [PATCH 231/909] make sure the io_gather module has been initialized --- src/io_fesom_file.F90 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index c90bfab45..d560135f1 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -107,6 +107,7 @@ function time_dimindex(this) result(x) subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures use g_PARSUP use io_netcdf_workaround_module + use io_gather_module class(fesom_file_type), target, intent(inout) :: f integer mesh_nod2d integer mesh_elem2d @@ -117,6 +118,8 @@ subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call integer err integer provided_mpi_thread_support_level + call init_io_gather() + ! get hold of our mesh data for later use (assume the mesh instance will not change) m_nod2d = mesh_nod2d m_elem2d = mesh_elem2d From a3b1695c980ecbaa3ee0088b326bf91c3bdbc633 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 6 May 2021 10:10:33 +0200 Subject: [PATCH 232/909] add missing source files to the unit test build --- test/fortran/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index ae726f23d..b8660b011 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -16,6 +16,7 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_file_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_attribute_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_fesom_file.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/gen_modules_partitioning.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_gather.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_scatter.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_workaround_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/mpi_topology_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/MOD_MESH.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/oce_modules.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) From 704bd64ab2466d5f15d4838a2daced863359db14 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 11 May 2021 11:12:08 +0200 Subject: [PATCH 233/909] initial cmake file to build I/O scalability tests --- test/scalability/read_netcdf/CMakeLists.txt | 30 +++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 test/scalability/read_netcdf/CMakeLists.txt diff --git a/test/scalability/read_netcdf/CMakeLists.txt b/test/scalability/read_netcdf/CMakeLists.txt new file mode 100644 index 000000000..1986d5c1a --- /dev/null +++ b/test/scalability/read_netcdf/CMakeLists.txt @@ -0,0 +1,30 @@ +cmake_minimum_required(VERSION 3.4) + +project(read_netcdf Fortran) + +# get our source files +file(GLOB sources_Fortran ${CMAKE_CURRENT_LIST_DIR}/*.F90) + +include(${CMAKE_CURRENT_LIST_DIR}/../../../cmake/FindNETCDF.cmake) + +add_executable(${PROJECT_NAME} ${sources_Fortran} + ${CMAKE_CURRENT_LIST_DIR}/../../../src/io_netcdf_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../../src/io_netcdf_nf_interface.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../../src/io_netcdf_file_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../../src/io_netcdf_attribute_module.F90 + ) + +if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip -init=zero -no-wrap-margin) +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) + target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) + target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + endif() +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64) +endif() + +target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES}) +target_link_libraries(${PROJECT_NAME} ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRARIES}) +set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) From 0beac5f503c105aa7756c5cf431cffc19ee07077 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Jun 2021 18:23:35 +0200 Subject: [PATCH 234/909] fix typo in comment --- src/io_fesom_file.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index d560135f1..cab5391f9 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -294,7 +294,7 @@ subroutine async_gather_and_write_variables(f) call assert(.not. f%thread_running, __LINE__) - ! copy data so we can write the current values asynchonously + ! copy data so we can write the current values asynchronously do i=1, f%nvar_infos var => f%var_infos(i) if(.not. allocated(var%local_data_copy)) allocate( var%local_data_copy(size(var%external_local_data_ptr,dim=1), size(var%external_local_data_ptr,dim=2)) ) From 90a32a471e16012a373ac5a337dc973ed6abb8b8 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Jun 2021 18:54:57 +0200 Subject: [PATCH 235/909] rename instance variable in parameter list --- src/io_fesom_file.F90 | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index cab5391f9..b94f5176b 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -104,11 +104,11 @@ function time_dimindex(this) result(x) end function - subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures + subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures use g_PARSUP use io_netcdf_workaround_module use io_gather_module - class(fesom_file_type), target, intent(inout) :: f + class(fesom_file_type), target, intent(inout) :: this integer mesh_nod2d integer mesh_elem2d integer mesh_nl @@ -124,13 +124,13 @@ subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call m_nod2d = mesh_nod2d m_elem2d = mesh_elem2d m_nl = mesh_nl - call f%netcdf_file_type%initialize() + call this%netcdf_file_type%initialize() - allocate(f%used_mesh_dims(0)) + allocate(this%used_mesh_dims(0)) - f%time_dimidx = f%add_dim_unlimited('time') + this%time_dimidx = this%add_dim_unlimited('time') - f%time_varidx = f%add_var_double('time', [f%time_dimidx]) + this%time_varidx = this%add_var_double('time', [this%time_dimidx]) ! add this instance to global array ! the array is being used to identify the instance in an async call @@ -142,25 +142,25 @@ subroutine init(f, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call deallocate(all_fesom_files) call move_alloc(tmparr, all_fesom_files) end if - all_fesom_files(size(all_fesom_files))%ptr => f - f%fesom_file_index = size(all_fesom_files) + all_fesom_files(size(all_fesom_files))%ptr => this + this%fesom_file_index = size(all_fesom_files) ! set up async output - f%iorank = next_io_rank(MPI_COMM_FESOM, async_netcdf_allowed) + this%iorank = next_io_rank(MPI_COMM_FESOM, async_netcdf_allowed) - call MPI_Comm_dup(MPI_COMM_FESOM, f%comm, err) + call MPI_Comm_dup(MPI_COMM_FESOM, this%comm, err) - call f%thread%initialize(async_worker, f%fesom_file_index) - if(.not. async_netcdf_allowed) call f%thread%disable_async() + call this%thread%initialize(async_worker, this%fesom_file_index) + if(.not. async_netcdf_allowed) call this%thread%disable_async() ! check if we have multi thread support available in the MPI library ! tough MPI_THREAD_FUNNELED should be enough here, at least on cray-mpich 7.5.3 async mpi calls fail if we do not have support level 'MPI_THREAD_MULTIPLE' ! on cray-mpich we only get level 'MPI_THREAD_MULTIPLE' if 'MPICH_MAX_THREAD_SAFETY=multiple' is set in the environment call MPI_Query_thread(provided_mpi_thread_support_level, err) - if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call f%thread%disable_async() + if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call this%thread%disable_async() - f%mype_workaround = mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads + this%mype_workaround = mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads end subroutine @@ -174,7 +174,7 @@ subroutine read_and_scatter_variables(f) integer last_rec_idx type(var_info), pointer :: var real(kind=8), allocatable :: laux(:) - + last_rec_idx = f%rec_count() do i=1, f%nvar_infos @@ -207,7 +207,7 @@ subroutine read_and_scatter_variables(f) call scatter_nod2D(var%global_level_data, laux, f%iorank, MPI_comm_fesom) end if ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI - var%external_local_data_ptr(lvl,:) = laux ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) + var%external_local_data_ptr(lvl,:) = laux ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) end do deallocate(laux) end do @@ -277,7 +277,7 @@ subroutine join(f) subroutine async_read_and_scatter_variables(f) class(fesom_file_type), target :: f - + call assert(.not. f%thread_running, __LINE__) f%gather_and_write = .false. From 42e45a41110ac499431c742c9bc7d1f4341252eb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Jun 2021 19:04:34 +0200 Subject: [PATCH 236/909] rename instance variable in parameter list in all procedures with 'pass' attribute --- src/io_fesom_file.F90 | 168 +++++++++++++++++++++--------------------- 1 file changed, 84 insertions(+), 84 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index b94f5176b..2bc2c1e01 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -164,10 +164,10 @@ subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to ca end subroutine - subroutine read_and_scatter_variables(f) + subroutine read_and_scatter_variables(this) use g_PARSUP use io_scatter_module - class(fesom_file_type), target :: f + class(fesom_file_type), target :: this ! EO parameters integer i,lvl, nlvl logical is_2d @@ -175,16 +175,16 @@ subroutine read_and_scatter_variables(f) type(var_info), pointer :: var real(kind=8), allocatable :: laux(:) - last_rec_idx = f%rec_count() + last_rec_idx = this%rec_count() - do i=1, f%nvar_infos - var => f%var_infos(i) + do i=1, this%nvar_infos + var => this%var_infos(i) nlvl = size(var%external_local_data_ptr,dim=1) is_2d = (nlvl == 1) allocate(laux( size(var%external_local_data_ptr,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D - if(mype == f%iorank) then + if(mype == this%iorank) then ! todo: choose how many levels we read at once if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( var%global_level_data_size )) else @@ -192,19 +192,19 @@ subroutine read_and_scatter_variables(f) end if do lvl=1, nlvl - if(mype == f%iorank) then + if(mype == this%iorank) then if(is_2d) then - call f%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) + call this%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) else ! z,nod,time - call f%read_var(var%var_index, [lvl,1,last_rec_idx], [1,size(var%global_level_data),1], var%global_level_data) + call this%read_var(var%var_index, [lvl,1,last_rec_idx], [1,size(var%global_level_data),1], var%global_level_data) end if end if if(var%is_elem_based) then - call scatter_elem2D(var%global_level_data, laux, f%iorank, MPI_comm_fesom) + call scatter_elem2D(var%global_level_data, laux, this%iorank, MPI_comm_fesom) else - call scatter_nod2D(var%global_level_data, laux, f%iorank, MPI_comm_fesom) + call scatter_nod2D(var%global_level_data, laux, this%iorank, MPI_comm_fesom) end if ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI var%external_local_data_ptr(lvl,:) = laux ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) @@ -214,26 +214,26 @@ subroutine read_and_scatter_variables(f) end subroutine - subroutine gather_and_write_variables(f) + subroutine gather_and_write_variables(this) use g_PARSUP use io_gather_module - class(fesom_file_type), target :: f + class(fesom_file_type), target :: this ! EO parameters integer i,lvl, nlvl logical is_2d real(kind=8), allocatable :: laux(:) type(var_info), pointer :: var - if(mype == f%iorank) f%rec_cnt = f%rec_count()+1 + if(mype == this%iorank) this%rec_cnt = this%rec_count()+1 - do i=1, f%nvar_infos - var => f%var_infos(i) + do i=1, this%nvar_infos + var => this%var_infos(i) nlvl = size(var%local_data_copy,dim=1) is_2d = (nlvl == 1) allocate(laux( size(var%local_data_copy,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D - if(mype == f%iorank) then + if(mype == this%iorank) then ! todo: choose how many levels we write at once if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( var%global_level_data_size )) else @@ -245,65 +245,65 @@ subroutine gather_and_write_variables(f) laux = var%local_data_copy(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) if(var%is_elem_based) then - call gather_elem2D(laux, var%global_level_data, f%iorank, 42, MPI_comm_fesom) + call gather_elem2D(laux, var%global_level_data, this%iorank, 42, MPI_comm_fesom) else - call gather_nod2D (laux, var%global_level_data, f%iorank, 42, MPI_comm_fesom) + call gather_nod2D (laux, var%global_level_data, this%iorank, 42, MPI_comm_fesom) end if - if(mype == f%iorank) then + if(mype == this%iorank) then if(is_2d) then - call f%write_var(var%var_index, [1,f%rec_cnt], [size(var%global_level_data),1], var%global_level_data) + call this%write_var(var%var_index, [1,this%rec_cnt], [size(var%global_level_data),1], var%global_level_data) else ! z,nod,time - call f%write_var(var%var_index, [lvl,1,f%rec_cnt], [1,size(var%global_level_data),1], var%global_level_data) + call this%write_var(var%var_index, [lvl,1,this%rec_cnt], [1,size(var%global_level_data),1], var%global_level_data) end if end if end do deallocate(laux) end do - if(mype == f%iorank) call f%flush_file() ! flush the file to disk after each write + if(mype == this%iorank) call this%flush_file() ! flush the file to disk after each write end subroutine - subroutine join(f) - class(fesom_file_type) f + subroutine join(this) + class(fesom_file_type) this ! EO parameters - if(f%thread_running) call f%thread%join() - f%thread_running = .false. + if(this%thread_running) call this%thread%join() + this%thread_running = .false. end subroutine - subroutine async_read_and_scatter_variables(f) - class(fesom_file_type), target :: f + subroutine async_read_and_scatter_variables(this) + class(fesom_file_type), target :: this - call assert(.not. f%thread_running, __LINE__) + call assert(.not. this%thread_running, __LINE__) - f%gather_and_write = .false. - call f%thread%run() - f%thread_running = .true. + this%gather_and_write = .false. + call this%thread%run() + this%thread_running = .true. end subroutine - subroutine async_gather_and_write_variables(f) - class(fesom_file_type), target :: f + subroutine async_gather_and_write_variables(this) + class(fesom_file_type), target :: this ! EO parameters integer i type(var_info), pointer :: var - call assert(.not. f%thread_running, __LINE__) + call assert(.not. this%thread_running, __LINE__) ! copy data so we can write the current values asynchronously - do i=1, f%nvar_infos - var => f%var_infos(i) + do i=1, this%nvar_infos + var => this%var_infos(i) if(.not. allocated(var%local_data_copy)) allocate( var%local_data_copy(size(var%external_local_data_ptr,dim=1), size(var%external_local_data_ptr,dim=2)) ) var%local_data_copy = var%external_local_data_ptr end do - f%gather_and_write = .true. - call f%thread%run() - f%thread_running = .true. + this%gather_and_write = .true. + call this%thread%run() + this%thread_running = .true. end subroutine @@ -324,10 +324,10 @@ subroutine async_worker(fesom_file_index) end subroutine - subroutine specify_node_var_2d(f, name, longname, units, local_data) + subroutine specify_node_var_2d(this, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING use g_PARSUP - class(fesom_file_type), intent(inout) :: f + class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision @@ -335,34 +335,34 @@ subroutine specify_node_var_2d(f, name, longname, units, local_data) real(8), pointer :: external_local_data_ptr(:,:) type(dim_info) level_diminfo - level_diminfo = obtain_diminfo(f, m_nod2d) + level_diminfo = obtain_diminfo(this, m_nod2d) external_local_data_ptr(1:1,1:size(local_data)) => local_data - call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, external_local_data_ptr, .false., longname, units) + call specify_variable(this, name, [level_diminfo%idx, this%time_dimidx], level_diminfo%len, external_local_data_ptr, .false., longname, units) end subroutine - subroutine specify_node_var_3d(f, name, longname, units, local_data) + subroutine specify_node_var_3d(this, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING use g_PARSUP - class(fesom_file_type), intent(inout) :: f + class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision ! EO parameters type(dim_info) level_diminfo, depth_diminfo - level_diminfo = obtain_diminfo(f, m_nod2d) - depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) + level_diminfo = obtain_diminfo(this, m_nod2d) + depth_diminfo = obtain_diminfo(this, size(local_data, dim=1)) - call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data, .false., longname, units) + call specify_variable(this, name, [depth_diminfo%idx, level_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .false., longname, units) end subroutine - subroutine specify_elem_var_2d(f, name, longname, units, local_data) + subroutine specify_elem_var_2d(this, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING use g_PARSUP - class(fesom_file_type), intent(inout) :: f + class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision @@ -370,54 +370,54 @@ subroutine specify_elem_var_2d(f, name, longname, units, local_data) real(8), pointer :: external_local_data_ptr(:,:) type(dim_info) level_diminfo - level_diminfo = obtain_diminfo(f, m_elem2d) + level_diminfo = obtain_diminfo(this, m_elem2d) external_local_data_ptr(1:1,1:size(local_data)) => local_data - call specify_variable(f, name, [level_diminfo%idx, f%time_dimidx], level_diminfo%len, external_local_data_ptr, .true., longname, units) + call specify_variable(this, name, [level_diminfo%idx, this%time_dimidx], level_diminfo%len, external_local_data_ptr, .true., longname, units) end subroutine - subroutine specify_elem_var_3d(f, name, longname, units, local_data) + subroutine specify_elem_var_3d(this, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING use g_PARSUP - class(fesom_file_type), intent(inout) :: f + class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision ! EO parameters type(dim_info) level_diminfo, depth_diminfo - level_diminfo = obtain_diminfo(f, m_elem2d) - depth_diminfo = obtain_diminfo(f, size(local_data, dim=1)) + level_diminfo = obtain_diminfo(this, m_elem2d) + depth_diminfo = obtain_diminfo(this, size(local_data, dim=1)) - call specify_variable(f, name, [depth_diminfo%idx, level_diminfo%idx, f%time_dimidx], level_diminfo%len, local_data, .true., longname, units) + call specify_variable(this, name, [depth_diminfo%idx, level_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .true., longname, units) end subroutine - function obtain_diminfo(f, len) result(info) - type(fesom_file_type), intent(inout) :: f + function obtain_diminfo(this, len) result(info) + type(fesom_file_type), intent(inout) :: this type(dim_info) info integer len ! EO parameters integer i type(dim_info), allocatable :: tmparr(:) - do i=1, size(f%used_mesh_dims) - if(f%used_mesh_dims(i)%len == len) then - info = f%used_mesh_dims(i) + do i=1, size(this%used_mesh_dims) + if(this%used_mesh_dims(i)%len == len) then + info = this%used_mesh_dims(i) return end if end do ! the dim has not been added yet, see if it is one of our allowed mesh related dims if(len == m_nod2d) then - info = dim_info( idx=f%add_dim('node', len), len=len) + info = dim_info( idx=this%add_dim('node', len), len=len) else if(len == m_elem2d) then - info = dim_info( idx=f%add_dim('elem', len), len=len) + info = dim_info( idx=this%add_dim('elem', len), len=len) else if(len == m_nl-1) then - info = dim_info( idx=f%add_dim('nz_1', len), len=len) + info = dim_info( idx=this%add_dim('nz_1', len), len=len) else if(len == m_nl) then - info = dim_info( idx=f%add_dim('nz', len), len=len) + info = dim_info( idx=this%add_dim('nz', len), len=len) else print *, "error in line ",__LINE__, __FILE__," can not find dimension with size",len stop 1 @@ -425,16 +425,16 @@ function obtain_diminfo(f, len) result(info) ! append the new dim to our list of used dims, i.e. the dims we use for the mesh based variables created via #specify_variable ! assume the used_mesh_dims array is allocated - allocate( tmparr(size(f%used_mesh_dims)+1) ) - tmparr(1:size(f%used_mesh_dims)) = f%used_mesh_dims - deallocate(f%used_mesh_dims) - call move_alloc(tmparr, f%used_mesh_dims) - f%used_mesh_dims( size(f%used_mesh_dims) ) = info + allocate( tmparr(size(this%used_mesh_dims)+1) ) + tmparr(1:size(this%used_mesh_dims)) = this%used_mesh_dims + deallocate(this%used_mesh_dims) + call move_alloc(tmparr, this%used_mesh_dims) + this%used_mesh_dims( size(this%used_mesh_dims) ) = info end function - subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_data, is_elem_based, longname, units) - type(fesom_file_type), intent(inout) :: f + subroutine specify_variable(this, name, dim_indices, global_level_data_size, local_data, is_elem_based, longname, units) + type(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name integer, intent(in) :: dim_indices(:) integer global_level_data_size @@ -444,16 +444,16 @@ subroutine specify_variable(f, name, dim_indices, global_level_data_size, local_ ! EO parameters integer var_index - var_index = f%add_var_double(name, dim_indices) - call f%add_var_att(var_index, "units", units) - call f%add_var_att(var_index, "long_name", longname) + var_index = this%add_var_double(name, dim_indices) + call this%add_var_att(var_index, "units", units) + call this%add_var_att(var_index, "long_name", longname) - call assert(f%nvar_infos < size(f%var_infos), __LINE__) - f%nvar_infos = f%nvar_infos+1 - f%var_infos(f%nvar_infos)%var_index = var_index - f%var_infos(f%nvar_infos)%external_local_data_ptr => local_data - f%var_infos(f%nvar_infos)%global_level_data_size = global_level_data_size - f%var_infos(f%nvar_infos)%is_elem_based = is_elem_based + call assert(this%nvar_infos < size(this%var_infos), __LINE__) + this%nvar_infos = this%nvar_infos+1 + this%var_infos(this%nvar_infos)%var_index = var_index + this%var_infos(this%nvar_infos)%external_local_data_ptr => local_data + this%var_infos(this%nvar_infos)%global_level_data_size = global_level_data_size + this%var_infos(this%nvar_infos)%is_elem_based = is_elem_based end subroutine From 9b92a8530d87d61125c5d73a889f4b24cf9ac11f Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 3 Jun 2021 15:57:58 +0200 Subject: [PATCH 237/909] add module to manage separate files belonging to a restart file group (i.e. ocean or ice restart) --- src/io_restart_file_group.F90 | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 src/io_restart_file_group.F90 diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 new file mode 100644 index 000000000..bced553ef --- /dev/null +++ b/src/io_restart_file_group.F90 @@ -0,0 +1,10 @@ +module restart_file_group_module + use io_fesom_file_module + implicit none + private + + +contains + + +end module From 00b712b7cb9a2463f0e3edbd92e5b56b813ef992 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 3 Jun 2021 16:32:28 +0200 Subject: [PATCH 238/909] add restart_file_group type --- src/io_restart_file_group.F90 | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 index bced553ef..b2a838c47 100644 --- a/src/io_restart_file_group.F90 +++ b/src/io_restart_file_group.F90 @@ -3,8 +3,23 @@ module restart_file_group_module implicit none private + + type, extends(fesom_file_type) :: restart_file_type + private + integer iter_varindex + character(:), allocatable :: varname + character(:), allocatable :: path + end type + + + type restart_file_group + private + type(restart_file_type) files(20); integer :: nfiles = 0 ! todo: allow dynamically allocated size without messing with shallow copied pointers + contains + end type contains + end module From 67cc1b3a5748ac497e23c6367f6d544693c51c6a Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 3 Jun 2021 17:10:21 +0200 Subject: [PATCH 239/909] add helper procedure to add a file to the file group --- src/io_restart_file_group.F90 | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 index b2a838c47..fefc33894 100644 --- a/src/io_restart_file_group.F90 +++ b/src/io_restart_file_group.F90 @@ -21,5 +21,33 @@ module restart_file_group_module contains + subroutine add_file(g, name, mesh_nod2d, mesh_elem2d, mesh_nl) + class(restart_file_group), target, intent(inout) :: g + character(len=*), intent(in) :: name + integer mesh_nod2d, mesh_elem2d, mesh_nl + ! EO parameters + type(restart_file_type), pointer :: f + + call assert(g%nfiles < size(g%files), __LINE__) + g%nfiles = g%nfiles+1 + f => g%files(g%nfiles) + + f%path = "" + f%varname = name + call f%fesom_file_type%init(mesh_nod2d, mesh_elem2d, mesh_nl) + ! this is specific for a restart file + f%iter_varindex = f%add_var_int('iter', [f%time_dimindex()]) + end subroutine + + + subroutine assert(val, line) + logical, intent(in) :: val + integer, intent(in) :: line + ! EO parameters + if(.not. val) then + print *, "error in line ",line, __FILE__ + stop 1 + end if + end subroutine end module From 988105b4d98bcb3b9a9e147462a605ea4eda48f4 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 3 Jun 2021 17:33:24 +0200 Subject: [PATCH 240/909] add procedures to specify a restart variable --- src/io_restart_file_group.F90 | 70 ++++++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 index fefc33894..426be6c38 100644 --- a/src/io_restart_file_group.F90 +++ b/src/io_restart_file_group.F90 @@ -16,11 +16,79 @@ module restart_file_group_module private type(restart_file_type) files(20); integer :: nfiles = 0 ! todo: allow dynamically allocated size without messing with shallow copied pointers contains + generic, public :: def_node_var => def_node_var_2d, def_node_var_3d + generic, public :: def_elem_var => def_elem_var_2d, def_elem_var_3d + procedure, private :: def_node_var_2d, def_node_var_3d + procedure, private :: def_elem_var_2d, def_elem_var_3d end type - + contains + subroutine def_node_var_2d(this, name, longname, units, local_data, mesh) + use, intrinsic :: ISO_C_BINDING + use g_PARSUP + use mod_mesh + class(restart_file_group), target, intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + ! EO parameters + + call add_file(this, name, mesh%nod2d, mesh%elem2d, mesh%nl) + call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) + end subroutine + + + subroutine def_node_var_3d(this, name, longname, units, local_data, mesh) + use, intrinsic :: ISO_C_BINDING + use g_PARSUP + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + ! EO parameters + + call add_file(this, name, mesh%nod2d, mesh%elem2d, mesh%nl) + call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) + end subroutine + + + subroutine def_elem_var_2d(this, name, longname, units, local_data, mesh) + use, intrinsic :: ISO_C_BINDING + use g_PARSUP + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + ! EO parameters + + call add_file(this, name, mesh%nod2d, mesh%elem2d, mesh%nl) + call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) + end subroutine + + + subroutine def_elem_var_3d(this, name, longname, units, local_data, mesh) + use, intrinsic :: ISO_C_BINDING + use g_PARSUP + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + ! EO parameters + + call add_file(this, name, mesh%nod2d, mesh%elem2d, mesh%nl) + call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) + end subroutine + + subroutine add_file(g, name, mesh_nod2d, mesh_elem2d, mesh_nl) class(restart_file_group), target, intent(inout) :: g character(len=*), intent(in) :: name From be863dea884b0753222ae5a9b62afbb67f2d656a Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 3 Jun 2021 17:40:40 +0200 Subject: [PATCH 241/909] add comment on why we use separate procedures to specify node- and element based variables --- src/io_fesom_file.F90 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 2bc2c1e01..395e120fd 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -324,6 +324,9 @@ subroutine async_worker(fesom_file_index) end subroutine + ! use separate procedures to specify node based or element based variables + ! if we would otherwise specify the vars only via the sizes of their dimensions, + ! we have to assign the corresponding dimindx somewhere else, which would be error prone subroutine specify_node_var_2d(this, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING use g_PARSUP From f24a5198df6995cbd52af28ecdc99506bf6781b7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 3 Jun 2021 18:02:00 +0200 Subject: [PATCH 242/909] remove unused 'use' statements --- src/io_restart_file_group.F90 | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 index 426be6c38..cfec9d122 100644 --- a/src/io_restart_file_group.F90 +++ b/src/io_restart_file_group.F90 @@ -26,8 +26,6 @@ module restart_file_group_module subroutine def_node_var_2d(this, name, longname, units, local_data, mesh) - use, intrinsic :: ISO_C_BINDING - use g_PARSUP use mod_mesh class(restart_file_group), target, intent(inout) :: this character(len=*), intent(in) :: name @@ -42,8 +40,6 @@ subroutine def_node_var_2d(this, name, longname, units, local_data, mesh) subroutine def_node_var_3d(this, name, longname, units, local_data, mesh) - use, intrinsic :: ISO_C_BINDING - use g_PARSUP use mod_mesh class(restart_file_group), intent(inout) :: this character(len=*), intent(in) :: name @@ -58,8 +54,6 @@ subroutine def_node_var_3d(this, name, longname, units, local_data, mesh) subroutine def_elem_var_2d(this, name, longname, units, local_data, mesh) - use, intrinsic :: ISO_C_BINDING - use g_PARSUP use mod_mesh class(restart_file_group), intent(inout) :: this character(len=*), intent(in) :: name @@ -74,8 +68,6 @@ subroutine def_elem_var_2d(this, name, longname, units, local_data, mesh) subroutine def_elem_var_3d(this, name, longname, units, local_data, mesh) - use, intrinsic :: ISO_C_BINDING - use g_PARSUP use mod_mesh class(restart_file_group), intent(inout) :: this character(len=*), intent(in) :: name From 1032cbb18e219cc599d4fd95bb754f71c544b173 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 10:04:18 +0200 Subject: [PATCH 243/909] limit public API of io_RESTART module to what we actually use --- src/io_restart.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index c20c5d825..9e75da89b 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -51,7 +51,7 @@ MODULE io_RESTART real(kind=WP) :: ctime !current time in seconds from the beginning of the year PRIVATE - PUBLIC :: restart, ocean_file, ice_file + PUBLIC :: restart ! !-------------------------------------------------------------------------------------------- ! generic interface was required to associate variables of unknown rank with the pointers of the same rank From b1129ce2bf1ff037fdf541af7c435f2cd12ab647 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 10:08:50 +0200 Subject: [PATCH 244/909] - expose restart_file_group type - declare restart_file_group variables for oce and ice restart files --- src/io_restart.F90 | 7 +++++++ src/io_restart_file_group.F90 | 1 + 2 files changed, 8 insertions(+) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 9e75da89b..be3d88440 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -1,4 +1,5 @@ MODULE io_RESTART + use restart_file_group_module use g_config use g_clock use g_parsup @@ -62,6 +63,12 @@ MODULE io_RESTART ! !-------------------------------------------------------------------------------------------- ! + + + type(restart_file_group), save, target :: oce_files + type(restart_file_group), save, target :: ice_files + + contains ! !-------------------------------------------------------------------------------------------- diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 index cfec9d122..d3892f91f 100644 --- a/src/io_restart_file_group.F90 +++ b/src/io_restart_file_group.F90 @@ -1,6 +1,7 @@ module restart_file_group_module use io_fesom_file_module implicit none + public restart_file_group private From a03162a45e9efbe0166cd2c450641a021e0fbdf1 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 14:25:48 +0200 Subject: [PATCH 245/909] remove trailing semicolon --- src/io_restart.F90 | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index be3d88440..054275322 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -103,25 +103,25 @@ subroutine ini_ocean_io(year, mesh) !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(ocean_file, 'ssh', (/nod2D/), 'sea surface elevation', 'm', eta_n); + call def_variable(ocean_file, 'ssh', (/nod2D/), 'sea surface elevation', 'm', eta_n) !___ALE related fields______________________________________________________ - call def_variable(ocean_file, 'hbar', (/nod2D/), 'ALE surface elevation', 'm', hbar); -!!PS call def_variable(ocean_file, 'ssh_rhs', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs); - call def_variable(ocean_file, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs_old); - call def_variable(ocean_file, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); + call def_variable(ocean_file, 'hbar', (/nod2D/), 'ALE surface elevation', 'm', hbar) +!!PS call def_variable(ocean_file, 'ssh_rhs', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs) + call def_variable(ocean_file, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs_old) + call def_variable(ocean_file, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode) !___Define the netCDF variables for 3D fields_______________________________ - call def_variable(ocean_file, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', UV(1,:,:)); - call def_variable(ocean_file, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', UV(2,:,:)); - call def_variable(ocean_file, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); - call def_variable(ocean_file, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); + call def_variable(ocean_file, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', UV(1,:,:)) + call def_variable(ocean_file, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', UV(2,:,:)) + call def_variable(ocean_file, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)) + call def_variable(ocean_file, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)) !___Save restart variables for TKE and IDEMIX_________________________________ if (trim(mix_scheme)=='cvmix_TKE' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then - call def_variable(ocean_file, 'tke', (/nl, nod2d/), 'Turbulent Kinetic Energy', 'm2/s2', tke(:,:)); + call def_variable(ocean_file, 'tke', (/nl, nod2d/), 'Turbulent Kinetic Energy', 'm2/s2', tke(:,:)) endif if (trim(mix_scheme)=='cvmix_IDEMIX' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then - call def_variable(ocean_file, 'iwe', (/nl, nod2d/), 'Internal Wave eneryy', 'm2/s2', tke(:,:)); + call def_variable(ocean_file, 'iwe', (/nl, nod2d/), 'Internal Wave eneryy', 'm2/s2', tke(:,:)) endif do j=1,num_tracers @@ -139,13 +139,13 @@ subroutine ini_ocean_io(year, mesh) write(longname,'(A15,i1)') 'passive tracer ', j units='none' END SELECT - call def_variable(ocean_file, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tr_arr(:,:,j)); + call def_variable(ocean_file, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tr_arr(:,:,j)) longname=trim(longname)//', Adams–Bashforth' - call def_variable(ocean_file, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tr_arr_old(:,:,j)); + call def_variable(ocean_file, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tr_arr_old(:,:,j)) end do - call def_variable(ocean_file, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel); - call def_variable(ocean_file, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e); - call def_variable(ocean_file, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_i); + call def_variable(ocean_file, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel) + call def_variable(ocean_file, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e) + call def_variable(ocean_file, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_i) end subroutine ini_ocean_io ! !-------------------------------------------------------------------------------------------- @@ -176,14 +176,14 @@ subroutine ini_ice_io(year, mesh) !===================== Definition part ===================================== !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ - call def_variable(ice_file, 'area', (/nod2D/), 'ice concentration [0 to 1]', '%', a_ice); - call def_variable(ice_file, 'hice', (/nod2D/), 'effective ice thickness', 'm', m_ice); - call def_variable(ice_file, 'hsnow', (/nod2D/), 'effective snow thickness', 'm', m_snow); - call def_variable(ice_file, 'uice', (/nod2D/), 'zonal velocity', 'm/s', u_ice); - call def_variable(ice_file, 'vice', (/nod2D/), 'meridional velocity', 'm', v_ice); + call def_variable(ice_file, 'area', (/nod2D/), 'ice concentration [0 to 1]', '%', a_ice) + call def_variable(ice_file, 'hice', (/nod2D/), 'effective ice thickness', 'm', m_ice) + call def_variable(ice_file, 'hsnow', (/nod2D/), 'effective snow thickness', 'm', m_snow) + call def_variable(ice_file, 'uice', (/nod2D/), 'zonal velocity', 'm/s', u_ice) + call def_variable(ice_file, 'vice', (/nod2D/), 'meridional velocity', 'm', v_ice) #if defined (__oifs) - call def_variable(ice_file, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice_alb); - call def_variable(ice_file, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice_temp); + call def_variable(ice_file, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice_alb) + call def_variable(ice_file, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice_temp) #endif /* (__oifs) */ end subroutine ini_ice_io From 8b7dc74e2e3836c357e1fc4dc5d89ec2fc3a5cda Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 14:34:30 +0200 Subject: [PATCH 246/909] change all restart def_variable calls to use restart_file_group API --- src/io_restart.F90 | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 054275322..e81a73238 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -103,25 +103,25 @@ subroutine ini_ocean_io(year, mesh) !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(ocean_file, 'ssh', (/nod2D/), 'sea surface elevation', 'm', eta_n) + call oce_files%def_node_var('ssh', 'sea surface elevation', 'm', eta_n, mesh) !___ALE related fields______________________________________________________ - call def_variable(ocean_file, 'hbar', (/nod2D/), 'ALE surface elevation', 'm', hbar) -!!PS call def_variable(ocean_file, 'ssh_rhs', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs) - call def_variable(ocean_file, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs_old) - call def_variable(ocean_file, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode) + call oce_files%def_node_var('hbar', 'ALE surface elevation', 'm', hbar, mesh) +!!PS call oce_files%def_node_var('ssh_rhs', 'RHS for the elevation', '?', ssh_rhs, mesh) + call oce_files%def_node_var('ssh_rhs_old', 'RHS for the elevation', '?', ssh_rhs_old, mesh) + call oce_files%def_node_var('hnode', 'nodal layer thickness', 'm', hnode, mesh) !___Define the netCDF variables for 3D fields_______________________________ - call def_variable(ocean_file, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', UV(1,:,:)) - call def_variable(ocean_file, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', UV(2,:,:)) - call def_variable(ocean_file, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)) - call def_variable(ocean_file, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)) + call oce_files%def_elem_var('u', 'zonal velocity', 'm/s', UV(1,:,:), mesh) + call oce_files%def_elem_var('v', 'meridional velocity', 'm/s', UV(2,:,:), mesh) + call oce_files%def_elem_var('urhs_AB', 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:), mesh) + call oce_files%def_elem_var('vrhs_AB', 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:), mesh) !___Save restart variables for TKE and IDEMIX_________________________________ if (trim(mix_scheme)=='cvmix_TKE' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then - call def_variable(ocean_file, 'tke', (/nl, nod2d/), 'Turbulent Kinetic Energy', 'm2/s2', tke(:,:)) + call oce_files%def_node_var('tke', 'Turbulent Kinetic Energy', 'm2/s2', tke(:,:), mesh) endif if (trim(mix_scheme)=='cvmix_IDEMIX' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then - call def_variable(ocean_file, 'iwe', (/nl, nod2d/), 'Internal Wave eneryy', 'm2/s2', tke(:,:)) + call oce_files%def_node_var('iwe', 'Internal Wave eneryy', 'm2/s2', tke(:,:), mesh) endif do j=1,num_tracers @@ -139,13 +139,13 @@ subroutine ini_ocean_io(year, mesh) write(longname,'(A15,i1)') 'passive tracer ', j units='none' END SELECT - call def_variable(ocean_file, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tr_arr(:,:,j)) + call oce_files%def_node_var(trim(trname), trim(longname), trim(units), tr_arr(:,:,j), mesh) longname=trim(longname)//', Adams–Bashforth' - call def_variable(ocean_file, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tr_arr_old(:,:,j)) + call oce_files%def_node_var(trim(trname)//'_AB', trim(longname), trim(units), tr_arr_old(:,:,j), mesh) end do - call def_variable(ocean_file, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel) - call def_variable(ocean_file, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e) - call def_variable(ocean_file, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_i) + call oce_files%def_node_var('w', 'vertical velocity', 'm/s', Wvel, mesh) + call oce_files%def_node_var('w_expl', 'vertical velocity', 'm/s', Wvel_e, mesh) + call oce_files%def_node_var('w_impl', 'vertical velocity', 'm/s', Wvel_i, mesh) end subroutine ini_ocean_io ! !-------------------------------------------------------------------------------------------- @@ -176,14 +176,14 @@ subroutine ini_ice_io(year, mesh) !===================== Definition part ===================================== !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ - call def_variable(ice_file, 'area', (/nod2D/), 'ice concentration [0 to 1]', '%', a_ice) - call def_variable(ice_file, 'hice', (/nod2D/), 'effective ice thickness', 'm', m_ice) - call def_variable(ice_file, 'hsnow', (/nod2D/), 'effective snow thickness', 'm', m_snow) - call def_variable(ice_file, 'uice', (/nod2D/), 'zonal velocity', 'm/s', u_ice) - call def_variable(ice_file, 'vice', (/nod2D/), 'meridional velocity', 'm', v_ice) + call ice_files%def_node_var('area', 'ice concentration [0 to 1]', '%', a_ice, mesh) + call ice_files%def_node_var('hice', 'effective ice thickness', 'm', m_ice, mesh) + call ice_files%def_node_var('hsnow', 'effective snow thickness', 'm', m_snow, mesh) + call ice_files%def_node_var('uice', 'zonal velocity', 'm/s', u_ice, mesh) + call ice_files%def_node_var('vice', 'meridional velocity', 'm', v_ice, mesh) #if defined (__oifs) - call def_variable(ice_file, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice_alb) - call def_variable(ice_file, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice_temp) + call ice_files%def_node_var('ice_albedo', 'ice albedo', '-', ice_alb, mesh) + call ice_files%def_node_var('ice_temp', 'ice surface temperature', 'K', ice_temp, mesh) #endif /* (__oifs) */ end subroutine ini_ice_io From cb40b4f77468572f2643aacc52b46320c640e645 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 14:35:42 +0200 Subject: [PATCH 247/909] remove unused target attribute --- src/io_restart.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index e81a73238..b41e2bb6d 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -65,8 +65,8 @@ MODULE io_RESTART ! - type(restart_file_group), save, target :: oce_files - type(restart_file_group), save, target :: ice_files + type(restart_file_group), save :: oce_files + type(restart_file_group), save :: ice_files contains From f2d3afef9e041e37b62070810b7c07475bc67b2f Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 17:53:18 +0200 Subject: [PATCH 248/909] remove procedures from previous io_RESTART implementation --- src/io_restart.F90 | 83 +--------------------------------------------- 1 file changed, 1 insertion(+), 82 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index b41e2bb6d..8f696d94a 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -53,18 +53,7 @@ MODULE io_RESTART PRIVATE PUBLIC :: restart -! -!-------------------------------------------------------------------------------------------- -! generic interface was required to associate variables of unknown rank with the pointers of the same rank -! this allows for automatic streaming of associated variables into the netcdf file - INTERFACE def_variable - MODULE PROCEDURE def_variable_1d, def_variable_2d - END INTERFACE -! -!-------------------------------------------------------------------------------------------- -! - - + type(restart_file_group), save :: oce_files type(restart_file_group), save :: ice_files @@ -353,76 +342,6 @@ subroutine def_dim(file, name, ndim) end subroutine def_dim -subroutine def_variable_1d(file, name, global_shape, longname, units, local_data) - implicit none - type(nc_file), intent(inout) :: file - character(len=*), intent(in) :: name - integer, intent(in) :: global_shape(1) - character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: local_data(:) - integer :: c - type(nc_vars), allocatable, dimension(:) :: temp - - if (file%nvar > 0) then - ! create temporal dimension - allocate(temp(file%nvar)); temp=file%var - ! deallocate the input data array - deallocate(file%var) - ! then reallocate - file%nvar=file%nvar+1 - allocate(file%var(file%nvar)) - ! restore the original data - file%var(1:file%nvar-1)=temp - deallocate(temp) - else - ! first dimension in a file - file%nvar=1 - allocate(file%var(file%nvar)) - end if - file%var(file%nvar)%name=trim(name) - file%var(file%nvar)%longname=trim(longname) - file%var(file%nvar)%units=trim(units) - file%var(file%nvar)%ndim=1 - file%var(file%nvar)%dims(1)=global_shape(1) - file%var(file%nvar)%pt1=>local_data -end subroutine def_variable_1d - - -subroutine def_variable_2d(file, name, global_shape, longname, units, local_data) - implicit none - type(nc_file), intent(inout) :: file - character(len=*), intent(in) :: name - integer, intent(in) :: global_shape(2) - character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: local_data(:,:) - integer :: c - type(nc_vars), allocatable, dimension(:) :: temp - - if (file%nvar > 0) then - ! create temporal dimension - allocate(temp(file%nvar)); temp=file%var - ! deallocate the input data array - deallocate(file%var) - ! then reallocate - file%nvar=file%nvar+1 - allocate(file%var(file%nvar)) - ! restore the original data - file%var(1:file%nvar-1)=temp - deallocate(temp) - else - ! first dimension in a file - file%nvar=1 - allocate(file%var(file%nvar)) - end if - file%var(file%nvar)%name=trim(name) - file%var(file%nvar)%longname=trim(longname) - file%var(file%nvar)%units=trim(units) - file%var(file%nvar)%ndim=2 - file%var(file%nvar)%dims(1:2)=global_shape - file%var(file%nvar)%pt2=>local_data -end subroutine def_variable_2d - - subroutine write_restart(file, istep, mesh) implicit none type(nc_file), intent(inout) :: file From 75a5ec54907562c6dc2a7e1a56cf38964608ab09 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 17:56:32 +0200 Subject: [PATCH 249/909] store path for restart files --- src/io_restart.F90 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 8f696d94a..4f4a69f36 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -56,6 +56,8 @@ MODULE io_RESTART type(restart_file_group), save :: oce_files type(restart_file_group), save :: ice_files + character(:), allocatable, save :: oce_path + character(:), allocatable, save :: ice_path contains @@ -78,8 +80,7 @@ subroutine ini_ocean_io(year, mesh) #include "associate_mesh.h" write(cyear,'(i4)') year - ! create an ocean restart file; serial output implemented so far - ocean_file%filename=trim(ResultPath)//trim(runid)//'.'//cyear//'.oce.restart.nc' + oce_path = trim(ResultPath)//trim(runid)//'.'//cyear//'.oce.restart.nc' if (ocean_file%is_in_use) return ocean_file%is_in_use=.true. call def_dim(ocean_file, 'node', nod2d) @@ -155,8 +156,7 @@ subroutine ini_ice_io(year, mesh) #include "associate_mesh.h" write(cyear,'(i4)') year - ! create an ocean restart file; serial output implemented so far - ice_file%filename=trim(ResultPath)//trim(runid)//'.'//cyear//'.ice.restart.nc' + ice_path = trim(ResultPath)//trim(runid)//'.'//cyear//'.ice.restart.nc' if (ice_file%is_in_use) return ice_file%is_in_use=.true. call def_dim(ice_file, 'node', nod2d) From 2db82f4e33aebfa59981f6b3491e0c1f376c62d6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 18:04:44 +0200 Subject: [PATCH 250/909] remove mesh dim definition from previous implementation --- src/io_restart.F90 | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 4f4a69f36..f48c7b402 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -77,16 +77,10 @@ subroutine ini_ocean_io(year, mesh) character(4) :: cyear type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" - write(cyear,'(i4)') year oce_path = trim(ResultPath)//trim(runid)//'.'//cyear//'.oce.restart.nc' if (ocean_file%is_in_use) return ocean_file%is_in_use=.true. - call def_dim(ocean_file, 'node', nod2d) - call def_dim(ocean_file, 'elem', elem2d) - call def_dim(ocean_file, 'nz_1', nl-1) - call def_dim(ocean_file, 'nz', nl) !=========================================================================== !===================== Definition part ===================================== @@ -153,13 +147,10 @@ subroutine ini_ice_io(year, mesh) character(4) :: cyear type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" - write(cyear,'(i4)') year ice_path = trim(ResultPath)//trim(runid)//'.'//cyear//'.ice.restart.nc' if (ice_file%is_in_use) return ice_file%is_in_use=.true. - call def_dim(ice_file, 'node', nod2d) !=========================================================================== !===================== Definition part ===================================== From 3efa5325c58914d98f9146c1cba164f5b3faca08 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 18:07:02 +0200 Subject: [PATCH 251/909] replace guard to specify variables only once --- src/io_restart.F90 | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index f48c7b402..5ed72084e 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -76,11 +76,13 @@ subroutine ini_ocean_io(year, mesh) character(500) :: trname, units character(4) :: cyear type(t_mesh), intent(in) , target :: mesh - + logical, save :: has_been_called = .false. + write(cyear,'(i4)') year oce_path = trim(ResultPath)//trim(runid)//'.'//cyear//'.oce.restart.nc' - if (ocean_file%is_in_use) return - ocean_file%is_in_use=.true. + + if(has_been_called) return + has_been_called = .true. !=========================================================================== !===================== Definition part ===================================== @@ -146,11 +148,13 @@ subroutine ini_ice_io(year, mesh) character(500) :: trname, units character(4) :: cyear type(t_mesh), intent(in) , target :: mesh + logical, save :: has_been_called = .false. write(cyear,'(i4)') year ice_path = trim(ResultPath)//trim(runid)//'.'//cyear//'.ice.restart.nc' - if (ice_file%is_in_use) return - ice_file%is_in_use=.true. + + if(has_been_called) return + has_been_called = .true. !=========================================================================== !===================== Definition part ===================================== From 14a3f0e4ba89d5c9822c498f7cb2ff18b291fb34 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 18:14:33 +0200 Subject: [PATCH 252/909] remove unused parameter --- src/io_restart.F90 | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 5ed72084e..561c123fe 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -424,10 +424,9 @@ subroutine write_restart(file, istep, mesh) end subroutine write_restart -subroutine read_restart(file, mesh, arg) +subroutine read_restart(file, mesh) implicit none type(nc_file), intent(inout) :: file - integer, optional, intent(in) :: arg real(kind=WP), allocatable :: aux(:), laux(:) integer :: i, lev, size1, size2, shape integer :: rec2read, c @@ -459,11 +458,7 @@ subroutine read_restart(file, mesh, arg) call par_ex end if - if (.not. present(arg)) then - rec2read=file%rec_count - else - rec2read=arg - end if + rec2read=file%rec_count write(*,*) 'restart from record ', rec2read, ' of ', file%rec_count if (int(ctime)/=int(rtime)) then From fdaf10dd4fcca53b56196e66f95e0f4214e9478c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Jun 2021 18:16:48 +0200 Subject: [PATCH 253/909] remove unused procedure --- src/io_restart.F90 | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 561c123fe..606ea86e5 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -309,34 +309,6 @@ subroutine create_new_file(file) end subroutine create_new_file -subroutine def_dim(file, name, ndim) - implicit none - type(nc_file), intent(inout) :: file - character(len=*), intent(in) :: name - integer, intent(in) :: ndim - type(nc_dims), allocatable, dimension(:) :: temp - - if (file%ndim > 0) then - ! create temporal dimension - allocate(temp(file%ndim)); temp=file%dim - ! deallocate the input data array - deallocate(file%dim) - ! then reallocate - file%ndim=file%ndim+1 - allocate(file%dim(file%ndim)) - ! restore the original data - file%dim(1:file%ndim-1)=temp - deallocate(temp) - else - ! first dimension in a file - file%ndim=1 - allocate(file%dim(file%ndim)) - end if - file%dim(file%ndim)%name=trim(name) - file%dim(file%ndim)%size=ndim -end subroutine def_dim - - subroutine write_restart(file, istep, mesh) implicit none type(nc_file), intent(inout) :: file From 43375b9617457d9e2f4adee41aa90f2da64931ea Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Jun 2021 14:22:16 +0200 Subject: [PATCH 254/909] be able to access internal variables of the helper restart_file_group type --- src/io_restart_file_group.F90 | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 index d3892f91f..3a7b19e8f 100644 --- a/src/io_restart_file_group.F90 +++ b/src/io_restart_file_group.F90 @@ -1,3 +1,4 @@ +! helper module to treat split restart files similar as the previous single-file ones module restart_file_group_module use io_fesom_file_module implicit none @@ -6,7 +7,6 @@ module restart_file_group_module type, extends(fesom_file_type) :: restart_file_type - private integer iter_varindex character(:), allocatable :: varname character(:), allocatable :: path @@ -15,7 +15,8 @@ module restart_file_group_module type restart_file_group private - type(restart_file_type) files(20); integer :: nfiles = 0 ! todo: allow dynamically allocated size without messing with shallow copied pointers + type(restart_file_type), public :: files(20) + integer, public :: nfiles = 0 ! todo: allow dynamically allocated size without messing with shallow copied pointers contains generic, public :: def_node_var => def_node_var_2d, def_node_var_3d generic, public :: def_elem_var => def_elem_var_2d, def_elem_var_3d From f07538f64f05d8c405867e02979e85e3748986c7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Jun 2021 15:05:13 +0200 Subject: [PATCH 255/909] rewrite read_restart to use async and parallel reading from individual files --- src/io_restart.F90 | 140 ++++++++++++++------------------------------- 1 file changed, 42 insertions(+), 98 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 606ea86e5..d18ce4006 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -196,12 +196,10 @@ subroutine restart(istep, l_write, l_read, mesh) end if if (l_read) then - call assoc_ids(ocean_file); call was_error(ocean_file) - call read_restart(ocean_file, mesh); call was_error(ocean_file) - if (use_ice) then - call assoc_ids(ice_file); call was_error(ice_file) - call read_restart(ice_file, mesh); call was_error(ice_file) - end if + call read_restart(oce_path, oce_files) + if (use_ice) then + call read_restart(ice_path, ice_files) + end if end if if (istep==0) return @@ -396,105 +394,51 @@ subroutine write_restart(file, istep, mesh) end subroutine write_restart -subroutine read_restart(file, mesh) - implicit none - type(nc_file), intent(inout) :: file - real(kind=WP), allocatable :: aux(:), laux(:) - integer :: i, lev, size1, size2, shape - integer :: rec2read, c - real(kind=WP) :: rtime !timestamp of the record - logical :: file_exist=.False. - type(t_mesh), intent(in) , target :: mesh +subroutine read_restart(path, filegroup) + character(len=*), intent(in) :: path + type(restart_file_group), intent(inout) :: filegroup + ! EO parameters + real(kind=WP) rtime + integer i + character(:), allocatable :: dirpath + + do i=1, filegroup%nfiles + if( filegroup%files(i)%is_iorank() ) then + dirpath = path(1:len(path)-3) ! chop of the ".nc" suffix + if(filegroup%files(i)%path .ne. dirpath//"/"//filegroup%files(i)%varname//".nc") then + call execute_command_line("mkdir -p "//dirpath) + filegroup%files(i)%path = dirpath//"/"//filegroup%files(i)%varname//".nc" + write(*,*) 'reading restart for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path + call filegroup%files(i)%open_read(filegroup%files(i)%path) ! do we need to bother with read-only access? + ! todo: print a reasonable error message if the file does not exist + end if + end if + + call filegroup%files(i)%async_read_and_scatter_variables() + end do + + do i=1, filegroup%nfiles + call filegroup%files(i)%join() -#include "associate_mesh.h" + if(filegroup%files(i)%is_iorank()) then + write(*,*) 'restart from record ', filegroup%files(i)%rec_count(), ' of ', filegroup%files(i)%rec_count() - ! laux=0. - ! Serial output implemented so far - c=1 - if (mype==0) then - file_exist=.False. - inquire(file=file%filename,exist=file_exist) - if (file_exist) then - write(*,*) ' reading restart file: ', trim(file%filename) - file%error_status(c)=nf_open(file%filename, nf_nowrite, file%ncid); c=c+1 - file%error_status(c)=nf_get_vara_int(file%ncid, file%iter_varid, file%rec_count, 1, globalstep, 1); c=c+1 - file%error_status(c)=nf_get_vara_double(file%ncid, file%time_varid, file%rec_count, 1, rtime, 1); c=c+1 - else - write(*,*) - print *, achar(27)//'[33m' - write(*,*) '____________________________________________________________________' - write(*,*) ' ERROR: could not find restart_file:',trim(file%filename),'!' - write(*,*) '____________________________________________________________________' - print *, achar(27)//'[0m' - write(*,*) - call par_ex - end if - - rec2read=file%rec_count - write(*,*) 'restart from record ', rec2read, ' of ', file%rec_count + ! read the last entry from the iter variable + call filegroup%files(i)%read_var1(filegroup%files(i)%iter_varindex, [filegroup%files(i)%rec_count()], globalstep) - if (int(ctime)/=int(rtime)) then + ! read the last entry from the time variable + call filegroup%files(i)%read_var1(filegroup%files(i)%time_varindex(), [filegroup%files(i)%rec_count()], rtime) + call filegroup%files(i)%close_file() + + if (int(ctime)/=int(rtime)) then write(*,*) 'Reading restart: timestamps in restart and in clock files do not match' write(*,*) 'restart/ times are:', ctime, rtime write(*,*) 'the model will stop!' - file%error_status(c)=-310; c=c+1 - end if - end if - - call was_error(file); c=1 - - do i=1, file%nvar - shape=file%var(i)%ndim - if (mype==0) write(*,*) 'reading restart for ', trim(file%var(i)%name) -!_______writing 2D fields________________________________________________ - if (shape==1) then - size1=file%var(i)%dims(1) - if (mype==0) then - allocate(aux(size1)) - file%error_status(c)=nf_get_vara_double(file%ncid, file%var(i)%code, (/1, file%rec_count/), (/size1, 1/), aux, 1); c=c+1 -! write(*,*) 'min/max 2D =', minval(aux), maxval(aux) - end if - if (size1==nod2D) call broadcast_nod (file%var(i)%pt1, aux) - if (size1==elem2D) call broadcast_elem(file%var(i)%pt1, aux) - if (mype==0) deallocate(aux) -!_______writing 3D fields________________________________________________ - elseif (shape==2) then - size1=file%var(i)%dims(1) - size2=file%var(i)%dims(2) - if (mype==0) allocate(aux (size2)) - if (size2==nod2D) allocate(laux(myDim_nod2D +eDim_nod2D )) - if (size2==elem2D) allocate(laux(myDim_elem2D+eDim_elem2D)) - do lev=1, size1 - if (mype==0) then - file%error_status(c)=nf_get_vara_double(file%ncid, file%var(i)%code, (/lev, 1, file%rec_count/), (/1, size2, 1/), aux, 1); c=c+1 -! write(*,*) 'min/max 3D ', lev,'=', minval(aux), maxval(aux) - end if - file%var(i)%pt2(lev,:)=0. -! if (size1==nod2D .or. size2==nod2D) call broadcast_nod (file%var(i)%pt2(lev,:), aux) -! if (size1==elem2D .or. size2==elem2D) call broadcast_elem(file%var(i)%pt2(lev,:), aux) - if (size2==nod2D) then - call broadcast_nod (laux, aux) - file%var(i)%pt2(lev,:)=laux(1:myDim_nod2D+eDim_nod2D) - end if - if (size2==elem2D) then - call broadcast_elem(laux, aux) - file%var(i)%pt2(lev,:)=laux(1:myDim_elem2D+eDim_elem2D) - end if - end do - deallocate(laux) - if (mype==0) deallocate(aux) - else - if (mype==0) write(*,*) 'not supported shape of array in restart file when reading restart' - call par_ex - stop - end if - call was_error(file); c=1 + stop 1 + end if + end if end do - - if (mype==0) file%error_status(1)=nf_close(file%ncid); - file%error_count=1 - call was_error(file) -end subroutine read_restart +end subroutine subroutine assoc_ids(file) From abbaeef076e50bace8e26e5f56041f0b4f52a73a Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Jun 2021 15:38:46 +0200 Subject: [PATCH 256/909] rewrite write_restart to use async and parallel writing to individual files --- src/io_restart.F90 | 125 ++++++++++++++------------------------------- 1 file changed, 39 insertions(+), 86 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index d18ce4006..32ee6fb81 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -230,11 +230,9 @@ subroutine restart(istep, l_write, l_read, mesh) ! write restart if(mype==0) write(*,*)'Do output (netCDF, restart) ...' - call assoc_ids(ocean_file); call was_error(ocean_file) - call write_restart(ocean_file, istep, mesh); call was_error(ocean_file) + call write_restart(oce_path, oce_files, istep) if (use_ice) then - call assoc_ids(ice_file); call was_error(ice_file) - call write_restart(ice_file, istep, mesh); call was_error(ice_file) + call write_restart(ice_path, ice_files, istep) end if ! actualize clock file to latest restart point @@ -307,91 +305,46 @@ subroutine create_new_file(file) end subroutine create_new_file -subroutine write_restart(file, istep, mesh) - implicit none - type(nc_file), intent(inout) :: file - integer, intent(in) :: istep - type(t_mesh), intent(in) , target :: mesh - real(kind=WP), allocatable :: aux(:), laux(:) - integer :: i, lev, size1, size2, shape - integer :: c - real(kind=WP) :: t0, t1, t2, t3 - -#include "associate_mesh.h" +subroutine write_restart(path, filegroup, istep) + character(len=*), intent(in) :: path + type(restart_file_group), intent(inout) :: filegroup + integer, intent(in) :: istep + ! EO parameters + integer cstep + integer i + character(:), allocatable :: dirpath + + cstep = globalstep+istep + + do i=1, filegroup%nfiles + if(filegroup%files(i)%is_iorank()) then + dirpath = path(1:len(path)-3) ! chop of the ".nc" suffix + if(filegroup%files(i)%path .ne. dirpath//"/"//filegroup%files(i)%varname//".nc") then + call execute_command_line("mkdir -p "//dirpath) + filegroup%files(i)%path = dirpath//"/"//filegroup%files(i)%varname//".nc" + call filegroup%files(i)%open_write_create(filegroup%files(i)%path) + else + call filegroup%files(i)%open_write_append(filegroup%files(i)%path) + end if - ! Serial output implemented so far - if (mype==0) then - c=1 - !file%rec_count=file%rec_count+1 - write(*,*) 'writing restart record ', file%rec_count - file%error_status(c)=nf_open(file%filename, nf_write, file%ncid); c=c+1 - file%error_status(c)=nf_put_vara_double(file%ncid, file%time_varid, file%rec_count, 1, ctime, 1); c=c+1 - file%error_status(c)=nf_put_vara_int(file%ncid, file%iter_varid, file%rec_count, 1, globalstep+istep, 1); c=c+1 - end if + write(*,*) 'writing restart record ', filegroup%files(i)%rec_count()+1, ' to ', filegroup%files(i)%path + ! todo: write iter to a separate (non-mesh-variable) file + call filegroup%files(i)%write_var(filegroup%files(i)%iter_varindex, [filegroup%files(i)%rec_count()+1], [1], [cstep]) + ! todo: write time via the fesom_file_type + call filegroup%files(i)%write_var(filegroup%files(i)%time_varindex(), [filegroup%files(i)%rec_count()+1], [1], [ctime]) + end if - call was_error(file); c=1 - - do i=1, file%nvar - shape=file%var(i)%ndim -!_______writing 2D fields________________________________________________ - if (shape==1) then - size1=file%var(i)%dims(1) - if (mype==0) allocate(aux(size1)) - t0=MPI_Wtime() - if (size1==nod2D) call gather_nod (file%var(i)%pt1, aux) - if (size1==elem2D) call gather_elem(file%var(i)%pt1, aux) - t1=MPI_Wtime() - if (mype==0) then - file%error_status(c)=nf_put_vara_double(file%ncid, file%var(i)%code, (/1, file%rec_count/), (/size1, 1/), aux, 1); c=c+1 - end if - t2=MPI_Wtime() -#ifdef DEBUG - ! Timeing information for collecting and writing restart file - if (mype==0) write(*,*) 'nvar: ', i, 'size: ', size1, 'gather_nod: ', t1-t0 - if (mype==0) write(*,*) 'nvar: ', i, 'size: ', size1, 'nf_put_var: ', t2-t1 -#endif - if (mype==0) deallocate(aux) -!_______writing 3D fields________________________________________________ - elseif (shape==2) then - size1=file%var(i)%dims(1) - size2=file%var(i)%dims(2) - if (mype==0) allocate(aux (size2)) - if (size2==nod2D) allocate(laux(myDim_nod2D +eDim_nod2D )) - if (size2==elem2D) allocate(laux(myDim_elem2D+eDim_elem2D)) - do lev=1, size1 - laux=file%var(i)%pt2(lev,:) -! if (size1==nod2D .or. size2==nod2D) call gather_nod (file%var(i)%pt2(lev,:), aux) -! if (size1==elem2D .or. size2==elem2D) call gather_elem(file%var(i)%pt2(lev,:), aux) - t0=MPI_Wtime() - if (size1==nod2D .or. size2==nod2D) call gather_nod (laux, aux) - if (size1==elem2D .or. size2==elem2D) call gather_elem(laux, aux) - t1=MPI_Wtime() - if (mype==0) then - file%error_status(c)=nf_put_vara_double(file%ncid, file%var(i)%code, (/lev, 1, file%rec_count/), (/1, size2, 1/), aux, 1); c=c+1 - end if - t2=MPI_Wtime() -#ifdef DEBUG - ! Timeing information for collecting and writing output file - if (mype==0) write(*,*) 'nvar: ', i, 'size: ', size2, 'lev: ', lev, 'gather_nod: ', t1-t0 - if (mype==0) write(*,*) 'nvar: ', i, 'size: ', size2, 'lev: ', lev, 'nf_put_var: ', t2-t1 -#endif - end do - deallocate(laux) - if (mype==0) deallocate(aux) - else - if (mype==0) write(*,*) 'not supported shape of array in restart file' - call par_ex - stop - end if - call was_error(file); c=1 + call filegroup%files(i)%async_gather_and_write_variables() end do - - if (mype==0) file%error_count=c-1 - call was_error(file) - if (mype==0) file%error_status(1)=nf_close(file%ncid); - file%error_count=1 - call was_error(file) -end subroutine write_restart + + do i=1, filegroup%nfiles + call filegroup%files(i)%join() + + if(filegroup%files(i)%is_iorank()) then + call filegroup%files(i)%close_file() + end if + end do +end subroutine subroutine read_restart(path, filegroup) From 4c7bd376050dea61e37dc6eb108faa6098bdb434 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Jun 2021 15:44:44 +0200 Subject: [PATCH 257/909] remove unused procedures --- src/io_restart.F90 | 145 +-------------------------------------------- 1 file changed, 1 insertion(+), 144 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 32ee6fb81..7fcd0f67e 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -244,67 +244,6 @@ subroutine restart(istep, l_write, l_read, mesh) end subroutine restart -subroutine create_new_file(file) - implicit none - - type(nc_file), intent(inout) :: file - integer :: c, j - integer :: n, k, l, kdim, dimid(4) - character(2000) :: att_text - ! Serial output implemented so far - if (mype/=0) return - c=1 - file%error_status=0 - ! create an ocean output file - write(*,*) 'initializing restart file ', trim(file%filename) - file%error_status(c) = nf_create(file%filename, IOR(NF_NOCLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), file%ncid); c=c+1 - - do j=1, file%ndim -!___Create mesh related dimentions__________________________________________ - file%error_status(c) = nf_def_dim(file%ncid, file%dim(j)%name, file%dim(j)%size, file%dim(j)%code ); c=c+1 - end do - -!___Create time related dimentions__________________________________________ - file%error_status(c) = nf_def_dim(file%ncid, 'time', NF_UNLIMITED, file%rec_dimid); c=c+1 -!___Define the time and iteration variables_________________________________ - file%error_status(c) = nf_def_var(file%ncid, 'time', NF_DOUBLE, 1, file%rec_dimid, file%time_varid); c=c+1 - file%error_status(c) = nf_def_var(file%ncid, 'iter', NF_INT, 1, file%rec_dimid, file%iter_varid); c=c+1 - - - att_text='time' - file%error_status(c) = nf_put_att_text(file%ncid, file%time_varid, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 - write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)') 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0' - file%error_status(c) = nf_put_att_text(file%ncid, file%time_varid, 'units', len_trim(att_text), trim(att_text)); c=c+1 - - att_text='iteration_count' - file%error_status(c) = nf_put_att_text(file%ncid, file%iter_varid, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 - - do j=1, file%nvar -!___associate physical dimension with the netcdf IDs________________________ - n=file%var(j)%ndim ! shape size of the variable (exluding time) - do k=1, n - !k_th dimension of the variable - kdim=file%var(j)%dims(k) - do l=1, file%ndim ! list all defined dimensions - if (kdim==file%dim(l)%size) dimid(k)=file%dim(l)%code - end do - !write(*,*) "j",j,kdim, ' -> ', dimid(k) - end do - file%error_status(c) = nf_def_var(file%ncid, trim(file%var(j)%name), NF_DOUBLE, file%var(j)%ndim+1, (/dimid(1:n), file%rec_dimid/), file%var(j)%code); c=c+1 - !if (n==1) then - ! file%error_status(c)=nf_def_var_chunking(file%ncid, file%var(j)%code, NF_CHUNKED, (/1/)); c=c+1 - if (n==2) then - file%error_status(c)=nf_def_var_chunking(file%ncid, file%var(j)%code, NF_CHUNKED, (/1, file%dim(1)%size/)); ! c=c+1 - end if - file%error_status(c)=nf_put_att_text(file%ncid, file%var(j)%code, 'description', len_trim(file%var(j)%longname), file%var(j)%longname); c=c+1 - file%error_status(c)=nf_put_att_text(file%ncid, file%var(j)%code, 'units', len_trim(file%var(j)%units), file%var(j)%units); c=c+1 - end do - - file%error_status(c)=nf_close(file%ncid); c=c+1 - file%error_count=c-1 -end subroutine create_new_file - - subroutine write_restart(path, filegroup, istep) character(len=*), intent(in) :: path type(restart_file_group), intent(inout) :: filegroup @@ -393,86 +332,4 @@ subroutine read_restart(path, filegroup) end do end subroutine - -subroutine assoc_ids(file) - implicit none - - type(nc_file), intent(inout) :: file - character(500) :: longname - integer :: c, j, k - real(kind=WP) :: rtime !timestamp of the record - ! Serial output implemented so far - if (mype/=0) return - c=1 - file%error_status=0 - ! open existing netcdf file - write(*,*) 'associating restart file ', trim(file%filename) - - file%error_status(c) = nf_open(file%filename, nf_nowrite, file%ncid) - !if the file does not exist it will be created! - if (file%error_status(c) .ne. nf_noerr) then - call create_new_file(file) ! error status counter will be reset - c=file%error_count+1 - file%error_status(c) = nf_open(file%filename, nf_nowrite, file%ncid); c=c+1 - end if - - do j=1, file%ndim -!___Associate mesh related dimentions_______________________________________ - file%error_status(c) = nf_inq_dimid(file%ncid, file%dim(j)%name, file%dim(j)%code); c=c+1 - end do -!___Associate time related dimentions_______________________________________ - file%error_status(c) = nf_inq_dimid (file%ncid, 'time', file%rec_dimid); c=c+1 - file%error_status(c) = nf_inq_dimlen(file%ncid, file%rec_dimid, file%rec_count); c=c+1 -!___Associate the time and iteration variables______________________________ - file%error_status(c) = nf_inq_varid(file%ncid, 'time', file%time_varid); c=c+1 - file%error_status(c) = nf_inq_varid(file%ncid, 'iter', file%iter_varid); c=c+1 -!___if the time rtime at the rec_count does not equal ctime we look for the closest record with the -! timestamp less than ctime - do k=file%rec_count, 1, -1 - file%error_status(c)=nf_get_vara_double(file%ncid, file%time_varid, k, 1, rtime, 1); - if (ctime > rtime) then - file%rec_count=k+1 - exit ! a proper rec_count detected, ready for writing restart, exit the loop - elseif (ctime == rtime) then - file%rec_count=k - exit ! a proper rec_count detected, ready for reading restart, exit the loop - end if - if (k==1) then - if (mype==0) write(*,*) 'WARNING: all dates in restart file are after the current date' - if (mype==0) write(*,*) 'reading restart will not be possible !' - if (mype==0) write(*,*) 'the model attempted to start with the time stamp = ', int(ctime) - file%error_status(c)=-310; - end if - end do - c=c+1 ! check will be made only for the last nf_get_vara_double - file%rec_count=max(file%rec_count, 1) -!___Associate physical variables____________________________________________ - do j=1, file%nvar - file%error_status(c) = nf_inq_varid(file%ncid, file%var(j)%name, file%var(j)%code); c=c+1 - end do - file%error_status(c)=nf_close(file%ncid); c=c+1 - file%error_count=c-1 - write(*,*) 'current restart counter = ', file%rec_count -end subroutine assoc_ids -! -!-------------------------------------------------------------------------------------------- -! -subroutine was_error(id) - implicit none - type(nc_file), intent(inout) :: id - integer :: k, status, ierror - - call MPI_BCast(id%error_count, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(id%error_status(1), id%error_count, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - - do k=1, id%error_count - status=id%error_status(k) - if (status .ne. nf_noerr) then - if (mype==0) write(*,*) 'error counter=', k - if (mype==0) call handle_err(status) - call par_ex - stop - end if - end do -end subroutine was_error -END MODULE io_RESTART +end module From e4f2368ed18b3214edc95d9984bc0963c29190b5 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Jun 2021 15:45:54 +0200 Subject: [PATCH 258/909] remove unused derived types --- src/io_restart.F90 | 39 ++------------------------------------- 1 file changed, 2 insertions(+), 37 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 7fcd0f67e..bf2e0685a 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -11,43 +11,8 @@ MODULE io_RESTART use g_cvmix_idemix implicit none #include "netcdf.inc" -! -!-------------------------------------------------------------------------------------------- -! - type nc_dims - integer :: size - character(100) :: name - integer :: code - end type nc_dims -! -!-------------------------------------------------------------------------------------------- -! - type nc_vars - character(100) :: name - integer :: code - character(500) :: longname - character(100) :: units - integer :: ndim - integer :: dims(2) !<=2; assume there are no variables with dimension more than 2xNLxT - real(kind=WP), pointer :: pt1(:), pt2(:,:) - end type nc_vars -! -!-------------------------------------------------------------------------------------------- -! - type nc_file - character(500) :: filename - type(nc_dims), allocatable, dimension(:) :: dim - type(nc_vars), allocatable, dimension(:) :: var - integer :: ndim=0, nvar=0 - integer :: rec_dimid, time_varid, iter_varid - integer :: ncid - integer :: rec_count=0 - integer :: error_status(250), error_count - logical :: is_in_use=.false. - end type nc_file - - - type(nc_file), save :: ocean_file, ice_file + + integer, save :: globalstep=0 real(kind=WP) :: ctime !current time in seconds from the beginning of the year From 15f3c03e82e648857b9b690d1b65c5bf123d84a2 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Jun 2021 15:51:32 +0200 Subject: [PATCH 259/909] remove redundant implicit none --- src/io_restart.F90 | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index bf2e0685a..290ec497c 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -13,7 +13,7 @@ MODULE io_RESTART #include "netcdf.inc" - integer, save :: globalstep=0 + integer, save :: globalstep=0 ! todo: remove this from module scope as it will mess things up if we use async read/write from the same process real(kind=WP) :: ctime !current time in seconds from the beginning of the year PRIVATE @@ -31,8 +31,6 @@ MODULE io_RESTART ! ini_ocean_io initializes ocean_file datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! subroutine ini_ocean_io(year, mesh) - implicit none - integer, intent(in) :: year integer :: ncid, j integer :: varid @@ -103,8 +101,6 @@ end subroutine ini_ocean_io ! ini_ice_io initializes ice_file datatype which contains information of all variables need to be written into ! the ice restart file. This is the only place need to be modified if a new variable is added! subroutine ini_ice_io(year, mesh) - implicit none - integer, intent(in) :: year integer :: ncid, j integer :: varid @@ -140,7 +136,6 @@ end subroutine ini_ice_io !-------------------------------------------------------------------------------------------- ! subroutine restart(istep, l_write, l_read, mesh) - implicit none ! this is the main restart subroutine ! if l_write is TRUE writing restart file will be forced ! if l_read is TRUE the restart file will be read From 46be82eee73f31af8fad6162ca372f73ed0aa7ba Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Jun 2021 15:55:58 +0200 Subject: [PATCH 260/909] remove unused variables --- src/io_restart.F90 | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 290ec497c..cdf424ca8 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -32,10 +32,8 @@ MODULE io_RESTART ! the ocean restart file. This is the only place need to be modified if a new variable is added! subroutine ini_ocean_io(year, mesh) integer, intent(in) :: year - integer :: ncid, j - integer :: varid + integer :: j character(500) :: longname - character(500) :: filename character(500) :: trname, units character(4) :: cyear type(t_mesh), intent(in) , target :: mesh @@ -102,11 +100,6 @@ end subroutine ini_ocean_io ! the ice restart file. This is the only place need to be modified if a new variable is added! subroutine ini_ice_io(year, mesh) integer, intent(in) :: year - integer :: ncid, j - integer :: varid - character(500) :: longname - character(500) :: filename - character(500) :: trname, units character(4) :: cyear type(t_mesh), intent(in) , target :: mesh logical, save :: has_been_called = .false. @@ -143,7 +136,6 @@ subroutine restart(istep, l_write, l_read, mesh) integer :: istep logical :: l_write, l_read logical :: is_restart - integer :: mpierr type(t_mesh), intent(in) , target :: mesh ctime=timeold+(dayold-1.)*86400 From 22c2b8200e7f5a7bf827a07b4030a9308297f7fc Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Jun 2021 15:57:15 +0200 Subject: [PATCH 261/909] remove netcdf dependency --- src/io_restart.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index cdf424ca8..c11093d99 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -10,7 +10,6 @@ MODULE io_RESTART use g_cvmix_tke use g_cvmix_idemix implicit none -#include "netcdf.inc" integer, save :: globalstep=0 ! todo: remove this from module scope as it will mess things up if we use async read/write from the same process From add74af23be411fd71c9a4e5def27848cbfe6b25 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Jun 2021 16:16:41 +0200 Subject: [PATCH 262/909] remove now unnecessary module dependencies --- src/io_restart.F90 | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index c11093d99..af134e2fc 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -1,22 +1,15 @@ MODULE io_RESTART use restart_file_group_module - use g_config use g_clock - use g_parsup - use g_comm_auto - use mod_mesh use o_arrays use i_arrays use g_cvmix_tke - use g_cvmix_idemix implicit none - + public :: restart + private integer, save :: globalstep=0 ! todo: remove this from module scope as it will mess things up if we use async read/write from the same process real(kind=WP) :: ctime !current time in seconds from the beginning of the year - - PRIVATE - PUBLIC :: restart type(restart_file_group), save :: oce_files type(restart_file_group), save :: ice_files From a283e714ddee0111fcae88176b95ea18d1e39fc4 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 8 Jun 2021 11:48:57 +0200 Subject: [PATCH 263/909] remove nonconforming tab characters --- src/io_restart.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index af134e2fc..280305357 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -181,8 +181,8 @@ subroutine restart(istep, l_write, l_read, mesh) ! actualize clock file to latest restart point if (mype==0) then - write(*,*) ' --> actualize clock file to latest restart point' - call clock_finish + write(*,*) ' --> actualize clock file to latest restart point' + call clock_finish end if end subroutine restart From 402c930cd7dcdee7e99bf2a2359201dc6ba862f6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 9 Jun 2021 17:27:15 +0200 Subject: [PATCH 264/909] continue with the computation while writing restarts --- src/fvom_main.F90 | 1 + src/io_restart.F90 | 36 +++++++++++++++++++++++++++++------- 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 45d250b72..3e14bd57a 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -239,6 +239,7 @@ program main end do call finalize_output() + call finalize_restart() !___FINISH MODEL RUN________________________________________________________ diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 280305357..c1c76ea72 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -5,7 +5,7 @@ MODULE io_RESTART use i_arrays use g_cvmix_tke implicit none - public :: restart + public :: restart, finalize_restart private integer, save :: globalstep=0 ! todo: remove this from module scope as it will mess things up if we use async read/write from the same process @@ -200,14 +200,18 @@ subroutine write_restart(path, filegroup, istep) cstep = globalstep+istep do i=1, filegroup%nfiles + call filegroup%files(i)%join() ! join the previous write (if required) + if(filegroup%files(i)%is_iorank()) then + if(filegroup%files(i)%is_attached()) call filegroup%files(i)%close_file() ! close the file from previous write + dirpath = path(1:len(path)-3) ! chop of the ".nc" suffix if(filegroup%files(i)%path .ne. dirpath//"/"//filegroup%files(i)%varname//".nc") then call execute_command_line("mkdir -p "//dirpath) filegroup%files(i)%path = dirpath//"/"//filegroup%files(i)%varname//".nc" call filegroup%files(i)%open_write_create(filegroup%files(i)%path) else - call filegroup%files(i)%open_write_append(filegroup%files(i)%path) + call filegroup%files(i)%open_write_append(filegroup%files(i)%path) ! todo: keep the file open between writes end if write(*,*) 'writing restart record ', filegroup%files(i)%rec_count()+1, ' to ', filegroup%files(i)%path @@ -220,13 +224,31 @@ subroutine write_restart(path, filegroup, istep) call filegroup%files(i)%async_gather_and_write_variables() end do - do i=1, filegroup%nfiles - call filegroup%files(i)%join() - - if(filegroup%files(i)%is_iorank()) then - call filegroup%files(i)%close_file() +end subroutine + + +! join remaining threads and close all open files +subroutine finalize_restart() + integer i + + ! join all previous writes + ! close all restart files + + do i=1, oce_files%nfiles + call oce_files%files(i)%join() + if(oce_files%files(i)%is_iorank()) then + if(oce_files%files(i)%is_attached()) call oce_files%files(i)%close_file() end if end do + + if(use_ice) then + do i=1, ice_files%nfiles + call ice_files%files(i)%join() + if(ice_files%files(i)%is_iorank()) then + if(ice_files%files(i)%is_attached()) call ice_files%files(i)%close_file() + end if + end do + end if end subroutine From 181fe5a41c44bdada5eb6f2162b84e7f36d78324 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Jun 2021 15:34:53 +0200 Subject: [PATCH 265/909] fix build error on macOS --- src/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 28811319d..68a7993d5 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,5 +1,4 @@ cmake_minimum_required(VERSION 3.4) -set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9") project(fesom C Fortran) From 563c6fb2fb76d7c152895d4663da068a73ae680f Mon Sep 17 00:00:00 2001 From: Paul Gierz Date: Thu, 24 Jun 2021 16:00:22 +0200 Subject: [PATCH 266/909] Update gen_modules_rotate_grid.F90 Better helpful comments, plus a typo --- src/gen_modules_rotate_grid.F90 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/gen_modules_rotate_grid.F90 b/src/gen_modules_rotate_grid.F90 index 1634ee48b..797a8a74d 100755 --- a/src/gen_modules_rotate_grid.F90 +++ b/src/gen_modules_rotate_grid.F90 @@ -1,10 +1,13 @@ ! Routines needed to support displaced poles: ! The new pole position is set with -! alphaEuler, betaEuler and gammaEuler. The degfault values +! alphaEuler, betaEuler and gammaEuler. The default values ! alphaEuler=50. [degree] Euler angles, convention: ! betaEuler=15. [degree] first around z, then around new x, ! gammaEuler=-90. [degree] then around new z. ! +! A helpful animation may be found online here: +! https://en.wikipedia.org/wiki/Euler_angles +! ! The first two define the new pole position ! as phi_p=alphaEuler-90, theta_p=90-betaEuler. ! The third, gammaEuler, is in reality irrelevant and just From cb2add54a86f9478d702d9e287a70f57b88defa0 Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Mon, 28 Jun 2021 09:54:44 +0200 Subject: [PATCH 267/909] fixes to coupling --- src/ice_oce_coupling.F90 | 22 ++++++++++------ src/ice_thermo_cpl.F90 | 57 +++++++++++++++++++++++++--------------- src/io_meandata.F90 | 22 +++++++++++----- 3 files changed, 65 insertions(+), 36 deletions(-) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 748552beb..d871096f2 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -277,11 +277,10 @@ subroutine oce_fluxes(mesh) ! enforce the total freshwater/salt flux be zero ! 1. water flux ! if (.not. use_virt_salt) can be used! ! we conserve only the fluxes from the database plus evaporation. - flux = evaporation-ice_sublimation & ! the ice2atmos subplimation does not contribute to the freshwater flux into the ocean - +prec_rain & - +prec_snow*(1.0_WP-a_ice_old) & - +runoff - + !flux = evaporation-ice_sublimation & ! the ice2atmos subplimation does not contribute to the freshwater flux into the ocean + ! +prec_rain & + ! +prec_snow*(1.0_WP-a_ice_old) & + ! +runoff ! --> In case of zlevel and zstar and levitating sea ice, sea ice is just sitting ! on top of the ocean without displacement of water, there the thermodynamic ! growth rates of sea ice have to be taken into account to preserve the fresh water @@ -292,7 +291,7 @@ subroutine oce_fluxes(mesh) ! salinity flux !!PS if ( .not. use_floatice .and. .not. use_virt_salt) then if (.not. use_virt_salt) then - flux = flux-thdgr*rhoice*inv_rhowat-thdgrsn*rhosno*inv_rhowat + flux = water_flux+thdgr*rhoice*inv_rhowat+thdgrsn*rhosno*inv_rhowat end if ! Also balance freshwater flux that come from ocean-cavity boundary @@ -305,11 +304,18 @@ subroutine oce_fluxes(mesh) end if end if - call integrate_nod(flux, net, mesh) + ! call integrate_nod(flux, net, mesh) ! here the + sign must be used because we switched up the sign of the ! water_flux with water_flux = -fresh_wa_flux, but evap, prec_... and runoff still ! have there original sign - water_flux=water_flux+net/ocean_area + + ! water_flux=water_flux+net/ocean_area + if (.not. use_virt_salt) then + ! lets just impose the total flux (water_flux) conservation here + ! using just flux will not conserve due to cutoffs in the sea ice module + call integrate_nod(water_flux, net, mesh) + water_flux=water_flux-net/ocean_area + end if !___________________________________________________________________________ if (use_sw_pene) call cal_shortwave_rad(mesh) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 5aeedd104..c3f6e7234 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -30,7 +30,7 @@ subroutine thermodynamics(mesh) !---- variables from ice_modules.F90 use i_dyn_parms, only: Cd_oce_ice use i_therm_parms, only: rhowat, rhoice, rhosno, cc, cl, con, consn, Sice -#ifdef oifs +#if defined (__oifs) use i_array, only: a_ice, m_ice, m_snow, u_ice, v_ice, u_w, v_w & , fresh_wa_flux, net_heat_flux, oce_heat_flux, ice_heat_flux, enthalpyoffuse, S_oc_array, T_oc_array #else @@ -42,7 +42,7 @@ subroutine thermodynamics(mesh) use g_config, only: dt !---- variables from gen_modules_forcing.F90 -#ifdef oifs +#if defined (__oifs) use g_forcing_arrays, only: shortwave, evap_no_ifrac, sublimation & , prec_rain, prec_snow, runoff, evaporation, thdgr, thdgrsn, flice & , enthalpyoffuse @@ -86,7 +86,7 @@ subroutine thermodynamics(mesh) !---- geographical coordinates real(kind=WP) :: geolon, geolat !---- minimum and maximum of the lead closing parameter - real(kind=WP) :: h0min = 0.50, h0max = 1.5 + real(kind=WP) :: h0min = 0.5, h0max = 1.5 type(t_mesh), intent(in) , target :: mesh real(kind=WP), parameter :: Aimin = 0.001, himin = 0.005 @@ -142,10 +142,6 @@ subroutine thermodynamics(mesh) h0min = 0.3 h0max = 0.3 endif -#endif /* (__oifs) */ - - call ice_growth -#if defined (__oifs) !---- For AWI-CM3 we calculate ice surface temp and albedo in fesom, ! then send those to OpenIFS where they are used to calucate the ! energy fluxes ---! @@ -157,10 +153,10 @@ subroutine thermodynamics(mesh) ! Freezing temp of saltwater in K ice_temp(inod) = -0.0575_WP*S_oc_array(inod) + 1.7105e-3_WP*sqrt(S_oc_array(inod)**3) -2.155e-4_WP*(S_oc_array(inod)**2)+273.15_WP endif - call ice_albedo(h,hsn,t,alb) + call ice_albedo(h,hsn,t,alb,geolat) ice_alb(inod) = alb #endif - + call ice_growth a_ice(inod) = A m_ice(inod) = h @@ -184,9 +180,9 @@ subroutine thermodynamics(mesh) !=================================================================== subroutine ice_growth - + implicit none - + !---- thermodynamic production rates (pos.: growth; neg.: melting) real(kind=WP) :: dsnow, dslat, dhice, dhiow, dcice, dciow @@ -309,8 +305,19 @@ subroutine ice_growth !---- snow melt rate over sea ice (dsnow <= 0) !---- if there is atmospheric melting over sea ice, first melt any !---- snow that is present, but do not melt more snow than available +#if defined (__oifs) + !---- new condition added - surface temperature must be + !---- larger than 273K to melt snow + if (t.gt.273_WP) then + dsnow = A*min(Qatmice-Qicecon,0._WP) + dsnow = max(dsnow*rhoice/rhosno,-hsn) + else + dsnow = 0.0_WP + endif +#else dsnow = A*min(Qatmice-Qicecon,0._WP) dsnow = max(dsnow*rhoice/rhosno,-hsn) +#endif !---- update snow thickness after atmospheric snow melt hsn = hsn + dsnow @@ -473,7 +480,7 @@ subroutine ice_surftemp(h,hsn,a2ihf,t) real(kind=WP) zcpdte real(kind=WP) zcprosn !---- local parameters - real(kind=WP), parameter :: dice = 0.05_WP ! ECHAM6's thickness for top ice "layer" + real(kind=WP), parameter :: dice = 0.10_WP ! Thickness for top ice "layer" !---- freezing temperature of sea-water [K] real(kind=WP) :: TFrezs @@ -488,26 +495,34 @@ subroutine ice_surftemp(h,hsn,a2ihf,t) zcprosn=rhosno*cpsno/dt ! Specific Energy required to change temperature of 1m snow on ice [J/(sm³K)] zcpdte=zcpdt+zcprosn*hsn ! Combined Energy required to change temperature of snow + 0.05m of upper ice t=(zcpdte*t+a2ihf+zicefl)/(zcpdte+con/zsniced) ! New sea ice surf temp [K] - t=min(273.15,t) ! Not warmer than freezing please! + t=min(273.15_WP,t) end subroutine ice_surftemp - subroutine ice_albedo(h,hsn,t,alb) + subroutine ice_albedo(h,hsn,t,alb,geolat) ! INPUT: - ! hsn - snow thickness, used for albedo parameterization [m] - ! t - temperature of snow/ice surface [C] + ! h - ice thickness [m] + ! hsn - snow thickness [m] + ! t - temperature of snow/ice surface [C] + ! geolat - lattitude ! ! OUTPUT: - ! alb - snow albedo + ! alb - selected broadband albedo use i_therm_param implicit none - real(kind=WP) h - real(kind=WP) hsn - real(kind=WP) t - real(kind=WP) alb + real(kind=WP) :: h + real(kind=WP) :: hsn + real(kind=WP) :: t + real(kind=WP) :: alb + real(kind=WP) :: geolat ! set albedo ! ice and snow, freezing and melting conditions are distinguished + if (geolat.gt.0.) then !SH does not have melt ponds + albsnm = 0.79_WP + else + albsnm = 0.7_WP + endif if (h>0.0_WP) then if (t<273.15_WP) then ! freezing condition if (hsn.gt.0.0_WP) then ! snow cover present diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index b378a27c3..f0aabb41e 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -103,7 +103,7 @@ subroutine ini_mean_io(mesh) integer :: i, j integer, save :: nm_io_unit = 103 ! unit to open namelist file, skip 100-102 for cray integer :: iost - integer,dimension(12) :: sel_forcvar=0 + integer,dimension(15) :: sel_forcvar=0 character(len=10) :: id_string type(t_mesh), intent(in) , target :: mesh @@ -167,11 +167,15 @@ subroutine ini_mean_io(mesh) end if CASE ('thdgr ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'thdgr', 'growth rate ice', 'm/s', thdgr(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'thdgr', 'thermodynamic growth rate ice', 'm/s', thdgr(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) end if CASE ('thdgrsn ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'thdgrsn', 'growth rate ice', 'm/s', thdgrsn(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'thdgrsn', 'thermodynamic growth rate snow', 'm/s', thdgrsn(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + end if +CASE ('flice ') + if (use_ice) then + call def_stream(nod2D, myDim_nod2D, 'flice', 'flooding growth rate ice', 'm/s', flice(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) end if CASE ('m_snow ') if (use_ice) then @@ -262,8 +266,8 @@ subroutine ini_mean_io(mesh) CASE ('curl_surf ') if (lcurt_stress_surf) then call def_stream(nod2D, myDim_nod2D, 'curl_surf', 'vorticity of the surface stress','none', curl_stress_surf(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) - end if + end if !___________________________________________________________________________________________________________________________________ ! output Ferrari/GM parameterisation 2D CASE ('fer_C ') @@ -510,9 +514,10 @@ subroutine ini_mean_io(mesh) if (sel_forcvar(10)==0) call def_stream(nod2D , myDim_nod2D , 'runoff', 'river runoff' , 'none' , runoff(:) , 1, 'm', i_real4, mesh) if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind str. to ocean' , 'm/s^2', stress_surf(1, :), 1, 'm', i_real4, mesh) if (sel_forcvar(12)==0) call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind str. to ocean' , 'm/s^2', stress_surf(2, :), 1, 'm', i_real4, mesh) - call def_stream(nod2D , myDim_nod2D , 'cd','wind drag coef. ' , '', cd_atm_oce_arr(:), 1, 'm', i_real4, mesh) - call def_stream(nod2D , myDim_nod2D , 'ch','transfer coeff. sensible heat', '', ch_atm_oce_arr(:), 1, 'm', i_real4, mesh) - call def_stream(nod2D , myDim_nod2D , 'ce','transfer coeff. evaporation ' , '', ce_atm_oce_arr(:), 1, 'm', i_real4, mesh) + call def_stream(nod2D , myDim_nod2D , 'cd', 'wind drag coef. ' , '', cd_atm_oce_arr(:), 1, 'm', i_real4, mesh) + call def_stream(nod2D , myDim_nod2D , 'ch', 'transfer coeff. sensible heat', '', ch_atm_oce_arr(:), 1, 'm', i_real4, mesh) + call def_stream(nod2D , myDim_nod2D , 'ce', 'transfer coeff. evaporation ' , '', ce_atm_oce_arr(:), 1, 'm', i_real4, mesh) + call def_stream(nod2D, myDim_nod2D, 'subli', 'sublimation', 'm/s', sublimation(:), 1, 'm', i_real4, mesh) end if @@ -611,6 +616,9 @@ subroutine create_new_file(entry, mesh) call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, & (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) + !if (entry%ndim==2) then + ! call assert_nf( nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)), __LINE__); + !end if call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'description', len_trim(entry%description), entry%description), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'long_name', len_trim(entry%description), entry%description), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'units', len_trim(entry%units), entry%units), __LINE__) From 0911d1cacdd4f681ef9b313042979175ec042882 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 28 Jun 2021 11:47:43 +0200 Subject: [PATCH 268/909] addd comments --- src/ice_oce_coupling.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 7b9555d02..5ff9c3f57 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -309,10 +309,10 @@ subroutine oce_fluxes(mesh) ! Also balance freshwater flux that come from ocean-cavity boundary if (use_cavity) then - if (.not. use_virt_salt) then + if (.not. use_virt_salt) then !zstar, zlevel ! only for full-free surface approach otherwise total ocean volume will drift where (ulevels_nod2d > 1) flux = -water_flux - else + else ! linfs where (ulevels_nod2d > 1) flux = 0.0_WP end if end if From 6fe46834bb7402c18c436bd12ade3d75d5363415 Mon Sep 17 00:00:00 2001 From: patrickscholz Date: Mon, 28 Jun 2021 14:07:11 +0200 Subject: [PATCH 269/909] Fix problem with FESOM2.0 reproducability bias on ollie - FESOM2.0 simulations on AWI ollie HPC were not always reproducible. Runs with identical namelists, init-conditions ... could lead to sligthly different results. - Natalja solved that issue by changing the compiler flag -fast-transcendentals with -fimf-use-svml - with this option loops with exp, sin, cos ... (IEEE function) will be always computed through the vector implementation otherwise the compiler is not fixed how IEEE funtions should be treated, which can be either Vector, Scalar or something else. All three can give slightly different results --- src/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 68a7993d5..a04db6736 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -67,8 +67,8 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) - target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip -init=zero -no-wrap-margin) -# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) +# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) From 4dbe135f88332db83ca78b1ffba71850b2d9cc32 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 28 Jun 2021 15:44:09 +0200 Subject: [PATCH 270/909] - do not create uninitialized copies of variables for threads via OpenMP - explicitly disable OpenMP compiling for the Cray ftn compiler --- src/CMakeLists.txt | 2 +- src/gen_modules_partitioning.F90 | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 28811319d..eec063e37 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -76,7 +76,7 @@ elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) - target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index a914a9d51..770229964 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -72,12 +72,6 @@ module g_PARSUP integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) logical :: elem_full_flag -!$OMP threadprivate(com_nod2D,com_elem2D,com_elem2D_full) -!$OMP threadprivate(mype) -!$OMP threadprivate(myDim_nod2D, eDim_nod2D, myList_nod2D) -!$OMP threadprivate(myDim_elem2D, eDim_elem2D, eXDim_elem2D, myList_elem2D) -!$OMP threadprivate(myDim_edge2D, eDim_edge2D, myList_edge2D) - contains subroutine par_init ! initializes MPI From b71850b0cf2eddb3bf52e5231080276cc525f4ec Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 28 Jun 2021 15:45:57 +0200 Subject: [PATCH 271/909] fix build error on macOS --- src/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index eec063e37..cc99e3450 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,5 +1,4 @@ cmake_minimum_required(VERSION 3.4) -set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9") project(fesom C Fortran) From bd3645a3cd3be4c531794d5a803c7713f20ecead Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 28 Jun 2021 17:57:17 +0200 Subject: [PATCH 272/909] - do not create uninitialized copies of variables for threads via OpenMP - explicitly disable OpenMP compiling for the Cray ftn compiler --- src/CMakeLists.txt | 2 +- src/gen_modules_partitioning.F90 | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 28811319d..eec063e37 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -76,7 +76,7 @@ elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) - target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index a914a9d51..770229964 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -72,12 +72,6 @@ module g_PARSUP integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) logical :: elem_full_flag -!$OMP threadprivate(com_nod2D,com_elem2D,com_elem2D_full) -!$OMP threadprivate(mype) -!$OMP threadprivate(myDim_nod2D, eDim_nod2D, myList_nod2D) -!$OMP threadprivate(myDim_elem2D, eDim_elem2D, eXDim_elem2D, myList_elem2D) -!$OMP threadprivate(myDim_edge2D, eDim_edge2D, myList_edge2D) - contains subroutine par_init ! initializes MPI From 6ab6f510e5ac81f48dd18c63d6a090a489dfba75 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 29 Jun 2021 12:05:30 +0200 Subject: [PATCH 273/909] only use sublimation variable if it is actually declared --- src/io_meandata.F90 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index f0aabb41e..84f4211d4 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -517,7 +517,9 @@ subroutine ini_mean_io(mesh) call def_stream(nod2D , myDim_nod2D , 'cd', 'wind drag coef. ' , '', cd_atm_oce_arr(:), 1, 'm', i_real4, mesh) call def_stream(nod2D , myDim_nod2D , 'ch', 'transfer coeff. sensible heat', '', ch_atm_oce_arr(:), 1, 'm', i_real4, mesh) call def_stream(nod2D , myDim_nod2D , 'ce', 'transfer coeff. evaporation ' , '', ce_atm_oce_arr(:), 1, 'm', i_real4, mesh) +#if defined (__oasis) call def_stream(nod2D, myDim_nod2D, 'subli', 'sublimation', 'm/s', sublimation(:), 1, 'm', i_real4, mesh) +#endif end if From db3d62737b0149337accf4978b2c29ebe5b0dd26 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 29 Jun 2021 14:54:18 +0200 Subject: [PATCH 274/909] sort structure of ../src/gen_forcing_couple.F90 a bit --- src/gen_forcing_couple.F90 | 211 +++++++++++++++++++------------------ 1 file changed, 106 insertions(+), 105 deletions(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 2a39b34b7..19240fc31 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -141,97 +141,97 @@ subroutine update_atm_forcing(istep, mesh) call cpl_oasis3mct_recv (i,exchange,action) !if (.not. action) cycle !Do not apply a correction at first time step! - if (i==1 .and. action .and. istep/=1) call net_rec_from_atm(action) - if (i.eq.1) then - if (.not. action) cycle - stress_atmoce_x(:) = exchange(:) ! taux_oce - do_rotate_oce_wind=.true. - elseif (i.eq.2) then - if (.not. action) cycle - stress_atmoce_y(:) = exchange(:) ! tauy_oce - do_rotate_oce_wind=.true. - elseif (i.eq.3) then - if (.not. action) cycle - stress_atmice_x(:) = exchange(:) ! taux_ice - do_rotate_ice_wind=.true. - elseif (i.eq.4) then - if (.not. action) cycle - stress_atmice_y(:) = exchange(:) ! tauy_ice - do_rotate_ice_wind=.true. - elseif (i.eq.5) then - if (action) then - prec_rain(:) = exchange(:) ! tot_prec - mask=1. - call force_flux_consv(prec_rain, mask, i, 0,action, mesh) - end if - elseif (i.eq.6) then - if (action) then - prec_snow(:) = exchange(:) ! snowfall - mask=1. - call force_flux_consv(prec_snow, mask,i,1,action, mesh) ! Northern hemisphere - call force_flux_consv(prec_snow, mask,i,2,action, mesh) ! Southern Hemisphere - end if + if (i==1 .and. action .and. istep/=1) call net_rec_from_atm(action) + if (i.eq.1) then + if (.not. action) cycle + stress_atmoce_x(:) = exchange(:) ! taux_oce + do_rotate_oce_wind=.true. + elseif (i.eq.2) then + if (.not. action) cycle + stress_atmoce_y(:) = exchange(:) ! tauy_oce + do_rotate_oce_wind=.true. + elseif (i.eq.3) then + if (.not. action) cycle + stress_atmice_x(:) = exchange(:) ! taux_ice + do_rotate_ice_wind=.true. + elseif (i.eq.4) then + if (.not. action) cycle + stress_atmice_y(:) = exchange(:) ! tauy_ice + do_rotate_ice_wind=.true. + elseif (i.eq.5) then + if (action) then + prec_rain(:) = exchange(:) ! tot_prec + mask=1. + call force_flux_consv(prec_rain, mask, i, 0,action, mesh) + end if + elseif (i.eq.6) then + if (action) then + prec_snow(:) = exchange(:) ! snowfall + mask=1. + call force_flux_consv(prec_snow, mask,i,1,action, mesh) ! Northern hemisphere + call force_flux_consv(prec_snow, mask,i,2,action, mesh) ! Southern Hemisphere + end if elseif (i.eq.7) then - if (action) then - evap_no_ifrac(:) = exchange(:) ! tot_evap - tmp_evap_no_ifrac(:) = exchange(:) ! to reset for flux - ! correction - end if - mask=1.-a_ice - evap_no_ifrac(:) = tmp_evap_no_ifrac(:) - call force_flux_consv(evap_no_ifrac,mask,i,0,action, mesh) - elseif (i.eq.8) then - if (action) then - sublimation(:) = exchange(:) ! tot_subl - tmp_sublimation(:) = exchange(:) ! to reset for flux - ! correction - end if - mask=a_ice - sublimation(:) = tmp_sublimation(:) - call force_flux_consv(sublimation,mask,i,1,action, mesh) ! Northern hemisphere - call force_flux_consv(sublimation,mask,i,2,action, mesh) ! Southern Hemisphere - elseif (i.eq.9) then - if (action) then - oce_heat_flux(:) = exchange(:) ! heat_oce - tmp_oce_heat_flux(:) = exchange(:) ! to reset for flux - ! correction - end if - mask=1.-a_ice - oce_heat_flux(:) = tmp_oce_heat_flux(:) - call force_flux_consv(oce_heat_flux, mask, i, 0,action, mesh) - elseif (i.eq.10) then - if (action) then - ice_heat_flux(:) = exchange(:) ! heat_ice - tmp_ice_heat_flux(:) = exchange(:) ! to reset for flux - ! correction - end if - mask=a_ice - ice_heat_flux(:) = tmp_ice_heat_flux(:) - call force_flux_consv(ice_heat_flux, mask, i, 1,action, mesh) ! Northern hemisphere - call force_flux_consv(ice_heat_flux, mask, i, 2,action, mesh) ! Southern Hemisphere - elseif (i.eq.11) then - if (action) then - shortwave(:) = exchange(:) ! heat_swr - tmp_shortwave(:) = exchange(:) ! to reset for flux - ! correction - end if - mask=1.-a_ice - shortwave(:) = tmp_shortwave(:) - call force_flux_consv(shortwave, mask, i, 0,action, mesh) - elseif (i.eq.12) then - if (action) then - runoff(:) = exchange(:) ! AWI-CM2: runoff, AWI-CM3: runoff + excess snow on glaciers - mask=1. - call force_flux_consv(runoff, mask, i, 0,action, mesh) - end if + if (action) then + evap_no_ifrac(:) = exchange(:) ! tot_evap + tmp_evap_no_ifrac(:) = exchange(:) ! to reset for flux + ! correction + end if + mask=1.-a_ice + evap_no_ifrac(:) = tmp_evap_no_ifrac(:) + call force_flux_consv(evap_no_ifrac,mask,i,0,action, mesh) + elseif (i.eq.8) then + if (action) then + sublimation(:) = exchange(:) ! tot_subl + tmp_sublimation(:) = exchange(:) ! to reset for flux + ! correction + end if + mask=a_ice + sublimation(:) = tmp_sublimation(:) + call force_flux_consv(sublimation,mask,i,1,action, mesh) ! Northern hemisphere + call force_flux_consv(sublimation,mask,i,2,action, mesh) ! Southern Hemisphere + elseif (i.eq.9) then + if (action) then + oce_heat_flux(:) = exchange(:) ! heat_oce + tmp_oce_heat_flux(:) = exchange(:) ! to reset for flux + ! correction + end if + mask=1.-a_ice + oce_heat_flux(:) = tmp_oce_heat_flux(:) + call force_flux_consv(oce_heat_flux, mask, i, 0,action, mesh) + elseif (i.eq.10) then + if (action) then + ice_heat_flux(:) = exchange(:) ! heat_ice + tmp_ice_heat_flux(:) = exchange(:) ! to reset for flux + ! correction + end if + mask=a_ice + ice_heat_flux(:) = tmp_ice_heat_flux(:) + call force_flux_consv(ice_heat_flux, mask, i, 1,action, mesh) ! Northern hemisphere + call force_flux_consv(ice_heat_flux, mask, i, 2,action, mesh) ! Southern Hemisphere + elseif (i.eq.11) then + if (action) then + shortwave(:) = exchange(:) ! heat_swr + tmp_shortwave(:) = exchange(:) ! to reset for flux + ! correction + end if + mask=1.-a_ice + shortwave(:) = tmp_shortwave(:) + call force_flux_consv(shortwave, mask, i, 0,action, mesh) + elseif (i.eq.12) then + if (action) then + runoff(:) = exchange(:) ! AWI-CM2: runoff, AWI-CM3: runoff + excess snow on glaciers + mask=1. + call force_flux_consv(runoff, mask, i, 0,action, mesh) + end if #if defined (__oifs) - elseif (i.eq.13) then - if (action) then - enthalpyoffuse(:) = exchange(:) ! enthalpy of fusion via solid water discharge from glaciers - mask=1. - call force_flux_consv(enthalpyoffuse, mask, i, 0,action, mesh) - end if - end if + elseif (i.eq.13) then + if (action) then + enthalpyoffuse(:) = exchange(:) ! enthalpy of fusion via solid water discharge from glaciers + mask=1. + call force_flux_consv(enthalpyoffuse, mask, i, 0,action, mesh) + end if + end if #endif #ifdef VERBOSE if (mype==0) then @@ -240,14 +240,14 @@ subroutine update_atm_forcing(istep, mesh) #endif end do - if ((do_rotate_oce_wind .AND. do_rotate_ice_wind) .AND. rotated_grid) then - do n=1, myDim_nod2D+eDim_nod2D - call vector_g2r(stress_atmoce_x(n), stress_atmoce_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) - call vector_g2r(stress_atmice_x(n), stress_atmice_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) - end do - do_rotate_oce_wind=.false. - do_rotate_ice_wind=.false. - end if + if ((do_rotate_oce_wind .AND. do_rotate_ice_wind) .AND. rotated_grid) then + do n=1, myDim_nod2D+eDim_nod2D + call vector_g2r(stress_atmoce_x(n), stress_atmoce_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) + call vector_g2r(stress_atmice_x(n), stress_atmice_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) + end do + do_rotate_oce_wind=.false. + do_rotate_ice_wind=.false. + end if #else call sbc_do(mesh) u_wind = atmdata(i_xwind,:) @@ -264,14 +264,15 @@ subroutine update_atm_forcing(istep, mesh) if (use_cavity) then do i=1,myDim_nod2d+eDim_nod2d if (ulevels_nod2d(i)>1) then - u_wind(i)=0.0_WP - v_wind(i)=0.0_WP - shum(i)=0.0_WP - longwave(i)=0.0_WP - Tair(i)=0.0_WP - prec_rain(i)=0.0_WP - prec_snow(i)=0.0_WP - press_air(i)=0.0_WP + u_wind(i) = 0.0_WP + v_wind(i) = 0.0_WP + shum(i) = 0.0_WP + longwave(i) = 0.0_WP + Tair(i) = 0.0_WP + prec_rain(i)= 0.0_WP + prec_snow(i)= 0.0_WP + press_air(i)= 0.0_WP +! runoff(i) = 0.0_WP end if end do endif From 541ddc860b1ba2beb93197695eda2df5b48e1317 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 29 Jun 2021 15:18:35 +0200 Subject: [PATCH 275/909] set also river runoff zeros where there is cavity since runoff is only added in ice_therm when there is no cavity otherwise balancing might be corrupted --- src/gen_forcing_couple.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 19240fc31..d62207cab 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -272,7 +272,7 @@ subroutine update_atm_forcing(istep, mesh) prec_rain(i)= 0.0_WP prec_snow(i)= 0.0_WP press_air(i)= 0.0_WP -! runoff(i) = 0.0_WP + runoff(i) = 0.0_WP end if end do endif From ba219c2e7a3ada2a7de0a2510f3f8e957817ba54 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 29 Jun 2021 15:41:27 +0200 Subject: [PATCH 276/909] remove cray workaround as the cause has been fixed in bd3645a3 --- src/io_fesom_file.F90 | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 395e120fd..9a64684e8 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -36,7 +36,6 @@ module io_fesom_file_module logical :: thread_running = .false. integer :: comm logical gather_and_write - integer :: mype_workaround contains procedure, public :: async_read_and_scatter_variables, async_gather_and_write_variables, join, init, is_iorank, rec_count, time_varindex, time_dimindex procedure, public :: close_file ! inherited procedures we overwrite @@ -158,9 +157,7 @@ subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to ca ! tough MPI_THREAD_FUNNELED should be enough here, at least on cray-mpich 7.5.3 async mpi calls fail if we do not have support level 'MPI_THREAD_MULTIPLE' ! on cray-mpich we only get level 'MPI_THREAD_MULTIPLE' if 'MPICH_MAX_THREAD_SAFETY=multiple' is set in the environment call MPI_Query_thread(provided_mpi_thread_support_level, err) - if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call this%thread%disable_async() - - this%mype_workaround = mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads + if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call this%thread%disable_async() end subroutine @@ -314,7 +311,6 @@ subroutine async_worker(fesom_file_index) type(fesom_file_type), pointer :: f f => all_fesom_files(fesom_file_index)%ptr - mype = f%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) if(f%gather_and_write) then call f%gather_and_write_variables() From 835a83dc291706ed02ef82cafe467568ddbd8354 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 29 Jun 2021 15:20:41 +0200 Subject: [PATCH 277/909] - use the dedicated communicator copy in read_and_scatter_variables and gather_and_write_variables procedures - do not import the FESOM communicator at all in these procedures --- src/io_fesom_file.F90 | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 9a64684e8..d62a0c6d1 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -162,7 +162,6 @@ subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to ca subroutine read_and_scatter_variables(this) - use g_PARSUP use io_scatter_module class(fesom_file_type), target :: this ! EO parameters @@ -181,7 +180,7 @@ subroutine read_and_scatter_variables(this) is_2d = (nlvl == 1) allocate(laux( size(var%external_local_data_ptr,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D - if(mype == this%iorank) then + if(this%is_iorank()) then ! todo: choose how many levels we read at once if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( var%global_level_data_size )) else @@ -189,7 +188,7 @@ subroutine read_and_scatter_variables(this) end if do lvl=1, nlvl - if(mype == this%iorank) then + if(this%is_iorank()) then if(is_2d) then call this%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) else @@ -199,9 +198,9 @@ subroutine read_and_scatter_variables(this) end if if(var%is_elem_based) then - call scatter_elem2D(var%global_level_data, laux, this%iorank, MPI_comm_fesom) + call scatter_elem2D(var%global_level_data, laux, this%iorank, this%comm) else - call scatter_nod2D(var%global_level_data, laux, this%iorank, MPI_comm_fesom) + call scatter_nod2D(var%global_level_data, laux, this%iorank, this%comm) end if ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI var%external_local_data_ptr(lvl,:) = laux ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) @@ -212,7 +211,6 @@ subroutine read_and_scatter_variables(this) subroutine gather_and_write_variables(this) - use g_PARSUP use io_gather_module class(fesom_file_type), target :: this ! EO parameters @@ -221,7 +219,7 @@ subroutine gather_and_write_variables(this) real(kind=8), allocatable :: laux(:) type(var_info), pointer :: var - if(mype == this%iorank) this%rec_cnt = this%rec_count()+1 + if(this%is_iorank()) this%rec_cnt = this%rec_count()+1 do i=1, this%nvar_infos var => this%var_infos(i) @@ -230,7 +228,7 @@ subroutine gather_and_write_variables(this) is_2d = (nlvl == 1) allocate(laux( size(var%local_data_copy,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D - if(mype == this%iorank) then + if(this%is_iorank()) then ! todo: choose how many levels we write at once if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( var%global_level_data_size )) else @@ -242,12 +240,12 @@ subroutine gather_and_write_variables(this) laux = var%local_data_copy(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) if(var%is_elem_based) then - call gather_elem2D(laux, var%global_level_data, this%iorank, 42, MPI_comm_fesom) + call gather_elem2D(laux, var%global_level_data, this%iorank, 42, this%comm) else - call gather_nod2D (laux, var%global_level_data, this%iorank, 42, MPI_comm_fesom) + call gather_nod2D (laux, var%global_level_data, this%iorank, 42, this%comm) end if - if(mype == this%iorank) then + if(this%is_iorank()) then if(is_2d) then call this%write_var(var%var_index, [1,this%rec_cnt], [size(var%global_level_data),1], var%global_level_data) else @@ -259,7 +257,7 @@ subroutine gather_and_write_variables(this) deallocate(laux) end do - if(mype == this%iorank) call this%flush_file() ! flush the file to disk after each write + if(this%is_iorank()) call this%flush_file() ! flush the file to disk after each write end subroutine From 607e19ae0177e1089976a921afe7c1ba6cbe76a9 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 29 Jun 2021 15:50:08 +0200 Subject: [PATCH 278/909] remove unused module use statements --- src/io_fesom_file.F90 | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index d62a0c6d1..615f23b98 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -71,7 +71,6 @@ function is_iorank(this) result(x) ! return the number of timesteps of the file if a file is attached or return the default value of -1 function rec_count(this) result(x) - use g_PARSUP class(fesom_file_type), intent(inout) :: this integer x ! EO parameters @@ -88,7 +87,6 @@ function rec_count(this) result(x) function time_varindex(this) result(x) - use g_PARSUP class(fesom_file_type), intent(in) :: this integer x x = this%time_varidx @@ -96,7 +94,6 @@ function time_varindex(this) result(x) function time_dimindex(this) result(x) - use g_PARSUP class(fesom_file_type), intent(in) :: this integer x x = this%time_dimidx @@ -303,7 +300,6 @@ subroutine async_gather_and_write_variables(this) subroutine async_worker(fesom_file_index) - use g_PARSUP integer, intent(in) :: fesom_file_index ! EO parameters type(fesom_file_type), pointer :: f @@ -323,7 +319,6 @@ subroutine async_worker(fesom_file_index) ! we have to assign the corresponding dimindx somewhere else, which would be error prone subroutine specify_node_var_2d(this, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING - use g_PARSUP class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname @@ -341,7 +336,6 @@ subroutine specify_node_var_2d(this, name, longname, units, local_data) subroutine specify_node_var_3d(this, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING - use g_PARSUP class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname @@ -358,7 +352,6 @@ subroutine specify_node_var_3d(this, name, longname, units, local_data) subroutine specify_elem_var_2d(this, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING - use g_PARSUP class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname @@ -376,7 +369,6 @@ subroutine specify_elem_var_2d(this, name, longname, units, local_data) subroutine specify_elem_var_3d(this, name, longname, units, local_data) use, intrinsic :: ISO_C_BINDING - use g_PARSUP class(fesom_file_type), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname From b70ba4f5d973c0dbe40954360efe4f3cda0f7a7f Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 30 Jun 2021 12:08:46 +0200 Subject: [PATCH 279/909] add some comments --- src/oce_ale.F90 | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 447929d75..d231cbf61 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2719,20 +2719,27 @@ subroutine oce_timestep_ale(n, mesh) call compute_hbar_ale(mesh) !___________________________________________________________________________ - ! Current dynamic elevation alpha*hbar(n+1/2)+(1-alpha)*hbar(n-1/2) - ! equation (14) Danlov et.al "the finite volume sea ice ocean model FESOM2 - ! ...if we do it here we don't need to write hbar_old into a restart file... + ! - Current dynamic elevation alpha*hbar(n+1/2)+(1-alpha)*hbar(n-1/2) + ! equation (14) Danlov et.al "the finite volume sea ice ocean model FESOM2 + ! ...if we do it here we don't need to write hbar_old into a restart file... + ! - where(ulevels_nod2D==1) is used here because of the rigid lid + ! approximation under the cavity + ! - at points in the cavity the time derivative term in ssh matrix will be + ! omitted; and (14) will not be applied at cavity points. Additionally, + ! since there is no real elevation, but only surface pressure, there is + ! no layer motion under the cavity. In this case the ice sheet acts as a + ! rigid lid. where(ulevels_nod2D==1) eta_n=alpha*hbar+(1.0_WP-alpha)*hbar_old - ! --> eta_(n) ! call zero_dynamics !DS, zeros several dynamical variables; to be used for testing new implementations! t5=MPI_Wtime() + !___________________________________________________________________________ + ! Do horizontal and vertical scaling of GM/Redi diffusivity if (Fer_GM .or. Redi) then call init_Redi_GM(mesh) end if - !___________________________________________________________________________ ! Implementation of Gent & McWiliams parameterization after R. Ferrari et al., 2010 ! does not belong directly to ALE formalism if (Fer_GM) then From a814b895223885b42fca50f5baec4272d39597d8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 30 Jun 2021 12:16:52 +0200 Subject: [PATCH 280/909] in case of cavity do freshwater balancing only for the open ocean part since under the cavity there is rigid lid approximation --- src/ice_oce_coupling.F90 | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 5ff9c3f57..f0efc5843 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -316,14 +316,24 @@ subroutine oce_fluxes(mesh) where (ulevels_nod2d > 1) flux = 0.0_WP end if end if - + + ! compute total global net freshwater flux into the ocean call integrate_nod(flux, net, mesh) + + !___________________________________________________________________________ ! here the + sign must be used because we switched up the sign of the ! water_flux with water_flux = -fresh_wa_flux, but evap, prec_... and runoff still ! have there original sign ! if use_cavity=.false. --> ocean_area == ocean_areawithcav !! water_flux=water_flux+net/ocean_area - water_flux=water_flux+net/ocean_areawithcav + if (use_cavity) then + ! due to rigid lid approximation under the cavity we to not add freshwater + ! under the cavity for the freshwater balancing we do this only for the open + ! ocean + where (ulevels_nod2d == 1) water_flux=water_flux+net/ocean_area + else + water_flux=water_flux+net/ocean_area + end if !___________________________________________________________________________ if (use_sw_pene) call cal_shortwave_rad(mesh) From eb1809886d884db80519519288d57ebe18f685bd Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 30 Jun 2021 13:56:42 +0200 Subject: [PATCH 281/909] compute integrate parameter also under the cavity --- src/write_step_info.F90 | 1008 +++++++++++++++++++-------------------- 1 file changed, 504 insertions(+), 504 deletions(-) diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 9ca952efe..9e0ad0eab 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,504 +1,504 @@ -module write_step_info_interface - interface - subroutine write_step_info(istep,outfreq, mesh) - use MOD_MESH - integer :: istep,outfreq - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module - -! -! -!=============================================================================== -subroutine write_step_info(istep,outfreq, mesh) - use g_config, only: dt, use_ice - use MOD_MESH - use o_PARAM - use g_PARSUP - use o_ARRAYS - use i_ARRAYS - use g_comm_auto - implicit none - - integer :: n, istep,outfreq - real(kind=WP) :: int_eta, int_hbar, int_wflux, int_hflux, int_temp, int_salt - real(kind=WP) :: min_eta, min_hbar, min_wflux, min_hflux, min_temp, min_salt, & - min_wvel,min_hnode,min_deta,min_wvel2,min_hnode2, & - min_vvel, min_vvel2, min_uvel, min_uvel2 - real(kind=WP) :: max_eta, max_hbar, max_wflux, max_hflux, max_temp, max_salt, & - max_wvel, max_hnode, max_deta, max_wvel2, max_hnode2, max_m_ice, & - max_vvel, max_vvel2, max_uvel, max_uvel2, & - max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av - real(kind=WP) :: int_deta , int_dhbar - real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" - if (mod(istep,outfreq)==0) then - - !_______________________________________________________________________ - int_eta =0. - int_hbar =0. - int_deta =0. - int_dhbar =0. - int_wflux =0. - int_hflux =0. - int_temp =0. - int_salt =0. - loc_eta =0. - loc_hbar =0. - loc_deta =0. - loc_dhbar =0. - loc_wflux =0. -!!PS loc_hflux =0. -!!PS loc_temp =0. -!!PS loc_salt =0. - loc =0. - !_______________________________________________________________________ - do n=1, myDim_nod2D - if (ulevels_nod2D(n)>1) cycle - loc_eta = loc_eta + area(ulevels_nod2D(n), n)*eta_n(n) - loc_hbar = loc_hbar + area(ulevels_nod2D(n), n)*hbar(n) - loc_deta = loc_deta + area(ulevels_nod2D(n), n)*d_eta(n) - loc_dhbar = loc_dhbar + area(ulevels_nod2D(n), n)*(hbar(n)-hbar_old(n)) - loc_wflux = loc_wflux + area(ulevels_nod2D(n), n)*water_flux(n) -!!PS loc_hflux = loc_hflux + area(1, n)*heat_flux(n) -!!PS loc_temp = loc_temp + area(1, n)*sum(tr_arr(:,n,1))/(nlevels_nod2D(n)-1) -!!PS loc_salt = loc_salt + area(1, n)*sum(tr_arr(:,n,2))/(nlevels_nod2D(n)-1) - end do - - !_______________________________________________________________________ - call MPI_AllREDUCE(loc_eta , int_eta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_hbar , int_hbar , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_deta , int_deta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_dhbar, int_dhbar, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_wflux, int_wflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) -!!PS call MPI_AllREDUCE(loc_hflux, int_hflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) -!!PS call MPI_AllREDUCE(loc_temp , int_temp , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) -!!PS call MPI_AllREDUCE(loc_salt , int_salt , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - - int_eta = int_eta /ocean_area - int_hbar = int_hbar /ocean_area - int_deta = int_deta /ocean_area - int_dhbar= int_dhbar/ocean_area - int_wflux= int_wflux/ocean_area - -!!PS int_eta = int_eta /ocean_areawithcav -!!PS int_hbar = int_hbar /ocean_areawithcav -!!PS int_deta = int_deta /ocean_areawithcav -!!PS int_dhbar= int_dhbar/ocean_areawithcav -!!PS int_wflux= int_wflux/ocean_areawithcav - -!!PS int_hflux= int_hflux/ocean_area -!!PS int_temp = int_temp /ocean_area -!!PS int_salt = int_salt /ocean_area - - !_______________________________________________________________________ - loc = minval(eta_n(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_eta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(hbar(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(water_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(heat_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(tr_arr(:,1:myDim_nod2D,1),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) - call MPI_AllREDUCE(loc , min_temp , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(tr_arr(:,1:myDim_nod2D,2),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) - call MPI_AllREDUCE(loc , min_salt , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Wvel(1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Wvel(2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(1,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(1,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(2,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(2,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(d_eta(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_deta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(hnode(1,1:myDim_nod2D),MASK=(hnode(1,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , min_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(hnode(2,1:myDim_nod2D),MASK=(hnode(2,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , min_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - - !_______________________________________________________________________ - loc = maxval(eta_n(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_eta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(hbar(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(water_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(heat_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(tr_arr(:,1:myDim_nod2D,1),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) - call MPI_AllREDUCE(loc , max_temp , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(tr_arr(:,1:myDim_nod2D,2),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) - call MPI_AllREDUCE(loc , max_salt , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Wvel(1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Wvel(2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(1,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(1,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(2,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(2,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(d_eta(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_deta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(hnode(1,1:myDim_nod2D),MASK=(hnode(1,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , max_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(hnode(2,1:myDim_nod2D),MASK=(hnode(2,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , max_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(CFL_z(:,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_cfl_z , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(abs(pgf_x(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_pgfx , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(abs(pgf_y(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_pgfy , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - if (use_ice) then - loc = maxval(m_ice(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_m_ice , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - end if - loc = maxval(abs(Av(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_av , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(abs(Kv(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_kv , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - !_______________________________________________________________________ - if (mype==0) then - write(*,*) '___CHECK GLOBAL OCEAN VARIABLES --> mstep=',mstep - write(*,*) ' ___global estimat of eta & hbar____________________' - write(*,*) ' int(eta), int(hbar) =', int_eta, int_hbar - write(*,*) ' --> error(eta-hbar) =', int_eta-int_hbar - write(*,*) ' min(eta) , max(eta) =', min_eta, max_eta - write(*,*) ' max(hbar), max(hbar) =', min_hbar, max_hbar - write(*,*) - write(*,*) ' int(deta), int(dhbar) =', int_deta, int_dhbar - write(*,*) ' --> error(deta-dhbar) =', int_deta-int_dhbar - write(*,*) ' --> error(deta-wflux) =', int_deta-int_wflux - write(*,*) ' --> error(dhbar-wflux) =', int_dhbar-int_wflux - write(*,*) - write(*,*) ' -int(wflux)*dt =', int_wflux*dt*(-1.0) - write(*,*) ' int(deta )-int(wflux)*dt =', int_deta-int_wflux*dt*(-1.0) - write(*,*) ' int(dhbar)-int(wflux)*dt =', int_dhbar-int_wflux*dt*(-1.0) - write(*,*) - write(*,*) ' ___global min/max/mean --> mstep=',mstep,'____________' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' eta= ', min_eta ,' | ',max_eta ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' deta= ', min_deta ,' | ',max_deta ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hbar= ', min_hbar ,' | ',max_hbar ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' wflux= ', min_wflux,' | ',max_wflux,' | ',int_wflux - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' hflux= ', min_hflux,' | ',max_hflux,' | ',int_hflux - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' temp= ', min_temp ,' | ',max_temp ,' | ',int_temp - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' salt= ', min_salt ,' | ',max_salt ,' | ',int_salt - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(1,:)= ', min_wvel ,' | ',max_wvel ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(2,:)= ', min_wvel2,' | ',max_wvel2,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(1,:)= ', min_uvel ,' | ',max_uvel ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(2,:)= ', min_uvel2,' | ',max_uvel2,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(1,:)= ', min_vvel ,' | ',max_vvel ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(2,:)= ', min_vvel2,' | ',max_vvel2,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(1,:)= ', min_hnode,' | ',max_hnode,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(2,:)= ', min_hnode2,' | ',max_hnode2,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' cfl_z= ',' N.A. ',' | ',max_cfl_z ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' pgf_x= ',' N.A. ',' | ',max_pgfx ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' pgf_y= ',' N.A. ',' | ',max_pgfy ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' Av= ',' N.A. ',' | ',max_av ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' Kv= ',' N.A. ',' | ',max_kv ,' | ','N.A.' - if (use_ice) write(*,"(A, A , A, ES10.3, A, A )") ' m_ice= ',' N.A. ',' | ',max_m_ice ,' | ','N.A.' - write(*,*) - endif - endif ! --> if (mod(istep,logfile_outfreq)==0) then -end subroutine write_step_info -! -! -!=============================================================================== -subroutine check_blowup(istep, mesh) - use g_config, only: logfile_outfreq, which_ALE - use MOD_MESH - use o_PARAM - use g_PARSUP - use o_ARRAYS - use i_ARRAYS - use g_comm_auto - use io_BLOWUP - use g_forcing_arrays - use diagnostics - use write_step_info_interface - implicit none - - integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 - integer :: el, elidx - type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" - !___________________________________________________________________________ -! ! if (mod(istep,logfile_outfreq)==0) then -! ! if (mype==0) then -! ! write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep -! ! write(*,*) -! ! endif - do n=1, myDim_nod2d - - !___________________________________________________________________ - ! check ssh - if ( ((eta_n(n) /= eta_n(n)) .or. & - eta_n(n)<-50.0 .or. eta_n(n)>50.0 .or. & - (d_eta(n) /= d_eta(n)) ) ) then -!!PS eta_n(n)<-10.0 .or. eta_n(n)>10.0)) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found eta_n become NaN or <-10.0, >10.0' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - write(*,*) 'eta_n(n) = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) - write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) - write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) - write(*,*) - write(*,*) 'ssh_rhs = ',ssh_rhs(n),', ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'hbar = ',hbar(n),', hbar_old = ',hbar_old(n) - write(*,*) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) - write(*,*) 'u_wind = ',u_wind(n),', v_wind = ',v_wind(n) - write(*,*) - do nz=1,nod_in_elem2D_num(n) - write(*,*) 'stress_surf(1:2,',nz,') = ',stress_surf(:,nod_in_elem2D(nz,n)) - end do - write(*,*) - write(*,*) 'm_ice = ',m_ice(n),', m_ice_old = ',m_ice_old(n) - write(*,*) 'a_ice = ',a_ice(n),', a_ice_old = ',a_ice_old(n) -!!PS write(*,*) 'thdgr = ',thdgr(n),', thdgr_old = ',thdgr_old(n) -!!PS write(*,*) 'thdgrsn = ',thdgrsn(n) - write(*,*) -!!PS if (lcurt_stress_surf) then -!!PS write(*,*) 'curl_stress_surf = ',curl_stress_surf(n) -!!PS write(*,*) -!!PS endif -!!PS do el=1,nod_in_elem2d_num(n) -!!PS elidx = nod_in_elem2D(el,n) -!!PS write(*,*) ' elem#=',el,', elemidx=',elidx -!!PS write(*,*) ' pgf_x =',pgf_x(:,elidx) -!!PS write(*,*) ' pgf_y =',pgf_y(:,elidx) -!!PS ! write(*,*) ' U =',UV(1,:,elidx) -!!PS ! write(*,*) ' V =',UV(2,:,elidx) -!!PS write(*,*) -!!PS enddo -!!PS write(*,*) 'Wvel(1, n) = ',Wvel(,n) - write(*,*) 'Wvel(:, n) = ',Wvel(ulevels_nod2D(n):nlevels_nod2D(n),n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(ulevels_nod2D(n):nlevels_nod2D(n),n) - write(*,*) -!!PS write(*,*) 'hnode(1, n) = ',hnode(1,n) - write(*,*) 'hnode(:, n) = ',hnode(ulevels_nod2D(n):nlevels_nod2D(n),n) - write(*,*) - - endif - - !___________________________________________________________________ - ! check surface vertical velocity --> in case of zlevel and zstar - ! vertical coordinate its indicator if Volume is conserved for - ! Wvel(1,n)~maschine preccision -!!PS if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) .or. abs(Wvel(1,n))>1e-12 )) then - if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) )) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found surface layer vertical velocity becomes NaN or >1e-12' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - write(*,*) 'Wvel(1, n) = ',Wvel(1,n) - write(*,*) 'Wvel(:, n) = ',Wvel(:,n) - write(*,*) - write(*,*) 'hnode(1, n) = ',hnode(1,n) - write(*,*) 'hnode(:, n) = ',hnode(:,n) - write(*,*) 'hflux = ',heat_flux(n) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) - write(*,*) 'eta_n = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) 'hbar = ',hbar(n) - write(*,*) 'hbar_old = ',hbar_old(n) - write(*,*) 'ssh_rhs = ',ssh_rhs(n) - write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) - write(*,*) - - end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... - - !___________________________________________________________________ - ! check surface layer thinknesss - if ( .not. trim(which_ALE)=='linfs' .and. ( hnode(1, n) /= hnode(1, n) .or. hnode(1,n)< 0 )) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found surface layer thickness becomes NaN or <0' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) - write(*,*) 'hnode(1, n) = ',hnode(1,n) - write(*,*) 'hnode(:, n) = ',hnode(:,n) - write(*,*) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... - - - do nz=1,nlevels_nod2D(n)-1 - !_______________________________________________________________ - ! check temp - if ( (tr_arr(nz, n,1) /= tr_arr(nz, n,1)) .or. & - tr_arr(nz, n,1) < -5.0 .or. tr_arr(nz, n,1)>60) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found temperture becomes NaN or <-5.0, >60' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'lon,lat = ',geo_coord_nod2D(:,n)/rad - write(*,*) 'nz = ',nz - write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) - write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad - write(*,*) 'z=', Z_n(nz) - write(*,*) 'temp(nz, n) = ',tr_arr(nz, n,1) - write(*,*) 'temp(: , n) = ',tr_arr(:, n,1) - write(*,*) 'temp_old(nz,n)= ',tr_arr_old(nz, n,1) - write(*,*) 'temp_old(: ,n)= ',tr_arr_old(:, n,1) - write(*,*) - write(*,*) 'hflux = ',heat_flux(n) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) - write(*,*) 'eta_n = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) 'hbar = ',hbar(n) - write(*,*) 'hbar_old = ',hbar_old(n) - write(*,*) 'ssh_rhs = ',ssh_rhs(n) - write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'm_ice = ',m_ice(n) - write(*,*) 'm_ice_old = ',m_ice_old(n) - write(*,*) 'm_snow = ',m_snow(n) - write(*,*) 'm_snow_old = ',m_snow_old(n) - write(*,*) - write(*,*) 'hnode = ',hnode(:,n) - write(*,*) 'hnode_new = ',hnode_new(:,n) - write(*,*) - write(*,*) 'Kv = ',Kv(:,n) - write(*,*) - write(*,*) 'W = ',Wvel(:,n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) - write(*,*) -! do el=1,nod_in_elem2d_num(n) -! elidx = nod_in_elem2D(el,n) -! write(*,*) ' elem#=',el,', elemidx=',elidx -! write(*,*) ' helem =',helem(:,elidx) -! write(*,*) ' U =',UV(1,:,elidx) -! write(*,*) ' V =',UV(2,:,elidx) -! enddo - write(*,*) - - endif ! --> if ( (tr_arr(nz, n,1) /= tr_arr(nz, n,1)) .or. & ... - - !_______________________________________________________________ - ! check salt - if ( (tr_arr(nz, n,2) /= tr_arr(nz, n,2)) .or. & - tr_arr(nz, n,2) < 0 .or. tr_arr(nz, n,2)>50 ) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found salinity becomes NaN or <0, >50' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'nz = ',nz - write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) - write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad - write(*,*) 'z=', Z_n(nz) - write(*,*) 'salt(nz, n) = ',tr_arr(nz, n,2) - write(*,*) 'salt(: , n) = ',tr_arr(:, n,2) - write(*,*) - write(*,*) 'temp(nz, n) = ',tr_arr(nz, n,1) - write(*,*) 'temp(: , n) = ',tr_arr(:, n,1) - write(*,*) - write(*,*) 'hflux = ',heat_flux(n) - write(*,*) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) 'eta_n = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) 'hbar = ',hbar(n) - write(*,*) 'hbar_old = ',hbar_old(n) - write(*,*) 'ssh_rhs = ',ssh_rhs(n) - write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'hnode = ',hnode(:,n) - write(*,*) 'hnode_new = ',hnode_new(:,n) - write(*,*) - write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) - write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) - write(*,*) - write(*,*) 'Kv = ',Kv(:,n) - write(*,*) - do el=1,nod_in_elem2d_num(n) - elidx = nod_in_elem2D(el,n) - write(*,*) ' elem#=',el,', elemidx=',elidx - write(*,*) ' Av =',Av(:,elidx) -! write(*,*) ' helem =',helem(:,elidx) -! write(*,*) ' U =',UV(1,:,elidx) -! write(*,*) ' V =',UV(2,:,elidx) - enddo - write(*,*) 'Wvel = ',Wvel(:,n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) - write(*,*) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - endif ! --> if ( (tr_arr(nz, n,2) /= tr_arr(nz, n,2)) .or. & ... - end do ! --> do nz=1,nlevels_nod2D(n)-1 - end do ! --> do n=1, myDim_nod2d -! ! end if - - !_______________________________________________________________________ - ! check globally if one of the cpus hat a blowup situation. if its the - ! case CPU mype==0 needs to write out the stuff. Write out occurs in - ! moment only over CPU mype==0 - call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) - if (found_blowup==1) then - call write_step_info(istep,1,mesh) - if (mype==0) then - call sleep(1) - write(*,*) - write(*,*) ' MODEL BLOW UP !!!' - write(*,*) ' ____' - write(*,*) ' __,-~~/~ `---.' - write(*,*) ' _/_,---( , )' - write(*,*) ' __ / < / ) \___' - write(*,*) '- -- ----===;;;`====------------------===;;;===---- -- -' - write(*,*) ' \/ ~"~"~"~"~"~\~"~)~"/' - write(*,*) ' (_ ( \ ( > \)' - write(*,*) ' \_( _ < >_>`' - write(*,*) ' ~ `-i` ::>|--"' - write(*,*) ' I;|.|.|' - write(*,*) ' <|i::|i|`' - write(*,*) ' (` ^`"`- ")' - write(*,*) ' _____.,-#%&$@%#&#~,._____' - write(*,*) - end if - call blowup(istep, mesh) - if (mype==0) write(*,*) ' --> finished writing blow up file' - call par_ex - endif -end subroutine +module write_step_info_interface + interface + subroutine write_step_info(istep,outfreq, mesh) + use MOD_MESH + integer :: istep,outfreq + type(t_mesh), intent(in) , target :: mesh + end subroutine + end interface +end module + +! +! +!=============================================================================== +subroutine write_step_info(istep,outfreq, mesh) + use g_config, only: dt, use_ice + use MOD_MESH + use o_PARAM + use g_PARSUP + use o_ARRAYS + use i_ARRAYS + use g_comm_auto + implicit none + + integer :: n, istep,outfreq + real(kind=WP) :: int_eta, int_hbar, int_wflux, int_hflux, int_temp, int_salt + real(kind=WP) :: min_eta, min_hbar, min_wflux, min_hflux, min_temp, min_salt, & + min_wvel,min_hnode,min_deta,min_wvel2,min_hnode2, & + min_vvel, min_vvel2, min_uvel, min_uvel2 + real(kind=WP) :: max_eta, max_hbar, max_wflux, max_hflux, max_temp, max_salt, & + max_wvel, max_hnode, max_deta, max_wvel2, max_hnode2, max_m_ice, & + max_vvel, max_vvel2, max_uvel, max_uvel2, & + max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av + real(kind=WP) :: int_deta , int_dhbar + real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt + type(t_mesh), intent(in) , target :: mesh +#include "associate_mesh.h" + if (mod(istep,outfreq)==0) then + + !_______________________________________________________________________ + int_eta =0. + int_hbar =0. + int_deta =0. + int_dhbar =0. + int_wflux =0. + int_hflux =0. + int_temp =0. + int_salt =0. + loc_eta =0. + loc_hbar =0. + loc_deta =0. + loc_dhbar =0. + loc_wflux =0. +!!PS loc_hflux =0. +!!PS loc_temp =0. +!!PS loc_salt =0. + loc =0. + !_______________________________________________________________________ + do n=1, myDim_nod2D +!!PS if (ulevels_nod2D(n)>1) cycle + loc_eta = loc_eta + areasvol(ulevels_nod2D(n), n)*eta_n(n) + loc_hbar = loc_hbar + areasvol(ulevels_nod2D(n), n)*hbar(n) + loc_deta = loc_deta + areasvol(ulevels_nod2D(n), n)*d_eta(n) + loc_dhbar = loc_dhbar + areasvol(ulevels_nod2D(n), n)*(hbar(n)-hbar_old(n)) + loc_wflux = loc_wflux + areasvol(ulevels_nod2D(n), n)*water_flux(n) +!!PS loc_hflux = loc_hflux + area(1, n)*heat_flux(n) +!!PS loc_temp = loc_temp + area(1, n)*sum(tr_arr(:,n,1))/(nlevels_nod2D(n)-1) +!!PS loc_salt = loc_salt + area(1, n)*sum(tr_arr(:,n,2))/(nlevels_nod2D(n)-1) + end do + + !_______________________________________________________________________ + call MPI_AllREDUCE(loc_eta , int_eta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_hbar , int_hbar , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_deta , int_deta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_dhbar, int_dhbar, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_wflux, int_wflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) +!!PS call MPI_AllREDUCE(loc_hflux, int_hflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) +!!PS call MPI_AllREDUCE(loc_temp , int_temp , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) +!!PS call MPI_AllREDUCE(loc_salt , int_salt , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) +! +!!PS int_eta = int_eta /ocean_area +!!PS int_hbar = int_hbar /ocean_area +!!PS int_deta = int_deta /ocean_area +!!PS int_dhbar= int_dhbar/ocean_area +!!PS int_wflux= int_wflux/ocean_area + + int_eta = int_eta /ocean_areawithcav + int_hbar = int_hbar /ocean_areawithcav + int_deta = int_deta /ocean_areawithcav + int_dhbar= int_dhbar/ocean_areawithcav + int_wflux= int_wflux/ocean_areawithcav + +!!PS int_hflux= int_hflux/ocean_area +!!PS int_temp = int_temp /ocean_area +!!PS int_salt = int_salt /ocean_area + + !_______________________________________________________________________ + loc = minval(eta_n(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_eta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(hbar(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(water_flux(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(heat_flux(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(tr_arr(:,1:myDim_nod2D,1),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) + call MPI_AllREDUCE(loc , min_temp , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(tr_arr(:,1:myDim_nod2D,2),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) + call MPI_AllREDUCE(loc , min_salt , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(Wvel(1,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(Wvel(2,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(Unode(1,1,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(Unode(1,2,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(Unode(2,1,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(Unode(2,2,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(d_eta(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , min_deta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(hnode(1,1:myDim_nod2D),MASK=(hnode(1,1:myDim_nod2D)/=0.0)) + call MPI_AllREDUCE(loc , min_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc = minval(hnode(2,1:myDim_nod2D),MASK=(hnode(2,1:myDim_nod2D)/=0.0)) + call MPI_AllREDUCE(loc , min_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + + !_______________________________________________________________________ + loc = maxval(eta_n(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_eta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(hbar(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(water_flux(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(heat_flux(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(tr_arr(:,1:myDim_nod2D,1),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) + call MPI_AllREDUCE(loc , max_temp , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(tr_arr(:,1:myDim_nod2D,2),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) + call MPI_AllREDUCE(loc , max_salt , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(Wvel(1,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(Wvel(2,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(Unode(1,1,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(Unode(1,2,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(Unode(2,1,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(Unode(2,2,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(d_eta(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_deta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(hnode(1,1:myDim_nod2D),MASK=(hnode(1,1:myDim_nod2D)/=0.0)) + call MPI_AllREDUCE(loc , max_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(hnode(2,1:myDim_nod2D),MASK=(hnode(2,1:myDim_nod2D)/=0.0)) + call MPI_AllREDUCE(loc , max_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(CFL_z(:,1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_cfl_z , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(abs(pgf_x(:,1:myDim_nod2D))) + call MPI_AllREDUCE(loc , max_pgfx , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(abs(pgf_y(:,1:myDim_nod2D))) + call MPI_AllREDUCE(loc , max_pgfy , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + if (use_ice) then + loc = maxval(m_ice(1:myDim_nod2D)) + call MPI_AllREDUCE(loc , max_m_ice , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + end if + loc = maxval(abs(Av(:,1:myDim_nod2D))) + call MPI_AllREDUCE(loc , max_av , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc = maxval(abs(Kv(:,1:myDim_nod2D))) + call MPI_AllREDUCE(loc , max_kv , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + !_______________________________________________________________________ + if (mype==0) then + write(*,*) '___CHECK GLOBAL OCEAN VARIABLES --> mstep=',mstep + write(*,*) ' ___global estimat of eta & hbar____________________' + write(*,*) ' int(eta), int(hbar) =', int_eta, int_hbar + write(*,*) ' --> error(eta-hbar) =', int_eta-int_hbar + write(*,*) ' min(eta) , max(eta) =', min_eta, max_eta + write(*,*) ' max(hbar), max(hbar) =', min_hbar, max_hbar + write(*,*) + write(*,*) ' int(deta), int(dhbar) =', int_deta, int_dhbar + write(*,*) ' --> error(deta-dhbar) =', int_deta-int_dhbar + write(*,*) ' --> error(deta-wflux) =', int_deta-int_wflux + write(*,*) ' --> error(dhbar-wflux) =', int_dhbar-int_wflux + write(*,*) + write(*,*) ' -int(wflux)*dt =', int_wflux*dt*(-1.0) + write(*,*) ' int(deta )-int(wflux)*dt =', int_deta-int_wflux*dt*(-1.0) + write(*,*) ' int(dhbar)-int(wflux)*dt =', int_dhbar-int_wflux*dt*(-1.0) + write(*,*) + write(*,*) ' ___global min/max/mean --> mstep=',mstep,'____________' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' eta= ', min_eta ,' | ',max_eta ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' deta= ', min_deta ,' | ',max_deta ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hbar= ', min_hbar ,' | ',max_hbar ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' wflux= ', min_wflux,' | ',max_wflux,' | ',int_wflux + write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' hflux= ', min_hflux,' | ',max_hflux,' | ',int_hflux + write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' temp= ', min_temp ,' | ',max_temp ,' | ',int_temp + write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' salt= ', min_salt ,' | ',max_salt ,' | ',int_salt + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(1,:)= ', min_wvel ,' | ',max_wvel ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(2,:)= ', min_wvel2,' | ',max_wvel2,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(1,:)= ', min_uvel ,' | ',max_uvel ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(2,:)= ', min_uvel2,' | ',max_uvel2,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(1,:)= ', min_vvel ,' | ',max_vvel ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(2,:)= ', min_vvel2,' | ',max_vvel2,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(1,:)= ', min_hnode,' | ',max_hnode,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(2,:)= ', min_hnode2,' | ',max_hnode2,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' cfl_z= ',' N.A. ',' | ',max_cfl_z ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' pgf_x= ',' N.A. ',' | ',max_pgfx ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' pgf_y= ',' N.A. ',' | ',max_pgfy ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' Av= ',' N.A. ',' | ',max_av ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' Kv= ',' N.A. ',' | ',max_kv ,' | ','N.A.' + if (use_ice) write(*,"(A, A , A, ES10.3, A, A )") ' m_ice= ',' N.A. ',' | ',max_m_ice ,' | ','N.A.' + write(*,*) + endif + endif ! --> if (mod(istep,logfile_outfreq)==0) then +end subroutine write_step_info +! +! +!=============================================================================== +subroutine check_blowup(istep, mesh) + use g_config, only: logfile_outfreq, which_ALE + use MOD_MESH + use o_PARAM + use g_PARSUP + use o_ARRAYS + use i_ARRAYS + use g_comm_auto + use io_BLOWUP + use g_forcing_arrays + use diagnostics + use write_step_info_interface + implicit none + + integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 + integer :: el, elidx + type(t_mesh), intent(in), target :: mesh +#include "associate_mesh.h" + !___________________________________________________________________________ +! ! if (mod(istep,logfile_outfreq)==0) then +! ! if (mype==0) then +! ! write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep +! ! write(*,*) +! ! endif + do n=1, myDim_nod2d + + !___________________________________________________________________ + ! check ssh + if ( ((eta_n(n) /= eta_n(n)) .or. & + eta_n(n)<-50.0 .or. eta_n(n)>50.0 .or. & + (d_eta(n) /= d_eta(n)) ) ) then +!!PS eta_n(n)<-10.0 .or. eta_n(n)>10.0)) then + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found eta_n become NaN or <-10.0, >10.0' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) + write(*,*) 'eta_n(n) = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) + write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) + write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) + write(*,*) + write(*,*) 'ssh_rhs = ',ssh_rhs(n),', ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'hbar = ',hbar(n),', hbar_old = ',hbar_old(n) + write(*,*) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) + write(*,*) 'u_wind = ',u_wind(n),', v_wind = ',v_wind(n) + write(*,*) + do nz=1,nod_in_elem2D_num(n) + write(*,*) 'stress_surf(1:2,',nz,') = ',stress_surf(:,nod_in_elem2D(nz,n)) + end do + write(*,*) + write(*,*) 'm_ice = ',m_ice(n),', m_ice_old = ',m_ice_old(n) + write(*,*) 'a_ice = ',a_ice(n),', a_ice_old = ',a_ice_old(n) +!!PS write(*,*) 'thdgr = ',thdgr(n),', thdgr_old = ',thdgr_old(n) +!!PS write(*,*) 'thdgrsn = ',thdgrsn(n) + write(*,*) +!!PS if (lcurt_stress_surf) then +!!PS write(*,*) 'curl_stress_surf = ',curl_stress_surf(n) +!!PS write(*,*) +!!PS endif +!!PS do el=1,nod_in_elem2d_num(n) +!!PS elidx = nod_in_elem2D(el,n) +!!PS write(*,*) ' elem#=',el,', elemidx=',elidx +!!PS write(*,*) ' pgf_x =',pgf_x(:,elidx) +!!PS write(*,*) ' pgf_y =',pgf_y(:,elidx) +!!PS ! write(*,*) ' U =',UV(1,:,elidx) +!!PS ! write(*,*) ' V =',UV(2,:,elidx) +!!PS write(*,*) +!!PS enddo +!!PS write(*,*) 'Wvel(1, n) = ',Wvel(,n) + write(*,*) 'Wvel(:, n) = ',Wvel(ulevels_nod2D(n):nlevels_nod2D(n),n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(ulevels_nod2D(n):nlevels_nod2D(n),n) + write(*,*) +!!PS write(*,*) 'hnode(1, n) = ',hnode(1,n) + write(*,*) 'hnode(:, n) = ',hnode(ulevels_nod2D(n):nlevels_nod2D(n),n) + write(*,*) + + endif + + !___________________________________________________________________ + ! check surface vertical velocity --> in case of zlevel and zstar + ! vertical coordinate its indicator if Volume is conserved for + ! Wvel(1,n)~maschine preccision +!!PS if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) .or. abs(Wvel(1,n))>1e-12 )) then + if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) )) then + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found surface layer vertical velocity becomes NaN or >1e-12' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) + write(*,*) 'Wvel(1, n) = ',Wvel(1,n) + write(*,*) 'Wvel(:, n) = ',Wvel(:,n) + write(*,*) + write(*,*) 'hnode(1, n) = ',hnode(1,n) + write(*,*) 'hnode(:, n) = ',hnode(:,n) + write(*,*) 'hflux = ',heat_flux(n) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) + write(*,*) 'eta_n = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) 'hbar = ',hbar(n) + write(*,*) 'hbar_old = ',hbar_old(n) + write(*,*) 'ssh_rhs = ',ssh_rhs(n) + write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) + write(*,*) + + end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... + + !___________________________________________________________________ + ! check surface layer thinknesss + if ( .not. trim(which_ALE)=='linfs' .and. ( hnode(1, n) /= hnode(1, n) .or. hnode(1,n)< 0 )) then + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found surface layer thickness becomes NaN or <0' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) + write(*,*) 'hnode(1, n) = ',hnode(1,n) + write(*,*) 'hnode(:, n) = ',hnode(:,n) + write(*,*) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) + end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... + + + do nz=1,nlevels_nod2D(n)-1 + !_______________________________________________________________ + ! check temp + if ( (tr_arr(nz, n,1) /= tr_arr(nz, n,1)) .or. & + tr_arr(nz, n,1) < -5.0 .or. tr_arr(nz, n,1)>60) then + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found temperture becomes NaN or <-5.0, >60' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'lon,lat = ',geo_coord_nod2D(:,n)/rad + write(*,*) 'nz = ',nz + write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) + write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad + write(*,*) 'z=', Z_n(nz) + write(*,*) 'temp(nz, n) = ',tr_arr(nz, n,1) + write(*,*) 'temp(: , n) = ',tr_arr(:, n,1) + write(*,*) 'temp_old(nz,n)= ',tr_arr_old(nz, n,1) + write(*,*) 'temp_old(: ,n)= ',tr_arr_old(:, n,1) + write(*,*) + write(*,*) 'hflux = ',heat_flux(n) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) + write(*,*) 'eta_n = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) 'hbar = ',hbar(n) + write(*,*) 'hbar_old = ',hbar_old(n) + write(*,*) 'ssh_rhs = ',ssh_rhs(n) + write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'm_ice = ',m_ice(n) + write(*,*) 'm_ice_old = ',m_ice_old(n) + write(*,*) 'm_snow = ',m_snow(n) + write(*,*) 'm_snow_old = ',m_snow_old(n) + write(*,*) + write(*,*) 'hnode = ',hnode(:,n) + write(*,*) 'hnode_new = ',hnode_new(:,n) + write(*,*) + write(*,*) 'Kv = ',Kv(:,n) + write(*,*) + write(*,*) 'W = ',Wvel(:,n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) + write(*,*) +! do el=1,nod_in_elem2d_num(n) +! elidx = nod_in_elem2D(el,n) +! write(*,*) ' elem#=',el,', elemidx=',elidx +! write(*,*) ' helem =',helem(:,elidx) +! write(*,*) ' U =',UV(1,:,elidx) +! write(*,*) ' V =',UV(2,:,elidx) +! enddo + write(*,*) + + endif ! --> if ( (tr_arr(nz, n,1) /= tr_arr(nz, n,1)) .or. & ... + + !_______________________________________________________________ + ! check salt + if ( (tr_arr(nz, n,2) /= tr_arr(nz, n,2)) .or. & + tr_arr(nz, n,2) < 0 .or. tr_arr(nz, n,2)>50 ) then + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found salinity becomes NaN or <0, >50' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'nz = ',nz + write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) + write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad + write(*,*) 'z=', Z_n(nz) + write(*,*) 'salt(nz, n) = ',tr_arr(nz, n,2) + write(*,*) 'salt(: , n) = ',tr_arr(:, n,2) + write(*,*) + write(*,*) 'temp(nz, n) = ',tr_arr(nz, n,1) + write(*,*) 'temp(: , n) = ',tr_arr(:, n,1) + write(*,*) + write(*,*) 'hflux = ',heat_flux(n) + write(*,*) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) 'eta_n = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) 'hbar = ',hbar(n) + write(*,*) 'hbar_old = ',hbar_old(n) + write(*,*) 'ssh_rhs = ',ssh_rhs(n) + write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'hnode = ',hnode(:,n) + write(*,*) 'hnode_new = ',hnode_new(:,n) + write(*,*) + write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) + write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) + write(*,*) + write(*,*) 'Kv = ',Kv(:,n) + write(*,*) + do el=1,nod_in_elem2d_num(n) + elidx = nod_in_elem2D(el,n) + write(*,*) ' elem#=',el,', elemidx=',elidx + write(*,*) ' Av =',Av(:,elidx) +! write(*,*) ' helem =',helem(:,elidx) +! write(*,*) ' U =',UV(1,:,elidx) +! write(*,*) ' V =',UV(2,:,elidx) + enddo + write(*,*) 'Wvel = ',Wvel(:,n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) + write(*,*) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) + endif ! --> if ( (tr_arr(nz, n,2) /= tr_arr(nz, n,2)) .or. & ... + end do ! --> do nz=1,nlevels_nod2D(n)-1 + end do ! --> do n=1, myDim_nod2d +! ! end if + + !_______________________________________________________________________ + ! check globally if one of the cpus hat a blowup situation. if its the + ! case CPU mype==0 needs to write out the stuff. Write out occurs in + ! moment only over CPU mype==0 + call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) + if (found_blowup==1) then + call write_step_info(istep,1,mesh) + if (mype==0) then + call sleep(1) + write(*,*) + write(*,*) ' MODEL BLOW UP !!!' + write(*,*) ' ____' + write(*,*) ' __,-~~/~ `---.' + write(*,*) ' _/_,---( , )' + write(*,*) ' __ / < / ) \___' + write(*,*) '- -- ----===;;;`====------------------===;;;===---- -- -' + write(*,*) ' \/ ~"~"~"~"~"~\~"~)~"/' + write(*,*) ' (_ ( \ ( > \)' + write(*,*) ' \_( _ < >_>`' + write(*,*) ' ~ `-i` ::>|--"' + write(*,*) ' I;|.|.|' + write(*,*) ' <|i::|i|`' + write(*,*) ' (` ^`"`- ")' + write(*,*) ' _____.,-#%&$@%#&#~,._____' + write(*,*) + end if + call blowup(istep, mesh) + if (mype==0) write(*,*) ' --> finished writing blow up file' + call par_ex + endif +end subroutine From cbacb09cbec77c728662fed4bace7d6f38b06eb2 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 30 Jun 2021 16:48:26 +0200 Subject: [PATCH 282/909] update my mesh rotation in my python plotting routine --- view_pscholz/sub_fesom_mesh.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/view_pscholz/sub_fesom_mesh.py b/view_pscholz/sub_fesom_mesh.py index 2aefb37ff..f3fcc336a 100644 --- a/view_pscholz/sub_fesom_mesh.py +++ b/view_pscholz/sub_fesom_mesh.py @@ -130,7 +130,8 @@ def __init__(self,inputarray): #_______________________________________________________________________ # remove+augment periodic boundary - self.fesom_remove_pbnd() + if (inputarray['mesh_remove_cyc' ] == True): + self.fesom_remove_pbnd() # calculate fesom land mask interactivly #self.fesom_calc_landmask() @@ -285,9 +286,9 @@ def fesom_grid_rot_r2g(self,str_mode='r2g'): #_______________________________________________________________________ # make inverse of rotation matrix - if (str_mode == 'r2g') or (str_mode == 'focus'): - from numpy.linalg import inv - rotate_matrix=inv(rotate_matrix); +# if (str_mode == 'r2g') or (str_mode == 'focus'): +# from numpy.linalg import inv +# rotate_matrix=inv(rotate_matrix); #____3D_________________________________________________________________ # calculate Cartesian coordinates @@ -303,9 +304,14 @@ def fesom_grid_rot_r2g(self,str_mode='r2g'): #_______________________________________________________________________ # rotate to geographical cartesian coordinates: - xg=rotate_matrix[0,0]*xr + rotate_matrix[0,1]*yr + rotate_matrix[0,2]*zr; - yg=rotate_matrix[1,0]*xr + rotate_matrix[1,1]*yr + rotate_matrix[1,2]*zr; - zg=rotate_matrix[2,0]*xr + rotate_matrix[2,1]*yr + rotate_matrix[2,2]*zr; + if (str_mode == 'r2g') or (str_mode == 'focus'): + xg=rotate_matrix[0,0]*xr + rotate_matrix[1,0]*yr + rotate_matrix[2,0]*zr; + yg=rotate_matrix[0,1]*xr + rotate_matrix[1,1]*yr + rotate_matrix[2,1]*zr; + zg=rotate_matrix[0,2]*xr + rotate_matrix[1,2]*yr + rotate_matrix[2,2]*zr; + else: + xg=rotate_matrix[0,0]*xr + rotate_matrix[0,1]*yr + rotate_matrix[0,2]*zr; + yg=rotate_matrix[1,0]*xr + rotate_matrix[1,1]*yr + rotate_matrix[1,2]*zr; + zg=rotate_matrix[2,0]*xr + rotate_matrix[2,1]*yr + rotate_matrix[2,2]*zr; ##______________________________________________________________________ #self.nodes_2d_yg = np.degrees(np.arcsin(zg)); From 3cb6862f0d6f8baa34c4a45fe79f90de6d6d9bf1 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 5 Jul 2021 16:47:08 +0200 Subject: [PATCH 283/909] add module with utility functions to convert an integer to text --- src/fortran_utils.F90 | 31 +++++++++++++++++++++++++++++++ test/fortran/CMakeLists.txt | 1 + 2 files changed, 32 insertions(+) create mode 100644 src/fortran_utils.F90 diff --git a/src/fortran_utils.F90 b/src/fortran_utils.F90 new file mode 100644 index 000000000..542054ffe --- /dev/null +++ b/src/fortran_utils.F90 @@ -0,0 +1,31 @@ + ! synopsis: basic Fortran utilities, no MPI, dependencies only to INTRINSIC modules +module fortran_utils + implicit none + +contains + + + function positiveint_to_txt(val) result(txt) + integer, intent(in) :: val + character(int(log10(real(val)))+1) :: txt ! does not work for val=0 + ! EO parameters + write(txt,'(i0)') val + end function + + + function positiveint_to_txt_pad(val, width) result(txt) ! for val=0 width must be > 0 + integer, intent(in) :: val, width + character(:), allocatable :: txt + ! EO parameters + integer w, val_width + character(:), allocatable :: widthtxt + + val_width = int(log10(real(val)))+1 + w = width + if(w < val_width) w = val_width + widthtxt = positiveint_to_txt(w) + allocate(character(w) :: txt) + write(txt,'(i0.'//widthtxt//')') val + end function + +end module diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index b8660b011..a6d1212a9 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -17,6 +17,7 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_attribute_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_fesom_file.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/gen_modules_partitioning.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_gather.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_scatter.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_workaround_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/mpi_topology_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/MOD_MESH.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/oce_modules.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/fortran_utils.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) From 0457196612abf6e4ec17e9253a9b0c63be42a469 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 5 Jul 2021 19:27:54 +0200 Subject: [PATCH 284/909] - change integer to text utilities to work with the value 0 - add unit tests for the integer to text utility functions --- src/fortran_utils.F90 | 24 ++++++++++++---- test/fortran/fortran_utils_tests.pf | 44 +++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 6 deletions(-) create mode 100644 test/fortran/fortran_utils_tests.pf diff --git a/src/fortran_utils.F90 b/src/fortran_utils.F90 index 542054ffe..6eecdb89d 100644 --- a/src/fortran_utils.F90 +++ b/src/fortran_utils.F90 @@ -5,25 +5,37 @@ module fortran_utils contains - function positiveint_to_txt(val) result(txt) + function int_to_txt(val) result(txt) integer, intent(in) :: val - character(int(log10(real(val)))+1) :: txt ! does not work for val=0 + character(:), allocatable :: txt ! EO parameters + integer val_width + + if(val == 0) then + val_width = 1 + else + val_width = int(log10(real(val)))+1 ! does not work for val=0 + end if + allocate(character(val_width) :: txt) write(txt,'(i0)') val end function - function positiveint_to_txt_pad(val, width) result(txt) ! for val=0 width must be > 0 + function int_to_txt_pad(val, width) result(txt) integer, intent(in) :: val, width character(:), allocatable :: txt ! EO parameters integer w, val_width character(:), allocatable :: widthtxt - - val_width = int(log10(real(val)))+1 + + if(val == 0) then + val_width = 1 + else + val_width = int(log10(real(val)))+1 ! does not work for val=0 + end if w = width if(w < val_width) w = val_width - widthtxt = positiveint_to_txt(w) + widthtxt = int_to_txt(w) allocate(character(w) :: txt) write(txt,'(i0.'//widthtxt//')') val end function diff --git a/test/fortran/fortran_utils_tests.pf b/test/fortran/fortran_utils_tests.pf new file mode 100644 index 000000000..0ca78da2b --- /dev/null +++ b/test/fortran/fortran_utils_tests.pf @@ -0,0 +1,44 @@ +module fortran_utils_tests + use fortran_utils + use funit; implicit none + +contains + + + @test + subroutine test_2_digits_results_in_2_characters_string() + @assertEqual("12", int_to_txt(12)) + end subroutine + + + @test + subroutine test_1_digit_results_in_1_character_string + @assertEqual("1", int_to_txt(1)) + end subroutine + + + @test + subroutine test_0_results_in_0_character_string + @assertEqual("0", int_to_txt(0)) + end subroutine + + + @test + subroutine test_1_digit_padded_to_3_results_in_3_character_string + @assertEqual("001", int_to_txt_pad(1,3)) + end subroutine + + @test + subroutine test_3_digit_padded_to_1_results_in_3_character_string + @assertEqual("123", int_to_txt_pad(123,1)) + end subroutine + + + @test + subroutine test_0_padded_to_0_results_in_0_character_string + @assertEqual("0", int_to_txt_pad(0,0)) + end subroutine + + + +end module From fc86dde2bf438fcb1d0bedb3eebd77f43f4ead3c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 8 Jul 2021 12:04:43 +0200 Subject: [PATCH 285/909] be able to read/write transposed 3D restart files via preprocessor definition --- src/CMakeLists.txt | 1 + src/info_module.F90 | 6 ++++++ src/io_fesom_file.F90 | 24 ++++++++++++++++++++---- 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index eec063e37..9cfca071d 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -2,6 +2,7 @@ cmake_minimum_required(VERSION 3.4) set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9") project(fesom C Fortran) +#add_compile_options(-DTRANSPOSE_RESTART) option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) diff --git a/src/info_module.F90 b/src/info_module.F90 index 592d55466..071e5bf3e 100644 --- a/src/info_module.F90 +++ b/src/info_module.F90 @@ -86,6 +86,12 @@ subroutine print_definitions() print '(g0)', 'VERBOSE is ON' #else print '(g0)', 'VERBOSE is OFF' +#endif +#ifdef TRANSPOSE_RESTART + print '(g0)', 'TRANSPOSE_RESTART is ON' +#else + print '(g0)', 'TRANSPOSE_RESTART is OFF' +#endif #endif end subroutine diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 615f23b98..540f05a19 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -190,7 +190,11 @@ subroutine read_and_scatter_variables(this) call this%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) else ! z,nod,time - call this%read_var(var%var_index, [lvl,1,last_rec_idx], [1,size(var%global_level_data),1], var%global_level_data) +#ifndef TRANSPOSE_RESTART + call this%read_var(var%var_index, [lvl,1,last_rec_idx], [1,size(var%global_level_data),1], var%global_level_data) ! untransposed +#else + call this%read_var(var%var_index, [1,lvl,last_rec_idx], [size(var%global_level_data),1,1], var%global_level_data) +#endif end if end if @@ -247,7 +251,11 @@ subroutine gather_and_write_variables(this) call this%write_var(var%var_index, [1,this%rec_cnt], [size(var%global_level_data),1], var%global_level_data) else ! z,nod,time - call this%write_var(var%var_index, [lvl,1,this%rec_cnt], [1,size(var%global_level_data),1], var%global_level_data) +#ifndef TRANSPOSE_RESTART + call this%write_var(var%var_index, [lvl,1,this%rec_cnt], [1,size(var%global_level_data),1], var%global_level_data) ! untransposed +#else + call this%write_var(var%var_index, [1,lvl,this%rec_cnt], [size(var%global_level_data),1,1], var%global_level_data) +#endif end if end if end do @@ -346,7 +354,11 @@ subroutine specify_node_var_3d(this, name, longname, units, local_data) level_diminfo = obtain_diminfo(this, m_nod2d) depth_diminfo = obtain_diminfo(this, size(local_data, dim=1)) - call specify_variable(this, name, [depth_diminfo%idx, level_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .false., longname, units) +#ifndef TRANSPOSE_RESTART + call specify_variable(this, name, [depth_diminfo%idx, level_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .false., longname, units) ! untransposed +#else + call specify_variable(this, name, [level_diminfo%idx, depth_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .false., longname, units) +#endif end subroutine @@ -379,7 +391,11 @@ subroutine specify_elem_var_3d(this, name, longname, units, local_data) level_diminfo = obtain_diminfo(this, m_elem2d) depth_diminfo = obtain_diminfo(this, size(local_data, dim=1)) - call specify_variable(this, name, [depth_diminfo%idx, level_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .true., longname, units) +#ifndef TRANSPOSE_RESTART + call specify_variable(this, name, [depth_diminfo%idx, level_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .true., longname, units) ! untransposed +#else + call specify_variable(this, name, [level_diminfo%idx, depth_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .true., longname, units) +#endif end subroutine From b7daca0bd3a38322e3546a7c797daaec4310bd1e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 8 Jul 2021 12:35:58 +0200 Subject: [PATCH 286/909] workaround runtime error produced by the cray ftn compiler --- src/io_netcdf_file_module.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 9e5605e5d..2ee3936cb 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -151,8 +151,10 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) ! EO parameters include "netcdf.inc" type(var_type), allocatable :: tmparr(:) - type(att_type_wrapper) empty_atts(0) + type(att_type_wrapper), allocatable :: empty_atts(:) + allocate(empty_atts(0)) ! if we use a fixed size array with size 0 there is a segfault at runtime when compiled with cray ftn + ! assume the vars array is allocated allocate( tmparr(size(this%vars)+1) ) tmparr(1:size(this%vars)) = this%vars From 3b923b36bf44ddf7cc7b734645c60265142f3940 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 8 Jul 2021 12:39:42 +0200 Subject: [PATCH 287/909] print full filepath in "restart from record" info --- src/io_restart.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 9b18851b2..5d479a70b 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -289,7 +289,7 @@ subroutine read_restart(path, filegroup) call filegroup%files(i)%join() if(filegroup%files(i)%is_iorank()) then - write(*,*) 'restart from record ', filegroup%files(i)%rec_count(), ' of ', filegroup%files(i)%rec_count() + write(*,*) 'restart from record ', filegroup%files(i)%rec_count(), ' of ', filegroup%files(i)%rec_count(), filegroup%files(i)%path ! read the last entry from the iter variable call filegroup%files(i)%read_var1(filegroup%files(i)%iter_varindex, [filegroup%files(i)%rec_count()], globalstep) From 8fb0c885ff270144d30a70c96fdf3bf441778ca5 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 8 Jul 2021 14:59:28 +0200 Subject: [PATCH 288/909] disable parallel restart reading for Cray compilers for now because of problems at aleph --- src/CMakeLists.txt | 4 ++++ src/info_module.F90 | 4 ++++ src/io_restart.F90 | 8 +++++++- 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9cfca071d..d25f587a3 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -2,6 +2,10 @@ cmake_minimum_required(VERSION 3.4) set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9") project(fesom C Fortran) +if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) +#add_compile_options(-craympich-mt) # alternative cray-mpich library + add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # work around issue on aleph +endif() #add_compile_options(-DTRANSPOSE_RESTART) option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) diff --git a/src/info_module.F90 b/src/info_module.F90 index 071e5bf3e..9ccf69b75 100644 --- a/src/info_module.F90 +++ b/src/info_module.F90 @@ -92,6 +92,10 @@ subroutine print_definitions() #else print '(g0)', 'TRANSPOSE_RESTART is OFF' #endif +#ifdef DISABLE_PARALLEL_RESTART_READ + print '(g0)', 'DISABLE_PARALLEL_RESTART_READ is ON' +#else + print '(g0)', 'DISABLE_PARALLEL_RESTART_READ is OFF' #endif end subroutine diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 5d479a70b..10deeacaf 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -276,16 +276,22 @@ subroutine read_restart(path, filegroup) if(filegroup%files(i)%path .ne. dirpath//"/"//filegroup%files(i)%varname//".nc") then call execute_command_line("mkdir -p "//dirpath) filegroup%files(i)%path = dirpath//"/"//filegroup%files(i)%varname//".nc" - write(*,*) 'reading restart for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path +#ifndef DISABLE_PARALLEL_RESTART_READ + write(*,*) 'reading restart PARALLEL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path +#else + write(*,*) 'reading restart SEQIENTIAL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path +#endif call filegroup%files(i)%open_read(filegroup%files(i)%path) ! do we need to bother with read-only access? ! todo: print a reasonable error message if the file does not exist end if end if call filegroup%files(i)%async_read_and_scatter_variables() +#ifndef DISABLE_PARALLEL_RESTART_READ end do do i=1, filegroup%nfiles +#endif call filegroup%files(i)%join() if(filegroup%files(i)%is_iorank()) then From ebcc01c0bb76a281b7617605b440144f4b293812 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 18 Jun 2021 15:34:53 +0200 Subject: [PATCH 289/909] fix build error on macOS --- src/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d25f587a3..fe5f68b0f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,5 +1,4 @@ cmake_minimum_required(VERSION 3.4) -set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9") project(fesom C Fortran) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) From 5fa9f4d2fcd0e50967def0d0d7da111d439f3ecd Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 8 Jul 2021 15:08:21 +0200 Subject: [PATCH 290/909] give default values to uninitialized variables --- src/oce_ale_tracer.F90 | 3 +++ src/oce_ice_init_state.F90 | 4 ++++ src/oce_vel_rhs_vinv.F90 | 1 + 3 files changed, 8 insertions(+) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 06b2ad8b4..80038796a 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -342,6 +342,9 @@ subroutine diff_ver_part_expl_ale(tr_num, mesh) real(kind=WP) :: zinv1,Ty #include "associate_mesh.h" + + Ty = 0.0_WP + !___________________________________________________________________________ do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 diff --git a/src/oce_ice_init_state.F90 b/src/oce_ice_init_state.F90 index ecb31fd31..a637515ad 100755 --- a/src/oce_ice_init_state.F90 +++ b/src/oce_ice_init_state.F90 @@ -372,6 +372,8 @@ subroutine init_fields_na_test(mesh) #include "associate_mesh.h" + c_status = .false. + ! =================== ! Fill the model fields with dummy values ! =================== @@ -478,6 +480,8 @@ subroutine init_fields_global_test(mesh) #include "associate_mesh.h" + c_status = .false. + ! =================== ! Fill the model fields with dummy values ! =================== diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index 849b5aea9..46881e065 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -120,6 +120,7 @@ subroutine compute_vel_rhs_vinv(mesh) !vector invariant real(kind=WP) :: density0_inv = 1./density_0 #include "associate_mesh.h" + w = 0.0_WP uvert=0.0_WP From a1f747f6b0c94e2eb38e5f17e1451108aedc3431 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 8 Jul 2021 15:08:21 +0200 Subject: [PATCH 291/909] give default values to uninitialized variables --- src/oce_ale_tracer.F90 | 3 +++ src/oce_ice_init_state.F90 | 4 ++++ src/oce_vel_rhs_vinv.F90 | 1 + 3 files changed, 8 insertions(+) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index dd8d8411f..356ae8756 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -339,6 +339,9 @@ subroutine diff_ver_part_expl_ale(tr_num, mesh) real(kind=WP) :: zinv1,Ty #include "associate_mesh.h" + + Ty = 0.0_WP + !___________________________________________________________________________ do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 diff --git a/src/oce_ice_init_state.F90 b/src/oce_ice_init_state.F90 index ecb31fd31..a637515ad 100755 --- a/src/oce_ice_init_state.F90 +++ b/src/oce_ice_init_state.F90 @@ -372,6 +372,8 @@ subroutine init_fields_na_test(mesh) #include "associate_mesh.h" + c_status = .false. + ! =================== ! Fill the model fields with dummy values ! =================== @@ -478,6 +480,8 @@ subroutine init_fields_global_test(mesh) #include "associate_mesh.h" + c_status = .false. + ! =================== ! Fill the model fields with dummy values ! =================== diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index a09e6658e..f7d98f3ab 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -120,6 +120,7 @@ subroutine compute_vel_rhs_vinv(mesh) !vector invariant real(kind=WP) :: density0_inv = 1./density_0 #include "associate_mesh.h" + w = 0.0_WP uvert=0.0_WP From 3bcd313d6a8421debeab1619d4f75f1444f5e2b9 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 9 Jul 2021 13:19:49 +0200 Subject: [PATCH 292/909] be able to write transposed 3D output files via preprocessor definition --- src/CMakeLists.txt | 1 + src/info_module.F90 | 5 +++++ src/io_meandata.F90 | 18 ++++++++++++++++-- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index fe5f68b0f..18adf1110 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -6,6 +6,7 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # work around issue on aleph endif() #add_compile_options(-DTRANSPOSE_RESTART) +#add_compile_options(-DTRANSPOSE_OUTPUT) option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) diff --git a/src/info_module.F90 b/src/info_module.F90 index 9ccf69b75..cb79983b5 100644 --- a/src/info_module.F90 +++ b/src/info_module.F90 @@ -96,6 +96,11 @@ subroutine print_definitions() print '(g0)', 'DISABLE_PARALLEL_RESTART_READ is ON' #else print '(g0)', 'DISABLE_PARALLEL_RESTART_READ is OFF' +#endif +#ifdef TRANSPOSE_OUTPUT + print '(g0)', 'TRANSPOSE_OUTPUT is ON' +#else + print '(g0)', 'TRANSPOSE_OUTPUT is OFF' #endif end subroutine diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 3cd67d342..4e38135f6 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -616,8 +616,14 @@ subroutine create_new_file(entry, mesh) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'axis', len_trim('T'), trim('T')), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'stored_direction', len_trim('increasing'), trim('increasing')), __LINE__) - call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, & - (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) +#ifndef TRANSPOSE_OUTPUT + call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) +#else + call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(entry%ndim:1:-1), entry%recID/), entry%varID), __LINE__) +#endif + + + call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'description', len_trim(entry%description), entry%description), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'long_name', len_trim(entry%description), entry%description), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'units', len_trim(entry%units), entry%units), __LINE__) @@ -725,7 +731,11 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__) elseif (entry%ndim==2) then +#ifndef TRANSPOSE_OUTPUT call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r8, 1), __LINE__) +#else + call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r8, 1), __LINE__) +#endif end if end if end do @@ -745,7 +755,11 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__) elseif (entry%ndim==2) then +#ifndef TRANSPOSE_OUTPUT call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r4, 1), __LINE__) +#else + call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__) +#endif end if end if end do From 0686dd2f99838ec6784821f7f80d28478b1c3aa6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 9 Jul 2021 16:30:50 +0200 Subject: [PATCH 293/909] remove wrong description of restart subroutine --- src/fvom_main.F90 | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index e35cf8ef6..57fde03b9 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -128,12 +128,6 @@ program main call clock_newyear ! check if it is a new year if (mype==0) t6=MPI_Wtime() !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ - ! The interface to the restart module is made via call restart ! - ! The inputs are: istep, l_write, l_create - ! if istep is not zero it will be decided whether restart shall be written - ! if l_write is TRUE the restart will be forced - ! if l_read the restart will be read - ! as an example, for reading restart one does: call restart(0, .false., .false., .true.) call restart(0, .false., r_restart, mesh) ! istep, l_write, l_read if (mype==0) t7=MPI_Wtime() From d74f5c1c97a64c189fade251d7e979dcb0a5632a Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 9 Jul 2021 16:51:38 +0200 Subject: [PATCH 294/909] use separate procedure to determine if a restart is due for a given step --- src/io_restart.F90 | 46 ++++++++++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 10deeacaf..9505414ea 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -159,24 +159,7 @@ subroutine restart(istep, l_write, l_read, mesh) if (istep==0) return !check whether restart will be written - is_restart=.false. - - if (restart_length_unit.eq.'y') then - call annual_event(is_restart) - else if (restart_length_unit.eq.'m') then - call monthly_event(is_restart) - else if (restart_length_unit.eq.'d') then - call daily_event(is_restart, restart_length) - else if (restart_length_unit.eq.'h') then - call hourly_event(is_restart, restart_length) - else if (restart_length_unit.eq.'s') then - call step_event(is_restart, istep, restart_length) - else - write(*,*) 'You did not specify a supported outputflag.' - write(*,*) 'The program will stop to give you opportunity to do it.' - call par_ex(1) - stop - endif + is_restart = is_due(restart_length_unit, restart_length, istep) if (l_write) is_restart=.true. @@ -314,4 +297,31 @@ subroutine read_restart(path, filegroup) end do end subroutine + + function is_due(unit, frequency, istep) result(d) + character(len=*), intent(in) :: unit + integer, intent(in) :: frequency + integer, intent(in) :: istep + logical d + ! EO parameters + d = .false. + + if(unit.eq.'y') then + call annual_event(d) + else if(unit.eq.'m') then + call monthly_event(d) + else if(unit.eq.'d') then + call daily_event(d, frequency) + else if(unit.eq.'h') then + call hourly_event(d, frequency) + else if(unit.eq.'s') then + call step_event(d, istep, frequency) + else + write(*,*) 'You did not specify a supported outputflag.' + write(*,*) 'The program will stop to give you opportunity to do it.' + stop 1 + stop + end if + end function + end module From 5581d1f896a38340788edab4b9c07b512943b4f3 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 9 Jul 2021 16:53:22 +0200 Subject: [PATCH 295/909] remove unused parameter in restart procedure --- src/fvom_main.F90 | 4 ++-- src/io_restart.F90 | 7 ++----- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 57fde03b9..9e1db4c8c 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -128,7 +128,7 @@ program main call clock_newyear ! check if it is a new year if (mype==0) t6=MPI_Wtime() !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ - call restart(0, .false., r_restart, mesh) ! istep, l_write, l_read + call restart(0, r_restart, mesh) if (mype==0) t7=MPI_Wtime() ! store grid information into netcdf file @@ -251,7 +251,7 @@ program main if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' call output (n, mesh) t5 = MPI_Wtime() - call restart(n, .false., .false., mesh) + call restart(n, .false., mesh) t6 = MPI_Wtime() rtime_fullice = rtime_fullice + t2 - t1 diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 9505414ea..923719fda 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -124,7 +124,7 @@ end subroutine ini_ice_io ! !-------------------------------------------------------------------------------------------- ! -subroutine restart(istep, l_write, l_read, mesh) +subroutine restart(istep, l_read, mesh) #if defined(__icepack) icepack restart not merged here ! produce a compiler error if USE_ICEPACK=ON; todo: merge icepack restart from 68d8b8b @@ -132,11 +132,10 @@ subroutine restart(istep, l_write, l_read, mesh) implicit none ! this is the main restart subroutine - ! if l_write is TRUE writing restart file will be forced ! if l_read is TRUE the restart file will be read integer :: istep - logical :: l_write, l_read + logical :: l_read logical :: is_restart type(t_mesh), intent(in) , target :: mesh @@ -161,8 +160,6 @@ subroutine restart(istep, l_write, l_read, mesh) !check whether restart will be written is_restart = is_due(restart_length_unit, restart_length, istep) - if (l_write) is_restart=.true. - if (.not. is_restart) return ! write restart From 9a00bc6cfe2404349aeb5a09ed5ca3e69abb0c37 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 9 Jul 2021 17:49:54 +0200 Subject: [PATCH 296/909] determine if we can load raw restart dump files --- src/io_restart.F90 | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 923719fda..bcfd60ce0 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -138,6 +138,7 @@ subroutine restart(istep, l_read, mesh) logical :: l_read logical :: is_restart type(t_mesh), intent(in) , target :: mesh + logical dumpfiles_exist ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then @@ -149,6 +150,11 @@ subroutine restart(istep, l_read, mesh) end if if (l_read) then + ! determine if we can load raw restart dump files + if(mype == 0) then + inquire(file=trim(ResultPath)//"/raw_restart/np"//int_to_txt(npes)//".info", exist=dumpfiles_exist) + end if + call MPI_Bcast(dumpfiles_exist, 1, MPI_LOGICAL, 0, MPI_COMM_FESOM, MPIerr) call read_restart(oce_path, oce_files) if (use_ice) then call read_restart(ice_path, ice_files) From 2e7aff5f347fe6faf6f976f5599638c51a82abfa Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 10:29:38 +0200 Subject: [PATCH 297/909] add helper function to create a padded text from the local MPI rank --- src/io_fesom_file.F90 | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 540f05a19..a86dba7f8 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -473,6 +473,15 @@ subroutine close_file(this) end subroutine + function mpirank_to_txt() result(txt) + use g_PARSUP + use fortran_utils + character(:), allocatable :: txt + ! EO parameters + txt = int_to_txt_pad(mype,int(log10(real(npes)))+1) ! pad to the width of the number of processes + end function + + subroutine assert(val, line) logical, intent(in) :: val integer, intent(in) :: line From 7e1d930d99232441363054a1803bd1ec952b19d0 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 10:39:55 +0200 Subject: [PATCH 298/909] store variable name --- src/io_fesom_file.F90 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index a86dba7f8..75a736e97 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -14,6 +14,7 @@ module io_fesom_file_module real(kind=8), allocatable :: global_level_data(:) integer :: global_level_data_size = 0 logical is_elem_based + character(:), allocatable :: varname ! todo: maybe use a getter in netcdf_file_type to get the name end type @@ -459,6 +460,7 @@ subroutine specify_variable(this, name, dim_indices, global_level_data_size, loc this%var_infos(this%nvar_infos)%external_local_data_ptr => local_data this%var_infos(this%nvar_infos)%global_level_data_size = global_level_data_size this%var_infos(this%nvar_infos)%is_elem_based = is_elem_based + this%var_infos(this%nvar_infos)%varname = name end subroutine From c87fe643a98a761d5cd37d6a904c66eec61d60a3 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 10:42:06 +0200 Subject: [PATCH 299/909] add procedure to dump raw data for a variable --- src/io_fesom_file.F90 | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 75a736e97..980ef527d 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -39,6 +39,7 @@ module io_fesom_file_module logical gather_and_write contains procedure, public :: async_read_and_scatter_variables, async_gather_and_write_variables, join, init, is_iorank, rec_count, time_varindex, time_dimindex + procedure, public :: write_variables_raw procedure, public :: close_file ! inherited procedures we overwrite generic, public :: specify_node_var => specify_node_var_2d, specify_node_var_3d generic, public :: specify_elem_var => specify_elem_var_2d, specify_elem_var_3d @@ -267,6 +268,22 @@ subroutine gather_and_write_variables(this) end subroutine + subroutine write_variables_raw(this, outdir) + class(fesom_file_type), target :: this + character(len=*) outdir + ! EO parameters + integer i, fileunit + type(var_info), pointer :: var + + do i=1, this%nvar_infos + var => this%var_infos(i) + open(newunit = fileunit, file = outdir//'/'//var%varname//'_'//mpirank_to_txt()//'.dump', form = 'unformatted') + write(fileunit) var%external_local_data_ptr ! directly use external_local_data_ptr, use the local_data_copy only when called asynchronously + close(fileunit) + end do + end subroutine + + subroutine join(this) class(fesom_file_type) this ! EO parameters From 028fe979fbf1cd13c57a5563d207815ab9446d42 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 13:44:20 +0200 Subject: [PATCH 300/909] add namelist entries to set raw restart frequency --- src/gen_modules_config.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index fa4051421..53c60b132 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -35,8 +35,10 @@ module g_config integer :: logfile_outfreq=1 ! logfile info. outp. freq., # steps integer :: restart_length=1 character :: restart_length_unit='m' + integer :: raw_restart_length=1 + character :: raw_restart_length_unit='m' - namelist /restart_log/ restart_length, restart_length_unit, logfile_outfreq + namelist /restart_log/ restart_length, restart_length_unit, raw_restart_length, raw_restart_length_unit, logfile_outfreq !_____________________________________________________________________________ ! *** ale_def *** From 019fbe7bdb39ef8ce8d86d51933c00d60f43e150 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 11:09:30 +0200 Subject: [PATCH 301/909] create the output directory for raw restarts if these are not turned off --- src/io_restart.F90 | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index bcfd60ce0..18fffa611 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -16,6 +16,8 @@ MODULE io_RESTART character(:), allocatable, save :: oce_path character(:), allocatable, save :: ice_path + character(:), allocatable, save :: raw_restart_dirpath + contains ! @@ -130,6 +132,7 @@ subroutine restart(istep, l_read, mesh) icepack restart not merged here ! produce a compiler error if USE_ICEPACK=ON; todo: merge icepack restart from 68d8b8b #endif + use fortran_utils implicit none ! this is the main restart subroutine ! if l_read is TRUE the restart file will be read @@ -139,6 +142,22 @@ subroutine restart(istep, l_read, mesh) logical :: is_restart type(t_mesh), intent(in) , target :: mesh logical dumpfiles_exist + logical, save :: initialized = .false. + integer cstat, estat + character(500) cmsg ! there seems to be no documentation about the max size of this text + + if(.not. initialized) then + initialized = .true. + raw_restart_dirpath = trim(ResultPath)//"/raw_restart/np"//int_to_txt(npes) + if(raw_restart_length_unit /= "off") then + if(mype == 0) then + print *,"creating raw restart directory: "//raw_restart_dirpath + call execute_command_line("mkdir -p "//raw_restart_dirpath, exitstat=estat, cmdstat=cstat, cmdmsg=cmsg) ! sometimes does not work on aleph + if(cstat /= 0) print *,"creating raw restart directory ERROR ", trim(cmsg) + end if + call MPI_Barrier(MPI_COMM_FESOM, mpierr) ! make sure the dir has been created before we continue... + end if + end if ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then From 6f612c1c87edb5cdc6f4ee7079d3eec62a3b1adb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 12:22:53 +0200 Subject: [PATCH 302/909] use a variable for the raw restart info path so we can use it elsewhere --- src/io_restart.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 18fffa611..d39b7ba9d 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -17,6 +17,7 @@ MODULE io_RESTART character(:), allocatable, save :: ice_path character(:), allocatable, save :: raw_restart_dirpath + character(:), allocatable, save :: raw_restart_infopath contains @@ -149,6 +150,7 @@ subroutine restart(istep, l_read, mesh) if(.not. initialized) then initialized = .true. raw_restart_dirpath = trim(ResultPath)//"/raw_restart/np"//int_to_txt(npes) + raw_restart_infopath = trim(ResultPath)//"/raw_restart/np"//int_to_txt(npes)//".info" if(raw_restart_length_unit /= "off") then if(mype == 0) then print *,"creating raw restart directory: "//raw_restart_dirpath @@ -171,7 +173,7 @@ subroutine restart(istep, l_read, mesh) if (l_read) then ! determine if we can load raw restart dump files if(mype == 0) then - inquire(file=trim(ResultPath)//"/raw_restart/np"//int_to_txt(npes)//".info", exist=dumpfiles_exist) + inquire(file=raw_restart_infopath, exist=dumpfiles_exist) end if call MPI_Bcast(dumpfiles_exist, 1, MPI_LOGICAL, 0, MPI_COMM_FESOM, MPIerr) call read_restart(oce_path, oce_files) From 43dcfa1df0c509e3ba51bc6c7db6be07f9d50010 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 13:59:33 +0200 Subject: [PATCH 303/909] add subroutine to write raw restarts --- src/io_restart.F90 | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index d39b7ba9d..8098bf8ca 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -244,6 +244,29 @@ subroutine write_restart(path, filegroup, istep) end subroutine +subroutine write_raw_restart(filegroup, istep) + type(restart_file_group), intent(inout) :: filegroup + integer, intent(in):: istep + ! EO parameters + integer i + integer cstep + integer fileunit + + do i=1, filegroup%nfiles + call filegroup%files(i)%write_variables_raw(raw_restart_dirpath) + end do + + if(mype == 0) then + ! store metadata about the raw restart + cstep = globalstep+istep + open(newunit = fileunit, file = raw_restart_infopath) + write(fileunit, '(g0)') cstep + write(fileunit, '(g0)') ctime + close(fileunit) + end if +end subroutine + + ! join remaining threads and close all open files subroutine finalize_restart() integer i From 75d644319478abdd371b67b489b79288b284b5e7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 14:33:37 +0200 Subject: [PATCH 304/909] add procedure read data for a variable from a dump file --- src/io_fesom_file.F90 | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 980ef527d..d24e96d1a 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -39,7 +39,7 @@ module io_fesom_file_module logical gather_and_write contains procedure, public :: async_read_and_scatter_variables, async_gather_and_write_variables, join, init, is_iorank, rec_count, time_varindex, time_dimindex - procedure, public :: write_variables_raw + procedure, public :: read_variables_raw, write_variables_raw procedure, public :: close_file ! inherited procedures we overwrite generic, public :: specify_node_var => specify_node_var_2d, specify_node_var_3d generic, public :: specify_elem_var => specify_elem_var_2d, specify_elem_var_3d @@ -268,6 +268,27 @@ subroutine gather_and_write_variables(this) end subroutine + subroutine read_variables_raw(this, outdir) + class(fesom_file_type), target :: this + character(len=*), intent(in) :: outdir + ! EO parameters + integer i, fileunit + type(var_info), pointer :: var + integer status + + do i=1, this%nvar_infos + var => this%var_infos(i) + open(newunit = fileunit, status = 'old', iostat = status, file = outdir//'/'//var%varname//'_'//mpirank_to_txt()//'.dump', form = 'unformatted') + if(status == 0) then + read(fileunit) var%external_local_data_ptr ! directly use external_local_data_ptr, use the local_data_copy only when called asynchronously + close(fileunit) + else + print *,"can not open ",outdir//'/'//var%varname//'_'//mpirank_to_txt()//'.dump' + end if + end do + end subroutine + + subroutine write_variables_raw(this, outdir) class(fesom_file_type), target :: this character(len=*) outdir From 312edcbd32cce2ddcce536cc358abd750415401b Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 16:09:33 +0200 Subject: [PATCH 305/909] stop execution if there is a read error --- src/io_fesom_file.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index d24e96d1a..25dbbef1c 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -284,6 +284,7 @@ subroutine read_variables_raw(this, outdir) close(fileunit) else print *,"can not open ",outdir//'/'//var%varname//'_'//mpirank_to_txt()//'.dump' + stop 1 end if end do end subroutine From ae7a1d96bf3418ffacfc001e4bd6279bac94e501 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 16:36:11 +0200 Subject: [PATCH 306/909] use a variable for the rank we use to read/write raw restart metadata --- src/io_restart.F90 | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 8098bf8ca..1a79d7b50 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -18,6 +18,8 @@ MODULE io_RESTART character(:), allocatable, save :: raw_restart_dirpath character(:), allocatable, save :: raw_restart_infopath + + integer, parameter :: RAW_RESTART_METADATA_RANK = 0 contains @@ -152,7 +154,7 @@ subroutine restart(istep, l_read, mesh) raw_restart_dirpath = trim(ResultPath)//"/raw_restart/np"//int_to_txt(npes) raw_restart_infopath = trim(ResultPath)//"/raw_restart/np"//int_to_txt(npes)//".info" if(raw_restart_length_unit /= "off") then - if(mype == 0) then + if(mype == RAW_RESTART_METADATA_RANK) then print *,"creating raw restart directory: "//raw_restart_dirpath call execute_command_line("mkdir -p "//raw_restart_dirpath, exitstat=estat, cmdstat=cstat, cmdmsg=cmsg) ! sometimes does not work on aleph if(cstat /= 0) print *,"creating raw restart directory ERROR ", trim(cmsg) @@ -172,10 +174,10 @@ subroutine restart(istep, l_read, mesh) if (l_read) then ! determine if we can load raw restart dump files - if(mype == 0) then + if(mype == RAW_RESTART_METADATA_RANK) then inquire(file=raw_restart_infopath, exist=dumpfiles_exist) end if - call MPI_Bcast(dumpfiles_exist, 1, MPI_LOGICAL, 0, MPI_COMM_FESOM, MPIerr) + call MPI_Bcast(dumpfiles_exist, 1, MPI_LOGICAL, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) call read_restart(oce_path, oce_files) if (use_ice) then call read_restart(ice_path, ice_files) @@ -256,7 +258,7 @@ subroutine write_raw_restart(filegroup, istep) call filegroup%files(i)%write_variables_raw(raw_restart_dirpath) end do - if(mype == 0) then + if(mype == RAW_RESTART_METADATA_RANK) then ! store metadata about the raw restart cstep = globalstep+istep open(newunit = fileunit, file = raw_restart_infopath) From 0e6e63ddb1e5edbe43197f9d728c3aa268650578 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 14 Jul 2021 18:53:10 +0200 Subject: [PATCH 307/909] add subroutine to read raw restart data --- src/io_restart.F90 | 52 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 1a79d7b50..8f760e5f7 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -269,6 +269,43 @@ subroutine write_raw_restart(filegroup, istep) end subroutine +subroutine read_raw_restart(filegroup) + type(restart_file_group), intent(inout) :: filegroup + ! EO parameters + integer i + integer rstep + real(kind=WP) rtime + integer fileunit + integer status + + if(mype == RAW_RESTART_METADATA_RANK) then + ! store metadata about the raw restart + open(newunit = fileunit, status = 'old', iostat = status, file = raw_restart_infopath) + if(status == 0) then + read(fileunit,*) rstep + read(fileunit,*) rtime + close(fileunit) + else + print *,"can not open ",raw_restart_infopath + stop 1 + end if + + ! compare the restart time with our actual time + if(int(ctime) /= int(rtime)) then + print *, "raw restart time ",rtime,"does not match current clock time",ctime + stop 1 + end if + globalstep = rstep + end if + ! sync globalstep with the other processes to let all processes writing portable restart files know the globalstep + call MPI_Bcast(globalstep, 1, MPI_INT, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) + + do i=1, filegroup%nfiles + call filegroup%files(i)%read_variables_raw(raw_restart_dirpath) + end do +end subroutine + + ! join remaining threads and close all open files subroutine finalize_restart() integer i @@ -295,12 +332,14 @@ subroutine finalize_restart() subroutine read_restart(path, filegroup) + use g_PARSUP character(len=*), intent(in) :: path type(restart_file_group), intent(inout) :: filegroup ! EO parameters real(kind=WP) rtime integer i character(:), allocatable :: dirpath + integer mpistatus(MPI_STATUS_SIZE) do i=1, filegroup%nfiles if( filegroup%files(i)%is_iorank() ) then @@ -342,8 +381,19 @@ subroutine read_restart(path, filegroup) write(*,*) 'the model will stop!' stop 1 end if - end if + end if end do + + ! sync globalstep with the process responsible for raw restart metadata + if(filegroup%nfiles >= 1) then + ! use the first restart I/O process to send the globalstep + if( filegroup%files(1)%is_iorank() ) then + call MPI_Send(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, 42, MPI_COMM_FESOM, MPIerr) + end if + if(mype == RAW_RESTART_METADATA_RANK) then + call MPI_Recv(globalstep, 1, MPI_INTEGER, MPI_ANY_SOURCE, 42, MPI_COMM_FESOM, mpistatus, MPIerr) + end if + end if end subroutine From 4003123b51180f0ad8afb5b566142003b955bc80 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 15 Jul 2021 09:24:41 +0200 Subject: [PATCH 308/909] - read raw restart if available instead of portable restart - when reading the portable restart, immediately write a raw restart --- src/io_restart.F90 | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 8f760e5f7..b8ada1b82 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -178,9 +178,17 @@ subroutine restart(istep, l_read, mesh) inquire(file=raw_restart_infopath, exist=dumpfiles_exist) end if call MPI_Bcast(dumpfiles_exist, 1, MPI_LOGICAL, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) - call read_restart(oce_path, oce_files) - if (use_ice) then - call read_restart(ice_path, ice_files) + if(dumpfiles_exist) then + call read_raw_restart(oce_files) + if(use_ice) call read_raw_restart(ice_files) + else + call read_restart(oce_path, oce_files) + if (use_ice) call read_restart(ice_path, ice_files) + ! immediately create a raw restart + if(raw_restart_length_unit /= "off") then + call write_raw_restart(oce_files, istep) + if(use_ice) call write_raw_restart(ice_files, istep) + end if end if end if From 7979de65e2c208ca9c8cf0d36a9d5e8fab9fbe87 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 15 Jul 2021 10:05:11 +0200 Subject: [PATCH 309/909] be able to append to an existing portable restart file if we started from a raw restart --- src/io_restart.F90 | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index b8ada1b82..af7aab363 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -223,6 +223,8 @@ subroutine write_restart(path, filegroup, istep) integer cstep integer i character(:), allocatable :: dirpath + character(:), allocatable :: filepath + logical file_exists cstep = globalstep+istep @@ -231,11 +233,17 @@ subroutine write_restart(path, filegroup, istep) if(filegroup%files(i)%is_iorank()) then if(filegroup%files(i)%is_attached()) call filegroup%files(i)%close_file() ! close the file from previous write - + dirpath = path(1:len(path)-3) ! chop of the ".nc" suffix - if(filegroup%files(i)%path .ne. dirpath//"/"//filegroup%files(i)%varname//".nc") then + filepath = dirpath//"/"//filegroup%files(i)%varname//".nc" + if(filegroup%files(i)%path == "") then + ! the path to an existing restart file is not set in read_restart if we had a restart from a raw restart + inquire(file=filepath, exist=file_exists) + if(file_exists) filegroup%files(i)%path = filepath + end if + if(filegroup%files(i)%path .ne. filepath) then call execute_command_line("mkdir -p "//dirpath) - filegroup%files(i)%path = dirpath//"/"//filegroup%files(i)%varname//".nc" + filegroup%files(i)%path = filepath call filegroup%files(i)%open_write_create(filegroup%files(i)%path) else call filegroup%files(i)%open_write_append(filegroup%files(i)%path) ! todo: keep the file open between writes From 2bcbf1b19b647faa3b00140d505ee4efa2d83a97 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 15 Jul 2021 10:17:08 +0200 Subject: [PATCH 310/909] change MPI datatype to Fortran integer --- src/io_restart.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index af7aab363..c26d2c15a 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -314,7 +314,7 @@ subroutine read_raw_restart(filegroup) globalstep = rstep end if ! sync globalstep with the other processes to let all processes writing portable restart files know the globalstep - call MPI_Bcast(globalstep, 1, MPI_INT, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) + call MPI_Bcast(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) do i=1, filegroup%nfiles call filegroup%files(i)%read_variables_raw(raw_restart_dirpath) From 8fead52b2e98089cbcfc3187fdd8d1037de99beb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 15 Jul 2021 11:54:11 +0200 Subject: [PATCH 311/909] print a message when reading or writing raw restarts --- src/io_restart.F90 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index c26d2c15a..929fd7a4d 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -275,6 +275,7 @@ subroutine write_raw_restart(filegroup, istep) end do if(mype == RAW_RESTART_METADATA_RANK) then + print *,"writing raw restart to "//raw_restart_dirpath ! store metadata about the raw restart cstep = globalstep+istep open(newunit = fileunit, file = raw_restart_infopath) @@ -312,6 +313,7 @@ subroutine read_raw_restart(filegroup) stop 1 end if globalstep = rstep + print *,"reading raw restart from "//raw_restart_dirpath end if ! sync globalstep with the other processes to let all processes writing portable restart files know the globalstep call MPI_Bcast(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) From 5f45ebc71a6d4d2216416c3694c6a92b2817553d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 15 Jul 2021 12:48:58 +0200 Subject: [PATCH 312/909] do not print a message when creating the raw restart directory as the directory might already exist and the message would then be a little confusing --- src/io_restart.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 929fd7a4d..b769a5d42 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -155,7 +155,7 @@ subroutine restart(istep, l_read, mesh) raw_restart_infopath = trim(ResultPath)//"/raw_restart/np"//int_to_txt(npes)//".info" if(raw_restart_length_unit /= "off") then if(mype == RAW_RESTART_METADATA_RANK) then - print *,"creating raw restart directory: "//raw_restart_dirpath + ! inquire does not work for directories, the directory might already exist call execute_command_line("mkdir -p "//raw_restart_dirpath, exitstat=estat, cmdstat=cstat, cmdmsg=cmsg) ! sometimes does not work on aleph if(cstat /= 0) print *,"creating raw restart directory ERROR ", trim(cmsg) end if From f420963f8a5c331cc62e3a6ce1191c377ffccfc3 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 15 Jul 2021 12:57:30 +0200 Subject: [PATCH 313/909] rename logical variable --- src/io_restart.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index b769a5d42..15a4e3f21 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -142,7 +142,7 @@ subroutine restart(istep, l_read, mesh) integer :: istep logical :: l_read - logical :: is_restart + logical :: is_portable_restart_write type(t_mesh), intent(in) , target :: mesh logical dumpfiles_exist logical, save :: initialized = .false. @@ -195,9 +195,9 @@ subroutine restart(istep, l_read, mesh) if (istep==0) return !check whether restart will be written - is_restart = is_due(restart_length_unit, restart_length, istep) + is_portable_restart_write = is_due(restart_length_unit, restart_length, istep) - if (.not. is_restart) return + if (.not. is_portable_restart_write) return ! write restart if(mype==0) write(*,*)'Do output (netCDF, restart) ...' From 01922f7baa8aa37abc37ee11367918cdd4c4aa84 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 15 Jul 2021 13:41:42 +0200 Subject: [PATCH 314/909] be able to use 'off' as a frequency unit to disable portable or raw restarts --- src/gen_modules_config.F90 | 4 ++-- src/io_restart.F90 | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index 53c60b132..b34455d08 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -34,9 +34,9 @@ module g_config ! *** restart_log *** integer :: logfile_outfreq=1 ! logfile info. outp. freq., # steps integer :: restart_length=1 - character :: restart_length_unit='m' + character(3) :: restart_length_unit='m' integer :: raw_restart_length=1 - character :: raw_restart_length_unit='m' + character(3) :: raw_restart_length_unit='m' namelist /restart_log/ restart_length, restart_length_unit, raw_restart_length, raw_restart_length_unit, logfile_outfreq diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 15a4e3f21..f32533005 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -195,7 +195,7 @@ subroutine restart(istep, l_read, mesh) if (istep==0) return !check whether restart will be written - is_portable_restart_write = is_due(restart_length_unit, restart_length, istep) + is_portable_restart_write = is_due(trim(restart_length_unit), restart_length, istep) if (.not. is_portable_restart_write) return @@ -433,6 +433,8 @@ function is_due(unit, frequency, istep) result(d) call hourly_event(d, frequency) else if(unit.eq.'s') then call step_event(d, istep, frequency) + else if(unit.eq.'off') then + d = .false. else write(*,*) 'You did not specify a supported outputflag.' write(*,*) 'The program will stop to give you opportunity to do it.' From b9ea629b093fd587bfa2a63e858e7e8c275f6903 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 15 Jul 2021 13:55:27 +0200 Subject: [PATCH 315/909] prefix restart output directory with 'fesom_' --- src/io_restart.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index f32533005..fa9434407 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -151,8 +151,8 @@ subroutine restart(istep, l_read, mesh) if(.not. initialized) then initialized = .true. - raw_restart_dirpath = trim(ResultPath)//"/raw_restart/np"//int_to_txt(npes) - raw_restart_infopath = trim(ResultPath)//"/raw_restart/np"//int_to_txt(npes)//".info" + raw_restart_dirpath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(npes) + raw_restart_infopath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(npes)//".info" if(raw_restart_length_unit /= "off") then if(mype == RAW_RESTART_METADATA_RANK) then ! inquire does not work for directories, the directory might already exist From 869c619049f1c02d188c2f793330c49bfd96f0b7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 15 Jul 2021 13:59:05 +0200 Subject: [PATCH 316/909] - write raw restarts depending on its frequency - write raw restarts together with portable restarts --- src/io_restart.F90 | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index fa9434407..4e2bb6958 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -142,7 +142,7 @@ subroutine restart(istep, l_read, mesh) integer :: istep logical :: l_read - logical :: is_portable_restart_write + logical :: is_portable_restart_write, is_raw_restart_write type(t_mesh), intent(in) , target :: mesh logical dumpfiles_exist logical, save :: initialized = .false. @@ -196,20 +196,30 @@ subroutine restart(istep, l_read, mesh) !check whether restart will be written is_portable_restart_write = is_due(trim(restart_length_unit), restart_length, istep) + if(is_portable_restart_write .and. (raw_restart_length_unit /= "off")) then + is_raw_restart_write = .true. ! always write a raw restart together with the portable restart (unless raw restarts are off) + else + is_raw_restart_write = is_due(trim(raw_restart_length_unit), raw_restart_length, istep) + end if - if (.not. is_portable_restart_write) return + if(is_portable_restart_write) then + ! write restart + if(mype==0) write(*,*)'Do output (netCDF, restart) ...' + call write_restart(oce_path, oce_files, istep) + if(use_ice) call write_restart(ice_path, ice_files, istep) + end if - ! write restart - if(mype==0) write(*,*)'Do output (netCDF, restart) ...' - call write_restart(oce_path, oce_files, istep) - if (use_ice) then - call write_restart(ice_path, ice_files, istep) + if(is_raw_restart_write) then + call write_raw_restart(oce_files, istep) + if(use_ice) call write_raw_restart(ice_files, istep) end if - + ! actualize clock file to latest restart point if (mype==0) then - write(*,*) ' --> actualize clock file to latest restart point' - call clock_finish + if(is_portable_restart_write .or. is_raw_restart_write) then + write(*,*) ' --> actualize clock file to latest restart point' + call clock_finish + end if end if end subroutine restart From 7b890a92336844517312fd57a58430c0a2708d88 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 16 Jul 2021 11:17:26 +0200 Subject: [PATCH 317/909] make transposed restart the default --- src/CMakeLists.txt | 1 - src/info_module.F90 | 6 +++--- src/io_fesom_file.F90 | 8 ++++---- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 18adf1110..0f26f04f5 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -5,7 +5,6 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) #add_compile_options(-craympich-mt) # alternative cray-mpich library add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # work around issue on aleph endif() -#add_compile_options(-DTRANSPOSE_RESTART) #add_compile_options(-DTRANSPOSE_OUTPUT) option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) diff --git a/src/info_module.F90 b/src/info_module.F90 index cb79983b5..a30129b87 100644 --- a/src/info_module.F90 +++ b/src/info_module.F90 @@ -87,10 +87,10 @@ subroutine print_definitions() #else print '(g0)', 'VERBOSE is OFF' #endif -#ifdef TRANSPOSE_RESTART - print '(g0)', 'TRANSPOSE_RESTART is ON' +#ifdef UNTRANSPOSE_RESTART + print '(g0)', 'UNTRANSPOSE_RESTART is ON' #else - print '(g0)', 'TRANSPOSE_RESTART is OFF' + print '(g0)', 'UNTRANSPOSE_RESTART is OFF' #endif #ifdef DISABLE_PARALLEL_RESTART_READ print '(g0)', 'DISABLE_PARALLEL_RESTART_READ is ON' diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 25dbbef1c..45f3ac153 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -192,7 +192,7 @@ subroutine read_and_scatter_variables(this) call this%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) else ! z,nod,time -#ifndef TRANSPOSE_RESTART +#ifdef UNTRANSPOSE_RESTART call this%read_var(var%var_index, [lvl,1,last_rec_idx], [1,size(var%global_level_data),1], var%global_level_data) ! untransposed #else call this%read_var(var%var_index, [1,lvl,last_rec_idx], [size(var%global_level_data),1,1], var%global_level_data) @@ -253,7 +253,7 @@ subroutine gather_and_write_variables(this) call this%write_var(var%var_index, [1,this%rec_cnt], [size(var%global_level_data),1], var%global_level_data) else ! z,nod,time -#ifndef TRANSPOSE_RESTART +#ifdef UNTRANSPOSE_RESTART call this%write_var(var%var_index, [lvl,1,this%rec_cnt], [1,size(var%global_level_data),1], var%global_level_data) ! untransposed #else call this%write_var(var%var_index, [1,lvl,this%rec_cnt], [size(var%global_level_data),1,1], var%global_level_data) @@ -394,7 +394,7 @@ subroutine specify_node_var_3d(this, name, longname, units, local_data) level_diminfo = obtain_diminfo(this, m_nod2d) depth_diminfo = obtain_diminfo(this, size(local_data, dim=1)) -#ifndef TRANSPOSE_RESTART +#ifdef UNTRANSPOSE_RESTART call specify_variable(this, name, [depth_diminfo%idx, level_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .false., longname, units) ! untransposed #else call specify_variable(this, name, [level_diminfo%idx, depth_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .false., longname, units) @@ -431,7 +431,7 @@ subroutine specify_elem_var_3d(this, name, longname, units, local_data) level_diminfo = obtain_diminfo(this, m_elem2d) depth_diminfo = obtain_diminfo(this, size(local_data, dim=1)) -#ifndef TRANSPOSE_RESTART +#ifdef UNTRANSPOSE_RESTART call specify_variable(this, name, [depth_diminfo%idx, level_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .true., longname, units) ! untransposed #else call specify_variable(this, name, [level_diminfo%idx, depth_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .true., longname, units) From 403f365bb970e6c6e911ab208ca24f9fd9e52bcb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 16 Jul 2021 10:59:10 +0200 Subject: [PATCH 318/909] add information about the current year to the raw restart info --- src/io_restart.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 4e2bb6958..fe5cb5007 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -291,6 +291,7 @@ subroutine write_raw_restart(filegroup, istep) open(newunit = fileunit, file = raw_restart_infopath) write(fileunit, '(g0)') cstep write(fileunit, '(g0)') ctime + write(fileunit, '(2(g0))') "! year: ",yearnew close(fileunit) end if end subroutine From 0d774f479a09d5d03a0a676dc357ae22276125ae Mon Sep 17 00:00:00 2001 From: dsidoren Date: Mon, 19 Jul 2021 11:32:42 +0200 Subject: [PATCH 319/909] Update gen_forcing_couple.F90 a compilation mistake for AWICM 2.0 was pointed out by Gregor --- src/gen_forcing_couple.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 2a39b34b7..d17ee5138 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -231,8 +231,8 @@ subroutine update_atm_forcing(istep, mesh) mask=1. call force_flux_consv(enthalpyoffuse, mask, i, 0,action, mesh) end if - end if -#endif +#endif + end if #ifdef VERBOSE if (mype==0) then write(*,*) 'FESOM RECV: flux ', i, ', max val: ', maxval(exchange) From 79e09e36310bdbdb8a98501c992f0ca614eae9d6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 19 Jul 2021 15:45:36 +0200 Subject: [PATCH 320/909] omit MPI_ANY_SOURCE when scattering data and explicitly give the source rank, this makes the scatter algorithm more reliable (at least with with OpenMPI) --- src/io_scatter.F90 | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/io_scatter.F90 b/src/io_scatter.F90 index 663f8a4aa..e7e54e235 100644 --- a/src/io_scatter.F90 +++ b/src/io_scatter.F90 @@ -19,7 +19,7 @@ subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) integer :: tag = 0 integer :: mpi_precision = MPI_DOUBLE_PRECISION integer status(MPI_STATUS_SIZE) - integer :: n, sender_rank + integer :: remote_rank integer, allocatable :: remote_list_nod2d(:) real(real64), allocatable :: sendbuf(:) integer node_size @@ -28,20 +28,21 @@ subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) if(mype == root_rank) then arr2D_local = arr2D_global(mylist_nod2d) - do n = 1, npes-1 + do remote_rank = 0, npes-1 + if(remote_rank == root_rank) cycle + ! receive remote partition 2D size - call mpi_recv(node_size, 1, mpi_integer, MPI_ANY_SOURCE, tag+0, comm, status, mpierr) - sender_rank = status(mpi_source) + call mpi_recv(node_size, 1, mpi_integer, remote_rank, tag+0, comm, status, mpierr) ! receive remote mylist_nod2d allocate(remote_list_nod2d(node_size)) - call mpi_recv(remote_list_nod2d(1), node_size, mpi_integer, sender_rank, tag+1, comm, status, mpierr) + call mpi_recv(remote_list_nod2d(1), node_size, mpi_integer, remote_rank, tag+1, comm, status, mpierr) allocate(sendbuf(node_size)) sendbuf = arr2D_global(remote_list_nod2d) deallocate(remote_list_nod2d) - call mpi_send(sendbuf(1), node_size, mpi_precision, sender_rank, tag+2, comm, mpierr) + call mpi_send(sendbuf(1), node_size, mpi_precision, remote_rank, tag+2, comm, mpierr) deallocate(sendbuf) end do @@ -50,7 +51,7 @@ subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) call mpi_send(node_size, 1, mpi_integer, root_rank, tag+0, comm, mpierr) call mpi_send(mylist_nod2d(1), node_size, mpi_integer, root_rank, tag+1, comm, mpierr) - call mpi_recv(arr2D_local(1), node_size, mpi_precision, root_rank, tag+2, comm, status, mpierr) + call mpi_recv(arr2D_local(1), node_size, mpi_precision, root_rank, tag+2, comm, status, mpierr) ! aleph blocks here end if ! without a barrier, we get wrong results in arr2D_local @@ -72,7 +73,7 @@ subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) integer :: tag = 0 integer :: mpi_precision = MPI_DOUBLE_PRECISION integer status(MPI_STATUS_SIZE) - integer :: n, sender_rank + integer :: remote_rank integer, allocatable :: remote_list_elem2d(:) real(real64), allocatable :: sendbuf(:) integer elem_size @@ -82,20 +83,21 @@ subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) if(mype == root_rank) then arr2D_local = arr2D_global(myList_elem2D(1:elem_size)) - do n = 1, npes-1 + do remote_rank = 0, npes-1 + if(remote_rank == root_rank) cycle + ! receive remote partition 2D size - call mpi_recv(elem_size, 1, mpi_integer, MPI_ANY_SOURCE, tag+0, comm, status, mpierr) - sender_rank = status(mpi_source) + call mpi_recv(elem_size, 1, mpi_integer, remote_rank, tag+0, comm, status, mpierr) ! receive remote mylist_elem2d allocate(remote_list_elem2d(elem_size)) - call mpi_recv(remote_list_elem2d(1), elem_size, mpi_integer, sender_rank, tag+1, comm, status, mpierr) + call mpi_recv(remote_list_elem2d(1), elem_size, mpi_integer, remote_rank, tag+1, comm, status, mpierr) allocate(sendbuf(elem_size)) sendbuf = arr2D_global(remote_list_elem2d) deallocate(remote_list_elem2d) - call mpi_send(sendbuf(1), elem_size, mpi_precision, sender_rank, tag+2, comm, mpierr) + call mpi_send(sendbuf(1), elem_size, mpi_precision, remote_rank, tag+2, comm, mpierr) deallocate(sendbuf) end do From 7048007331c27f982ecef503fef20007d533498f Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 27 Jul 2021 14:54:29 +0200 Subject: [PATCH 321/909] group the write_raw_restart calls --- src/io_restart.F90 | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index fe5cb5007..eedbad1b9 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -186,8 +186,7 @@ subroutine restart(istep, l_read, mesh) if (use_ice) call read_restart(ice_path, ice_files) ! immediately create a raw restart if(raw_restart_length_unit /= "off") then - call write_raw_restart(oce_files, istep) - if(use_ice) call write_raw_restart(ice_files, istep) + call write_all_raw_restarts(istep) end if end if end if @@ -210,8 +209,7 @@ subroutine restart(istep, l_read, mesh) end if if(is_raw_restart_write) then - call write_raw_restart(oce_files, istep) - if(use_ice) call write_raw_restart(ice_files, istep) + call write_all_raw_restarts(istep) end if ! actualize clock file to latest restart point @@ -272,6 +270,15 @@ subroutine write_restart(path, filegroup, istep) end subroutine +subroutine write_all_raw_restarts(istep) + integer, intent(in):: istep + ! EO parameters + + call write_raw_restart(oce_files, istep) + if(use_ice) call write_raw_restart(ice_files, istep) +end subroutine + + subroutine write_raw_restart(filegroup, istep) type(restart_file_group), intent(inout) :: filegroup integer, intent(in):: istep From 1cddcecaa08a0aafed57bda18ffbae6573b3f5b3 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 27 Jul 2021 15:01:56 +0200 Subject: [PATCH 322/909] group the read_raw_restart calls --- src/io_restart.F90 | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index eedbad1b9..e2c4b3989 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -179,8 +179,7 @@ subroutine restart(istep, l_read, mesh) end if call MPI_Bcast(dumpfiles_exist, 1, MPI_LOGICAL, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) if(dumpfiles_exist) then - call read_raw_restart(oce_files) - if(use_ice) call read_raw_restart(ice_files) + call read_all_raw_restarts() else call read_restart(oce_path, oce_files) if (use_ice) call read_restart(ice_path, ice_files) @@ -304,6 +303,12 @@ subroutine write_raw_restart(filegroup, istep) end subroutine +subroutine read_all_raw_restarts() + call read_raw_restart(oce_files) + if(use_ice) call read_raw_restart(ice_files) +end subroutine + + subroutine read_raw_restart(filegroup) type(restart_file_group), intent(inout) :: filegroup ! EO parameters From e541c9ca6a5107139bf3f4599f3dcd23bafe95f2 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 27 Jul 2021 15:09:02 +0200 Subject: [PATCH 323/909] fix typo in comment --- src/io_restart.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index e2c4b3989..87f3b688e 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -319,7 +319,7 @@ subroutine read_raw_restart(filegroup) integer status if(mype == RAW_RESTART_METADATA_RANK) then - ! store metadata about the raw restart + ! read metadata info for the raw restart open(newunit = fileunit, status = 'old', iostat = status, file = raw_restart_infopath) if(status == 0) then read(fileunit,*) rstep From 6117e7baeb97d7a5a43d51fb9fbfe9f95db202fd Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 27 Jul 2021 15:21:51 +0200 Subject: [PATCH 324/909] move writing/reading the raw restart info to the write/read all wrapper procedures --- src/io_restart.F90 | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 87f3b688e..85bf08a24 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -272,24 +272,12 @@ subroutine write_restart(path, filegroup, istep) subroutine write_all_raw_restarts(istep) integer, intent(in):: istep ! EO parameters - - call write_raw_restart(oce_files, istep) - if(use_ice) call write_raw_restart(ice_files, istep) -end subroutine - - -subroutine write_raw_restart(filegroup, istep) - type(restart_file_group), intent(inout) :: filegroup - integer, intent(in):: istep - ! EO parameters - integer i integer cstep integer fileunit - - do i=1, filegroup%nfiles - call filegroup%files(i)%write_variables_raw(raw_restart_dirpath) - end do - + + call write_raw_restart(oce_files) + if(use_ice) call write_raw_restart(ice_files) + if(mype == RAW_RESTART_METADATA_RANK) then print *,"writing raw restart to "//raw_restart_dirpath ! store metadata about the raw restart @@ -303,16 +291,18 @@ subroutine write_raw_restart(filegroup, istep) end subroutine -subroutine read_all_raw_restarts() - call read_raw_restart(oce_files) - if(use_ice) call read_raw_restart(ice_files) -end subroutine - - -subroutine read_raw_restart(filegroup) +subroutine write_raw_restart(filegroup) type(restart_file_group), intent(inout) :: filegroup ! EO parameters integer i + + do i=1, filegroup%nfiles + call filegroup%files(i)%write_variables_raw(raw_restart_dirpath) + end do +end subroutine + + +subroutine read_all_raw_restarts() integer rstep real(kind=WP) rtime integer fileunit @@ -340,6 +330,16 @@ subroutine read_raw_restart(filegroup) end if ! sync globalstep with the other processes to let all processes writing portable restart files know the globalstep call MPI_Bcast(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) + + call read_raw_restart(oce_files) + if(use_ice) call read_raw_restart(ice_files) +end subroutine + + +subroutine read_raw_restart(filegroup) + type(restart_file_group), intent(inout) :: filegroup + ! EO parameters + integer i do i=1, filegroup%nfiles call filegroup%files(i)%read_variables_raw(raw_restart_dirpath) From 20197d32fad10c37b2891003bfa2534277137d00 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 27 Jul 2021 15:24:19 +0200 Subject: [PATCH 325/909] rename procedures to write/read raw restarts for a file group --- src/io_restart.F90 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 85bf08a24..fdbe13d4d 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -275,8 +275,8 @@ subroutine write_all_raw_restarts(istep) integer cstep integer fileunit - call write_raw_restart(oce_files) - if(use_ice) call write_raw_restart(ice_files) + call write_raw_restart_group(oce_files) + if(use_ice) call write_raw_restart_group(ice_files) if(mype == RAW_RESTART_METADATA_RANK) then print *,"writing raw restart to "//raw_restart_dirpath @@ -291,7 +291,7 @@ subroutine write_all_raw_restarts(istep) end subroutine -subroutine write_raw_restart(filegroup) +subroutine write_raw_restart_group(filegroup) type(restart_file_group), intent(inout) :: filegroup ! EO parameters integer i @@ -331,12 +331,12 @@ subroutine read_all_raw_restarts() ! sync globalstep with the other processes to let all processes writing portable restart files know the globalstep call MPI_Bcast(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) - call read_raw_restart(oce_files) - if(use_ice) call read_raw_restart(ice_files) + call read_raw_restart_group(oce_files) + if(use_ice) call read_raw_restart_group(ice_files) end subroutine -subroutine read_raw_restart(filegroup) +subroutine read_raw_restart_group(filegroup) type(restart_file_group), intent(inout) :: filegroup ! EO parameters integer i From dee13a1751ef94d1b875c1d4905e60f016d0c92c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 27 Jul 2021 15:45:23 +0200 Subject: [PATCH 326/909] write all raw restart variables to a single file per process --- src/io_fesom_file.F90 | 8 +++----- src/io_restart.F90 | 20 ++++++++++++++++---- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 45f3ac153..eadfea5e1 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -290,18 +290,16 @@ subroutine read_variables_raw(this, outdir) end subroutine - subroutine write_variables_raw(this, outdir) + subroutine write_variables_raw(this, fileunit) class(fesom_file_type), target :: this - character(len=*) outdir + integer, intent(in) :: fileunit ! EO parameters - integer i, fileunit + integer i type(var_info), pointer :: var do i=1, this%nvar_infos var => this%var_infos(i) - open(newunit = fileunit, file = outdir//'/'//var%varname//'_'//mpirank_to_txt()//'.dump', form = 'unformatted') write(fileunit) var%external_local_data_ptr ! directly use external_local_data_ptr, use the local_data_copy only when called asynchronously - close(fileunit) end do end subroutine diff --git a/src/io_restart.F90 b/src/io_restart.F90 index fdbe13d4d..e4a876981 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -275,8 +275,10 @@ subroutine write_all_raw_restarts(istep) integer cstep integer fileunit - call write_raw_restart_group(oce_files) - if(use_ice) call write_raw_restart_group(ice_files) + open(newunit = fileunit, file = raw_restart_dirpath//'/'//mpirank_to_txt()//'.dump', form = 'unformatted') + call write_raw_restart_group(oce_files, fileunit) + if(use_ice) call write_raw_restart_group(ice_files, fileunit) + close(fileunit) if(mype == RAW_RESTART_METADATA_RANK) then print *,"writing raw restart to "//raw_restart_dirpath @@ -291,13 +293,14 @@ subroutine write_all_raw_restarts(istep) end subroutine -subroutine write_raw_restart_group(filegroup) +subroutine write_raw_restart_group(filegroup, fileunit) type(restart_file_group), intent(inout) :: filegroup + integer, intent(in) :: fileunit ! EO parameters integer i do i=1, filegroup%nfiles - call filegroup%files(i)%write_variables_raw(raw_restart_dirpath) + call filegroup%files(i)%write_variables_raw(fileunit) end do end subroutine @@ -466,4 +469,13 @@ function is_due(unit, frequency, istep) result(d) end if end function + + function mpirank_to_txt() result(txt) + use g_PARSUP + use fortran_utils + character(:), allocatable :: txt + ! EO parameters + txt = int_to_txt_pad(mype,int(log10(real(npes)))+1) ! pad to the width of the number of processes + end function + end module From 957e40ff5049cc25aaa4bb15009a68e1b7bd3ba7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 27 Jul 2021 15:57:00 +0200 Subject: [PATCH 327/909] read all raw restart variables from a single file per process --- src/io_fesom_file.F90 | 24 ++++-------------------- src/io_restart.F90 | 16 ++++++++++++---- 2 files changed, 16 insertions(+), 24 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index eadfea5e1..fc20832b1 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -268,24 +268,17 @@ subroutine gather_and_write_variables(this) end subroutine - subroutine read_variables_raw(this, outdir) + subroutine read_variables_raw(this, fileunit) class(fesom_file_type), target :: this - character(len=*), intent(in) :: outdir + integer, intent(in) :: fileunit ! EO parameters - integer i, fileunit + integer i type(var_info), pointer :: var integer status do i=1, this%nvar_infos var => this%var_infos(i) - open(newunit = fileunit, status = 'old', iostat = status, file = outdir//'/'//var%varname//'_'//mpirank_to_txt()//'.dump', form = 'unformatted') - if(status == 0) then - read(fileunit) var%external_local_data_ptr ! directly use external_local_data_ptr, use the local_data_copy only when called asynchronously - close(fileunit) - else - print *,"can not open ",outdir//'/'//var%varname//'_'//mpirank_to_txt()//'.dump' - stop 1 - end if + read(fileunit) var%external_local_data_ptr ! directly use external_local_data_ptr, use the local_data_copy only when called asynchronously end do end subroutine @@ -512,15 +505,6 @@ subroutine close_file(this) end subroutine - function mpirank_to_txt() result(txt) - use g_PARSUP - use fortran_utils - character(:), allocatable :: txt - ! EO parameters - txt = int_to_txt_pad(mype,int(log10(real(npes)))+1) ! pad to the width of the number of processes - end function - - subroutine assert(val, line) logical, intent(in) :: val integer, intent(in) :: line diff --git a/src/io_restart.F90 b/src/io_restart.F90 index e4a876981..145cd2862 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -334,18 +334,26 @@ subroutine read_all_raw_restarts() ! sync globalstep with the other processes to let all processes writing portable restart files know the globalstep call MPI_Bcast(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) - call read_raw_restart_group(oce_files) - if(use_ice) call read_raw_restart_group(ice_files) + open(newunit = fileunit, status = 'old', iostat = status, file = raw_restart_dirpath//'/'//mpirank_to_txt()//'.dump', form = 'unformatted') + if(status == 0) then + call read_raw_restart_group(oce_files, fileunit) + if(use_ice) call read_raw_restart_group(ice_files, fileunit) + close(fileunit) + else + print *,"can not open ",raw_restart_dirpath//'/'//mpirank_to_txt()//'.dump' + stop 1 + end if end subroutine -subroutine read_raw_restart_group(filegroup) +subroutine read_raw_restart_group(filegroup, fileunit) type(restart_file_group), intent(inout) :: filegroup + integer, intent(in) :: fileunit ! EO parameters integer i do i=1, filegroup%nfiles - call filegroup%files(i)%read_variables_raw(raw_restart_dirpath) + call filegroup%files(i)%read_variables_raw(fileunit) end do end subroutine From b3dc1e6116fbd6abd864fcd79e2db1f584ab83fb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 27 Jul 2021 15:58:53 +0200 Subject: [PATCH 328/909] write to the raw restart info whether we have oce only or oce+ice in the dump files --- src/io_restart.F90 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 145cd2862..66a81b221 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -288,6 +288,8 @@ subroutine write_all_raw_restarts(istep) write(fileunit, '(g0)') cstep write(fileunit, '(g0)') ctime write(fileunit, '(2(g0))') "! year: ",yearnew + write(fileunit, '(g0)') "! oce" + if(use_ice) write(fileunit, '(g0)') "! ice" close(fileunit) end if end subroutine From 8f027448996563fa196054d06cf8914f04f97681 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 28 Jul 2021 11:56:23 +0200 Subject: [PATCH 329/909] fix typo in console output --- src/io_restart.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 66a81b221..7d0f1b3eb 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -404,7 +404,7 @@ subroutine read_restart(path, filegroup) #ifndef DISABLE_PARALLEL_RESTART_READ write(*,*) 'reading restart PARALLEL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path #else - write(*,*) 'reading restart SEQIENTIAL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path + write(*,*) 'reading restart SEQUENTIAL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path #endif call filegroup%files(i)%open_read(filegroup%files(i)%path) ! do we need to bother with read-only access? ! todo: print a reasonable error message if the file does not exist From 50e6303b40ae406a678b11eea5015468ce14f044 Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Wed, 28 Jul 2021 13:08:25 +0200 Subject: [PATCH 330/909] ice albedo again controllable from namelist, adding chunking back in io_meandata --- src/ice_thermo_cpl.F90 | 13 +++++++------ src/io_meandata.F90 | 6 +++--- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index c3f6e7234..8995e3b9e 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -515,26 +515,27 @@ subroutine ice_albedo(h,hsn,t,alb,geolat) real(kind=WP) :: t real(kind=WP) :: alb real(kind=WP) :: geolat + real(kind=WP) :: melt_pool_alb_reduction ! set albedo ! ice and snow, freezing and melting conditions are distinguished if (geolat.gt.0.) then !SH does not have melt ponds - albsnm = 0.79_WP + melt_pool_alb_reduction = 0.0_WP else - albsnm = 0.7_WP + melt_pool_alb_reduction = 0.12_WP endif if (h>0.0_WP) then if (t<273.15_WP) then ! freezing condition - if (hsn.gt.0.0_WP) then ! snow cover present + if (hsn.gt.0.001_WP) then ! snow cover present alb=albsn else ! no snow cover alb=albi endif else ! melting condition - if (hsn.gt.0.0_WP) then ! snow cover present - alb=albsnm + if (hsn.gt.0.001_WP) then ! snow cover present + alb=albsnm-melt_pool_alb_reduction else ! no snow cover - alb=albim + alb=albim-melt_pool_alb_reduction endif endif else diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 84f4211d4..17141883c 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -618,9 +618,9 @@ subroutine create_new_file(entry, mesh) call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, & (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) - !if (entry%ndim==2) then - ! call assert_nf( nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)), __LINE__); - !end if + if (entry%ndim==2) then + call assert_nf( nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)), __LINE__); + end if call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'description', len_trim(entry%description), entry%description), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'long_name', len_trim(entry%description), entry%description), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'units', len_trim(entry%units), entry%units), __LINE__) From 6b0511166c03b384561f265866f79e5709720fa8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 18 May 2021 16:05:36 +0200 Subject: [PATCH 331/909] include non local fluxes from KPP --- src/oce_ale_mixing_kpp.F90 | 17 ++- src/oce_ale_tracer.F90 | 278 +++++++++++++++++++++++++------------ src/oce_modules.F90 | 7 +- 3 files changed, 209 insertions(+), 93 deletions(-) diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index d08060e98..14b90ccfe 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -70,7 +70,9 @@ MODULE o_mixing_KPP_mod real(KIND=WP), dimension(0:nni+1,0:nnj+1) :: wst ! lookup table for ws, the turbulent velocity scale scalars logical :: smooth_blmc=.true. logical :: smooth_hbl =.false. - logical :: smooth_Ri =.false. + logical :: smooth_Ri_hor =.false. + logical :: smooth_Ri_ver =.false. + logical :: limit_hbl_ekmmob =.false. !.true. contains @@ -336,8 +338,10 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) DO node=1, myDim_nod2D !+eDim_nod2D nzmin = ulevels_nod2D(node) - ustar(node) = sqrt( sqrt( stress_atmoce_x(node)**2 + stress_atmoce_y(node)**2 )*density_0_r ) ! @ the surface (eqn. 2) - +!!PS ustar(node) = sqrt( sqrt( stress_atmoce_x(node)**2 + stress_atmoce_y(node)**2 )*density_0_r ) ! @ the surface (eqn. 2) + + ustar(node) = sqrt( sqrt( stress_node_surf(1,node)**2 + stress_node_surf(2,node)**2 )*density_0_r ) ! @ the surface (eqn. 2) + ! Surface buoyancy forcing (eqns. A2c & A2d & A3b & A3d) !!PS Bo(node) = -g * ( sw_alpha(1,node) * heat_flux(node) / vcpw & !heat_flux & water_flux: positive up !!PS + sw_beta (1,node) * water_flux(node) * tr_arr(1,node,2)) @@ -397,7 +401,8 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) ! only at the end should save some time call exchange_nod(diffK(:,:,1)) call exchange_nod(diffK(:,:,2)) - + call exchange_nod(ghats) + ! OVER ELEMENTS call exchange_nod(viscA) !Warning: don't forget to communicate before averaging on elements!!! minmix=3.0e-3_WP @@ -768,7 +773,7 @@ subroutine ri_iwmix(viscA, diffK, mesh) ! smooth Richardson number in the vertical using a 1-2-1 filter !!PS IF(smooth_richardson_number .and. nlevels_nod2d(node)>2) then - IF(smooth_richardson_number .and. nzmax>2) then + IF(smooth_Ri_ver .and. nzmax>2) then DO mr=1,num_smoothings ri_prev = 0.25_WP * diffK(1, node, 1) !!PS DO nz=2,nlevels_nod2d(node)-1 @@ -781,7 +786,7 @@ subroutine ri_iwmix(viscA, diffK, mesh) END IF END DO - if (smooth_Ri) then + if (smooth_Ri_hor) then call smooth_nod(diffK(:,:,1), 3, mesh) end if diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 06b2ad8b4..ee689cb26 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -400,7 +400,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) use g_PARSUP use g_CONFIG use g_forcing_arrays - use o_mixing_KPP_mod !for ghats _GO_ + use o_mixing_KPP_mod !for ghats _GO_ + use g_cvmix_kpp, only: kpp_nonlcltranspT, kpp_nonlcltranspS, kpp_oblmixc use bc_surface_interface implicit none @@ -425,36 +426,80 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) Ty1 =0.0_WP ! solve equation diffusion equation implicite part: - ! --> h^(n+0.5)* (T^(n+0.5)-Tstar) = dt*( K_33*d/dz*(T^(n+0.5)-Tstar) + K_33*d/dz*Tstar ) - ! --> dTnew = T^(n+0.5)-Tstar - ! --> h^(n+0.5)* (dTnew) = dt*(K_33*d/dz*dTnew) + K_33*dt*d/dz*Tstar - ! --> h^(n+0.5)* (dTnew) = dt*(K_33*d/dz*dTnew) + RHS - ! --> solve for dT_new - ! - ! ----------- zbar_1, V_1 (Skalar Volume), A_1 (Area of edge), no Cavity A1==V1, yes Cavity A1 !=V1 - ! Z_1 o T_1 - ! ----------- zbar_2, V_2 - ! Z_2 o T_2 - ! ----------- zbar_3, V_3 - ! Z_3 o T_3 - ! ----------- zbar_4 - ! : - ! --> Difference Quotient at Volume _2: ddTnew_2/dt + d/dz*K_33 d/dz*dTnew_2 = 0 --> homogene solution - ! V2*dTnew_2 *h^(n+0.5) = -dt * [ (dTnew_1-dTnew_2)/(Z_1-Z_2)*A_2 + (dTnew_2-dTnew_3)/(Z_2-Z_3)*A_3 ] + RHS - ! dTnew_2 *h^(n+0.5) = -dt * [ (dTnew_1-dTnew_2)/(Z_1-Z_2)*A_2/V_2 + (dTnew_2-dTnew_3)/(Z_2-Z_3)*A_3/V_2 ] + RHS - ! | | - ! v v - ! diffusive flux towards diffusive flux towards - ! T_2 trough boundary V2 T_2 trough boundary V3 - ! - ! --> solve coefficents for homogene part - ! dTnew_2 *h^(n+0.5) = -dt * [ a*dTnew_1 + b*dTnew_2 + c*dTnew_3 ] + ! --> h^(n+0.5)* (T^(n+0.5)-Tstar) = dt*( K_33*d/dz*(T^(n+0.5)-Tstar) + K_33*d/dz*Tstar ) + ! --> Tnew = T^(n+0.5)-Tstar + ! --> h^(n+0.5)* (Tnew) = dt*(K_33*d/dz*Tnew) + K_33*dt*d/dz*Tstar + ! --> h^(n+0.5)* (Tnew) = dt*(K_33*d/dz*Tnew) + RHS + ! --> solve for T_new + ! --> V_1 (Skalar Volume), A_1 (Area of edge), . + ! no Cavity A1==V1, yes Cavity A1 !=V1 /I\ nvec_up (+1) + ! I + ! ----------- zbar_1, A_1 *----I----* + ! Z_1 o T_1, V1 |\ I ./| + ! ----------- zbar_2, A_2 | \ ./ | Gaus Theorem: + ! Z_2 o T_2, V2 | \ / | --> Flux form + ! ----------- zbar_3, A_3 | | | --> normal vec outwards facing + ! Z_3 o T_3, V3 *---|-----* + ! ----------- zbar_4 \ | I ./ + ! : \ | I/ + ! \|/I + ! * I + ! \I/ + ! * nvec_dwn (-1) + ! --> 1st. solve homogenouse part: + ! f(Tnew) = h^(n+0.5)* (Tnew) - dt*(K_33*dTnew/dz) = 0 ! - ! --> a = -dt*K_33/(Z_1-Z_2)*A_2/V_2 + ! --> 2nd. Difference Quotient at Tnew_i in flux form (Gaus Theorem, dont forget normal vectors!!!): + ! V_i*Tnew_i *h_i = -dt * [ K_33 * (Tnew_i-1 - Tnew_i)/(Z_i-1 - Z_i) * A_i * nvec_up + ! +K_33 * (Tnew_i - Tnew_i+1)/(Z_i - Z_i+1) * A_i+1 * nvec_dwn ] + ! Tnew_i *h_i = -dt * [ K_33 * (Tnew_i-1 - Tnew_i)/(Z_i-1 - Z_i) * A_i /V_i * nvec_up + ! +K_33 * (Tnew_i - Tnew_i+1)/(Z_i - Z_i+1) * A_i+1/V_i * nvec_dwn ] + ! + ! --> 3rd. solve for coefficents a, b, c: + ! f(Tnew) = [ a*dTnew_i-1 + b*dTnew_i + c*dTnew_i+1 ] + ! + ! df(Tnew)/dTnew_i-1 = a = -dt*K_33/(Z_i-1 - Z_i) * A_i/V_i * (nvec_up =1) + ! + ! df(Tnew)/dTnew_i+1 = c = dt * K_33 * 1/(Z_i - Z_i+1) * A_i+1/V_i * (nvec_dwn=-1) + ! = -dt * K_33 * 1/(Z_i - Z_i+1) * A_i+1/V_i + ! + ! df(Tnew)/dTnew_i = b = h_i + dt*K_33/(Z_i-1 - Z_i) * A_i/V_i * (nvec_up=+1) + ! - dt*K_33/(Z_i - Z_i+1) * A_i+1/V_i * (nvec_dwn=-1) + ! = h_i + dt*K_33/(Z_i-1 - Z_i) * A_i/V_i + ! + dt*K_33/(Z_i - Z_i+1) * A_i+1/V_i + ! = h_i -(a+c) + ! + ! --> 4th. solve inhomogenous part: + ! [ a*dTnew_i-1 + b*dTnew_i + c*dTnew_i+1 ] = RHS/V_i + ! + ! RHS = K_33*dt*d/dz*Tstar + ! + ! --> write as Difference Quotient in flux form + ! RHS/V_i = K_33 * dt * (Tstar_i-1 - Tstar_i)/(Z_i-1 - Z_i) * A_i/V_i * (nvec_up=1) + ! + K_33 * dt * (Tstar_i - Tstar_i+1)/(Z_i - Z_i+1) * A_i+1/V_i * (nvec_dwn=-1) + ! + ! = K_33*dt/(Z_i-1 - Z_i) * A_i/V_i * Tstar_i-1 + ! - K_33*dt/(Z_i-1 - Z_i) * A_i/V_i * Tstar_i + ! - K_33*dt/(Z_i - Z_i+1) * A_i+1/V_i * Tstar_i + ! + K_33*dt/(Z_i - Z_i+1) * A_i+1/V_i * Tstar_i+1 + ! + ! = -a*Tstar_i-1 + (a+c)*Tstar_i - c * Tstar_i+1 + ! |-> b = h_i - (a+c), a+c = h_i-b + ! + ! = -a*Tstar_i-1 - (b-h_i)*Tstar_i - c * Tstar_i+1 + ! + ! --> 5th. solve for Tnew_i --> forward sweep algorithm --> see lower + ! | b_1 c_1 ... | |dTnew_1| + ! | a_2 b_2 c_2 ... | |dTnew_2| + ! | a_3 b_3 c_3 ... | * |dTnew_3| = RHS/V_i + ! | a_4 b_4 c_4 ...| |dTnew_4| + ! | : | | : | + ! + ! --> a = -dt*K_33 / (Z_i-1 - Z_i) * A_i/V_i ! - ! --> c = -dt*K_33/(Z_2-Z_3)*A_3/V_2 + ! --> c = -dt*K_33 / (Z_i - Z_i+1) * A_i+1/V_i ! - ! --> b = h^(n+0.5) -[ dt*K_33/(Z_1-Z_2)*A_2/V_2 + dt*K_33/(Z_2-Z_3)*A_3/V_2 ] = -(a+c) + h^(n+0.5) + ! --> b = h^(n+0.5) -[ dt*K_33/(Z_i-1 - Z_i)*A_i/V_i + dt*K_33/(Z_i - Z_i+1) * A_i+1/V_i ] = -(a+c) + h^(n+0.5) !___________________________________________________________________________ ! loop over local nodes @@ -480,20 +525,16 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! Be carefull here vertical operation have to be done on NEW vertical mesh !!! zbar_n=0.0_WP Z_n=0.0_WP -! zbar_n(nzmax)=zbar(nzmax) zbar_n(nzmax)=zbar_n_bot(n) Z_n(nzmax-1)=zbar_n(nzmax) + hnode_new(nzmax-1,n)/2.0_WP - !!PS do nz=nzmax-1,2,-1 do nz=nzmax-1,nzmin+1,-1 zbar_n(nz) = zbar_n(nz+1) + hnode_new(nz,n) Z_n(nz-1) = zbar_n(nz) + hnode_new(nz-1,n)/2.0_WP end do - !!PS zbar_n(1) = zbar_n(2) + hnode_new(1,n) zbar_n(nzmin) = zbar_n(nzmin+1) + hnode_new(nzmin,n) !_______________________________________________________________________ ! Regular part of coefficients: --> surface layer - !!PS nz=1 nz=nzmin ! 1/dz(nz) @@ -507,25 +548,20 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! layer dependent coefficients for for solving dT(1)/dt+d/dz*K_33*d/dz*T(1) = ... a(nz)=0.0_WP - !!PS c(nz)=-(Kv(2,n)+Ty1)*zinv2*zinv*area(nz+1,n)/area(nz,n) - c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/areasvol(nz,n) + c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * (area(nz+1,n)/areasvol(nz,n)) b(nz)=-c(nz)+hnode_new(nz,n) ! ale ! update from the vertical advection --> comes from splitting of vert ! velocity into explicite and implicite contribution if (do_wimpl) then - !!PS v_adv =zinv*area(nz+1,n)/areasvol(nz,n) - !!PS b(nz) =b(nz)+Wvel_i(nz, n)*zinv-min(0._WP, Wvel_i(nz+1, n))*v_adv - !!PS c(nz) =c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv - !___________________________________________________________________ ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for ! numerical reasons, to gurante that area/areasvol in case of no ! cavity is ==1.0_WP - v_adv =zinv* ( area(nz ,n)/areasvol(nz,n) ) + v_adv =zinv * ( area(nz ,n)/areasvol(nz,n) ) b(nz) =b(nz)+Wvel_i(nz, n)*v_adv - v_adv =zinv*area(nz+1,n)/areasvol(nz,n) + v_adv =zinv * ( area(nz+1,n)/areasvol(nz,n) ) b(nz) =b(nz)-min(0._WP, Wvel_i(nz+1, n))*v_adv c(nz) =c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv end if @@ -534,7 +570,6 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) !_______________________________________________________________________ ! Regular part of coefficients: --> 2nd...nl-2 layer - !!PS do nz=2, nzmax-2 do nz=nzmin+1, nzmax-2 ! 1/dz(nz) @@ -552,8 +587,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for ! numerical reasons, to gurante that area/areasvol in case of no ! cavity is ==1.0_WP - a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv* ( area(nz ,n)/areasvol(nz,n) ) - c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/areasvol(nz,n) + a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv * ( area(nz ,n)/areasvol(nz,n) ) + c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * ( area(nz+1,n)/areasvol(nz,n) ) b(nz)=-a(nz)-c(nz)+hnode_new(nz,n) ! backup zinv2 for next depth level @@ -565,15 +600,15 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for ! numerical reasons, to gurante that area/areasvol in case of no ! cavity is ==1.0_WP - v_adv=zinv* ( area(nz ,n)/areasvol(nz,n) ) + v_adv=zinv * ( area(nz ,n)/areasvol(nz,n) ) a(nz)=a(nz)+min(0._WP, Wvel_i(nz, n))*v_adv b(nz)=b(nz)+max(0._WP, Wvel_i(nz, n))*v_adv !!PS v_adv=v_adv*areasvol(nz+1,n)/areasvol(nz,n) - v_adv=zinv*area(nz+1,n)/areasvol(nz,n) + v_adv=zinv * ( area(nz+1,n)/areasvol(nz,n) ) b(nz)=b(nz)-min(0._WP, Wvel_i(nz+1, n))*v_adv c(nz)=c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv end if - end do ! --> do nz=2, nzmax-2 + end do ! --> do nz=nzmin+1, nzmax-2 !_______________________________________________________________________ ! Regular part of coefficients: --> nl-1 layer @@ -582,8 +617,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) zinv=1.0_WP*dt ! no ... /(zbar(nzmax-1)-zbar(nzmax)) because of ale ! calculate isoneutral diffusivity : Kd*s^2 --> K_33 = Kv + Kd*s^2 - Ty= (Z_n(nz-1)-zbar_n(nz)) *zinv1 *slope_tapered(3,nz-1,n)**2*Ki(nz-1,n) + & - (zbar_n(nz)-Z_n(nz)) *zinv1 *slope_tapered(3,nz,n)**2 *Ki(nz,n) + Ty= (Z_n(nz-1) -zbar_n(nz)) * zinv1 * slope_tapered(3,nz-1,n)**2 * Ki(nz-1,n) + & + (zbar_n(nz)-Z_n(nz) ) * zinv1 * slope_tapered(3,nz ,n)**2 * Ki(nz,n) Ty =Ty *isredi ! layer dependent coefficients for for solving dT(nz)/dt+d/dz*K_33*d/dz*T(nz) = ... @@ -616,56 +651,134 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! ! -+--> tr(1) =(a(1)+c(1))*tr_arr(1,n,tr_num)-c(1)*tr_arr(2,n,tr_num) ! |--> a(1)=0 - !!PS nz=1 nz=nzmin dz=hnode_new(nz,n) ! It would be (zbar(nz)-zbar(nz+1)) if not ALE tr(nz)=-(b(nz)-dz)*tr_arr(nz,n,tr_num)-c(nz)*tr_arr(nz+1,n,tr_num) !tr(nz)=c(nz)*(tr_arr(nz,n,tr_num) - tr_arr(nz+1,n,tr_num)) - - ! ******************************************************************* - ! nonlocal transport to the rhs (only T and S currently) _GO_ - ! ******************************************************************* - ! rsss will be used later to compute: - ! 1. the virtual salinity flux - ! 2. the contribution from the nonlocal term in KPP for salinity - if (tr_num==2) then - rsss=ref_sss - if (ref_sss_local) rsss=tr_arr(1,n,2) - end if - - !!PS do nz=2,nzmax-2 do nz=nzmin+1,nzmax-2 dz=hnode_new(nz,n) - tr(nz)=-a(nz)*tr_arr(nz-1,n,tr_num)-(b(nz)-dz)*tr_arr(nz,n,tr_num)-c(nz)*tr_arr(nz+1,n,tr_num) - !tr(nz)=-a(nz)*tr_arr(nz-1,n,tr_num) & - ! -c(nz)*tr_arr(nz+1,n,tr_num) & - ! +(a(nz)+c(nz))*tr_arr(nz,n,tr_num) + tr(nz)= -a(nz) * tr_arr(nz-1,n,tr_num) & + -(b(nz)-dz)* tr_arr(nz,n,tr_num) & + -c(nz) * tr_arr(nz+1,n,tr_num) + !tr(nz)=-a(nz) * tr_arr(nz-1,n,tr_num) & + ! +(a(nz)+c(nz))* tr_arr(nz,n,tr_num) & + ! -c(nz) * tr_arr(nz+1,n,tr_num) - ! ******************************************************************* - ! nonlocal transport to the rhs (only T and S currently) _GO_ - ! ******************************************************************* -!leads to non conservation in 8th digit. needs to be checked! -! if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then -! if (tr_num==1) then ! T -! tr(nz)=tr(nz)+(MIN(ghats(nz,n)*Kv(nz,n), 1.0_WP)-MIN(ghats(nz+1,n)*Kv(nz+1,n), 1.0_WP)*area(nz+1,n)/area(nz,n))*heat_flux(n)/vcpw -! elseif (tr_num==2) then ! S -! tr(nz)=tr(nz)-(MIN(ghats(nz,n)*Kv(nz,n), 1.0_WP)-MIN(ghats(nz+1,n)*Kv(nz+1,n), 1.0_WP)*area(nz+1,n)/area(nz,n))*rsss*water_flux(n) -! end if -! end if end do + nz=nzmax-1 dz=hnode_new(nz,n) tr(nz)=-a(nz)*tr_arr(nz-1,n,tr_num)-(b(nz)-dz)*tr_arr(nz,n,tr_num) !tr(nz)=-a(nz)*tr_arr(nz-1,n,tr_num)+a(nz)*tr_arr(nz,n,tr_num) + !_______________________________________________________________________ + ! Add KPP nonlocal fluxes to the rhs (only T and S currently) + ! use here blmc or kpp_oblmixc instead of Kv, since Kv already contains + ! at this point the mixing enhancments from momix, instable + ! mixing or windmixing which are to much for nonlocal + ! transports and lead to instability of the model + if (use_kpp_nonlclflx) then + if (tr_num==2) then + rsss=ref_sss + if (ref_sss_local) rsss=tr_arr(1,n,2) + end if + + !___________________________________________________________________ + ! use fesom1.4 KPP + if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then + if (tr_num==1) then ! temperature + ! --> no fluxes to the top out of the surface, no fluxes + ! downwards out of the bottom + !___surface_________________________________________________ + nz = nzmin + tr(nz)=tr(nz) & + +(-MIN(ghats(nz+1,n)*blmc(nz+1,n,2), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + !___bulk____________________________________________________ + do nz=nzmin+1, nzmax-2 + tr(nz)=tr(nz) & + +( MIN(ghats(nz ,n)*blmc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + -MIN(ghats(nz+1,n)*blmc(nz+1,n,2), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + end do + !___bottom__________________________________________________ + nz = nzmax-1 + tr(nz)=tr(nz) & + +( MIN(ghats(nz ,n)*blmc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + + elseif (tr_num==2) then ! salinity + ! --> no fluxes to the top out of the surface, no fluxes + ! downwards out of the bottom + !___surface_________________________________________________ + nz = nzmin + tr(nz)=tr(nz) & + -(-MIN(ghats(nz+1,n)*blmc(nz+1,n,3), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + !___bulk____________________________________________________ + do nz=nzmin+1, nzmax-2 + tr(nz)=tr(nz) & + -( MIN(ghats(nz ,n)*blmc(nz ,n,3), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + -MIN(ghats(nz+1,n)*blmc(nz+1,n,3), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + end do + !___bottom__________________________________________________ + nz = nzmax-1 + tr(nz)=tr(nz) & + -( MIN(ghats(nz ,n)*blmc(nz ,n,3), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + end if + !___________________________________________________________________ + ! use cvmix KPP + elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then + if (tr_num==1) then ! temperature + !___surface_________________________________________________ + nz = nzmin + tr(nz)=tr(nz) & + +(-MIN(kpp_nonlcltranspT(nz+1,n)*kpp_oblmixc(nz+1,n,2), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + !___bulk____________________________________________________ + do nz=nzmin+1, nzmax-2 + tr(nz)=tr(nz) & + +( MIN(kpp_nonlcltranspT(nz ,n)*kpp_oblmixc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + -MIN(kpp_nonlcltranspT(nz+1,n)*kpp_oblmixc(nz+1,n,2), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + end do + !___bottom__________________________________________________ + nz = nzmax-1 + tr(nz)=tr(nz) & + +( MIN(kpp_nonlcltranspT(nz ,n)*kpp_oblmixc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + + elseif (tr_num==2) then ! salinity + !___surface_________________________________________________ + nz = nzmin + tr(nz)=tr(nz) & + -(-MIN(kpp_nonlcltranspS(nz+1,n)*kpp_oblmixc(nz+1,n,3), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + !___bulk____________________________________________________ + do nz=nzmin+1, nzmax-2 + tr(nz)=tr(nz) & + -( MIN(kpp_nonlcltranspS(nz ,n)*kpp_oblmixc(nz ,n,3), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + -MIN(kpp_nonlcltranspS(nz+1,n)*kpp_oblmixc(nz+1,n,3), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + end do + !___bottom__________________________________________________ + nz = nzmax-1 + tr(nz)=tr(nz) & + -( MIN(kpp_nonlcltranspS(nz ,n)*kpp_oblmixc(nz ,n,3), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + end if + end if + end if ! --> if (use_kpp_nonlclflx) then + !_______________________________________________________________________ ! case of activated shortwave penetration into the ocean, ad 3d contribution if (use_sw_pene .and. tr_num==1) then - !!PS do nz=1, nzmax-1 do nz=nzmin, nzmax-1 zinv=1.0_WP*dt !/(zbar(nz)-zbar(nz+1)) ale! - tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n)*area(nz+1,n)/areasvol(nz,n))*zinv + tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n) * ( area(nz+1,n)/areasvol(nz,n)) ) * zinv end do end if @@ -681,7 +794,6 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! (BUT CHECK!) | | | | ! v (+) v (+) ! - !!PS tr(1)= tr(1)+bc_surface(n, tracer_id(tr_num)) tr(nzmin)= tr(nzmin)+bc_surface(n, tracer_id(tr_num),mesh) !_______________________________________________________________________ @@ -705,13 +817,10 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! --> dTnew_i = rhs'_i-c'_i*dTnew_i+1 ; i = n-1,n-2,...,1 ! ! initialize c-prime and s,t-prime - !!PS cp(1) = c(1)/b(1) - !!PS tp(1) = tr(1)/b(1) cp(nzmin) = c(nzmin)/b(nzmin) tp(nzmin) = tr(nzmin)/b(nzmin) ! solve for vectors c-prime and t, s-prime - !!PS do nz = 2,nzmax-1 do nz = nzmin+1,nzmax-1 m = b(nz)-cp(nz-1)*a(nz) cp(nz) = c(nz)/m @@ -722,7 +831,6 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) tr(nzmax-1) = tp(nzmax-1) ! solve for x from the vectors c-prime and d-prime - !!PS do nz = nzmax-2, 1, -1 do nz = nzmax-2, nzmin, -1 tr(nz) = tp(nz)-cp(nz)*tr(nz+1) end do @@ -730,12 +838,10 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) !_______________________________________________________________________ ! update tracer ! tr ... dTnew = T^(n+0.5) - T* - !!PS do nz=1,nzmax-1 do nz=nzmin,nzmax-1 ! tr_arr - before ... T* tr_arr(nz,n,tr_num)=tr_arr(nz,n,tr_num)+tr(nz) ! tr_arr - after ... T^(n+0.5) = dTnew + T* = T^(n+0.5) - T* + T* - end do end do ! --> do n=1,myDim_nod2D diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 75d40868c..d2e93315b 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -145,6 +145,10 @@ MODULE o_PARAM real(kind=WP) :: density_ref_T = 2.0_WP real(kind=WP) :: density_ref_S = 34.0_WP +!_______________________________________________________________________________ +! use k-profile nonlocal fluxes +logical :: use_kpp_nonlclflx = .false. + !_______________________________________________________________________________ ! *** active tracer cutoff logical :: limit_salinity=.true. !set an allowed range for salinity @@ -182,7 +186,8 @@ MODULE o_PARAM use_momix, momix_lat, momix_kv, & use_instabmix, instabmix_kv, & use_windmix, windmix_kv, windmix_nl, & - smooth_bh_tra, gamma0_tra, gamma1_tra, gamma2_tra + smooth_bh_tra, gamma0_tra, gamma1_tra, gamma2_tra, & + use_kpp_nonlclflx END MODULE o_PARAM !========================================================== From ee76e29f2f0fc160c71b9d51b3bcaf327357352c Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 18 May 2021 16:07:44 +0200 Subject: [PATCH 332/909] revert total back total surface stress so github testcase does not fail --- src/oce_ale_mixing_kpp.F90 | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 14b90ccfe..fd0d3335c 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -338,9 +338,8 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) DO node=1, myDim_nod2D !+eDim_nod2D nzmin = ulevels_nod2D(node) -!!PS ustar(node) = sqrt( sqrt( stress_atmoce_x(node)**2 + stress_atmoce_y(node)**2 )*density_0_r ) ! @ the surface (eqn. 2) - - ustar(node) = sqrt( sqrt( stress_node_surf(1,node)**2 + stress_node_surf(2,node)**2 )*density_0_r ) ! @ the surface (eqn. 2) + ustar(node) = sqrt( sqrt( stress_atmoce_x(node)**2 + stress_atmoce_y(node)**2 )*density_0_r ) ! @ the surface (eqn. 2) +!!PS ustar(node) = sqrt( sqrt( stress_node_surf(1,node)**2 + stress_node_surf(2,node)**2 )*density_0_r ) ! @ the surface (eqn. 2) ! Surface buoyancy forcing (eqns. A2c & A2d & A3b & A3d) !!PS Bo(node) = -g * ( sw_alpha(1,node) * heat_flux(node) / vcpw & !heat_flux & water_flux: positive up From 3ce19489ca404338c4fa7c120c078d0b353b7a9d Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 19 May 2021 13:53:11 +0200 Subject: [PATCH 333/909] revert back some of the ( area(nz+1,n)/areasvol(nz,n) ) brackets for numerical reasons so that github testcase does not fail but with brackets would be most likely better. Add them add later point --- src/oce_ale_tracer.F90 | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index ee689cb26..fd4378cbd 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -548,7 +548,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! layer dependent coefficients for for solving dT(1)/dt+d/dz*K_33*d/dz*T(1) = ... a(nz)=0.0_WP - c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * (area(nz+1,n)/areasvol(nz,n)) + !!PS c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * (area(nz+1,n)/areasvol(nz,n)) + c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * area(nz+1,n)/areasvol(nz,n) b(nz)=-c(nz)+hnode_new(nz,n) ! ale ! update from the vertical advection --> comes from splitting of vert @@ -561,7 +562,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) v_adv =zinv * ( area(nz ,n)/areasvol(nz,n) ) b(nz) =b(nz)+Wvel_i(nz, n)*v_adv - v_adv =zinv * ( area(nz+1,n)/areasvol(nz,n) ) + !!PS v_adv =zinv * ( area(nz+1,n)/areasvol(nz,n) ) + v_adv =zinv * area(nz+1,n)/areasvol(nz,n) b(nz) =b(nz)-min(0._WP, Wvel_i(nz+1, n))*v_adv c(nz) =c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv end if @@ -588,7 +590,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! numerical reasons, to gurante that area/areasvol in case of no ! cavity is ==1.0_WP a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv * ( area(nz ,n)/areasvol(nz,n) ) - c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * ( area(nz+1,n)/areasvol(nz,n) ) + !!PS c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * ( area(nz+1,n)/areasvol(nz,n) ) + c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * area(nz+1,n)/areasvol(nz,n) b(nz)=-a(nz)-c(nz)+hnode_new(nz,n) ! backup zinv2 for next depth level @@ -604,7 +607,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) a(nz)=a(nz)+min(0._WP, Wvel_i(nz, n))*v_adv b(nz)=b(nz)+max(0._WP, Wvel_i(nz, n))*v_adv !!PS v_adv=v_adv*areasvol(nz+1,n)/areasvol(nz,n) - v_adv=zinv * ( area(nz+1,n)/areasvol(nz,n) ) + !!PS v_adv=zinv * ( area(nz+1,n)/areasvol(nz,n) ) + v_adv=zinv * area(nz+1,n)/areasvol(nz,n) b(nz)=b(nz)-min(0._WP, Wvel_i(nz+1, n))*v_adv c(nz)=c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv end if @@ -778,7 +782,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) if (use_sw_pene .and. tr_num==1) then do nz=nzmin, nzmax-1 zinv=1.0_WP*dt !/(zbar(nz)-zbar(nz+1)) ale! - tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n) * ( area(nz+1,n)/areasvol(nz,n)) ) * zinv + !!PS tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n) * ( area(nz+1,n)/areasvol(nz,n)) ) * zinv + tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n) * area(nz+1,n)/areasvol(nz,n)) * zinv end do end if From 461fb1b4903220f5b836adb2fb9abff4b674e9ea Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 28 Jun 2021 15:49:18 +0200 Subject: [PATCH 334/909] - do not create uninitialized copies of variables for threads via OpenMP - explicitly disable OpenMP compiling for the Cray ftn compiler --- src/CMakeLists.txt | 2 +- src/gen_modules_partitioning.F90 | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a04db6736..5a0417889 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -75,7 +75,7 @@ elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) - target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index a914a9d51..770229964 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -72,12 +72,6 @@ module g_PARSUP integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) logical :: elem_full_flag -!$OMP threadprivate(com_nod2D,com_elem2D,com_elem2D_full) -!$OMP threadprivate(mype) -!$OMP threadprivate(myDim_nod2D, eDim_nod2D, myList_nod2D) -!$OMP threadprivate(myDim_elem2D, eDim_elem2D, eXDim_elem2D, myList_elem2D) -!$OMP threadprivate(myDim_edge2D, eDim_edge2D, myList_edge2D) - contains subroutine par_init ! initializes MPI From d9943489853bf51f511c944daa60aa650cb38c43 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 8 Jul 2021 15:08:21 +0200 Subject: [PATCH 335/909] give default values to uninitialized variables --- src/oce_ale_tracer.F90 | 3 +++ src/oce_ice_init_state.F90 | 4 ++++ src/oce_vel_rhs_vinv.F90 | 1 + 3 files changed, 8 insertions(+) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index fd4378cbd..0df3b0eb6 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -342,6 +342,9 @@ subroutine diff_ver_part_expl_ale(tr_num, mesh) real(kind=WP) :: zinv1,Ty #include "associate_mesh.h" + + Ty = 0.0_WP + !___________________________________________________________________________ do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 diff --git a/src/oce_ice_init_state.F90 b/src/oce_ice_init_state.F90 index ecb31fd31..a637515ad 100755 --- a/src/oce_ice_init_state.F90 +++ b/src/oce_ice_init_state.F90 @@ -372,6 +372,8 @@ subroutine init_fields_na_test(mesh) #include "associate_mesh.h" + c_status = .false. + ! =================== ! Fill the model fields with dummy values ! =================== @@ -478,6 +480,8 @@ subroutine init_fields_global_test(mesh) #include "associate_mesh.h" + c_status = .false. + ! =================== ! Fill the model fields with dummy values ! =================== diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index 849b5aea9..46881e065 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -120,6 +120,7 @@ subroutine compute_vel_rhs_vinv(mesh) !vector invariant real(kind=WP) :: density0_inv = 1./density_0 #include "associate_mesh.h" + w = 0.0_WP uvert=0.0_WP From 522d46c4931c17d124fc513878b16426a9675d7c Mon Sep 17 00:00:00 2001 From: suvarchal Date: Tue, 13 Jul 2021 03:48:11 +0200 Subject: [PATCH 336/909] fixes #152 by adding lon, lat to fesom.mesh.diag.nc --- src/io_mesh_info.F90 | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/io_mesh_info.F90 b/src/io_mesh_info.F90 index 551c063e7..7378332cc 100644 --- a/src/io_mesh_info.F90 +++ b/src/io_mesh_info.F90 @@ -4,6 +4,7 @@ module io_mesh_info use g_config use g_comm_auto use o_ARRAYS +use o_PARAM implicit none #include "netcdf.inc" @@ -50,6 +51,7 @@ subroutine write_mesh_info(mesh) integer :: zbar_e_bot_id,zbar_n_bot_id integer :: elem_id integer :: nod_id + integer :: lon_id, lat_id character(100) :: longname character(2000) :: filename real(kind=WP), allocatable :: rbuffer(:), lrbuffer(:) @@ -88,7 +90,10 @@ subroutine write_mesh_info(mesh) call my_def_var(ncid, 'nod_part', NF_INT, 1, (/nod_n_id/), nod_part_id, 'nodal partitioning at the cold start' ) call my_def_var(ncid, 'elem_part', NF_INT, 1, (/elem_n_id/), elem_part_id, 'element partitioning at the cold start') call my_def_var(ncid, 'zbar_e_bottom', NF_DOUBLE, 1, (/elem_n_id/), zbar_e_bot_id, 'element bottom depth') - call my_def_var(ncid, 'zbar_n_bottom', NF_DOUBLE, 1, (/nod_n_id/) , zbar_n_bot_id, 'nodal bottom depth') + call my_def_var(ncid, 'zbar_n_bottom', NF_DOUBLE, 1, (/nod_n_id/), zbar_n_bot_id, 'nodal bottom depth') + call my_def_var(ncid, 'lon', NF_DOUBLE, 1, (/nod_n_id/), lon_id, 'longitude') + call my_def_var(ncid, 'lat', NF_DOUBLE, 1, (/nod_n_id/), lat_id, 'latitude') + ! 2D call my_def_var(ncid, 'nod_area', NF_DOUBLE, 2, (/nod_n_id, nl_id/), nod_area_id, 'nodal areas' ) call my_def_var(ncid, 'elements', NF_INT, 2, (/elem_n_id, id_3/), elem_id, 'elements' ) @@ -185,7 +190,13 @@ subroutine write_mesh_info(mesh) allocate(rbuffer(nod2D)) do i=1, 2 call gather_nod(geo_coord_nod2D(i, 1:myDim_nod2D), rbuffer) + rbuffer = rbuffer/rad call my_put_vara(ncid, nod_id, (/1, i/), (/nod2D, 1/), rbuffer) + if (i == 1) then + call my_put_vara(ncid, lon_id, 1, nod2D, rbuffer) + else + call my_put_vara(ncid, lat_id, 1, nod2D, rbuffer) + endif end do deallocate(rbuffer) From c0b96bfdae9a9f33c8c17bbbcf951edcde570b08 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 22 Jul 2021 16:33:16 +0200 Subject: [PATCH 337/909] when no snow file is given, rewrite prec_rain and prec_snow array, so that freshwater balancing adds up --- config/namelist.forcing.ncep2 | 56 +++++++++++++++++++++++++++++++++++ src/ice_thermo_oce.F90 | 10 ++++++- 2 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 config/namelist.forcing.ncep2 diff --git a/config/namelist.forcing.ncep2 b/config/namelist.forcing.ncep2 new file mode 100644 index 000000000..925dd1828 --- /dev/null +++ b/config/namelist.forcing.ncep2 @@ -0,0 +1,56 @@ +! This is the namelist file for forcing + +&forcing_exchange_coeff +Ce_atm_oce=0.00175 ! exchange coeff. of latent heat over open water +Ch_atm_oce=0.00175 ! exchange coeff. of sensible heat over open water +Cd_atm_oce=0.001 ! drag coefficient between atmosphere and water +Ce_atm_ice=0.00175 ! exchange coeff. of latent heat over ice +Ch_atm_ice=0.00175 ! exchange coeff. of sensible heat over ice +Cd_atm_ice=0.0012 ! drag coefficient between atmosphere and ice +Swind =0.0 ! parameterization for coupled current feedback +/ + +&forcing_bulk +AOMIP_drag_coeff=.false. +ncar_bulk_formulae=.true. +ncar_bulk_z_wind=10.0 ! height at which wind forcing is located (CORE, JRA-do: 10m, JRA, NCEP:2m) +ncar_bulk_z_tair=2.0 ! height at which temp forcing is located (CORE, JRA-do: 10m, JRA, NCEP:2m) +ncar_bulk_z_shum=2.0 ! height at which humi forcing is located (CORE, JRA-do: 10m, JRA, NCEP:2m) +/ + +&land_ice +use_landice_water=.false. +landice_start_mon=5 +landice_end_mon=10 +/ + +&nam_sbc + nm_xwind_file = '/work/ollie/clidyn/forcing/NCEP2/uwnd.10m.gauss.' ! name of file with winds, if nm_sbc=2 + nm_ywind_file = '/work/ollie/clidyn/forcing/NCEP2/vwnd.10m.gauss.' ! name of file with winds, if nm_sbc=2 + nm_humi_file = '/work/ollie/clidyn/forcing/NCEP2/shum.2m.gauss.' ! name of file with 2m specific humidity + nm_qsr_file = '/work/ollie/clidyn/forcing/NCEP2/dswrf.sfc.gauss.' ! name of file with solar heat + nm_qlw_file = '/work/ollie/clidyn/forcing/NCEP2/dlwrf.sfc.gauss.' ! name of file with Long wave + nm_tair_file = '/work/ollie/clidyn/forcing/NCEP2/air.2m.gauss.' ! name of file with 2m air temperature + nm_prec_file = '/work/ollie/clidyn/forcing/NCEP2/prate.sfc.gauss.' ! name of file with rain fall + nm_snow_file = '' ! name of file with snow fall + nm_mslp_file = '' ! air_pressure_at_sea_level + nm_xwind_var = 'uwnd' ! name of variable in file with wind + nm_ywind_var = 'vwnd' ! name of variable in file with wind + nm_humi_var = 'shum' ! name of variable in file with humidity + nm_qsr_var = 'dswrf' ! name of variable in file with solar heat + nm_qlw_var = 'dlwrf' ! name of variable in file with Long wave + nm_tair_var = 'air' ! name of variable in file with 2m air temperature + nm_prec_var = 'prate' ! name of variable in file with total precipitation + nm_snow_var = '' ! name of variable in file with total precipitation + nm_mslp_var = '' ! name of variable in file with air_pressure_at_sea_level + nm_nc_iyear = 1800 + nm_nc_imm = 1 ! initial month of time axis in netCDF + nm_nc_idd = 1 ! initial day of time axis in netCDF + nm_nc_freq = 24 ! data points per day (i.e. 86400 if the time axis is in seconds) + nm_nc_tmid = 0 ! 1 if the time stamps are given at the mid points of the netcdf file, 0 otherwise (i.e. 1 in CORE1, CORE2; 0 in JRA55) + l_xwind=.true., l_ywind=.true., l_humi=.true., l_qsr=.true., l_qlw=.true., l_tair=.true., l_prec=.true., l_mslp=.false., l_cloud=.false., l_snow=.false. + nm_runoff_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/CORE2_runoff.nc' + runoff_data_source ='CORE2' !Dai09, CORE2, JRA55 + nm_sss_data_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/PHC2_salx.nc' + sss_data_source ='CORE2' +/ diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 9e12224dc..d4b6896b3 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -149,6 +149,7 @@ subroutine thermodynamics(mesh) snow=prec_rain(i) endif evap_in=evaporation(i) !evap_in: positive up +!!PS evap_in=0.0_WP else rain = prec_rain(i) snow = prec_snow(i) @@ -195,7 +196,7 @@ subroutine thermodynamics(mesh) net_heat_flux(i) = ehf !positive down evaporation(i) = evap !negative up ice_sublimation(i)= subli - + thdgr(i) = ithdgr thdgrsn(i) = ithdgrsn flice(i) = iflice @@ -206,6 +207,13 @@ subroutine thermodynamics(mesh) ! real salt flux due to salinity that is contained in the sea ice 4-5 psu real_salt_flux(i)= rsf !PS + ! if snow file is not given snow computed from prec_rain --> but prec_snow + ! array needs to be filled --> so that the freshwater balancing adds up + if (.not. l_snow) then + prec_rain(i) = rain + prec_snow(i) = snow + end if + end do deallocate(ustar_aux) end subroutine thermodynamics From 8205239a273ba536edf4148a7da9b28cabbc9d35 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 22 Jul 2021 16:35:08 +0200 Subject: [PATCH 338/909] add another flag to overwrite (switch off) the fleapyears calendar checking --- src/gen_modules_config.F90 | 3 ++- src/gen_surface_forcing.F90 | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index fa4051421..f265ea898 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -88,7 +88,8 @@ module g_config !_____________________________________________________________________________ ! *** fleap_year *** logical :: include_fleapyear=.false. - namelist /calendar/ include_fleapyear + logical :: use_flpyrcheck =.true. + namelist /calendar/ include_fleapyear, use_flpyrcheck !_____________________________________________________________________________ ! *** machine *** diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index eb9b5eb5d..bfd057638 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -354,7 +354,7 @@ SUBROUTINE nc_readTimeGrid(flf) call check_nferr(iost,flf%file_name) ! digg for calendar attribute in time axis variable - if (mype==0) then + if (mype==0 .and. use_flpyrcheck) then iost = nf_inq_attlen(ncid, id_time,'calendar',aux_len) iost = nf_get_att(ncid, id_time,'calendar',aux_calendar) aux_calendar = aux_calendar(1:aux_len) From 0a9d58269053d759641998ebf290e4bb4ff97b64 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 22 Jul 2021 16:36:45 +0200 Subject: [PATCH 339/909] comment NaN checking in gen_support.F90, smooth_nod3d routine --- src/gen_support.F90 | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index c420f3de3..114a4db92 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -126,15 +126,15 @@ subroutine smooth_nod3D(arr, N_smooth, mesh) nln = min(nlev,nlevels_nod2d(n)) DO nz=uln,nln arr(nz, n) = work_array(nz, n) *vol(nz,n) - if (arr(nz,n)/=arr(nz,n)) then - write(*,*) ' --> found NaN in smoothing' - write(*,*) ' mype = ', mype - write(*,*) ' n = ', n - write(*,*) ' nz,uln,nln = ', nz,uln,nln - write(*,*) ' arr(nz,n) = ', arr(nz,n) - write(*,*) ' work_array(nz,n)= ', work_array(nz,n) - write(*,*) ' vol(nz,n) = ', vol(nz,n) - endif +!!PS if (arr(nz,n)/=arr(nz,n)) then +!!PS write(*,*) ' --> found NaN in smoothing' +!!PS write(*,*) ' mype = ', mype +!!PS write(*,*) ' n = ', n +!!PS write(*,*) ' nz,uln,nln = ', nz,uln,nln +!!PS write(*,*) ' arr(nz,n) = ', arr(nz,n) +!!PS write(*,*) ' work_array(nz,n)= ', work_array(nz,n) +!!PS write(*,*) ' vol(nz,n) = ', vol(nz,n) +!!PS endif END DO end DO From 98a69cd63f4183ce54cc2cfcb7e501dee1aa5c90 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 22 Jul 2021 16:37:57 +0200 Subject: [PATCH 340/909] correct namelist.forcing.ncep2 to use NCEP 2 Reanalysis forcing --- config/namelist.forcing.ncep2 | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/config/namelist.forcing.ncep2 b/config/namelist.forcing.ncep2 index 925dd1828..7dcc0ef59 100644 --- a/config/namelist.forcing.ncep2 +++ b/config/namelist.forcing.ncep2 @@ -25,22 +25,22 @@ landice_end_mon=10 / &nam_sbc - nm_xwind_file = '/work/ollie/clidyn/forcing/NCEP2/uwnd.10m.gauss.' ! name of file with winds, if nm_sbc=2 - nm_ywind_file = '/work/ollie/clidyn/forcing/NCEP2/vwnd.10m.gauss.' ! name of file with winds, if nm_sbc=2 - nm_humi_file = '/work/ollie/clidyn/forcing/NCEP2/shum.2m.gauss.' ! name of file with 2m specific humidity - nm_qsr_file = '/work/ollie/clidyn/forcing/NCEP2/dswrf.sfc.gauss.' ! name of file with solar heat - nm_qlw_file = '/work/ollie/clidyn/forcing/NCEP2/dlwrf.sfc.gauss.' ! name of file with Long wave - nm_tair_file = '/work/ollie/clidyn/forcing/NCEP2/air.2m.gauss.' ! name of file with 2m air temperature - nm_prec_file = '/work/ollie/clidyn/forcing/NCEP2/prate.sfc.gauss.' ! name of file with rain fall - nm_snow_file = '' ! name of file with snow fall - nm_mslp_file = '' ! air_pressure_at_sea_level + nm_xwind_file = '/work/ollie/clidyn/forcing/NCEP2/uwnd.10m.gauss.' ! name of file with winds, if nm_sbc=2 + nm_ywind_file = '/work/ollie/clidyn/forcing/NCEP2/vwnd.10m.gauss.' ! name of file with winds, if nm_sbc=2 + nm_humi_file = '/work/ollie/clidyn/forcing/NCEP2/shum.2m.gauss.' ! name of file with 2m specific humidity + nm_qsr_file = '/work/ollie/clidyn/forcing/NCEP2/dswrf.sfc.gauss.' ! name of file with solar heat + nm_qlw_file = '/work/ollie/clidyn/forcing/NCEP2/dlwrf.sfc.gauss.' ! name of file with Long wave + nm_tair_file = '/work/ollie/clidyn/forcing/NCEP2/air.2m.gauss.' ! name of file with 2m air temperature + nm_prec_file = '/work/ollie/clidyn/forcing/NCEP2/prate.sfc.gauss.' ! name of file with rain fall + nm_snow_file = '' ! name of file with snow fall + nm_mslp_file = '' ! air_pressure_at_sea_level nm_xwind_var = 'uwnd' ! name of variable in file with wind nm_ywind_var = 'vwnd' ! name of variable in file with wind nm_humi_var = 'shum' ! name of variable in file with humidity nm_qsr_var = 'dswrf' ! name of variable in file with solar heat nm_qlw_var = 'dlwrf' ! name of variable in file with Long wave nm_tair_var = 'air' ! name of variable in file with 2m air temperature - nm_prec_var = 'prate' ! name of variable in file with total precipitation + nm_prec_var = 'prate' ! name of variable in file with total precipitation nm_snow_var = '' ! name of variable in file with total precipitation nm_mslp_var = '' ! name of variable in file with air_pressure_at_sea_level nm_nc_iyear = 1800 From 71e3b2d6650ef5ec109ccab003590cf6de5a37bc Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 28 Jul 2021 15:52:16 +0200 Subject: [PATCH 341/909] extend error message for a wrong timestep in restart files to know which variable has the wrong timestep --- src/io_restart.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 7d0f1b3eb..2dfe7f090 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -430,7 +430,7 @@ subroutine read_restart(path, filegroup) call filegroup%files(i)%close_file() if (int(ctime)/=int(rtime)) then - write(*,*) 'Reading restart: timestamps in restart and in clock files do not match' + write(*,*) 'Reading restart: timestamps in restart and in clock files do not match for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path write(*,*) 'restart/ times are:', ctime, rtime write(*,*) 'the model will stop!' stop 1 From bf918d3ea795a0ab18be5398413dc192883bec0c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 28 Jul 2021 16:09:07 +0200 Subject: [PATCH 342/909] add variable to the restart file group to be able to store if a variable can be skipped when reading --- src/io_restart_file_group.F90 | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 index 3a7b19e8f..34994e6e1 100644 --- a/src/io_restart_file_group.F90 +++ b/src/io_restart_file_group.F90 @@ -10,6 +10,7 @@ module restart_file_group_module integer iter_varindex character(:), allocatable :: varname character(:), allocatable :: path + logical must_exist_on_read end type @@ -36,7 +37,7 @@ subroutine def_node_var_2d(this, name, longname, units, local_data, mesh) type(t_mesh), intent(in) :: mesh ! EO parameters - call add_file(this, name, mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl) call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) end subroutine @@ -50,7 +51,7 @@ subroutine def_node_var_3d(this, name, longname, units, local_data, mesh) type(t_mesh), intent(in) :: mesh ! EO parameters - call add_file(this, name, mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl) call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) end subroutine @@ -64,7 +65,7 @@ subroutine def_elem_var_2d(this, name, longname, units, local_data, mesh) type(t_mesh), intent(in) :: mesh ! EO parameters - call add_file(this, name, mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl) call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) end subroutine @@ -78,14 +79,15 @@ subroutine def_elem_var_3d(this, name, longname, units, local_data, mesh) type(t_mesh), intent(in) :: mesh ! EO parameters - call add_file(this, name, mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl) call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) end subroutine - subroutine add_file(g, name, mesh_nod2d, mesh_elem2d, mesh_nl) + subroutine add_file(g, name, must_exist_on_read, mesh_nod2d, mesh_elem2d, mesh_nl) class(restart_file_group), target, intent(inout) :: g character(len=*), intent(in) :: name + logical must_exist_on_read integer mesh_nod2d, mesh_elem2d, mesh_nl ! EO parameters type(restart_file_type), pointer :: f @@ -96,6 +98,7 @@ subroutine add_file(g, name, mesh_nod2d, mesh_elem2d, mesh_nl) f%path = "" f%varname = name + f%must_exist_on_read = must_exist_on_read call f%fesom_file_type%init(mesh_nod2d, mesh_elem2d, mesh_nl) ! this is specific for a restart file f%iter_varindex = f%add_var_int('iter', [f%time_dimindex()]) From 357894d098b571d9f2b372cdb8867bec0074cb2e Mon Sep 17 00:00:00 2001 From: dsidoren Date: Wed, 28 Jul 2021 16:14:13 +0200 Subject: [PATCH 343/909] Update ice_oce_coupling.F90 --- src/ice_oce_coupling.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 7b588503c..2e92d3d42 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -329,9 +329,9 @@ subroutine oce_fluxes(mesh) ! due to rigid lid approximation under the cavity we to not add freshwater ! under the cavity for the freshwater balancing we do this only for the open ! ocean - where (ulevels_nod2d == 1) water_flux=water_flux+net/ocean_area + where (ulevels_nod2d == 1) water_flux=water_flux-net/ocean_area else - water_flux=water_flux+net/ocean_area + water_flux=water_flux-net/ocean_area end if !___________________________________________________________________________ From 4fecc1f19ea5a8291995070bebe0b99188d8722e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 28 Jul 2021 16:18:51 +0200 Subject: [PATCH 344/909] create a variant of the procedures to define a restart variable where the file is allowed to be skipped when reading --- src/io_restart_file_group.F90 | 61 +++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 index 34994e6e1..244dea065 100644 --- a/src/io_restart_file_group.F90 +++ b/src/io_restart_file_group.F90 @@ -23,6 +23,11 @@ module restart_file_group_module generic, public :: def_elem_var => def_elem_var_2d, def_elem_var_3d procedure, private :: def_node_var_2d, def_node_var_3d procedure, private :: def_elem_var_2d, def_elem_var_3d + ! def_*_optional procedures create a restart variable which does not have to exist when reading the restart file + generic, public :: def_node_var_optional => def_node_var_2d_optional, def_node_var_3d_optional + generic, public :: def_elem_var_optional => def_elem_var_2d_optional, def_elem_var_3d_optional + procedure, private :: def_node_var_2d_optional, def_node_var_3d_optional + procedure, private :: def_elem_var_2d_optional, def_elem_var_3d_optional end type contains @@ -105,6 +110,62 @@ subroutine add_file(g, name, must_exist_on_read, mesh_nod2d, mesh_elem2d, mesh_n end subroutine + subroutine def_node_var_2d_optional(this, name, longname, units, local_data, mesh) + use mod_mesh + class(restart_file_group), target, intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + ! EO parameters + + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl) + call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) + end subroutine + + + subroutine def_node_var_3d_optional(this, name, longname, units, local_data, mesh) + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + ! EO parameters + + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl) + call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) + end subroutine + + + subroutine def_elem_var_2d_optional(this, name, longname, units, local_data, mesh) + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + ! EO parameters + + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl) + call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) + end subroutine + + + subroutine def_elem_var_3d_optional(this, name, longname, units, local_data, mesh) + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + ! EO parameters + + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl) + call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) + end subroutine + + subroutine assert(val, line) logical, intent(in) :: val integer, intent(in) :: line From 03194e6b31d8ac6401c2116a4b480ed3f69c59b5 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Wed, 28 Jul 2021 16:24:25 +0200 Subject: [PATCH 345/909] Update io_meandata.F90 --- src/io_meandata.F90 | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 0cbc617e9..9fd53e400 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -625,9 +625,10 @@ subroutine create_new_file(entry, mesh) call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, & (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) - if (entry%ndim==2) then - call assert_nf( nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)), __LINE__); - end if +!CHUNKING stuff (netcdf libraries not always compited with it) +!if (entry%ndim==2) then +! call assert_nf( nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)), __LINE__); +! end if call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'description', len_trim(entry%description), entry%description), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'long_name', len_trim(entry%description), entry%description), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'units', len_trim(entry%units), entry%units), __LINE__) From cd083cb4e96fcd2c319eeaddb3470b2e2f071d18 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Wed, 28 Jul 2021 16:40:39 +0200 Subject: [PATCH 346/909] Update ice_oce_coupling.F90 --- src/ice_oce_coupling.F90 | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 2e92d3d42..760d604af 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -289,10 +289,10 @@ subroutine oce_fluxes(mesh) ! enforce the total freshwater/salt flux be zero ! 1. water flux ! if (.not. use_virt_salt) can be used! ! we conserve only the fluxes from the database plus evaporation. - !flux = evaporation-ice_sublimation & ! the ice2atmos subplimation does not contribute to the freshwater flux into the ocean - ! +prec_rain & - ! +prec_snow*(1.0_WP-a_ice_old) & - ! +runoff + flux = evaporation-ice_sublimation & ! the ice2atmos subplimation does not contribute to the freshwater flux into the ocean + +prec_rain & + +prec_snow*(1.0_WP-a_ice_old) & + +runoff ! --> In case of zlevel and zstar and levitating sea ice, sea ice is just sitting ! on top of the ocean without displacement of water, there the thermodynamic ! growth rates of sea ice have to be taken into account to preserve the fresh water @@ -303,7 +303,7 @@ subroutine oce_fluxes(mesh) ! salinity flux !!PS if ( .not. use_floatice .and. .not. use_virt_salt) then if (.not. use_virt_salt) then - flux = water_flux+thdgr*rhoice*inv_rhowat+thdgrsn*rhosno*inv_rhowat + flux = flux-thdgr*rhoice*inv_rhowat-thdgrsn*rhosno*inv_rhowat end if ! Also balance freshwater flux that come from ocean-cavity boundary @@ -329,9 +329,9 @@ subroutine oce_fluxes(mesh) ! due to rigid lid approximation under the cavity we to not add freshwater ! under the cavity for the freshwater balancing we do this only for the open ! ocean - where (ulevels_nod2d == 1) water_flux=water_flux-net/ocean_area + where (ulevels_nod2d == 1) water_flux=water_flux+net/ocean_area else - water_flux=water_flux-net/ocean_area + water_flux=water_flux+net/ocean_area end if !___________________________________________________________________________ From 233d72b62585c490e27c1212437d0ba283754169 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 28 Jul 2021 14:12:03 +0200 Subject: [PATCH 347/909] ignore the error message from reading the restart if the variable is not in the file (i.e. the model should not stop but use the values with which the variable was initialised initially) --- src/io_restart.F90 | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 5f6d0d674..02ae07791 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -29,6 +29,7 @@ MODULE io_RESTART integer :: ndim integer :: dims(2) !<=2; assume there are no variables with dimension more than 2xNLxT real(kind=WP), pointer :: pt1(:), pt2(:,:) + logical :: is_in_restart !if the variable is in the restart file end type nc_vars ! !-------------------------------------------------------------------------------------------- @@ -560,9 +561,9 @@ subroutine read_restart(id, mesh, arg) integer, optional, intent(in) :: arg real(kind=WP), allocatable :: aux(:), laux(:) integer :: i, lev, size1, size2, size_gen, size_lev, shape - integer :: rec2read, c, order + integer :: rec2read, c, order, ierror real(kind=WP) :: rtime !timestamp of the record - logical :: file_exist=.False. + logical :: file_exist=.False., var_exist type(t_mesh), intent(in) , target :: mesh #include "associate_mesh.h" @@ -605,10 +606,17 @@ subroutine read_restart(id, mesh, arg) end if call was_error(id); c=1 - + do i=1, id%nvar shape=id%var(i)%ndim - if (mype==0) write(*,*) 'reading restart for ', trim(id%var(i)%name) + var_exist=id%var(i)%is_in_restart + call MPI_BCast(var_exist, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + if (var_exist) then + if (mype==0) write(*,*) 'reading restart for ', trim(id%var(i)%name) + else + if (mype==0) write(*,*) '...skip reading for ', trim(id%var(i)%name) + cycle + end if !_______writing 2D fields________________________________________________ if (shape==1) then size1=id%var(i)%dims(1) @@ -681,7 +689,7 @@ subroutine assoc_ids(id) type(nc_file), intent(inout) :: id character(500) :: longname - integer :: c, j, k + integer :: c, j, k, status real(kind=WP) :: rtime !timestamp of the record ! Serial output implemented so far if (mype/=0) return @@ -730,7 +738,11 @@ subroutine assoc_ids(id) id%rec_count=max(id%rec_count, 1) !___Associate physical variables____________________________________________ do j=1, id%nvar - id%error_status(c) = nf_inq_varid(id%ncid, id%var(j)%name, id%var(j)%code); c=c+1 + status = nf_inq_varid(id%ncid, id%var(j)%name, id%var(j)%code); c=c+1 + id%var(j)%is_in_restart=(status .eq. nf_noerr) !does the requested variable exist in the file + if (.not. id%var(j)%is_in_restart) then !if not give the error message + write(*,*) 'WARNING: entry ', trim(id%var(j)%name), ' does not exist in the restart file!' + end if end do id%error_status(c)=nf_close(id%ncid); c=c+1 id%error_count=c-1 From 1a0df9b2d5894eb8ff4f195898da7c77a17b51c3 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 28 Jul 2021 19:46:18 +0200 Subject: [PATCH 348/909] skip optional restart variables which do not exist when reading restart --- src/io_restart.F90 | 44 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 2dfe7f090..36f36f15c 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -394,29 +394,61 @@ subroutine read_restart(path, filegroup) integer i character(:), allocatable :: dirpath integer mpistatus(MPI_STATUS_SIZE) - + logical file_exists + logical, allocatable :: skip_file(:) + integer current_iorank_snd, current_iorank_rcv + + allocate(skip_file(filegroup%nfiles)) + skip_file = .false. + current_iorank_snd = 0 + current_iorank_rcv = 0 + do i=1, filegroup%nfiles if( filegroup%files(i)%is_iorank() ) then dirpath = path(1:len(path)-3) ! chop of the ".nc" suffix if(filegroup%files(i)%path .ne. dirpath//"/"//filegroup%files(i)%varname//".nc") then call execute_command_line("mkdir -p "//dirpath) filegroup%files(i)%path = dirpath//"/"//filegroup%files(i)%varname//".nc" + + ! determine if the file should be skipped + if(.not. filegroup%files(i)%must_exist_on_read) then + current_iorank_snd = mype + inquire(file=filegroup%files(i)%path, exist=file_exists) + if(.not. file_exists) skip_file(i) = .true. + end if + + if(.not. skip_file(i)) then #ifndef DISABLE_PARALLEL_RESTART_READ - write(*,*) 'reading restart PARALLEL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path + write(*,*) 'reading restart PARALLEL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path #else - write(*,*) 'reading restart SEQUENTIAL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path + write(*,*) 'reading restart SEQUENTIAL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path #endif - call filegroup%files(i)%open_read(filegroup%files(i)%path) ! do we need to bother with read-only access? + else +#ifndef DISABLE_PARALLEL_RESTART_READ + write(*,*) 'skipping reading restart PARALLEL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path +#else + write(*,*) 'skipping reading restart SEQUENTIAL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path +#endif + end if + + if(.not. skip_file(i)) call filegroup%files(i)%open_read(filegroup%files(i)%path) ! do we need to bother with read-only access? ! todo: print a reasonable error message if the file does not exist - end if + end if end if - call filegroup%files(i)%async_read_and_scatter_variables() + ! iorank already knows if we skip the file, tell the others + if(.not. filegroup%files(i)%must_exist_on_read) then + call MPI_Allreduce(current_iorank_snd, current_iorank_rcv, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_Bcast(skip_file(i), 1, MPI_LOGICAL, current_iorank_rcv, MPI_COMM_FESOM, MPIerr) + end if + + if(.not. skip_file(i)) call filegroup%files(i)%async_read_and_scatter_variables() #ifndef DISABLE_PARALLEL_RESTART_READ end do do i=1, filegroup%nfiles #endif + if(skip_file(i)) cycle call filegroup%files(i)%join() if(filegroup%files(i)%is_iorank()) then From 46cf15044e96633599fb05d374e58380b1b69fa4 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 28 Jul 2021 20:20:22 +0200 Subject: [PATCH 349/909] make sure to create a restart file instead of appending if it does not exist because it has been skipped when reading restarts --- src/io_restart.F90 | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 36f36f15c..00db75298 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -243,21 +243,25 @@ subroutine write_restart(path, filegroup, istep) dirpath = path(1:len(path)-3) ! chop of the ".nc" suffix filepath = dirpath//"/"//filegroup%files(i)%varname//".nc" - if(filegroup%files(i)%path == "") then + if(filegroup%files(i)%path == "" .or. (.not. filegroup%files(i)%must_exist_on_read)) then ! the path to an existing restart file is not set in read_restart if we had a restart from a raw restart + ! OR we might have skipped the file when reading restarts and it does not exist at all inquire(file=filepath, exist=file_exists) - if(file_exists) filegroup%files(i)%path = filepath + if(file_exists) then + filegroup%files(i)%path = filepath + else if(.not. filegroup%files(i)%must_exist_on_read) then + filegroup%files(i)%path = "" + end if end if if(filegroup%files(i)%path .ne. filepath) then call execute_command_line("mkdir -p "//dirpath) filegroup%files(i)%path = filepath call filegroup%files(i)%open_write_create(filegroup%files(i)%path) - else + else call filegroup%files(i)%open_write_append(filegroup%files(i)%path) ! todo: keep the file open between writes end if write(*,*) 'writing restart record ', filegroup%files(i)%rec_count()+1, ' to ', filegroup%files(i)%path - ! todo: write iter to a separate (non-mesh-variable) file call filegroup%files(i)%write_var(filegroup%files(i)%iter_varindex, [filegroup%files(i)%rec_count()+1], [1], [cstep]) ! todo: write time via the fesom_file_type call filegroup%files(i)%write_var(filegroup%files(i)%time_varindex(), [filegroup%files(i)%rec_count()+1], [1], [ctime]) From 6665218bf1542cc0a5c2984d5c453f82cb86145d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 29 Jul 2021 11:29:53 +0200 Subject: [PATCH 350/909] reset current iorank variable for each io file --- src/io_restart.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 00db75298..f94d106ed 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -404,10 +404,10 @@ subroutine read_restart(path, filegroup) allocate(skip_file(filegroup%nfiles)) skip_file = .false. - current_iorank_snd = 0 - current_iorank_rcv = 0 do i=1, filegroup%nfiles + current_iorank_snd = 0 + current_iorank_rcv = 0 if( filegroup%files(i)%is_iorank() ) then dirpath = path(1:len(path)-3) ! chop of the ".nc" suffix if(filegroup%files(i)%path .ne. dirpath//"/"//filegroup%files(i)%varname//".nc") then From 55a508978253628be9067069bb5272e6b95282e0 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 29 Jul 2021 11:32:38 +0200 Subject: [PATCH 351/909] when reading restarts, sync globalstep with processes which may have skipped a restart upon reading and thus need to know the globalstep when writing their restart --- src/io_restart.F90 | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index f94d106ed..ac74727ce 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -401,6 +401,7 @@ subroutine read_restart(path, filegroup) logical file_exists logical, allocatable :: skip_file(:) integer current_iorank_snd, current_iorank_rcv + integer max_globalstep allocate(skip_file(filegroup%nfiles)) skip_file = .false. @@ -473,7 +474,13 @@ subroutine read_restart(path, filegroup) end if end if end do - + + ! sync globalstep with processes which may have skipped a restart upon reading and thus need to know the globalstep when writing their restart + if( any(skip_file .eqv. .true.) ) then + call MPI_Allreduce(globalstep, max_globalstep, 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) + globalstep = max_globalstep + end if + ! sync globalstep with the process responsible for raw restart metadata if(filegroup%nfiles >= 1) then ! use the first restart I/O process to send the globalstep From 0a990c81b6988863a6c76fedbab3374d66747fe8 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 29 Jul 2021 11:44:53 +0200 Subject: [PATCH 352/909] make ice_albedo and ice_temp optional restart variables which do not have to exist when reading a restart --- src/io_restart.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index ac74727ce..005973bd4 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -121,8 +121,8 @@ subroutine ini_ice_io(year, mesh) call ice_files%def_node_var('uice', 'zonal velocity', 'm/s', u_ice, mesh) call ice_files%def_node_var('vice', 'meridional velocity', 'm', v_ice, mesh) #if defined (__oifs) - call ice_files%def_node_var('ice_albedo', 'ice albedo', '-', ice_alb, mesh) - call ice_files%def_node_var('ice_temp', 'ice surface temperature', 'K', ice_temp, mesh) + call ice_files%def_node_var_optional('ice_albedo', 'ice albedo', '-', ice_alb, mesh) + call ice_files%def_node_var_optional('ice_temp', 'ice surface temperature', 'K', ice_temp, mesh) #endif /* (__oifs) */ end subroutine ini_ice_io From 246bea5345cae022251fd8b4f1b231308ebbc243 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 30 Jul 2021 15:40:05 +0200 Subject: [PATCH 353/909] do not create a directory when reading restarts --- src/io_restart.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 005973bd4..905ef93d7 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -412,7 +412,6 @@ subroutine read_restart(path, filegroup) if( filegroup%files(i)%is_iorank() ) then dirpath = path(1:len(path)-3) ! chop of the ".nc" suffix if(filegroup%files(i)%path .ne. dirpath//"/"//filegroup%files(i)%varname//".nc") then - call execute_command_line("mkdir -p "//dirpath) filegroup%files(i)%path = dirpath//"/"//filegroup%files(i)%varname//".nc" ! determine if the file should be skipped From 18cf0ac73a6bc5007c16cfded459762231fa13cb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 2 Aug 2021 11:49:18 +0200 Subject: [PATCH 354/909] write number of variables stored in the restart dump to the info file --- src/io_restart.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 905ef93d7..172c5a843 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -292,8 +292,8 @@ subroutine write_all_raw_restarts(istep) write(fileunit, '(g0)') cstep write(fileunit, '(g0)') ctime write(fileunit, '(2(g0))') "! year: ",yearnew - write(fileunit, '(g0)') "! oce" - if(use_ice) write(fileunit, '(g0)') "! ice" + write(fileunit, '(3(g0))') "! oce: ", oce_files%nfiles, " variables" + if(use_ice) write(fileunit, '(3(g0))') "! ice: ", ice_files%nfiles, " variables" close(fileunit) end if end subroutine From 4c8dea867d5bea3778a617d19944a06adf9d614c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 4 Aug 2021 13:19:05 +0200 Subject: [PATCH 355/909] do not send/receive globalstep to the same rank for very small number of processes (i.e. np 9 or less) as this will deadlock for cray_mpich --- src/io_restart.F90 | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 172c5a843..1ae1087c1 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -483,10 +483,9 @@ subroutine read_restart(path, filegroup) ! sync globalstep with the process responsible for raw restart metadata if(filegroup%nfiles >= 1) then ! use the first restart I/O process to send the globalstep - if( filegroup%files(1)%is_iorank() ) then + if( filegroup%files(1)%is_iorank() .and. (mype .ne. RAW_RESTART_METADATA_RANK)) then call MPI_Send(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, 42, MPI_COMM_FESOM, MPIerr) - end if - if(mype == RAW_RESTART_METADATA_RANK) then + else if((mype == RAW_RESTART_METADATA_RANK) .and. (.not. filegroup%files(1)%is_iorank())) then call MPI_Recv(globalstep, 1, MPI_INTEGER, MPI_ANY_SOURCE, 42, MPI_COMM_FESOM, mpistatus, MPIerr) end if end if From b9d5c5e052ac4d2abf3d57254177537c9f1a2206 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 6 Sep 2021 15:59:18 +0200 Subject: [PATCH 356/909] add namelist.config parameters use_depthonelem and use_cavityonelem to define bottom topography and cavity topography on elements instead of nodes --- src/gen_modules_config.F90 | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index f265ea898..373524b5a 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -82,8 +82,12 @@ module g_config ! geographical coordinates integer :: thers_zbar_lev=5 ! minimum number of levels to be character(len=5) :: which_depth_n2e='mean' - namelist /geometry/ cartesian, fplane, & - cyclic_length, rotated_grid, alphaEuler, betaEuler, gammaEuler, force_rotation, which_depth_n2e + logical :: use_depthonelem =.false. + logical :: use_cavityonelem=.false. + namelist /geometry/ cartesian, fplane, & + cyclic_length, rotated_grid, force_rotation, & + alphaEuler, betaEuler, gammaEuler, & + which_depth_n2e, use_depthonelem, use_cavityonelem !_____________________________________________________________________________ ! *** fleap_year *** From f4a11446c26c19aef4f8f99ec442c597ca6270dd Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 6 Sep 2021 16:00:19 +0200 Subject: [PATCH 357/909] adapt partitioning so that bottom topography and cavity topography can be defined on elements and nodes --- src/fvom_init.F90 | 84 +++++++++++++++++++++++++++++++++-------------- 1 file changed, 60 insertions(+), 24 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index c5396bdf7..3013d3bea 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -229,7 +229,7 @@ subroutine read_mesh_cavity(mesh) implicit none type(t_mesh), intent(inout), target :: mesh - integer :: node + integer :: node, auxi character(len=MAX_PATH) :: fname logical :: file_exist=.False. #include "associate_mesh_ini.h" @@ -247,7 +247,11 @@ subroutine read_mesh_cavity(mesh) inquire(file=trim(fname),exist=file_exist) if (file_exist) then open (21,file=fname, status='old') - allocate(mesh%cavity_depth(mesh%nod2D)) + if (use_cavityonelem) then + allocate(mesh%cavity_depth(mesh%elem2d)) + else + allocate(mesh%cavity_depth(mesh%nod2D)) + end if cavity_depth => mesh%cavity_depth else if (mype==0) then @@ -260,7 +264,9 @@ subroutine read_mesh_cavity(mesh) end if !___________________________________________________________________________ - do node=1, mesh%nod2D + auxi=mesh%nod2D + if (use_cavityonelem) auxi=mesh%elem2d + do node=1, auxi read(21,*) mesh%cavity_depth(node) end do @@ -660,7 +666,7 @@ subroutine find_levels(mesh) use g_parsup implicit none INTEGER :: nodes(3), elems(3), eledges(3) - integer :: elem, elem1, j, n, nneighb, q, node, i, nz + integer :: elem1, j, n, nneighb, q, node, i, nz, auxi integer :: count_iter, count_neighb_open, exit_flag, fileID=111 real(kind=WP) :: x, dmean integer :: max_iter=1000 @@ -674,22 +680,41 @@ subroutine find_levels(mesh) print *, achar(27)//'[7;1m' //' -->: read bottom depth '//achar(27)//'[0m' end if - ALLOCATE(mesh%depth(nod2D)) + !___________________________________________________________________________ + ! allocate depth + if (use_depthonelem) then + allocate(mesh%depth(elem2D)) + else + allocate(mesh%depth(nod2D)) + end if depth => mesh%depth !required after the allocation, otherwise the pointer remains undefined + + !___________________________________________________________________________ + ! load fesom2.0 aux3d.out file file_name=trim(meshpath)//'aux3d.out' open(fileID, file=file_name) - read(fileID,*) nl ! the number of levels + + ! read the number of levels + read(fileID,*) nl allocate(mesh%zbar(nl)) ! their standard depths + ! read full depth levels zbar => mesh%zbar !required after the allocation, otherwise the pointer remains undefined read(fileID,*) zbar if(zbar(2)>0) zbar=-zbar ! zbar is negative + ! compute mid depth levels allocate(mesh%Z(nl-1)) Z => mesh%Z !required after the allocation, otherwise the pointer remains undefined Z=zbar(1:nl-1)+zbar(2:nl) ! mid-depths of cells Z=0.5_WP*Z - DO n=1,nod2D + + ! read topography from file + auxi = nod2d + if (use_depthonelem) auxi = elem2d + write(*,*) ' use_depthonelem = ',use_depthonelem + write(*,*) ' auxi =',auxi + DO n = 1, auxi read(fileID,*) x if (x>0) x=-x if (x>zbar(thers_zbar_lev)) x=zbar(thers_zbar_lev) !TODO KK threshholding for depth @@ -714,18 +739,23 @@ subroutine find_levels(mesh) ! Compute the initial number number of elementa levels, based on the vertice ! depth information do n=1, elem2D - nodes=elem2D_nodes(1:3,n) - !_________________________________________________________________________ - ! depth of element is shallowest depth of sorounding vertices - if (trim(which_depth_n2e) .eq. 'min') then ; dmean=maxval(depth(nodes)) - ! depth of element is deepest depth of sorounding vertices - elseif (trim(which_depth_n2e) .eq. 'max') then ; dmean=minval(depth(nodes)) - ! DEFAULT: depth of element is mean depth of sorounding vertices - elseif (trim(which_depth_n2e) .eq. 'mean') then; dmean=sum(depth(nodes))/3.0 + !_______________________________________________________________________ + if (use_depthonelem) then + dmean = depth(n) ! depth is already defined on elements + else + nodes=elem2D_nodes(1:3,n) + !___________________________________________________________________ + ! depth of element is shallowest depth of sorounding vertices + if (trim(which_depth_n2e) .eq. 'min') then ; dmean=maxval(depth(nodes)) + ! depth of element is deepest depth of sorounding vertices + elseif (trim(which_depth_n2e) .eq. 'max') then ; dmean=minval(depth(nodes)) + ! DEFAULT: depth of element is mean depth of sorounding vertices + elseif (trim(which_depth_n2e) .eq. 'mean') then; dmean=sum(depth(nodes))/3.0 + end if end if - !_________________________________________________________________________ + !_______________________________________________________________________ exit_flag=0 do nz=1,nl-1 if(Z(nz): compute elem,vertice cavity depth index '//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute elem, vertice cavity depth index '//achar(27)//'[0m' end if !___________________________________________________________________________ @@ -906,14 +936,20 @@ subroutine find_levels_cavity(mesh) ! Compute level position of ocean-cavity boundary cavity_maxlev=0 do elem=1, elem2D - nodes=elem2D_nodes(1:3,elem) + !_______________________________________________________________________ - ! depth of element is shallowest depth of sorounding vertices - if (trim(which_depth_n2e) .eq. 'min') then ; dmean=maxval(cavity_depth(nodes)) - ! depth of element is deepest depth of sorounding vertices - elseif (trim(which_depth_n2e) .eq. 'max') then ; dmean=minval(cavity_depth(nodes)) - ! DEFAULT: depth of element is mean depth of sorounding vertices - elseif (trim(which_depth_n2e) .eq. 'mean') then ; dmean=sum(cavity_depth(nodes))/3.0 + if (use_cavityonelem) then + dmean = cavity_depth(elem) + else + nodes=elem2D_nodes(1:3,elem) + !_______________________________________________________________________ + ! depth of element is shallowest depth of sorounding vertices + if (trim(which_depth_n2e) .eq. 'min') then ; dmean=maxval(cavity_depth(nodes)) + ! depth of element is deepest depth of sorounding vertices + elseif (trim(which_depth_n2e) .eq. 'max') then ; dmean=minval(cavity_depth(nodes)) + ! DEFAULT: depth of element is mean depth of sorounding vertices + elseif (trim(which_depth_n2e) .eq. 'mean') then ; dmean=sum(cavity_depth(nodes))/3.0 + end if end if !_______________________________________________________________________ From ec5f390ba19ad53b2d295fc9b1a186229d5329fd Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 7 Sep 2021 11:47:38 +0200 Subject: [PATCH 358/909] countinue refactoring of the code with the tracer part. tracers are now of an array of a derived type t_tracer which is passed between subroutines. see MOD_TRACER.F90 for how it is oranized. a lot of things still to do. In the perspective this approach will simplify running FESOM with multiple tracers and applying different advection etc. techniques to each tracer individually. --- src/MOD_TRACER.F90 | 19 + src/cavity_param.F90 | 36 +- src/fvom_main.F90 | 48 +- src/gen_forcing_couple.F90 | 21 +- src/gen_ic3d.F90 | 113 ++--- src/gen_modules_cvmix_kpp.F90 | 25 +- src/gen_modules_diag.F90 | 106 ++--- src/ice_oce_coupling.F90 | 69 ++- src/ice_setup_step.F90 | 36 +- src/io_blowup.F90 | 23 +- src/io_meandata.F90 | 29 +- src/io_restart.F90 | 25 +- src/oce_ale.F90 | 55 ++- src/oce_ale_mixing_kpp.F90 | 32 +- src/oce_ale_pressure_bv.F90 | 270 ++++++++---- src/oce_ale_tracer.F90 | 271 ++++++------ src/oce_ice_init_state.F90 | 802 ---------------------------------- src/oce_modules.F90 | 8 +- src/oce_setup_step.F90 | 153 +++++-- src/oce_spp.F90 | 19 +- src/oce_tracer_mod.F90 | 43 +- src/toy_channel_soufflet.F90 | 59 ++- src/write_step_info.F90 | 80 ++-- 23 files changed, 866 insertions(+), 1476 deletions(-) create mode 100644 src/MOD_TRACER.F90 delete mode 100755 src/oce_ice_init_state.F90 diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 new file mode 100644 index 000000000..d26b835f8 --- /dev/null +++ b/src/MOD_TRACER.F90 @@ -0,0 +1,19 @@ +!========================================================== +MODULE MOD_TRACER +USE O_PARAM +IMPLICIT NONE +SAVE + +TYPE T_TRACER +real(kind=WP), allocatable, dimension(:,:) :: values, valuesAB !instant values & Adams-Bashfort interpolation +logical :: smooth_bh_tra=.false. +real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra +logical :: i_vert_diff =.false. +character(20) :: tra_adv_hor, tra_adv_ver, tra_adv_lim ! type of the advection scheme for this tracer +real(kind=WP) :: tra_adv_ph = 1. ! a parameter to be used in horizontal advection (for MUSCL it is the fraction of fourth-order contribution in the solution) +real(kind=WP) :: tra_adv_pv = 1. ! a parameter to be used in horizontal advection (for QR4C it is the fraction of fourth-order contribution in the solution) +integer :: ID +END TYPE T_TRACER +end module MOD_TRACER +!========================================================== + diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 0491ee4ed..a5503a4c2 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -1,3 +1,13 @@ +module cavity_heat_water_fluxes_3eq_interface + interface + subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + end subroutine + end interface +end module ! ! !_______________________________________________________________________________ @@ -12,7 +22,7 @@ subroutine compute_nrst_pnt2cavline(mesh) use g_PARSUP implicit none - type(t_mesh), intent(inout) , target :: mesh + type(t_mesh), intent(inout) , target :: mesh integer :: node, kk, elnodes(3), gnode, aux_idx integer, allocatable, dimension(:) :: cavl_idx, lcl_cavl_idx real(kind=WP), allocatable, dimension(:) :: cavl_lon, cavl_lat, cavl_dep,lcl_cavl_lon, lcl_cavl_lat, lcl_cavl_dep @@ -120,16 +130,18 @@ end subroutine compute_nrst_pnt2cavline ! adjusted for use in FESOM by Ralph Timmermann, 16.02.2011 ! Reviewed by ? ! adapted by P. SCholz for FESOM2.0 -subroutine cavity_heat_water_fluxes_3eq(mesh) +subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) use MOD_MESH + use MOD_TRACER use o_PARAM , only: density_0, WP - use o_ARRAYS, only: heat_flux, water_flux, tr_arr, Z_3d_n, Unode, density_m_rho0,density_ref + use o_ARRAYS, only: heat_flux, water_flux, Z_3d_n, Unode, density_m_rho0,density_ref use i_ARRAYS, only: net_heat_flux, fresh_wa_flux use g_PARSUP implicit none !___________________________________________________________________________ - type(t_mesh), intent(inout) , target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) real (kind=WP) :: temp,sal,tin,zice real (kind=WP) :: rhow, rhor, rho real (kind=WP) :: gats1, gats2, gas, gat @@ -177,8 +189,8 @@ subroutine cavity_heat_water_fluxes_3eq(mesh) if(nzmin==1) cycle ! if no cavity skip that node !_______________________________________________________________________ - temp = tr_arr(nzmin, node,1) - sal = tr_arr(nzmin, node,2) + temp = tracers(1)%values(nzmin,node) + sal = tracers(2)%values(nzmin,node) zice = Z_3d_n(nzmin, node) !(<0) !_______________________________________________________________________ @@ -305,15 +317,17 @@ end subroutine cavity_heat_water_fluxes_3eq ! Compute the heat and freshwater fluxes under ice cavity using simple 2equ. ! Coded by Adriana Huerta-Casas ! Reviewed by Qiang Wang -subroutine cavity_heat_water_fluxes_2eq(mesh) +subroutine cavity_heat_water_fluxes_2eq(tracers, mesh) use MOD_MESH + use MOD_TRACER use o_PARAM , only: WP - use o_ARRAYS, only: heat_flux, water_flux, tr_arr, Z_3d_n + use o_ARRAYS, only: heat_flux, water_flux, Z_3d_n use i_ARRAYS, only: net_heat_flux, fresh_wa_flux use g_PARSUP implicit none - type(t_mesh), intent(inout) , target :: mesh + type(t_mesh), intent(inout) , target :: mesh + type(t_tracer), intent(in), target :: tracers(:) integer :: node, nzmin real(kind=WP) :: gama, L, aux real(kind=WP) :: c2, c3, c4, c5, c6 @@ -336,8 +350,8 @@ subroutine cavity_heat_water_fluxes_2eq(mesh) do node=1,myDim_nod2D nzmin = ulevels_nod2D(node) if(nzmin==1) cycle - t_i = tr_arr(nzmin,node,1) - s_i = tr_arr(nzmin,node,2) + t_i = tracers(1)%values(nzmin,node) + s_i = tracers(2)%values(nzmin,node) t_fz = c3*(s_i**(3./2.)) + c4*(s_i**2) + c5*s_i + c6*abs(Z_3d_n(nzmin,node)) heat_flux(node)=vcpw*gama*(t_i - t_fz) ! Hunter2006 used cpw=3974J/Kg (*rhowat) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 957aa0822..3f85b7e95 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -8,6 +8,7 @@ program main USE MOD_MESH +USE MOD_TRACER USE o_ARRAYS USE o_PARAM USE g_PARSUP @@ -22,9 +23,16 @@ program main use io_mesh_info use diagnostics use mo_tidal +use tracer_init_interface +use ocean_setup_interface +use ice_setup_interface +use ocean2ice_interface +use oce_fluxes_interface +use update_atm_forcing_interface +use before_oce_step_interface +use oce_timestep_ale_interface use fesom_version_info_module use command_line_options_module - ! Define icepack module #if defined (__icepack) use icedrv_main, only: set_icepack, init_icepack, alloc_icepack @@ -44,7 +52,9 @@ program main real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) real(kind=real32) :: runtime_alltimesteps -type(t_mesh), target, save :: mesh +type(t_mesh), target, save :: mesh +type(t_tracer), allocatable, target, save :: tracers(:) + character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt integer mpi_version_len @@ -94,7 +104,11 @@ program main !===================== call check_mesh_consistency(mesh) if (mype==0) t2=MPI_Wtime() - call ocean_setup(mesh) + + call tracer_init(tracers, mesh) ! allocate array of ocean tracers (derived type "t_tracer") + call arrays_init(mesh) ! allocate other arrays (to be refactured same as tracers in the future) + call ocean_setup(tracers, mesh) ! + if (mype==0) then write(*,*) 'FESOM ocean_setup... complete' t3=MPI_Wtime() @@ -102,13 +116,13 @@ program main call forcing_setup(mesh) if (mype==0) t4=MPI_Wtime() if (use_ice) then - call ice_setup(mesh) + call ice_setup(tracers, mesh) ice_steps_since_upd = ice_ave_steps-1 ice_update=.true. if (mype==0) write(*,*) 'EVP scheme option=', whichEVP endif if (mype==0) t5=MPI_Wtime() - call compute_diagnostics(0, mesh) ! allocate arrays for diagnostic + call compute_diagnostics(0, tracers, mesh) ! allocate arrays for diagnostic #if defined (__oasis) call cpl_oasis3mct_define_unstr(mesh) if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv @@ -133,8 +147,8 @@ program main ! if istep is not zero it will be decided whether restart shall be written ! if l_write is TRUE the restart will be forced ! if l_read the restart will be read - ! as an example, for reading restart one does: call restart(0, .false., .false., .true.) - call restart(0, .false., r_restart, mesh) ! istep, l_write, l_read + ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, mesh) + call restart(0, .false., r_restart, tracers, mesh) ! istep, l_write, l_read if (mype==0) t7=MPI_Wtime() ! store grid information into netcdf file @@ -214,19 +228,18 @@ program main !___compute horizontal velocity on nodes (originaly on elements)________ call compute_vel_nodes(mesh) - !___model sea-ice step__________________________________________________ t1 = MPI_Wtime() if(use_ice) then !___compute fluxes from ocean to ice________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(mesh) + call ocean2ice(tracers, mesh) !___compute update of atmospheric forcing____________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' t0_frc = MPI_Wtime() - call update_atm_forcing(n, mesh) - t1_frc = MPI_Wtime() + call update_atm_forcing(n, tracers, mesh) + t1_frc = MPI_Wtime() !___compute ice step________________________________________________ if (ice_steps_since_upd>=ice_ave_steps-1) then ice_update=.true. @@ -240,24 +253,23 @@ program main !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' call oce_fluxes_mom(mesh) ! momentum only - call oce_fluxes(mesh) + call oce_fluxes(tracers, mesh) end if - call before_oce_step(mesh) ! prepare the things if required + call before_oce_step(tracers, mesh) ! prepare the things if required t2 = MPI_Wtime() - !___model ocean step____________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - call oce_timestep_ale(n, mesh) + call oce_timestep_ale(n, tracers, mesh) t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, mesh) + call compute_diagnostics(1, tracers, mesh) t4 = MPI_Wtime() !___prepare output______________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, mesh) + call output (n, tracers, mesh) t5 = MPI_Wtime() - call restart(n, .false., .false., mesh) + call restart(n, .false., .false., tracers, mesh) t6 = MPI_Wtime() rtime_fullice = rtime_fullice + t2 - t1 diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 72a448b26..a508b7c25 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -37,11 +37,23 @@ subroutine integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, mesh) end interface end module +module update_atm_forcing_interface + interface + subroutine update_atm_forcing(istep, tracers, mesh) + use mod_mesh + use mod_tracer + integer, intent(in) :: istep + type(t_tracer), intent(in), target :: tracers(:) + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface +end module ! Routines for updating ocean surface forcing fields !------------------------------------------------------------------------- -subroutine update_atm_forcing(istep, mesh) +subroutine update_atm_forcing(istep, tracers, mesh) use o_PARAM use mod_MESH + use MOD_TRACER use o_arrays use i_arrays use i_param @@ -63,7 +75,8 @@ subroutine update_atm_forcing(istep, mesh) use force_flux_consv_interface implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) integer :: i, istep,itime,n2,n,nz,k,elem real(kind=WP) :: i_coef, aux real(kind=WP) :: dux, dvy,tx,ty,tvol @@ -101,7 +114,7 @@ subroutine update_atm_forcing(istep, mesh) #if defined (__oifs) ! AWI-CM3 outgoing state vectors do n=1,myDim_nod2D+eDim_nod2D - exchange(n)=tr_arr(1, n, 1)+tmelt ! sea surface temperature [K] + exchange(n)=tracers(1)%values(1, n)+tmelt ! sea surface temperature [K] end do elseif (i.eq.2) then exchange(:) = a_ice(:) ! ice concentation [%] @@ -116,7 +129,7 @@ subroutine update_atm_forcing(istep, mesh) #else ! AWI-CM2 outgoing state vectors do n=1,myDim_nod2D+eDim_nod2D - exchange(n)=tr_arr(1, n, 1) ! sea surface temperature [°C] + exchange(n)=tracers(1)%values(1, n) ! sea surface temperature [°C] end do elseif (i.eq.2) then exchange(:) = m_ice(:) ! ice thickness [m] diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 5bbce229d..eac828d2a 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -13,6 +13,7 @@ MODULE g_ic3d !! USE o_ARRAYS USE MOD_MESH + USE MOD_TRACER USE o_PARAM USE g_PARSUP USE g_comm_auto @@ -296,7 +297,7 @@ SUBROUTINE nc_ic3d_ini(mesh) end if END SUBROUTINE nc_ic3d_ini - SUBROUTINE getcoeffld(mesh) + SUBROUTINE getcoeffld(tracers, mesh) !!--------------------------------------------------------------------- !! *** ROUTINE getcoeffld *** !! @@ -323,13 +324,14 @@ SUBROUTINE getcoeffld(mesh) integer :: elnodes(3) integer :: ierror ! return error code - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) #include "associate_mesh.h" ALLOCATE(ncdata(nc_Nlon,nc_Nlat,nc_Ndepth), data1d(nc_Ndepth)) ncdata=0.0_WP data1d=0.0_WP - tr_arr(:,:,current_tracer)=dummy + tracers(current_tracer)%values(:,:)=dummy !open NETCDF file on 0 core if (mype==0) then iost = nf_open(filename,NF_NOWRITE,ncid) @@ -360,7 +362,6 @@ SUBROUTINE getcoeffld(mesh) call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) call check_nferr(iost,filename) call MPI_BCast(ncdata, nc_Nlon*nc_Nlat*nc_Ndepth, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - ! bilinear space interpolation, ! data is assumed to be sampled on a regular grid do ii = 1, myDim_nod2d @@ -370,8 +371,6 @@ SUBROUTINE getcoeffld(mesh) j = bilin_indx_j(ii) ip1 = i + 1 jp1 = j + 1 -!!PS x = geo_coord_nod2D(1,ii)/rad -!!PS y = geo_coord_nod2D(2,ii)/rad !______________________________________________________________________ ! its a cavity node use extrapolation points of closest cavity line point ! exchange the coordinates of the cavity node with the coordinates of the @@ -426,11 +425,11 @@ SUBROUTINE getcoeffld(mesh) cf_a = (d2 - d1)/ delta_d ! value of interpolated OB data on Z from model cf_b = d1 - cf_a * nc_depth(d_indx) - !!PS tr_arr(k,ii,current_tracer) = -cf_a * Z_3d_n(k,ii) + cf_b - tr_arr(k,ii,current_tracer) = -cf_a * aux_z + cf_b + !!PS tracers(current_tracer)%values(k,ii) = -cf_a * Z_3d_n(k,ii) + cf_b + tracers(current_tracer)%values(k,ii) = -cf_a * aux_z + cf_b end if elseif (d_indx==0) then - tr_arr(k,ii,current_tracer)=data1d(1) + tracers(current_tracer)%values(k,ii)=data1d(1) end if enddo !___________________________________________________________________ @@ -450,10 +449,10 @@ SUBROUTINE getcoeffld(mesh) cf_a = (d2 - d1)/ delta_d ! value of interpolated OB data on Z from model cf_b = d1 - cf_a * nc_depth(d_indx) - tr_arr(k,ii,current_tracer) = -cf_a * Z_3d_n(k,ii) + cf_b + tracers(current_tracer)%values(k,ii) = -cf_a * Z_3d_n(k,ii) + cf_b end if elseif (d_indx==0) then - tr_arr(k,ii,current_tracer)=data1d(1) + tracers(current_tracer)%values(k,ii)=data1d(1) end if enddo end if ! --> if (use_cavity) then @@ -466,16 +465,19 @@ SUBROUTINE getcoeffld(mesh) DEALLOCATE( ncdata, data1d ) END SUBROUTINE getcoeffld - SUBROUTINE do_ic3d(mesh) + SUBROUTINE do_ic3d(tracers, mesh) !!--------------------------------------------------------------------- !! *** ROUTINE do_ic3d *** !! !! ** Purpose : read 3D initial conditions for tracers from netcdf and interpolate on model grid !!---------------------------------------------------------------------- + USE insitu2pot_interface IMPLICIT NONE integer :: n, i - type(t_mesh), intent(in) , target :: mesh real(kind=WP) :: locTmax, locTmin, locSmax, locSmin, glo + + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) if (mype==0) write(*,*) "Start: Initial conditions for tracers" @@ -488,9 +490,9 @@ SUBROUTINE do_ic3d(mesh) ! read initial conditions for current tracer call nc_ic3d_ini(mesh) ! get first coeficients for time inerpolation on model grid for all datas - call getcoeffld(mesh) + call getcoeffld(tracers, mesh) call nc_end ! deallocate arrqays associated with netcdf file - call extrap_nod(tr_arr(:,:,current_tracer), mesh) + call extrap_nod(tracers(current_tracer)%values(:,:), mesh) exit elseif (current_tracer==num_tracers) then if (mype==0) write(*,*) "idlist contains tracer which is not listed in tracer_id!" @@ -502,42 +504,35 @@ SUBROUTINE do_ic3d(mesh) END DO DEALLOCATE(bilin_indx_i, bilin_indx_j) - !_________________________________________________________________________ - ! set remaining dummy values from bottom topography to 0.0_WP - where (tr_arr > 0.9_WP*dummy) - tr_arr=0.0_WP - end where - + do current_tracer=1, num_tracers + !_________________________________________________________________________ + ! set remaining dummy values from bottom topography to 0.0_WP + where (tracers(current_tracer)%values > 0.9_WP*dummy) + tracers(current_tracer)%values=0.0_WP + end where + + !_________________________________________________________________________ + ! eliminate values within cavity that result from the extrapolation of + ! initialisation + do n=1,myDim_nod2d + eDim_nod2D + ! ensure cavity is zero + if (use_cavity) tracers(current_tracer)%values(1:mesh%ulevels_nod2D(n)-1,n)=0.0_WP + ! ensure bottom is zero + tracers(current_tracer)%values(mesh%nlevels_nod2D(n):mesh%nl-1,n)=0.0_WP + end do + end do !_________________________________________________________________________ ! convert temperature from Kelvin --> °C - where (tr_arr(:,:,1) > 100._WP) - tr_arr(:,:,1)=tr_arr(:,:,1)-273.15_WP + where (tracers(1)%values(:,:) > 100._WP) + tracers(1)%values(:,:) = tracers(1)%values(:,:)-273.15_WP end where - !_________________________________________________________________________ - ! eliminate values within cavity that result from the extrapolation of - ! initialisation - do n=1,myDim_nod2d + eDim_nod2D - ! ensure cavity is zero - if (use_cavity) tr_arr(1:mesh%ulevels_nod2D(n)-1,n,:)=0.0_WP - ! ensure bottom is zero - tr_arr(mesh%nlevels_nod2D(n):mesh%nl-1,n,:)=0.0_WP - end do - !_________________________________________________________________________ if (t_insitu) then if (mype==0) write(*,*) "converting insitu temperature to potential..." - call insitu2pot(mesh) + call insitu2pot(tracers, mesh) end if - if (mype==0) write(*,*) "DONE: Initial conditions for tracers" - - !_________________________________________________________________________ - ! Homogenous temp salt initialisation --> for testing and debuging -!!PS do n=1,myDim_nod2d + eDim_nod2D -!!PS tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,1) = 16.0 -!!PS tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,2) = 35.0 -!!PS end do - + if (mype==0) write(*,*) "DONE: Initial conditions for tracers" !_________________________________________________________________________ ! check initial fields locTmax = -6666 @@ -545,31 +540,10 @@ SUBROUTINE do_ic3d(mesh) locSmax = locTmax locSmin = locTmin do n=1,myDim_nod2d -!!PS if (any( tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,2)>0.99_WP*dummy)) then -!!PS write(*,*) '____________________________________________________________' -!!PS write(*,*) ' --> check init fields SALT >0.99_WP*dummy' -!!PS write(*,*) 'mype =',mype -!!PS write(*,*) 'n =',n -!!PS write(*,*) 'lon,lat =',mesh%geo_coord_nod2D(:,n)/rad -!!PS write(*,*) 'mesh%ulevels_nod2D(n) =',mesh%ulevels_nod2D(n) -!!PS write(*,*) 'mesh%nlevels_nod2D(n) =',mesh%nlevels_nod2D(n) -!!PS write(*,*) 'tr_arr(unl:lnl,n,2) =',tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,2) -!!PS write(*,*) 'tr_arr( 1:lnl,n,2) =',tr_arr(1:mesh%nlevels_nod2D(n)-1,n,2) -!!PS end if -!!PS if (any( tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,1)>0.99_WP*dummy)) then -!!PS write(*,*) '____________________________________________________________' -!!PS write(*,*) ' --> check init fields TEMP >0.99_WP*dummy' -!!PS write(*,*) 'mype =',mype -!!PS write(*,*) 'n =',n -!!PS write(*,*) 'lon,lat =',mesh%geo_coord_nod2D(:,n)/rad -!!PS write(*,*) 'mesh%ulevels_nod2D(n) =',mesh%ulevels_nod2D(n) -!!PS write(*,*) 'mesh%nlevels_nod2D(n) =',mesh%nlevels_nod2D(n) -!!PS write(*,*) 'tr_arr(:,n,1) =',tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,1) -!!PS end if - locTmax = max(locTmax,maxval(tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,1)) ) - locTmin = min(locTmin,minval(tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,1)) ) - locSmax = max(locSmax,maxval(tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,2)) ) - locSmin = min(locSmin,minval(tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,2)) ) + locTmax = max(locTmax,maxval(tracers(1)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) + locTmin = min(locTmin,minval(tracers(1)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) + locSmax = max(locSmax,maxval(tracers(2)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) + locSmin = min(locSmin,minval(tracers(2)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) end do call MPI_AllREDUCE(locTmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) if (mype==0) write(*,*) ' |-> gobal max init. temp. =', glo @@ -578,8 +552,7 @@ SUBROUTINE do_ic3d(mesh) call MPI_AllREDUCE(locSmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) if (mype==0) write(*,*) ' |-> gobal max init. salt. =', glo call MPI_AllREDUCE(locSmin , glo , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - if (mype==0) write(*,*) ' `-> gobal min init. salt. =', glo - + if (mype==0) write(*,*) ' `-> gobal min init. salt. =', glo END SUBROUTINE do_ic3d diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 823093fec..41da7e75e 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -23,6 +23,7 @@ module g_cvmix_kpp use g_config use o_param use mod_mesh + use mod_tracer use g_parsup use o_arrays use g_comm_auto @@ -341,8 +342,9 @@ end subroutine init_cvmix_kpp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_kpp(mesh) - type(t_mesh), intent(in) , target :: mesh + subroutine calc_cvmix_kpp(tracers, mesh) + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) integer :: node, elem, nz, nln, nun, elnodes(3), aux_nz real(kind=WP) :: vshear2, dz2, aux, aux_wm(mesh%nl), aux_ws(mesh%nl) real(kind=WP) :: aux_coeff, sigma, stable @@ -352,7 +354,10 @@ subroutine calc_cvmix_kpp(mesh) real(kind=WP) :: sldepth, sfc_temp, sfc_salt, sfc_u, sfc_v, htot, delh, rho_sfc, rho_nz real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 real(kind=WP) :: sfc_rhopot, sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2 + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" + temp=>tracers(1)%values(:,:) + salt=>tracers(2)%values(:,:) !_______________________________________________________________________ kpp_Av = 0.0_WP kpp_Kv = 0.0_WP @@ -404,9 +409,9 @@ subroutine calc_cvmix_kpp(mesh) ! buoyancy difference with respect to the surface --> computed in ! oce_ale_pressure_bf.F90 --> subroutine pressure_bv ! --> dbsfc(nz,node) - !!PS call densityJM_components(tr_arr(1,node,1), tr_arr(1,node,2), sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2, sfc_rhopot, mesh) - call densityJM_components(tr_arr(nun,node,1), tr_arr(nun,node,2), sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2, sfc_rhopot, mesh) - call densityJM_components(tr_arr(nz,node,1), tr_arr(nz,node,2), bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + !!PS call densityJM_components(temp(1,node), salt(1,node), sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2, sfc_rhopot, mesh) + call densityJM_components(temp(nun,node), salt(nun,node), sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2, sfc_rhopot, mesh) + call densityJM_components(temp(nz, node), salt(nz, node), bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) rho_nz = bulk_0 + Z_3d_n(nz,node)*(bulk_pz + Z_3d_n(nz,node)*bulk_pz2) rho_nz = rho_nz*rhopot/(rho_nz+0.1_WP*Z_3d_n(nz,node))-density_0 rho_sfc = sfc_bulk_0 + Z_3d_n(nz,node)*(sfc_bulk_pz + Z_3d_n(nz,node)*sfc_bulk_pz2) @@ -441,8 +446,8 @@ subroutine calc_cvmix_kpp(mesh) do nztmp = nun, nzsfc delh = min( max(0.0_WP,sldepth-htot), hnode(nztmp,node) ) htot = htot+delh - sfc_temp = sfc_temp + tr_arr(nztmp,node,1)*delh - sfc_salt = sfc_salt + tr_arr(nztmp,node,2)*delh + sfc_temp = sfc_temp + temp(nztmp,node)*delh + sfc_salt = sfc_salt + salt(nztmp,node)*delh sfc_u = sfc_u + Unode(1,nztmp,node) *delh sfc_v = sfc_v + Unode(2,nztmp,node) *delh end do @@ -464,7 +469,7 @@ subroutine calc_cvmix_kpp(mesh) ! depth level as the deep point --> than calculate bouyancy ! difference call densityJM_components(sfc_temp, sfc_salt, sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2, sfc_rhopot, mesh) - call densityJM_components(tr_arr(nz,node,1), tr_arr(nz,node,2), bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + call densityJM_components(temp(nz,node), salt(nz,node), bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) rho_nz = bulk_0 + Z_3d_n(nz,node)*(bulk_pz + Z_3d_n(nz,node)*bulk_pz2) rho_nz = rho_nz*rhopot/(rho_nz+0.1_WP*Z_3d_n(nz,node))-density_0 rho_sfc = sfc_bulk_0 + Z_3d_n(nz,node)*(sfc_bulk_pz + Z_3d_n(nz,node)*sfc_bulk_pz2) @@ -491,10 +496,10 @@ subroutine calc_cvmix_kpp(mesh) !!PS if (flag_debug .and. mype==0) print *, achar(27)//'[35m'//' --> call surface buyflux[0m' !!PS kpp_sbuoyflx(node) = -g * & !!PS (sw_alpha(1,node)*heat_flux( node) / vcpw + & !heat_flux & water_flux: positive up - !!PS sw_beta( 1,node)*water_flux(node)*tr_arr(1,node,2)) + !!PS sw_beta( 1,node)*water_flux(node)*salt(1,node,2)) kpp_sbuoyflx(node) = -g * & (sw_alpha(nun,node)*heat_flux( node) / vcpw + & !heat_flux & water_flux: positive up - sw_beta( nun,node)*water_flux(node)*tr_arr(nun,node,2)) + sw_beta( nun,node)*water_flux(node)*salt(nun,node)) ! calculate friction velocity (ustar) at surface (m/s) diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 04d44e6a4..ef3b50874 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -2,6 +2,7 @@ module diagnostics use g_config use mod_mesh + use mod_tracer use g_parsup use g_clock use g_comm_auto @@ -76,10 +77,10 @@ module diagnostics !rhs_diag=ssh_rhs? subroutine diag_solver(mode, mesh) implicit none - integer, intent(in) :: mode - integer :: n, is, ie - logical, save :: firstcall=.true. - type(t_mesh), intent(in) , target :: mesh + integer, intent(in) :: mode + integer :: n, is, ie + logical, save :: firstcall=.true. + type(t_mesh), intent(in), target :: mesh #include "associate_mesh.h" !===================== @@ -384,22 +385,26 @@ subroutine diag_energy(mode, mesh) END DO end subroutine diag_energy ! ============================================================== -subroutine diag_densMOC(mode, mesh) +subroutine diag_densMOC(mode, tracers, mesh) implicit none - integer, intent(in) :: mode - type(t_mesh), intent(in) , target :: mesh - integer :: nz, snz, elem, nzmax, nzmin, elnodes(3), is, ie, pos - integer :: e, edge, enodes(2), eelems(2) - real(kind=WP) :: div, deltaX, deltaY, locz - integer :: jj - real(kind=WP), save :: dd - real(kind=WP) :: uvdz_el(2), rhoz_el, vol_el, dz, weight, dmin, dmax, ddiff, test, test1, test2, test3 - real(kind=WP), save, allocatable :: dens(:), aux(:), el_depth(:) - real(kind=WP), save, allocatable :: std_dens_w(:,:), std_dens_VOL1(:,:), std_dens_VOL2(:,:) - logical, save :: firstcall_s=.true., firstcall_e=.true. - + integer, intent(in) :: mode + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + integer :: nz, snz, elem, nzmax, nzmin, elnodes(3), is, ie, pos + integer :: e, edge, enodes(2), eelems(2) + real(kind=WP) :: div, deltaX, deltaY, locz + integer :: jj + real(kind=WP), save :: dd + real(kind=WP) :: uvdz_el(2), rhoz_el, vol_el, dz, weight, dmin, dmax, ddiff, test, test1, test2, test3 + real(kind=WP), save, allocatable :: dens(:), aux(:), el_depth(:) + real(kind=WP), save, allocatable :: std_dens_w(:,:), std_dens_VOL1(:,:), std_dens_VOL2(:,:) + logical, save :: firstcall_s=.true., firstcall_e=.true. + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" + temp=>tracers(1)%values(:,:) + salt=>tracers(2)%values(:,:) + if (firstcall_s) then !allocate the stuff at the first call allocate(std_dens_UVDZ(2,std_dens_N, myDim_elem2D)) allocate(std_dens_w ( std_dens_N, myDim_elem2D)) @@ -453,7 +458,7 @@ subroutine diag_densMOC(mode, mesh) do jj=1,3 dens_flux_e(elem)=dens_flux_e(elem) + (sw_alpha(ulevels_nod2D(elnodes(jj)),elnodes(jj)) * heat_flux_in(elnodes(jj)) / vcpw + & sw_beta(ulevels_nod2D(elnodes(jj)),elnodes(jj)) * (relax_salt (elnodes(jj)) + water_flux(elnodes(jj)) * & - tr_arr(ulevels_nod2D(elnodes(jj)),elnodes(jj),2))) + salt(ulevels_nod2D(elnodes(jj)),elnodes(jj)))) end do dens_flux_e(elem) =dens_flux_e(elem)/3.0_WP ! density_dmoc is the sigma_2 density given at nodes. it is computed in oce_ale_pressure_bv @@ -479,7 +484,7 @@ subroutine diag_densMOC(mode, mesh) dd = 0.0_WP do jj=1,3 - dd = dd + (sw_beta (1,elnodes(jj)) * water_flux(elnodes(jj)) * tr_arr(ulevels_nod2D(elnodes(jj)), elnodes(jj), 2)) + dd = dd + (sw_beta (1,elnodes(jj)) * water_flux(elnodes(jj)) * salt(ulevels_nod2D(elnodes(jj)), elnodes(jj))) end do std_dens_flux(3, is,elem)=std_dens_flux(3, is,elem)+elem_area(elem)*dd/3. @@ -632,11 +637,12 @@ subroutine diag_densMOC(mode, mesh) end subroutine diag_densMOC ! ============================================================== -subroutine compute_diagnostics(mode, mesh) +subroutine compute_diagnostics(mode, tracers, mesh) implicit none - integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) - real(kind=WP) :: val - type(t_mesh), intent(in) , target :: mesh + integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) + real(kind=WP) :: val + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) !1. solver diagnostic if (ldiag_solver) call diag_solver(mode, mesh) !2. compute curl(stress_surf) @@ -648,14 +654,14 @@ subroutine compute_diagnostics(mode, mesh) !5. print integrated temperature if (ldiag_salt3d) then if (mod(mstep,logfile_outfreq)==0) then - call integrate_nod(tr_arr(:,:,2), val, mesh) + call integrate_nod(tracers(2)%values(:,:), val, mesh) if (mype==0) then write(*,*) 'total integral of salinity at timestep :', mstep, val end if end if end if !6. MOC in density coordinate - if (ldiag_dMOC) call diag_densMOC(mode, mesh) + if (ldiag_dMOC) call diag_densMOC(mode, tracers, mesh) end subroutine compute_diagnostics @@ -670,13 +676,13 @@ end subroutine compute_diagnostics ! in a coastal model application ... ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... -subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, mesh) +subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tracer, mesh) use o_arrays use g_PARSUP use oce_adv_tra_driver_interfaces implicit none - type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: tr_num + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracer integer :: node, nz, nzmin, nzmax real(kind=WP) :: tr_sqr(mesh%nl-1,myDim_nod2D+eDim_nod2D), trAB_sqr(mesh%nl-1,myDim_nod2D+eDim_nod2D) @@ -692,8 +698,8 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, mesh) nzmax = nlevels_nod2D(node)-1 nzmin = ulevels_nod2D(node) do nz = nzmin, nzmax - tr_sqr(nz,node) = tr_arr(nz,node,tr_num)**2 - trAB_sqr(nz,node) = tr_arr_old(nz,node,tr_num)**2 + tr_sqr(nz,node) = tracer%values (nz,node)**2 + trAB_sqr(nz,node) = tracer%valuesAB(nz,node)**2 end do end do @@ -722,8 +728,8 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, mesh) ! --> split it up in DVD contribution from horizontal and vertical ! advection since for the horizontal advection Adams Bashfort tracer ! are used and for the vertical the normal tracer values. - tr_dvd_horiz(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*trAB_sqr(nz,node) - del_ttf_advhoriz(nz,node)/hnode_new(nz,node) - tr_dvd_vert(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*tr_sqr( nz,node) - del_ttf_advvert( nz,node)/hnode_new(nz,node) + tr_dvd_horiz(nz,node,tracer%ID) = hnode(nz,node)/hnode_new(nz,node)*trAB_sqr(nz,node) - del_ttf_advhoriz(nz,node)/hnode_new(nz,node) + tr_dvd_vert(nz,node,tracer%ID) = hnode(nz,node)/hnode_new(nz,node)*tr_sqr( nz,node) - del_ttf_advvert( nz,node)/hnode_new(nz,node) end do end do end subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008 @@ -736,14 +742,14 @@ end subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008 ! see: ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... -subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, mesh) +subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tracer, mesh) use o_arrays use g_PARSUP use oce_adv_tra_driver_interfaces implicit none - integer, intent(in) :: tr_num - integer :: node, nz, nzmin, nzmax - type(t_mesh), intent(in), target :: mesh + integer :: node, nz, nzmin, nzmax + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracer #include "associate_mesh.h" !___________________________________________________________________________ @@ -752,7 +758,7 @@ subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, mesh) ! numerically induced mixing in ocean models ... del_ttf_advhoriz = 0.0_WP del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(tr_arr(:,:,tr_num), tr_arr_old(:,:,tr_num), UV, wvel, wvel_i, wvel_e, 2, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) + call do_oce_adv_tra(tracer%values, tracer%valuesAB(:,:), UV, wvel, wvel_i, wvel_e, 2, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D @@ -775,9 +781,9 @@ subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, mesh) ! --> split it up in DVD contribution from horizontal and vertical ! advection since for the horizontal advection Adams Bashfort tracer ! are used and for the vertical the normal tracer values. - tr_dvd_horiz(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*(tr_arr_old(nz,node,tr_num)**2) & + tr_dvd_horiz(nz,node,tracer%ID) = hnode(nz,node)/hnode_new(nz,node)*(tracer%valuesAB(nz,node)**2) & - del_ttf_advhoriz(nz,node)/hnode_new(nz,node) - tr_dvd_vert(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*(tr_arr( nz,node,tr_num)**2) & + tr_dvd_vert(nz,node,tracer%ID) = hnode(nz,node)/hnode_new(nz,node)*(tracer%values (nz,node)**2) & - del_ttf_advvert( nz,node)/hnode_new(nz,node) end do end do @@ -793,15 +799,15 @@ end subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014 ! in a coastal model application ... ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... -subroutine compute_diag_dvd(tr_num, mesh) +subroutine compute_diag_dvd(tracer, mesh) use g_config, only: dt use o_arrays use g_PARSUP implicit none - integer, intent(in) :: tr_num integer :: node, nz, nzmin, nzmax - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracer #include "associate_mesh.h" !___________________________________________________________________________ @@ -821,15 +827,15 @@ subroutine compute_diag_dvd(tr_num, mesh) ! now add this part ! --> tr_dvd_horiz contains already the expected target second moments ! from subroutine compute_diag_dvd_2ndmoment - tr_dvd_horiz(nz,node,tr_num) = (tr_dvd_horiz(nz,node,tr_num) & - -( hnode(nz,node)/hnode_new(nz,node)*tr_arr_old(nz,node,tr_num) & - -del_ttf_advhoriz(nz,node)/hnode_new(nz,node) & - )**2 & + tr_dvd_horiz(nz,node,tracer%ID) = (tr_dvd_horiz(nz,node,tracer%ID) & + -( hnode(nz,node)/hnode_new(nz,node)*tracer%valuesAB(nz,node) & + -del_ttf_advhoriz(nz,node)/hnode_new(nz,node) & + )**2 & )/dt - tr_dvd_vert(nz,node,tr_num) = (tr_dvd_vert(nz,node,tr_num) & - -( hnode(nz,node)/hnode_new(nz,node)*tr_arr( nz,node,tr_num) & - -del_ttf_advvert( nz,node)/hnode_new(nz,node) & - )**2 & + tr_dvd_vert(nz,node,tracer%ID) = (tr_dvd_vert(nz,node,tracer%ID) & + -( hnode(nz,node)/hnode_new(nz,node)*tracer%values (nz,node) & + -del_ttf_advvert( nz,node)/hnode_new(nz,node) & + )**2 & )/dt end do end do diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 760d604af..a5eabfae5 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -1,3 +1,25 @@ +module ocean2ice_interface + interface + subroutine ocean2ice(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_mesh), intent(in) , target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) + end subroutine + end interface +end module + +module oce_fluxes_interface + interface + subroutine oce_fluxes(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_mesh), intent(in) , target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) + end subroutine + end interface +end module + ! ! !_______________________________________________________________________________ @@ -78,7 +100,7 @@ end subroutine oce_fluxes_mom ! ! !_______________________________________________________________________________ -subroutine ocean2ice(mesh) +subroutine ocean2ice(tracers, mesh) ! transmits the relevant fields from the ocean to the ice model @@ -86,31 +108,35 @@ subroutine ocean2ice(mesh) use o_ARRAYS use i_ARRAYS use MOD_MESH + use MOD_TRACER use g_PARSUP USE g_CONFIG use g_comm_auto implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) integer :: n, elem, k real(kind=WP) :: uw, vw, vol - + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" + temp=>tracers(1)%values(:,:) + salt=>tracers(2)%values(:,:) ! the arrays in the ice model are renamed if (ice_update) then do n=1, myDim_nod2d+eDim_nod2d if (ulevels_nod2D(n)>1) cycle - T_oc_array(n) = tr_arr(1,n,1) - S_oc_array(n) = tr_arr(1,n,2) + T_oc_array(n) = temp(1,n) + S_oc_array(n) = salt(1,n) elevation(n) = hbar(n) end do else do n=1, myDim_nod2d+eDim_nod2d if (ulevels_nod2D(n)>1) cycle - T_oc_array(n) = (T_oc_array(n)*real(ice_steps_since_upd,WP)+tr_arr(1,n,1))/real(ice_steps_since_upd+1,WP) - S_oc_array(n) = (S_oc_array(n)*real(ice_steps_since_upd,WP)+tr_arr(1,n,2))/real(ice_steps_since_upd+1,WP) + T_oc_array(n) = (T_oc_array(n)*real(ice_steps_since_upd,WP)+temp(1,n))/real(ice_steps_since_upd+1,WP) + S_oc_array(n) = (S_oc_array(n)*real(ice_steps_since_upd,WP)+salt(1,n))/real(ice_steps_since_upd+1,WP) elevation(n) = (elevation(n) *real(ice_steps_since_upd,WP)+ hbar(n))/real(ice_steps_since_upd+1,WP) !NR !PS elevation(n)=(elevation(n)*real(ice_steps_since_upd)+eta_n(n))/real(ice_steps_since_upd+1,WP) !NR elevation(n)=(elevation(n)*real(ice_steps_since_upd)+hbar(n))/real(ice_steps_since_upd+1,WP) !PS @@ -152,9 +178,10 @@ end subroutine ocean2ice ! ! !_______________________________________________________________________________ -subroutine oce_fluxes(mesh) +subroutine oce_fluxes(tracers, mesh) use MOD_MESH + use MOD_TRACER USE g_CONFIG use o_ARRAYS use i_ARRAYS @@ -169,14 +196,17 @@ subroutine oce_fluxes(mesh) use icedrv_main, only: icepack_to_fesom, & init_flux_atm_ocn #endif - + use cavity_heat_water_fluxes_3eq_interface implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) integer :: n, elem, elnodes(3),n1 real(kind=WP) :: rsss, net real(kind=WP), allocatable :: flux(:) - + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" + temp=>tracers(1)%values(:,:) + salt=>tracers(2)%values(:,:) allocate(flux(myDim_nod2D+eDim_nod2D)) flux = 0.0_WP @@ -218,8 +248,7 @@ subroutine oce_fluxes(mesh) water_flux = -fresh_wa_flux #endif heat_flux_in=heat_flux ! sw_pene will change the heat_flux - - if (use_cavity) call cavity_heat_water_fluxes_3eq(mesh) + if (use_cavity) call cavity_heat_water_fluxes_3eq(tracers, mesh) !!PS if (use_cavity) call cavity_heat_water_fluxes_2eq(mesh) !!PS where(ulevels_nod2D>1) heat_flux=0.0_WP @@ -245,8 +274,8 @@ subroutine oce_fluxes(mesh) if (use_virt_salt) then ! will remain zero otherwise rsss=ref_sss do n=1, myDim_nod2D+eDim_nod2D - !!PS if (ref_sss_local) rsss = tr_arr(1,n,2) - if (ref_sss_local) rsss = tr_arr(ulevels_nod2d(n),n,2) + !!PS if (ref_sss_local) rsss = salt(1,n) + if (ref_sss_local) rsss = salt(ulevels_nod2d(n),n) virtual_salt(n)=rsss*water_flux(n) end do @@ -261,7 +290,7 @@ subroutine oce_fluxes(mesh) end if where (ulevels_nod2d == 1) - dens_flux=sw_alpha(1,:) * heat_flux_in / vcpw + sw_beta(1, :) * (relax_salt + water_flux * tr_arr(1,:,2)) + dens_flux=sw_alpha(1,:) * heat_flux_in / vcpw + sw_beta(1, :) * (relax_salt + water_flux * salt(1,:)) elsewhere dens_flux=0.0_WP end where @@ -271,13 +300,13 @@ subroutine oce_fluxes(mesh) do n=1, myDim_nod2D+eDim_nod2D relax_salt(n) = 0.0_WP if (ulevels_nod2d(n)>1) cycle - !!PS relax_salt(n)=surf_relax_S*(Ssurf(n)-tr_arr(1,n,2)) - relax_salt(n)=surf_relax_S*(Ssurf(n)-tr_arr(ulevels_nod2d(n),n,2)) + !!PS relax_salt(n)=surf_relax_S*(Ssurf(n)-salt(1,n)) + relax_salt(n)=surf_relax_S*(Ssurf(n)-salt(ulevels_nod2d(n),n)) end do else do n=1, myDim_nod2D+eDim_nod2D - !!PS relax_salt(n)=surf_relax_S*(Ssurf(n)-tr_arr(1,n,2)) - relax_salt(n)=surf_relax_S*(Ssurf(n)-tr_arr(ulevels_nod2d(n),n,2)) + !!PS relax_salt(n)=surf_relax_S*(Ssurf(n)-salt(1,n)) + relax_salt(n)=surf_relax_S*(Ssurf(n)-salt(ulevels_nod2d(n),n)) end do end if diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 183b2ba5e..c6c8885aa 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -1,31 +1,19 @@ -module ice_setup_step_interfaces - interface - subroutine ice_array_setup(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine ice_initial_state(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module - -! ! !_______________________________________________________________________________ ! ice initialization + array allocation + time stepping -subroutine ice_setup(mesh) +subroutine ice_setup(tracers, mesh) use o_param use g_parsup use i_param use i_arrays use g_CONFIG use mod_mesh - use ice_setup_step_interfaces + use mod_tracer + use ice_array_setup_interface + use ice_initial_state_interface implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) ! ================ DO not change ice_dt=real(ice_ave_steps,WP)*dt @@ -41,7 +29,7 @@ subroutine ice_setup(mesh) ! Initialization routine, user input is required ! ================ !call ice_init_fields_test - call ice_initial_state(mesh) ! Use it unless running test example + call ice_initial_state(tracers, mesh) ! Use it unless running test example if(mype==0) write(*,*) 'Ice is initialized' end subroutine ice_setup ! @@ -281,22 +269,24 @@ end subroutine ice_timestep ! !_______________________________________________________________________________ ! sets inital values or reads restart file for ice model -subroutine ice_initial_state(mesh) +subroutine ice_initial_state(tracers, mesh) use i_ARRAYs use MOD_MESH + use MOD_TRACER use o_PARAM use o_arrays use g_parsup use g_CONFIG implicit none ! - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) integer :: i character(MAX_PATH) :: filename real(kind=WP), external :: TFrez ! Sea water freeze temperature. #include "associate_mesh.h" - + write(*,*) tracers(1)%ID, tracers(2)%ID m_ice =0._WP a_ice =0._WP u_ice =0._WP @@ -314,7 +304,7 @@ subroutine ice_initial_state(mesh) endif !_______________________________________________________________________ - if (tr_arr(1,i,1)< 0.0_WP) then + if (tracers(1)%values(1,i)< 0.0_WP) then if (geo_coord_nod2D(2,i)>0._WP) then m_ice(i) = 1.0_WP m_snow(i)= 0.1_WP diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 27dcf686c..107e5247b 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -4,6 +4,7 @@ MODULE io_BLOWUP use g_parsup use g_comm_auto USE MOD_MESH + USE MOD_TRACER use o_arrays use i_arrays implicit none @@ -62,10 +63,11 @@ MODULE io_BLOWUP !_______________________________________________________________________________ ! ini_ocean_io initializes bid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! - subroutine ini_blowup_io(year, mesh) + subroutine ini_blowup_io(year, tracers, mesh) implicit none - type(t_mesh), intent(in) , target :: mesh integer, intent(in) :: year + type(t_tracer), intent(in), target :: tracers(:) + type(t_mesh), intent(in), target :: mesh integer :: ncid, j integer :: varid character(500) :: longname @@ -130,9 +132,9 @@ subroutine ini_blowup_io(year, mesh) write(longname,'(A15,i1)') 'passive tracer ', j units='none' END SELECT - call def_variable(bid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tr_arr(:,:,j)); + call def_variable(bid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tracers(j)%values(:,:)); !!PS longname=trim(longname)//', Adams–Bashforth' -!!PS call def_variable(bid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tr_arr_old(:,:,j)); +!!PS call def_variable(bid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers(j)%valuesAB(:,:)(:,:)); end do call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel); call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel_e); @@ -166,13 +168,14 @@ end subroutine ini_blowup_io ! ! !_______________________________________________________________________________ - subroutine blowup(istep, mesh) + subroutine blowup(istep, tracers, mesh) implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) integer :: istep ctime=timeold+(dayold-1.)*86400 - call ini_blowup_io(yearnew, mesh) + call ini_blowup_io(yearnew, tracers, mesh) if(mype==0) write(*,*)'Do output (netCDF, blowup) ...' if(mype==0) write(*,*)' --> call assoc_ids(bid)' call assoc_ids(bid) ; call was_error(bid) @@ -276,7 +279,7 @@ subroutine def_variable_1d(id, name, dims, longname, units, data) character(len=*), intent(in) :: name integer, intent(in) :: dims(1) character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: data(:) + real(kind=WP),target, intent(in) :: data(:) integer :: c type(nc_vars), allocatable, dimension(:) :: temp @@ -312,7 +315,7 @@ subroutine def_variable_2d(id, name, dims, longname, units, data) character(len=*), intent(in) :: name integer, intent(in) :: dims(2) character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: data(:,:) + real(kind=WP),target, intent(in) :: data(:,:) integer :: c type(nc_vars), allocatable, dimension(:) :: temp @@ -349,7 +352,7 @@ subroutine write_blowup(id, istep, mesh) real(kind=WP), allocatable :: aux1(:), aux2(:,:) integer :: i, size1, size2, shape integer :: c - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh #include "associate_mesh.h" diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 9fd53e400..c9cf93384 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -91,7 +91,9 @@ subroutine destructor(this) end subroutine -subroutine ini_mean_io(mesh) +subroutine ini_mean_io(tracers, mesh) + use MOD_MESH + use MOD_TRACER use g_cvmix_tke use g_cvmix_idemix use g_cvmix_kpp @@ -106,7 +108,8 @@ subroutine ini_mean_io(mesh) integer,dimension(15) :: sel_forcvar=0 character(len=10) :: id_string - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) namelist /nml_listsize/ io_listsize namelist /nml_list / io_list @@ -139,9 +142,9 @@ subroutine ini_mean_io(mesh) SELECT CASE (trim(io_list(i)%id)) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!2D streams!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! CASE ('sst ') - call def_stream(nod2D, myDim_nod2D, 'sst', 'sea surface temperature', 'C', tr_arr(1,1:myDim_nod2D,1), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'sst', 'sea surface temperature', 'C', tracers(1)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('sss ') - call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tr_arr(1,1:myDim_nod2D,2), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tracers(2)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('ssh ') call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('vve_5 ') @@ -286,13 +289,13 @@ subroutine ini_mean_io(mesh) !>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 3D streams <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< !___________________________________________________________________________________________________________________________________ CASE ('temp ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'temp', 'temperature', 'C', tr_arr(:,:,1), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'temp', 'temperature', 'C', tracers(1)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('salt ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'salt', 'salinity', 'psu', tr_arr(:,:,2), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'salt', 'salinity', 'psu', tracers(2)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('otracers ') do j=3, num_tracers write (id_string, "(I3.3)") tracer_id(j) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'tra_'//id_string, 'pasive tracer ID='//id_string, 'n/a', tr_arr(:,:,j), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'tra_'//id_string, 'pasive tracer ID='//id_string, 'n/a', tracers(j)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) end do CASE ('slope_x ') call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_x', 'neutral slope X', 'none', slope_tapered(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) @@ -795,9 +798,10 @@ subroutine update_means ! !-------------------------------------------------------------------------------------------- ! -subroutine output(istep, mesh) +subroutine output(istep, tracers, mesh) use g_clock use mod_mesh + use mod_tracer use g_PARSUP use io_gather_module #if defined (__icepack) @@ -811,13 +815,14 @@ subroutine output(istep, mesh) integer :: n, k logical :: do_output type(Meandata), pointer :: entry - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) character(:), allocatable :: filepath real(real64) :: rtime !timestamp of the record ctime=timeold+(dayold-1.)*86400 if (lfirst) then - call ini_mean_io(mesh) + call ini_mean_io(tracers, mesh) call init_io_gather() #if defined (__icepack) call init_io_icepack(mesh) @@ -941,7 +946,7 @@ subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, fr implicit none integer, intent(in) :: glsize(2), lcsize(2) character(len=*), intent(in) :: name, description, units - real(kind=WP), target, intent(inout) :: data(:,:) + real(kind=WP), target, intent(in) :: data(:,:) integer, intent(in) :: freq character, intent(in) :: freq_unit integer, intent(in) :: accuracy @@ -1006,7 +1011,7 @@ subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, fr implicit none integer, intent(in) :: glsize, lcsize character(len=*), intent(in) :: name, description, units - real(kind=WP), target, intent(inout) :: data(:) + real(kind=WP), target, intent(in) :: data(:) integer, intent(in) :: freq character, intent(in) :: freq_unit integer, intent(in) :: accuracy diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 02ae07791..d130b8f65 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -4,6 +4,7 @@ MODULE io_RESTART use g_parsup use g_comm_auto use mod_mesh + use mod_tracer use o_arrays use i_arrays use g_cvmix_tke @@ -77,7 +78,7 @@ MODULE io_RESTART !-------------------------------------------------------------------------------------------- ! ini_ocean_io initializes oid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ocean_io(year, mesh) +subroutine ini_ocean_io(year, tracers, mesh) implicit none integer, intent(in) :: year @@ -87,8 +88,8 @@ subroutine ini_ocean_io(year, mesh) character(500) :: filename character(500) :: trname, units character(4) :: cyear - type(t_mesh), intent(in) , target :: mesh - + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) #include "associate_mesh.h" write(cyear,'(i4)') year @@ -146,9 +147,9 @@ subroutine ini_ocean_io(year, mesh) write(longname,'(A15,i1)') 'passive tracer ', j units='none' END SELECT - call def_variable(oid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tr_arr(:,:,j)); + call def_variable(oid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tracers(j)%values(:,:)); longname=trim(longname)//', Adams–Bashforth' - call def_variable(oid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tr_arr_old(:,:,j)); + call def_variable(oid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers(j)%valuesAB(:,:)); end do call def_variable(oid, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel); call def_variable(oid, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e); @@ -197,7 +198,7 @@ end subroutine ini_ice_io ! !-------------------------------------------------------------------------------------------- ! -subroutine restart(istep, l_write, l_read, mesh) +subroutine restart(istep, l_write, l_read, tracers, mesh) #if defined(__icepack) use icedrv_main, only: init_restart_icepack @@ -212,17 +213,17 @@ subroutine restart(istep, l_write, l_read, mesh) logical :: l_write, l_read logical :: is_restart integer :: mpierr - type(t_mesh), intent(in) , target :: mesh - + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then - call ini_ocean_io(yearnew, mesh) + call ini_ocean_io(yearnew, tracers, mesh) if (use_ice) call ini_ice_io (yearnew, mesh) #if defined(__icepack) if (use_ice) call init_restart_icepack(yearnew, mesh) #endif else - call ini_ocean_io(yearold, mesh) + call ini_ocean_io(yearold, tracers, mesh) if (use_ice) call ini_ice_io (yearold, mesh) #if defined(__icepack) if (use_ice) call init_restart_icepack(yearold, mesh) @@ -388,7 +389,7 @@ subroutine def_variable_1d(id, name, dims, longname, units, data) character(len=*), intent(in) :: name integer, intent(in) :: dims(1) character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: data(:) + real(kind=WP),target, intent(in) :: data(:) integer :: c type(nc_vars), allocatable, dimension(:) :: temp @@ -424,7 +425,7 @@ subroutine def_variable_2d(id, name, dims, longname, units, data) character(len=*), intent(in) :: name integer, intent(in) :: dims(2) character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: data(:,:) + real(kind=WP),target, intent(in) :: data(:,:) integer :: c type(nc_vars), allocatable, dimension(:) :: temp diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index d231cbf61..0cb72704c 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -57,6 +57,17 @@ subroutine update_thickness_ale(mesh) end interface end module +module oce_timestep_ale_interface + interface + subroutine oce_timestep_ale(n, tracers, mesh) + use mod_mesh + use mod_tracer + integer, intent(in) :: n + type(t_tracer), intent(inout), target, allocatable :: tracers(:) + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface +end module ! CONTENT: ! ------------ ! subroutine ale_init @@ -2528,9 +2539,10 @@ end subroutine impl_vert_visc_ale ! ! !=============================================================================== -subroutine oce_timestep_ale(n, mesh) +subroutine oce_timestep_ale(n, tracers, mesh) use g_config use MOD_MESH + use MOD_TRACER use o_ARRAYS use o_PARAM use g_PARSUP @@ -2545,12 +2557,17 @@ subroutine oce_timestep_ale(n, mesh) use g_cvmix_tidal use Toy_Channel_Soufflet use oce_ale_interfaces - + use pressure_bv_interface + use pressure_force_4_linfs_interface + use pressure_force_4_zxxxx_interface + use solve_tracers_ale_interface + use write_step_info_interface + use check_blowup_interface IMPLICIT NONE real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: n, node - type(t_mesh), intent(in) , target :: mesh - + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) #include "associate_mesh.h" t0=MPI_Wtime() @@ -2563,24 +2580,23 @@ subroutine oce_timestep_ale(n, mesh) !___________________________________________________________________________ ! calculate equation of state, density, pressure and mixed layer depths if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call pressure_bv'//achar(27)//'[0m' - call pressure_bv(mesh) !!!!! HeRE change is made. It is linear EoS now. + call pressure_bv(tracers, mesh) !!!!! HeRE change is made. It is linear EoS now. !___________________________________________________________________________ ! calculate calculate pressure gradient force if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call pressure_force_4_...'//achar(27)//'[0m' if (trim(which_ale)=='linfs') then - call pressure_force_4_linfs(mesh) + call pressure_force_4_linfs(tracers, mesh) else - call pressure_force_4_zxxxx(mesh) + call pressure_force_4_zxxxx(tracers, mesh) end if - !___________________________________________________________________________ ! calculate alpha and beta ! it will be used for KPP, Redi, GM etc. Shall we keep it on in general case? - call sw_alpha_beta(tr_arr(:,:,1),tr_arr(:,:,2), mesh) + call sw_alpha_beta(tracers(1)%values, tracers(2)%values, mesh) ! computes the xy gradient of a neutral surface; will be used by Redi, GM etc. - call compute_sigma_xy(tr_arr(:,:,1),tr_arr(:,:,2), mesh) + call compute_sigma_xy(tracers(1)%values,tracers(2)%values, mesh) ! compute both: neutral slope and tapered neutral slope. Can be later combined with compute_sigma_xy ! will be primarily used for computing Redi diffusivities. etc? @@ -2588,7 +2604,6 @@ subroutine oce_timestep_ale(n, mesh) !___________________________________________________________________________ call status_check - !___________________________________________________________________________ ! >>>>>> <<<<<< ! >>>>>> calculate vertical mixing coefficients for tracer (Kv) <<<<<< @@ -2616,7 +2631,7 @@ subroutine oce_timestep_ale(n, mesh) ! use FESOM2.0 tuned k-profile parameterization for vertical mixing if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_KPP'//achar(27)//'[0m' - call oce_mixing_KPP(Av, Kv_double, mesh) + call oce_mixing_KPP(Av, Kv_double, tracers, mesh) Kv=Kv_double(:,:,1) call mo_convect(mesh) @@ -2630,7 +2645,7 @@ subroutine oce_timestep_ale(n, mesh) ! use CVMIX KPP (Large at al. 1994) else if(mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_kpp'//achar(27)//'[0m' - call calc_cvmix_kpp(mesh) + call calc_cvmix_kpp(tracers, mesh) call mo_convect(mesh) ! use CVMIX PP (Pacanowski and Philander 1981) parameterisation for mixing @@ -2651,7 +2666,7 @@ subroutine oce_timestep_ale(n, mesh) call mo_convect(mesh) end if - + !___EXTENSION OF MIXING SCHEMES_____________________________________________ ! add CVMIX TIDAL mixing scheme of Simmons et al. 2004 "Tidally driven mixing ! in a numerical model of the ocean general circulation", ocean modelling to @@ -2733,7 +2748,6 @@ subroutine oce_timestep_ale(n, mesh) ! --> eta_(n) ! call zero_dynamics !DS, zeros several dynamical variables; to be used for testing new implementations! t5=MPI_Wtime() - !___________________________________________________________________________ ! Do horizontal and vertical scaling of GM/Redi diffusivity if (Fer_GM .or. Redi) then @@ -2747,8 +2761,7 @@ subroutine oce_timestep_ale(n, mesh) call fer_solve_Gamma(mesh) call fer_gamma2vel(mesh) end if - t6=MPI_Wtime() - + t6=MPI_Wtime() !___________________________________________________________________________ ! The main step of ALE procedure --> this is were the magic happens --> here ! is decided how change in hbar is distributed over the vertical layers @@ -2759,7 +2772,7 @@ subroutine oce_timestep_ale(n, mesh) !___________________________________________________________________________ ! solve tracer equation if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call solve_tracers_ale'//achar(27)//'[0m' - call solve_tracers_ale(mesh) + call solve_tracers_ale(tracers, mesh) t8=MPI_Wtime() !___________________________________________________________________________ @@ -2767,14 +2780,13 @@ subroutine oce_timestep_ale(n, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call update_thickness_ale'//achar(27)//'[0m' call update_thickness_ale(mesh) t9=MPI_Wtime() - !___________________________________________________________________________ ! write out global fields for debugging - call write_step_info(n,logfile_outfreq, mesh) + call write_step_info(n,logfile_outfreq, tracers, mesh) ! check model for blowup --> ! write_step_info and check_blowup require ! togeather around 2.5% of model runtime - call check_blowup(n, mesh) + call check_blowup(n, tracers, mesh) t10=MPI_Wtime() !___________________________________________________________________________ ! write out execution times for ocean step parts @@ -2805,5 +2817,4 @@ subroutine oce_timestep_ale(n, mesh) write(*,*) write(*,*) end if - end subroutine oce_timestep_ale diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index fd0d3335c..0ec774515 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -8,6 +8,7 @@ MODULE o_mixing_KPP_mod !--------------------------------------------------------------- USE o_PARAM USE MOD_MESH + USE MOD_TRACER USE o_ARRAYS USE g_PARSUP USE g_config @@ -237,7 +238,7 @@ end subroutine oce_mixing_kpp_init ! diffK = diffusion coefficient (m^2/s) ! !--------------------------------------------------------------- - subroutine oce_mixing_KPP(viscAE, diffK, mesh) + subroutine oce_mixing_KPP(viscAE, diffK, tracers, mesh) IMPLICIT NONE @@ -245,7 +246,8 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) ! Define allocatble arrays under oce_modules.F90 ! Allocate arrays under oce_setup_step.F90 ! ******************************************************************* - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) integer :: node, kn, elem, elnodes(3) integer :: nz, ns, j, q, lay, lay_mi, nzmin, nzmax real(KIND=WP) :: smftu, smftv, aux, vol @@ -289,14 +291,7 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) dVsq (nzmin,node) = 0.0_WP dbsfc(nzmin,node) = 0.0_WP -! Surface temperature and salinity - !!PS tsurf = tr_arr(1,node,1) - !!PS ssurf = tr_arr(1,node,2) -!!PS tsurf = tr_arr(nzmin,node,1) -!!PS ssurf = tr_arr(nzmin,node,2) ! Surface velocity - !!PS usurf = Unode(1,1,node) - !!PS vsurf = Unode(2,1,node) usurf = Unode(1,nzmin,node) vsurf = Unode(2,nzmin,node) @@ -327,10 +322,6 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) ! Reason: oce_timestep(n) is called after subroutine oce_mixing_(K)PP ! where compute_sigma_xy -> sw_alpha_beta is called (Fer_GM should be set to true) ! ******************************************************************* -! IF ( .not. Fer_GM ) THEN -! CALL sw_alpha_beta(tr_arr(:,:,1),tr_arr(:,:,2)) -! ENDIF -! ******************************************************************* ! friction velocity, turbulent sfc buoyancy forcing ! ustar = sqrt( sqrt( stress_atmoce_x^2 + stress_atmoce_y^2 ) / rho ) (m/s) ! bo = -g * ( Talpha*heat_flux/vcpw + Sbeta * salinity*water_flux ) (m^2/s^3) @@ -342,10 +333,8 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) !!PS ustar(node) = sqrt( sqrt( stress_node_surf(1,node)**2 + stress_node_surf(2,node)**2 )*density_0_r ) ! @ the surface (eqn. 2) ! Surface buoyancy forcing (eqns. A2c & A2d & A3b & A3d) - !!PS Bo(node) = -g * ( sw_alpha(1,node) * heat_flux(node) / vcpw & !heat_flux & water_flux: positive up - !!PS + sw_beta (1,node) * water_flux(node) * tr_arr(1,node,2)) Bo(node) = -g * ( sw_alpha(nzmin,node) * heat_flux(node) / vcpw & !heat_flux & water_flux: positive up - + sw_beta (nzmin,node) * water_flux(node) * tr_arr(nzmin,node,2)) + + sw_beta (nzmin,node) * water_flux(node) * tracers(2)%values(nzmin,node)) END DO ! compute interior mixing coefficients everywhere, due to constant @@ -354,7 +343,7 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) CALL ri_iwmix(viscA, diffK, mesh) ! add double diffusion IF (double_diffusion) then - CALL ddmix(diffK, mesh) + CALL ddmix(diffK, tracers, mesh) END IF ! boundary layer mixing coefficients: diagnose new b.l. depth @@ -854,10 +843,11 @@ end subroutine ri_iwmix ! ! output: update diffu ! - subroutine ddmix(diffK, mesh) + subroutine ddmix(diffK, tracers, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) real(KIND=WP), parameter :: Rrho0 = 1.9_WP ! limit for double diffusive density ratio real(KIND=WP), parameter :: dsfmax = 1.e-4_WP ! (m^2/s) max diffusivity in case of salt fingering real(KIND=WP), parameter :: viscosity_molecular = 1.5e-6_WP ! (m^2/s) @@ -877,8 +867,8 @@ subroutine ddmix(diffK, mesh) DO nz=nzmin+1,nzmax-1 ! alphaDT and betaDS @Z - alphaDT = sw_alpha(nz-1,node) * tr_arr(nz-1,node,1) - betaDS = sw_beta (nz-1,node) * tr_arr(nz-1,node,2) + alphaDT = sw_alpha(nz-1,node) * tracers(1)%values(nz-1,node) + betaDS = sw_beta (nz-1,node) * tracers(2)%values(nz-1,node) IF (alphaDT > betaDS .and. betaDS > 0.0_WP) THEN diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 0feb5cd0a..8297799c8 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -30,9 +30,11 @@ subroutine pressure_force_4_linfs_fullcell(mesh) end module module pressure_force_4_linfs_nemo_interface interface - subroutine pressure_force_4_linfs_nemo(mesh) + subroutine pressure_force_4_linfs_nemo(tracers, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_tracer + type(t_tracer), intent(in), target :: tracers(:) + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module @@ -46,9 +48,11 @@ subroutine pressure_force_4_linfs_shchepetkin(mesh) end module module pressure_force_4_linfs_easypgf_interface interface - subroutine pressure_force_4_linfs_easypgf(mesh) + subroutine pressure_force_4_linfs_easypgf(tracers, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_tracer + type(t_tracer), intent(in), target :: tracers(:) + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module @@ -78,9 +82,11 @@ subroutine pressure_force_4_zxxxx_shchepetkin(mesh) end module module pressure_force_4_zxxxx_easypgf_interface interface - subroutine pressure_force_4_zxxxx_easypgf(mesh) + subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_tracer + type(t_tracer), intent(in), target :: tracers(:) + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module @@ -100,16 +106,57 @@ subroutine init_ref_density(mesh) end subroutine end interface end module +module insitu2pot_interface + interface + subroutine insitu2pot(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) + end subroutine + end interface +end module +module pressure_bv_interface + interface + subroutine pressure_bv(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + end subroutine + end interface +end module +module pressure_force_4_linfs_interface + interface + subroutine pressure_force_4_linfs(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_tracer), intent(in), target :: tracers(:) + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface +end module +module pressure_force_4_zxxxx_interface + interface + subroutine pressure_force_4_zxxxx(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_tracer), intent(in), target :: tracers(:) + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface +end module ! ! !=============================================================================== -subroutine pressure_bv(mesh) +subroutine pressure_bv(tracers, mesh) ! fill in the hydrostatic pressure and the Brunt-Vaisala frequency ! in a single pass the using split form of the equation of state ! as proposed by NR use g_config USE o_PARAM USE MOD_MESH + USE MOD_TRACER USE o_ARRAYS USE g_PARSUP use i_arrays @@ -118,14 +165,19 @@ subroutine pressure_bv(mesh) use densityJM_components_interface use density_linear_interface IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) real(kind=WP) :: dz_inv, bv, a, rho_up, rho_dn, t, s integer :: node, nz, nl1, nzmax, nzmin real(kind=WP) :: rhopot(mesh%nl), bulk_0(mesh%nl), bulk_pz(mesh%nl), bulk_pz2(mesh%nl), rho(mesh%nl), dbsfc1(mesh%nl), db_max real(kind=WP) :: bulk_up, bulk_dn, smallvalue, buoyancy_crit, rho_surf, aux_rho, aux_rho1 real(kind=WP) :: sigma_theta_crit=0.125_WP !kg/m3, Levitus threshold for computing MLD2 logical :: flag1, flag2, mixing_kpp + + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" + temp=>tracers(1)%values(:,:) + salt=>tracers(2)%values(:,:) smallvalue=1.0e-20 buoyancy_crit=0.0003_WP mixing_kpp = (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) ! NR Evaluate string comparison outside the loop. It is expensive. @@ -138,7 +190,7 @@ subroutine pressure_bv(mesh) nzmax = nlevels_nod2D(node) !!PS do nz=1,nlevels_nod2d(node)-1 do nz=nzmin,nzmax-1 - a=min(a,tr_arr(nz,node,2)) + a=min(a,salt(nz,node)) enddo enddo @@ -151,7 +203,7 @@ subroutine pressure_bv(mesh) nzmax = nlevels_nod2D(node) !!PS do nz=1, nlevels_nod2d(node)-1 do nz=nzmin, nzmax-1 - if (tr_arr(nz, node, 2) < 0) write (*,*) 'the model blows up at n=', mylist_nod2D(node), ' ; ', 'nz=', nz + if (salt(nz, node) < 0) write (*,*) 'the model blows up at n=', mylist_nod2D(node), ' ; ', 'nz=', nz end do end do endif @@ -175,8 +227,8 @@ subroutine pressure_bv(mesh) !_______________________________________________________________________ ! apply equation of state do nz=nzmin, nzmax-1 - t=tr_arr(nz, node,1) - s=tr_arr(nz, node,2) + t=temp(nz, node) + s=salt(nz, node) select case(state_equation) case(0) call density_linear(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), mesh) @@ -238,8 +290,8 @@ subroutine pressure_bv(mesh) ! like at the cavity-ocean interface --> compute water mass density that ! is replaced by the cavity if (nzmin>1) then - t=tr_arr(nzmin, node,1) - s=tr_arr(nzmin, node,2) + t=temp(nzmin, node) + s=salt(nzmin, node) do nz=1, nzmin-1 select case(state_equation) case(0) @@ -368,10 +420,11 @@ end subroutine pressure_bv ! !=============================================================================== ! Calculate pressure gradient force (PGF) for linear free surface case -subroutine pressure_force_4_linfs(mesh) +subroutine pressure_force_4_linfs(tracers, mesh) use g_config use g_PARSUP use mod_mesh + use mod_tracer use pressure_force_4_linfs_fullcell_interface use pressure_force_4_linfs_nemo_interface use pressure_force_4_linfs_shchepetkin_interface @@ -379,7 +432,11 @@ subroutine pressure_force_4_linfs(mesh) use pressure_force_4_linfs_cavity_interface use pressure_force_4_linfs_easypgf_interface implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + real(kind=WP), dimension(:,:), pointer :: temp, salt + temp=>tracers(1)%values(:,:) + salt=>tracers(2)%values(:,:) !___________________________________________________________________________ ! calculate pressure gradient force (PGF) for linfs with full cells if ( .not. use_partial_cell .and. .not. use_cavity_partial_cell) then @@ -391,7 +448,7 @@ subroutine pressure_force_4_linfs(mesh) elseif (trim(which_pgf)=='shchepetkin') then call pressure_force_4_linfs_shchepetkin(mesh) elseif (trim(which_pgf)=='easypgf') then - call pressure_force_4_linfs_easypgf(mesh) + call pressure_force_4_linfs_easypgf(tracers, mesh) else write(*,*) '________________________________________________________' write(*,*) ' --> ERROR: the choosen form of pressure gradient ' @@ -406,13 +463,13 @@ subroutine pressure_force_4_linfs(mesh) ! calculate pressure gradient force (PGF) for linfs with partiall cells else ! --> (trim(which_ale)=='linfs' .and. use_partial_cell ) if (trim(which_pgf)=='nemo') then - call pressure_force_4_linfs_nemo(mesh) + call pressure_force_4_linfs_nemo(tracers, mesh) elseif (trim(which_pgf)=='shchepetkin') then call pressure_force_4_linfs_shchepetkin(mesh) elseif (trim(which_pgf)=='cubicspline') then call pressure_force_4_linfs_cubicspline(mesh) elseif (trim(which_pgf)=='easypgf') then - call pressure_force_4_linfs_easypgf(mesh) + call pressure_force_4_linfs_easypgf(tracers, mesh) else write(*,*) '________________________________________________________' write(*,*) ' --> ERROR: the choosen form of pressure gradient ' @@ -476,9 +533,10 @@ end subroutine pressure_force_4_linfs_fullcell ! Calculate pressure gradient force (PGF) like in NEMO based on NEMO ocean engine ! Gurvan Madec, and the NEMO team gurvan.madec@locean-ipsl.umpc.fr, nemo st@locean-ipsl.umpc.fr ! November 2015, – version 3.6 stable – -subroutine pressure_force_4_linfs_nemo(mesh) +subroutine pressure_force_4_linfs_nemo(tracers, mesh) use o_PARAM use MOD_MESH + use MOD_TRACER use o_ARRAYS use g_PARSUP use g_config @@ -492,8 +550,12 @@ subroutine pressure_force_4_linfs_nemo(mesh) real(kind=WP) :: interp_n_dens(3), interp_n_temp, interp_n_salt, & dZn, dZn_i, dh, dval, mean_e_rho,dZn_rho_grad(2) real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" + temp=>tracers(1)%values(:,:) + salt=>tracers(2)%values(:,:) !___________________________________________________________________________ ! loop over triangular elemments do elem=1, myDim_elem2D @@ -589,10 +651,10 @@ subroutine pressure_force_4_linfs_nemo(mesh) !! state ... !else ! ... interpolate temperature and saltinity ... - dval = tr_arr(nlc, elnodes(ni),1) - tr_arr(nlc-1,elnodes(ni),1) - interp_n_temp = tr_arr(nlc-1,elnodes(ni),1) + (dval/dZn*dZn_i) - dval = tr_arr(nlc ,elnodes(ni),2) - tr_arr(nlc-1,elnodes(ni),2) - interp_n_salt = tr_arr(nlc-1,elnodes(ni),2) + (dval/dZn*dZn_i) + dval = temp(nlc, elnodes(ni)) - temp(nlc-1,elnodes(ni)) + interp_n_temp = temp(nlc-1,elnodes(ni)) + (dval/dZn*dZn_i) + dval = salt(nlc ,elnodes(ni)) - salt(nlc-1,elnodes(ni)) + interp_n_salt = salt(nlc-1,elnodes(ni)) + (dval/dZn*dZn_i) ! calculate density at element mid-depth bottom depth via ! equation of state from linear interpolated temperature and @@ -895,9 +957,10 @@ end subroutine pressure_force_4_linfs_shchepetkin !=============================================================================== ! Calculate pressure gradient force (PGF) ! First coded by P. Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_linfs_easypgf(mesh) +subroutine pressure_force_4_linfs_easypgf(tracers, mesh) use o_PARAM use MOD_MESH + use MOD_TRACER use o_ARRAYS use g_PARSUP use g_config @@ -914,8 +977,13 @@ subroutine pressure_force_4_linfs_easypgf(mesh) real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" + temp=>tracers(1)%values(:,:) + salt=>tracers(2)%values(:,:) + !___________________________________________________________________________ ! loop over triangular elemments do elem=1, myDim_elem2D @@ -1014,13 +1082,13 @@ subroutine pressure_force_4_linfs_easypgf(mesh) dx21(ni) = Z_3d_n(nlz+2,elnodes(ni))-Z_3d_n(nlz+1,elnodes(ni)) dx20(ni) = Z_3d_n(nlz+2,elnodes(ni))-Z_3d_n(nlz ,elnodes(ni)) - t0(ni) = tr_arr(nlz ,elnodes(ni),1) - dt10(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+2,elnodes(ni),1)-tr_arr(nlz+1,elnodes(ni),1) + t0(ni) = temp(nlz ,elnodes(ni)) + dt10(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) + dt21(ni) = temp(nlz+2,elnodes(ni))-temp(nlz+1,elnodes(ni)) - s0(ni) = tr_arr(nlz ,elnodes(ni),2) - ds10(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+2,elnodes(ni),2)-tr_arr(nlz+1,elnodes(ni),2) + s0(ni) = salt(nlz ,elnodes(ni)) + ds10(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) + ds21(ni) = salt(nlz+2,elnodes(ni))-salt(nlz+1,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -1053,13 +1121,13 @@ subroutine pressure_force_4_linfs_easypgf(mesh) dx21(ni) = Z_3d_n(nlz+1,elnodes(ni))-Z_3d_n(nlz ,elnodes(ni)) dx20(ni) = Z_3d_n(nlz+1,elnodes(ni))-Z_3d_n(nlz-1,elnodes(ni)) - t0(ni) = tr_arr(nlz-1,elnodes(ni),1) - dt10(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) + t0(ni) = temp(nlz-1,elnodes(ni)) + dt10(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) + dt21(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) - s0(ni) = tr_arr(nlz-1,elnodes(ni),2) - ds10(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) + s0(ni) = salt(nlz-1,elnodes(ni)) + ds10(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) + ds21(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -1156,14 +1224,14 @@ subroutine pressure_force_4_linfs_easypgf(mesh) dx21(ni) = Z_3d_n(nlz ,elnodes(ni))-Z_3d_n(nlz-1,elnodes(ni)) dx20(ni) = Z_3d_n(nlz ,elnodes(ni))-Z_3d_n(nlz-2,elnodes(ni)) - t0(ni) = tr_arr(nlz-2,elnodes(ni),1) - dt10(ni) = tr_arr(nlz-1,elnodes(ni),1)-tr_arr(nlz-2,elnodes(ni),1) - dt21(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) + t0(ni) = temp(nlz-2,elnodes(ni)) + dt10(ni) = temp(nlz-1,elnodes(ni))-temp(nlz-2,elnodes(ni)) + dt21(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) - s0(ni) = tr_arr(nlz-2,elnodes(ni),2) - ds10(ni) = tr_arr(nlz-1,elnodes(ni),2)-tr_arr(nlz-2,elnodes(ni),2) - ds21(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) - !___________________________________________________________________ + s0(ni) = salt(nlz-2,elnodes(ni)) + ds10(ni) = salt(nlz-1,elnodes(ni))-salt(nlz-2,elnodes(ni)) + ds21(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) + !_________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & + dt10(ni)/dx10(ni)*(Z_n(nlz)-Z_3d_n(nlz-2,elnodes(ni))) & @@ -1195,13 +1263,13 @@ subroutine pressure_force_4_linfs_easypgf(mesh) dx21(ni) = Z_3d_n(nlz+1,elnodes(ni))-Z_3d_n(nlz ,elnodes(ni)) dx20(ni) = Z_3d_n(nlz+1,elnodes(ni))-Z_3d_n(nlz-1,elnodes(ni)) - t0(ni) = tr_arr(nlz-1,elnodes(ni),1) - dt10(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) + t0(ni) = temp(nlz-1,elnodes(ni)) + dt10(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) + dt21(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) - s0(ni) = tr_arr(nlz-1,elnodes(ni),2) - ds10(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) + s0(ni) = salt(nlz-1,elnodes(ni)) + ds10(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) + ds21(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -1658,22 +1726,25 @@ end subroutine pressure_force_4_linfs_cavity ! !=============================================================================== ! Calculate pressure gradient force (PGF) for full free surface case zlevel and zstar -subroutine pressure_force_4_zxxxx(mesh) +subroutine pressure_force_4_zxxxx(tracers, mesh) use g_PARSUP use g_config use mod_mesh + use mod_tracer use pressure_force_4_zxxxx_shchepetkin_interface use pressure_force_4_zxxxx_cubicspline_interface use pressure_force_4_zxxxx_easypgf_interface implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + !___________________________________________________________________________ if (trim(which_pgf)=='shchepetkin') then call pressure_force_4_zxxxx_shchepetkin(mesh) elseif (trim(which_pgf)=='cubicspline') then call pressure_force_4_zxxxx_cubicspline(mesh) elseif (trim(which_pgf)=='easypgf' ) then - call pressure_force_4_zxxxx_easypgf(mesh) + call pressure_force_4_zxxxx_easypgf(tracers, mesh) else write(*,*) '________________________________________________________' write(*,*) ' --> ERROR: the choosen form of pressure gradient ' @@ -2113,9 +2184,10 @@ end subroutine pressure_force_4_zxxxx_shchepetkin ! --> based on density jacobian method ... ! calculate PGF for linfs with partiell cell on/off ! First coded by P. Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_zxxxx_easypgf(mesh) +subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) use o_PARAM use MOD_MESH + use MOD_TRACER use o_ARRAYS use g_PARSUP use g_config @@ -2132,8 +2204,12 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" + temp=>tracers(1)%values(:,:) + salt=>tracers(2)%values(:,:) !___________________________________________________________________________ ! loop over triangular elemments do elem=1, myDim_elem2D @@ -2221,13 +2297,13 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) !!PS df10(ni) = density_m_rho0(nlz+1,elnodes(ni))-density_m_rho0(nlz ,elnodes(ni)) !!PS df21(ni) = density_m_rho0(nlz+2,elnodes(ni))-density_m_rho0(nlz+1,elnodes(ni)) - t0(ni) = tr_arr(nlz ,elnodes(ni),1) - dt10(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+2,elnodes(ni),1)-tr_arr(nlz+1,elnodes(ni),1) + t0(ni) = temp(nlz ,elnodes(ni)) + dt10(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) + dt21(ni) = temp(nlz+2,elnodes(ni))-temp(nlz+1,elnodes(ni)) - s0(ni) = tr_arr(nlz ,elnodes(ni),2) - ds10(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+2,elnodes(ni),2)-tr_arr(nlz+1,elnodes(ni),2) + s0(ni) = salt(nlz ,elnodes(ni)) + ds10(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) + ds21(ni) = salt(nlz+2,elnodes(ni))-salt(nlz+1,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -2270,13 +2346,13 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) !!PS df10(ni) = density_m_rho0(nlz ,elnodes(ni))-density_m_rho0(nlz-1,elnodes(ni)) !!PS df21(ni) = density_m_rho0(nlz+1,elnodes(ni))-density_m_rho0(nlz ,elnodes(ni)) - t0(ni) = tr_arr(nlz-1,elnodes(ni),1) - dt10(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) + t0(ni) = temp(nlz-1,elnodes(ni)) + dt10(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) + dt21(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) - s0(ni) = tr_arr(nlz-1,elnodes(ni),2) - ds10(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) + s0(ni) = salt(nlz-1,elnodes(ni)) + ds10(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) + ds21(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -2348,13 +2424,13 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) !!PS df10 = density_m_rho0(nlz ,elnodes)-density_m_rho0(nlz-1,elnodes) !!PS df21 = density_m_rho0(nlz+1,elnodes)-density_m_rho0(nlz ,elnodes) - t0 = tr_arr(nlz-1,elnodes,1) - dt10 = tr_arr(nlz ,elnodes,1)-tr_arr(nlz-1,elnodes,1) - dt21 = tr_arr(nlz+1,elnodes,1)-tr_arr(nlz ,elnodes,1) + t0 = temp(nlz-1,elnodes) + dt10 = temp(nlz ,elnodes)-temp(nlz-1,elnodes) + dt21 = temp(nlz+1,elnodes)-temp(nlz ,elnodes) - s0 = tr_arr(nlz-1,elnodes,2) - ds10 = tr_arr(nlz ,elnodes,2)-tr_arr(nlz-1,elnodes,2) - ds21 = tr_arr(nlz+1,elnodes,2)-tr_arr(nlz ,elnodes,2) + s0 = salt(nlz-1,elnodes) + ds10 = salt(nlz ,elnodes)-salt(nlz-1,elnodes) + ds21 = salt(nlz+1,elnodes)-salt(nlz ,elnodes) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn = t0 & @@ -2435,13 +2511,13 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) !!PS df10(ni) = density_m_rho0(nlz-1,elnodes(ni))-density_m_rho0(nlz-2,elnodes(ni)) !!PS df21(ni) = density_m_rho0(nlz ,elnodes(ni))-density_m_rho0(nlz-1,elnodes(ni)) - t0(ni) = tr_arr(nlz-2,elnodes(ni),1) - dt10(ni) = tr_arr(nlz-1,elnodes(ni),1)-tr_arr(nlz-2,elnodes(ni),1) - dt21(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) + t0(ni) = temp(nlz-2,elnodes(ni)) + dt10(ni) = temp(nlz-1,elnodes(ni))-temp(nlz-2,elnodes(ni)) + dt21(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) - s0(ni) = tr_arr(nlz-2,elnodes(ni),2) - ds10(ni) = tr_arr(nlz-1,elnodes(ni),2)-tr_arr(nlz-2,elnodes(ni),2) - ds21(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) + s0(ni) = salt(nlz-2,elnodes(ni)) + ds10(ni) = salt(nlz-1,elnodes(ni))-salt(nlz-2,elnodes(ni)) + ds21(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -2484,13 +2560,13 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) !!PS df10 = density_m_rho0(nlz ,elnodes)-density_m_rho0(nlz-1,elnodes) !!PS df21 = density_m_rho0(nlz+1,elnodes)-density_m_rho0(nlz ,elnodes) - t0(ni) = tr_arr(nlz-1,elnodes(ni),1) - dt10(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) + t0(ni) = temp(nlz-1,elnodes(ni)) + dt10(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) + dt21(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) - s0(ni) = tr_arr(nlz-1,elnodes(ni),2) - ds10(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) + s0(ni) = salt(nlz-1,elnodes(ni)) + ds10(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) + ds21(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -2949,9 +3025,10 @@ end subroutine compute_neutral_slope ! !=============================================================================== !converts insitu temperature to a potential one -! tr_arr(:,:,1) will be modified! -subroutine insitu2pot(mesh) +! tracers(1)%values will be modified! +subroutine insitu2pot(tracers, mesh) use mod_mesh + use mod_tracer use o_param use o_arrays use g_config @@ -2960,10 +3037,13 @@ subroutine insitu2pot(mesh) real(kind=WP), external :: ptheta real(kind=WP) :: pp, pr, tt, ss integer :: n, nz, nzmin,nzmax - type(t_mesh), intent(in) , target :: mesh - + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) + real(kind=WP), dimension(:,:), pointer :: temp, salt + #include "associate_mesh.h" - + temp=>tracers(1)%values(:,:) + salt=>tracers(2)%values(:,:) ! Convert in situ temperature into potential temperature pr=0.0_WP do n=1,myDim_nod2d+eDim_nod2D @@ -2971,8 +3051,8 @@ subroutine insitu2pot(mesh) nzmax = nlevels_nod2D(n) !!PS do nz=1, nlevels_nod2D(n)-1 do nz=nzmin, nzmax-1 - tt=tr_arr(nz,n,1) - ss=tr_arr(nz,n,2) + tt=temp(nz,n) + ss=salt(nz,n) !!PS ___________________________________________________________________ !!PS using here Z_3d_n at the beginning makes the model very instable after @@ -2981,7 +3061,7 @@ subroutine insitu2pot(mesh) !!PS anyway do a spinup and it its only used at initialisation time !!PS pp=abs(Z_3d_n(nz,n)) pp=abs(Z(nz)) - tr_arr(nz,n,1)=ptheta(ss, tt, pp, pr) + temp(nz,n)=ptheta(ss, tt, pp, pr) end do end do end subroutine insitu2pot diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 0df3b0eb6..b075f2f94 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -2,108 +2,104 @@ module diff_part_hor_redi_interface interface subroutine diff_part_hor_redi(mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module -module adv_tracers_muscle_ale_interface - interface - subroutine adv_tracers_muscle_ale(ttfAB, num_ord, do_Xmoment, mesh) - use MOD_MESH - use g_PARSUP - type(t_mesh), intent(in) , target :: mesh - integer :: do_Xmoment - real(kind=WP) :: ttfAB(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP) :: num_ord - end subroutine - end interface -end module -module adv_tracers_vert_ppm_ale_interface - interface - subroutine adv_tracers_vert_ppm_ale(ttf, do_Xmoment, mesh) - use MOD_MESH - use g_PARSUP - type(t_mesh), intent(in) , target :: mesh - integer :: do_Xmoment - real(kind=WP) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) + use mod_tracer + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module adv_tracers_ale_interface interface - subroutine adv_tracers_ale(tr_num, mesh) + subroutine adv_tracers_ale(tracer, mesh) use mod_mesh - integer :: tr_num - type(t_mesh), intent(in) , target :: mesh + use mod_tracer + type(t_tracer), intent(inout), target :: tracer + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module diff_ver_part_expl_ale_interface interface - subroutine diff_ver_part_expl_ale(tr_num, mesh) - use MOD_MESH - type(t_mesh), intent(in) , target :: mesh - integer :: tr_num + subroutine diff_ver_part_expl_ale(tracer, mesh) + use mod_mesh + use mod_tracer + type(t_tracer), intent(inout), target :: tracer + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module diff_ver_part_redi_expl_interface interface subroutine diff_ver_part_redi_expl(mesh) - use MOD_MESH - type(t_mesh), intent(in) , target :: mesh + use mod_mesh + use mod_tracer + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module diff_ver_part_impl_ale_interface interface - subroutine diff_ver_part_impl_ale(tr_num, mesh) - use MOD_MESH - type(t_mesh), intent(in) , target :: mesh - integer :: tr_num + subroutine diff_ver_part_impl_ale(tracer, mesh) + use mod_mesh + use mod_tracer + type(t_tracer), intent(inout), target :: tracer + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module diff_tracers_ale_interface interface - subroutine diff_tracers_ale(tr_num, mesh) + subroutine diff_tracers_ale(tracer, mesh) use mod_mesh - integer, intent(in) :: tr_num - type(t_mesh), intent(in) , target :: mesh + use mod_tracer + type(t_tracer), intent(inout), target :: tracer + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module bc_surface_interface interface - function bc_surface(n, id, mesh) + function bc_surface(n, id, sval, mesh) use mod_mesh - integer , intent(in) :: n, id - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: bc_surface + integer , intent(in) :: n, id + type(t_mesh), intent(in) , target :: mesh + real(kind=WP) :: bc_surface + real(kind=WP), intent(in) :: sval end function end interface end module module diff_part_bh_interface interface - subroutine diff_part_bh(ttf, mesh) - use MOD_MESH + subroutine diff_part_bh(tracer, mesh) use g_PARSUP - type(t_mesh) , intent(in), target :: mesh - real(kind=WP), intent(inout), target :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) + use mod_mesh + use mod_tracer + type(t_tracer), intent(inout), target :: tracer + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface +end module +module solve_tracers_ale_interface + interface + subroutine solve_tracers_ale(tracers, mesh) + use g_PARSUP + use mod_mesh + use mod_tracer + type(t_tracer), intent(inout), target :: tracers(:) + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module - ! ! !=============================================================================== ! Driving routine Here with ALE changes!!! -subroutine solve_tracers_ale(mesh) +subroutine solve_tracers_ale(tracers, mesh) use g_config use g_parsup use o_PARAM, only: num_tracers, SPP, Fer_GM use o_arrays use mod_mesh + use mod_tracer use g_comm_auto use o_tracers use Toy_Channel_Soufflet @@ -111,15 +107,14 @@ subroutine solve_tracers_ale(mesh) use diff_tracers_ale_interface implicit none - type(t_mesh), intent(in) , target :: mesh - integer :: tr_num, node, nzmax, nzmin - real(kind=WP) :: aux_tr(mesh%nl-1,myDim_nod2D+eDim_nod2D) + type(t_tracer), intent(inout), target :: tracers(:) + type(t_mesh), intent(in), target :: mesh + integer :: tr_num, node, nzmax, nzmin #include "associate_mesh.h" !___________________________________________________________________________ if (SPP) call cal_rejected_salt(mesh) - if (SPP) call app_rejected_salt(mesh) - + if (SPP) call app_rejected_salt(tracers(2)%values, mesh) !___________________________________________________________________________ ! update 3D velocities with the bolus velocities: ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 @@ -131,33 +126,33 @@ subroutine solve_tracers_ale(mesh) end if !___________________________________________________________________________ ! loop over all tracers - do tr_num=1,num_tracers + do tr_num=1, num_tracers ! do tracer AB (Adams-Bashfort) interpolation only for advectiv part ! needed if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call init_tracers_AB'//achar(27)//'[0m' - call init_tracers_AB(tr_num, mesh) + call init_tracers_AB(tracers(tr_num), mesh) ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' - call adv_tracers_ale(tr_num, mesh) + call adv_tracers_ale(tracers(tr_num), mesh) ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' - call diff_tracers_ale(tr_num, mesh) + call diff_tracers_ale(tracers(tr_num), mesh) ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) then - call relax_zonal_temp(mesh) + call relax_zonal_temp(tracers(tr_num), mesh) else - call relax_to_clim(tr_num, mesh) + call relax_to_clim(tracers(tr_num), mesh) end if - call exchange_nod(tr_arr(:,:,tr_num)) + call exchange_nod(tracers(tr_num)%values(:,:)) end do - + !___________________________________________________________________________ - do tr_num=1, ptracers_restore_total - tr_arr(:,ptracers_restore(tr_num)%ind2,ptracers_restore(tr_num)%locid)=1.0_WP + do tr_num=1, ptracers_restore_total + tracers(ptracers_restore(tr_num)%locid)%values(:,ptracers_restore(tr_num)%ind2)=1.0_WP end do !___________________________________________________________________________ @@ -166,8 +161,7 @@ subroutine solve_tracers_ale(mesh) UV =UV -fer_UV Wvel_e=Wvel_e-fer_Wvel Wvel =Wvel -fer_Wvel - end if - + end if !___________________________________________________________________________ ! to avoid crash with high salinities when coupled to atmosphere ! --> if we do only where (tr_arr(:,:,2) < 3._WP ) we also fill up the bottom @@ -176,43 +170,33 @@ subroutine solve_tracers_ale(mesh) do node=1,myDim_nod2D+eDim_nod2D nzmax=nlevels_nod2D(node)-1 nzmin=ulevels_nod2D(node) - !!PS where (tr_arr(1:nzmax,node,2) > 45._WP) - !!PS tr_arr(1:nzmax,node,2)=45._WP - !!PS end where - where (tr_arr(nzmin:nzmax,node,2) > 45._WP) - tr_arr(nzmin:nzmax,node,2)=45._WP + where (tracers(2)%values(nzmin:nzmax,node) > 45._WP) + tracers(2)%values(nzmin:nzmax,node)=45._WP end where - !!PS where (tr_arr(1:nzmax,node,2) < 3._WP ) - !!PS tr_arr(1:nzmax,node,2)=3._WP - !!PS end where - where (tr_arr(nzmin:nzmax,node,2) < 3._WP ) - tr_arr(nzmin:nzmax,node,2)=3._WP - end where - -!!PS if (nzmin>15 .and. mype==0) then -!!PS write(*,*) ' tr_arr(:,node,1) = ',tr_arr(:,node,1) -!!PS write(*,*) -!!PS write(*,*) ' tr_arr(:,node,2) = ',tr_arr(:,node,2) -!!PS end if + where (tracers(2)%values(nzmin:nzmax,node) < 3._WP ) + tracers(2)%values(nzmin:nzmax,node) = 3._WP + end where end do end subroutine solve_tracers_ale ! ! !=============================================================================== -subroutine adv_tracers_ale(tr_num, mesh) +subroutine adv_tracers_ale(tracer, mesh) use g_config, only: flag_debug use g_parsup use mod_mesh + use mod_tracer use o_arrays use diagnostics, only: ldiag_DVD, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd - use adv_tracers_muscle_ale_interface - use adv_tracers_vert_ppm_ale_interface +! use adv_tracers_muscle_ale_interface +! use adv_tracers_vert_ppm_ale_interface use oce_adv_tra_driver_interfaces implicit none integer :: tr_num, node, nz - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracer ! del_ttf ... initialised and setted to zero in call init_tracers_AB(tr_num) ! --> del_ttf ... equivalent to R_T^n in Danilov etal FESOM2: "from finite element ! to finite volume". At the end R_T^n should contain all advection therms and @@ -224,8 +208,7 @@ subroutine adv_tracers_ale(tr_num, mesh) ! of discret variance decay if (ldiag_DVD .and. tr_num <= 2) then if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd_2ndmoment'//achar(27)//'[0m' - call compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, mesh) - !!PS call compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num) + call compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tracer,mesh) end if !___________________________________________________________________________ @@ -233,7 +216,7 @@ subroutine adv_tracers_ale(tr_num, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... del_ttf_advhoriz = 0.0_WP del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(tr_arr(:,:,tr_num), tr_arr_old(:,:,tr_num), UV, wvel, wvel_i, wvel_e, 1, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) + call do_oce_adv_tra(tracer%values, tracer%valuesAB, UV, wvel, wvel_i, wvel_e, 1, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -243,15 +226,16 @@ subroutine adv_tracers_ale(tr_num, mesh) ! compute discrete variance decay after Burchard and Rennau 2008 if (ldiag_DVD .and. tr_num <= 2) then if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd'//achar(27)//'[0m' - call compute_diag_dvd(tr_num, mesh) + call compute_diag_dvd(tracer, mesh) end if end subroutine adv_tracers_ale ! ! !=============================================================================== -subroutine diff_tracers_ale(tr_num, mesh) +subroutine diff_tracers_ale(tracer, mesh) use mod_mesh + use mod_tracer use g_PARSUP use o_arrays use o_tracers @@ -262,27 +246,25 @@ subroutine diff_tracers_ale(tr_num, mesh) use diff_part_bh_interface implicit none - integer, intent(in) :: tr_num integer :: n, nzmax, nzmin - type(t_mesh), intent(in) , target :: mesh + type(t_tracer), intent(inout), target :: tracer + type(t_mesh), intent(in), target :: mesh #include "associate_mesh.h" !___________________________________________________________________________ ! convert tr_arr_old(:,:,tr_num)=ttr_n-0.5 --> prepare to calc ttr_n+0.5 ! eliminate AB (adams bashfort) interpolates tracer, which is only needed for ! tracer advection. For diffusion only need tracer from previouse time step - tr_arr_old(:,:,tr_num)=tr_arr(:,:,tr_num) !DS: check that this is the right place! - + tracer%valuesAB(:,:)=tracer%values(:,:) !DS: check that this is the right place! !___________________________________________________________________________ ! do horizontal diffusiion ! write there also horizontal diffusion rhs to del_ttf which is equal the R_T^n ! in danilovs srcipt ! includes Redi diffusivity if Redi=.true. call diff_part_hor_redi(mesh) ! seems to be ~9% faster than diff_part_hor - !___________________________________________________________________________ ! do vertical diffusion: explicite - if (.not. i_vert_diff) call diff_ver_part_expl_ale(tr_num, mesh) + if (.not. i_vert_diff) call diff_ver_part_expl_ale(tracer, mesh) ! A projection of horizontal Redi diffussivity onto vertical. This par contains horizontal ! derivatives and has to be computed explicitly! if (Redi) call diff_ver_part_redi_expl(mesh) @@ -299,20 +281,19 @@ subroutine diff_tracers_ale(tr_num, mesh) !!PS tr_arr(1:nzmax,n,tr_num)=tr_arr(1:nzmax,n,tr_num)+ & !!PS del_ttf(1:nzmax,n)/hnode_new(1:nzmax,n) - del_ttf(nzmin:nzmax,n)=del_ttf(nzmin:nzmax,n)+tr_arr(nzmin:nzmax,n,tr_num)* & + del_ttf(nzmin:nzmax,n)=del_ttf(nzmin:nzmax,n)+tracer%values(nzmin:nzmax,n)* & (hnode(nzmin:nzmax,n)-hnode_new(nzmin:nzmax,n)) - tr_arr(nzmin:nzmax,n,tr_num)=tr_arr(nzmin:nzmax,n,tr_num)+ & + tracer%values(nzmin:nzmax,n)=tracer%values(nzmin:nzmax,n)+ & del_ttf(nzmin:nzmax,n)/hnode_new(nzmin:nzmax,n) ! WHY NOT ??? --> whats advantage of above --> tested it --> the upper ! equation has a 30% smaller nummerical drift !tr_arr(1:nzmax,n,tr_num)=(hnode(1:nzmax,n)*tr_arr(1:nzmax,n,tr_num)+ & ! del_ttf(1:nzmax,n))/hnode_new(1:nzmax,n) end do - !___________________________________________________________________________ if (i_vert_diff) then ! do vertical diffusion: implicite - call diff_ver_part_impl_ale(tr_num, mesh) + call diff_ver_part_impl_ale(tracer, mesh) end if @@ -320,22 +301,24 @@ subroutine diff_tracers_ale(tr_num, mesh) !init_tracers will set it to zero for the next timestep !init_tracers will set it to zero for the next timestep if (smooth_bh_tra) then - call diff_part_bh(tr_arr(:,:,tr_num), mesh) ! alpply biharmonic diffusion (implemented as filter) + call diff_part_bh(tracer, mesh) ! alpply biharmonic diffusion (implemented as filter) end if end subroutine diff_tracers_ale ! ! !=============================================================================== !Vertical diffusive flux(explicit scheme): -subroutine diff_ver_part_expl_ale(tr_num, mesh) +subroutine diff_ver_part_expl_ale(tracer, mesh) use o_ARRAYS use g_forcing_arrays use MOD_MESH + use MOD_TRACER use g_PARSUP use g_config,only: dt implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_tracer), intent(inout), target :: tracer + type(t_mesh), intent(in), target :: mesh real(kind=WP) :: vd_flux(mesh%nl-1) real(kind=WP) :: rdata,flux,rlx integer :: nz,nl1,ul1, tr_num,n @@ -378,7 +361,7 @@ subroutine diff_ver_part_expl_ale(tr_num, mesh) ! Ty= Kd(4,nz-1,n)*(Z_3d_n(nz-1,n)-zbar_3d_n(nz,n))*zinv1 *neutral_slope(3,nz-1,n)**2 + & ! Kd(4,nz,n)*(zbar_3d_n(nz,n)-Z_3d_n(nz,n))*zinv1 *neutral_slope(3,nz,n)**2 - vd_flux(nz) = (Kv(nz,n)+Ty)*(tr_arr(nz-1,n,tr_num)-tr_arr(nz,n,tr_num))*zinv1*area(nz,n) + vd_flux(nz) = (Kv(nz,n)+Ty)*(tracer%values(nz-1,n)-tracer%values(nz,n))*zinv1*area(nz,n) end do @@ -395,8 +378,9 @@ end subroutine diff_ver_part_expl_ale ! !=============================================================================== ! vertical diffusivity augmented with Redi contribution [vertical flux of K(3,3)*d_zT] -subroutine diff_ver_part_impl_ale(tr_num, mesh) +subroutine diff_ver_part_impl_ale(tracer, mesh) use MOD_MESH + use MOD_TRACER use o_PARAM use o_ARRAYS use i_ARRAYS @@ -408,18 +392,22 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) use bc_surface_interface implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_tracer), intent(inout), target :: tracer + type(t_mesh), intent(in), target :: mesh !!PS real(kind=WP) :: bc_surface real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) - integer :: nz, n, nzmax,nzmin, tr_num + integer :: nz, n, nzmax,nzmin real(kind=WP) :: m, zinv, dt_inv, dz real(kind=WP) :: rsss, Ty,Ty1, c1,zinv1,zinv2,v_adv real(kind=WP), external :: TFrez ! Sea water freeze temperature. real(kind=WP) :: isredi=0._WP logical :: do_wimpl=.true. -#include "associate_mesh.h" + real(kind=WP), dimension(:,:), pointer :: trarr + +#include "associate_mesh.h" + trarr=>tracer%values(:,:) !___________________________________________________________________________ if ((trim(tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. @@ -649,36 +637,29 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) end if !_______________________________________________________________________ - ! the rhs (inhomogene part): --> rhs = K_33*dt*d/dz*Tstar --> Tstar...tr_arr + ! the rhs (inhomogene part): --> rhs = K_33*dt*d/dz*Tstar --> Tstar...trarr ! solve difference quotient for rhs --> tr ! RHS at Volume_2: ! ! RHS*V_2 = K_33*dt*(T_1-T_2)/(Z_1-Z_2)*V_2 - K_33*dt*(T_2-T_3)/(Z_2-Z_3)*V_3 ! = -a*T_1 + (a+c)*T_2 - c*T_3 ! - ! -+--> tr(1) =(a(1)+c(1))*tr_arr(1,n,tr_num)-c(1)*tr_arr(2,n,tr_num) + ! -+--> tr(1) =(a(1)+c(1))*trarr(1,n)-c(1)*trarr(2,n) ! |--> a(1)=0 nz=nzmin dz=hnode_new(nz,n) ! It would be (zbar(nz)-zbar(nz+1)) if not ALE - tr(nz)=-(b(nz)-dz)*tr_arr(nz,n,tr_num)-c(nz)*tr_arr(nz+1,n,tr_num) - !tr(nz)=c(nz)*(tr_arr(nz,n,tr_num) - tr_arr(nz+1,n,tr_num)) + tr(nz)=-(b(nz)-dz)*trarr(nz,n)-c(nz)*trarr(nz+1,n) do nz=nzmin+1,nzmax-2 dz=hnode_new(nz,n) - tr(nz)= -a(nz) * tr_arr(nz-1,n,tr_num) & - -(b(nz)-dz)* tr_arr(nz,n,tr_num) & - -c(nz) * tr_arr(nz+1,n,tr_num) - !tr(nz)=-a(nz) * tr_arr(nz-1,n,tr_num) & - ! +(a(nz)+c(nz))* tr_arr(nz,n,tr_num) & - ! -c(nz) * tr_arr(nz+1,n,tr_num) - + tr(nz)= -a(nz) * trarr(nz-1,n) & + -(b(nz)-dz)* trarr(nz,n) & + -c(nz) * trarr(nz+1,n) end do nz=nzmax-1 dz=hnode_new(nz,n) - tr(nz)=-a(nz)*tr_arr(nz-1,n,tr_num)-(b(nz)-dz)*tr_arr(nz,n,tr_num) - !tr(nz)=-a(nz)*tr_arr(nz-1,n,tr_num)+a(nz)*tr_arr(nz,n,tr_num) - + tr(nz)=-a(nz)*trarr(nz-1,n)-(b(nz)-dz)*trarr(nz,n) !_______________________________________________________________________ ! Add KPP nonlocal fluxes to the rhs (only T and S currently) ! use here blmc or kpp_oblmixc instead of Kv, since Kv already contains @@ -686,15 +667,15 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! mixing or windmixing which are to much for nonlocal ! transports and lead to instability of the model if (use_kpp_nonlclflx) then - if (tr_num==2) then + if (tracer%ID==2) then rsss=ref_sss - if (ref_sss_local) rsss=tr_arr(1,n,2) + if (ref_sss_local) rsss=tracer%values(1,n) end if !___________________________________________________________________ ! use fesom1.4 KPP if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then - if (tr_num==1) then ! temperature + if (tracer%ID==1) then ! temperature ! --> no fluxes to the top out of the surface, no fluxes ! downwards out of the bottom !___surface_________________________________________________ @@ -715,7 +696,7 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) +( MIN(ghats(nz ,n)*blmc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & ) * heat_flux(n) / vcpw * dt - elseif (tr_num==2) then ! salinity + elseif (tracer%ID==2) then ! salinity ! --> no fluxes to the top out of the surface, no fluxes ! downwards out of the bottom !___surface_________________________________________________ @@ -739,7 +720,7 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) !___________________________________________________________________ ! use cvmix KPP elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then - if (tr_num==1) then ! temperature + if (tracer%ID==1) then ! temperature !___surface_________________________________________________ nz = nzmin tr(nz)=tr(nz) & @@ -758,7 +739,7 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) +( MIN(kpp_nonlcltranspT(nz ,n)*kpp_oblmixc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & ) * heat_flux(n) / vcpw * dt - elseif (tr_num==2) then ! salinity + elseif (tracer%ID==2) then ! salinity !___surface_________________________________________________ nz = nzmin tr(nz)=tr(nz) & @@ -782,7 +763,7 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) !_______________________________________________________________________ ! case of activated shortwave penetration into the ocean, ad 3d contribution - if (use_sw_pene .and. tr_num==1) then + if (use_sw_pene .and. tracer%ID==1) then do nz=nzmin, nzmax-1 zinv=1.0_WP*dt !/(zbar(nz)-zbar(nz+1)) ale! !!PS tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n) * ( area(nz+1,n)/areasvol(nz,n)) ) * zinv @@ -793,7 +774,7 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) !_______________________________________________________________________ ! The first row contains also the boundary condition from heatflux, ! freshwaterflux and relaxation terms - ! --> tr_arr(1,n,1)*water_flux(n) : latent heatflux contribution due to + ! --> trarr(1,n)*water_flux(n) : latent heatflux contribution due to ! cell volume. If Volume decreases --> temp has to raise, if volume ! expended --> temp has to decrease ! (-) ^ (-) ^ @@ -802,7 +783,7 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! (BUT CHECK!) | | | | ! v (+) v (+) ! - tr(nzmin)= tr(nzmin)+bc_surface(n, tracer_id(tr_num),mesh) + tr(nzmin)= tr(nzmin)+bc_surface(n, tracer%ID, trarr(mesh%ulevels_nod2D(n),n), mesh) !_______________________________________________________________________ ! The forward sweep algorithm to solve the three-diagonal matrix @@ -847,9 +828,8 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! update tracer ! tr ... dTnew = T^(n+0.5) - T* do nz=nzmin,nzmax-1 - ! tr_arr - before ... T* - tr_arr(nz,n,tr_num)=tr_arr(nz,n,tr_num)+tr(nz) - ! tr_arr - after ... T^(n+0.5) = dTnew + T* = T^(n+0.5) - T* + T* + ! trarr - before ... T* + trarr(nz,n)=trarr(nz,n)+tr(nz) end do end do ! --> do n=1,myDim_nod2D @@ -927,7 +907,7 @@ subroutine diff_ver_part_redi_expl(mesh) del_ttf(nz,n) = del_ttf(nz,n)+(vd_flux(nz) - vd_flux(nz+1))*dt/areasvol(nz,n) enddo end do -end subroutine diff_ver_part_redi_expl! +end subroutine diff_ver_part_redi_expl ! ! !=============================================================================== @@ -1151,7 +1131,7 @@ end subroutine diff_part_bh !=============================================================================== ! this function returns a boundary conditions for a specified thacer ID and surface node ! ID = 0 and 1 are reserved for temperature and salinity -FUNCTION bc_surface(n, id, mesh) +FUNCTION bc_surface(n, id, sval, mesh) use MOD_MESH USE o_ARRAYS USE g_forcing_arrays @@ -1159,17 +1139,18 @@ FUNCTION bc_surface(n, id, mesh) USE g_config implicit none - type(t_mesh), intent(in) , target :: mesh - REAL(kind=WP) :: bc_surface - integer, intent(in) :: n, id - character(len=10) :: id_string + integer, intent(in) :: n, id + real(kind=WP), intent(in) :: sval + type(t_mesh), intent(in) , target :: mesh + REAL(kind=WP) :: bc_surface + character(len=10) :: id_string ! --> is_nonlinfs=1.0 for zelvel,zstar .... ! --> is_nonlinfs=0.0 for linfs SELECT CASE (id) - CASE (0) - bc_surface=-dt*(heat_flux(n)/vcpw + tr_arr(mesh%ulevels_nod2D(n),n,1)*water_flux(n)*is_nonlinfs) CASE (1) + bc_surface=-dt*(heat_flux(n)/vcpw + sval*water_flux(n)*is_nonlinfs) + CASE (2) ! --> real_salt_flux(:): salt flux due to containment/releasing of salt ! by forming/melting of sea ice bc_surface= dt*(virtual_salt(n) & !--> is zeros for zlevel/zstar diff --git a/src/oce_ice_init_state.F90 b/src/oce_ice_init_state.F90 deleted file mode 100755 index a637515ad..000000000 --- a/src/oce_ice_init_state.F90 +++ /dev/null @@ -1,802 +0,0 @@ -!============================================================================== -! -! Simple initialization, forcing and output, just for tests -! for ocean and ice. -! ============================================================================ -! ============================================================================ -subroutine initial_state_test(mesh) - use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - ! - implicit none - integer :: elem, n, nz, elnodes(3) - integer :: elevation, strat, wind, cooling, tperturb - real(kind=WP) :: lon, lat, a, dst - real(kind=WP) :: minlat,maxlat,tt,rwidth - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - -! Now updated for the box mesh, it was originally designed for hex mesh. -! In that case, the southern boundary is 40, the northern 48.83, and 0:18 the -! longitudinal extent. - - - -! Default values - stress_surf=0.0 - tr_arr(:,:,1)=20.0_WP - Tsurf=tr_arr(1,:,1) - heat_flux=0.0_WP - tr_arr(:,:,2)=35.0_WP - Ssurf=tr_arr(1,:,2) - water_flux=0.0_WP - relax2clim=0.0 - - elevation=0 - strat=1 - wind=1 - cooling=0 - tperturb=0 - surf_relax_T=0 !10.0/10.0/24.0/3600. - surf_relax_S=0. - - - ! Stratification - if(strat==1) then - DO n=1, myDim_nod2D+eDim_nod2D - DO nz=1, nlevels_nod2D(n)-1 - ! tr_arr(nz,n,1)=tr_arr(nz,n,1)- 8.2e-3*abs(Z(nz)) - tr_arr(nz,n,1)=tr_arr(nz,n,1)-0.95_WP*20*tanh(abs(Z(nz))/300)-abs(Z(nz))/2400.0_WP - - END DO - END DO - end if - -Tsurf=tr_arr(1,:,1) - - if (tperturb==0) then - ! Temperature perturbation - do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - lon=coord_nod2D(1,n) - dst=sqrt((lat-37.5*rad)**2+(lon-4.5*rad)**2) - if (dst>1.5*rad) cycle - do nz=1, nlevels_nod2D(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)+0.1*exp(-(dst/(1.5*rad))**2)*sin(pi*abs(Z(nz))/1600) - end do - end do - end if - - - if (cooling==1) then - ! Surface cooling - do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - lon=coord_nod2D(1,n) - dst=sqrt((lat-37.5*rad)**2+(lon-4.5*rad)**2) - if (dst>3.7*rad) cycle - Tsurf(n)=Tsurf(n)-1*exp(-(dst/(2.2*rad))**2) - end do - end if - -#ifdef false - if (wind==1) then - DO elem=1, myDim_elem2D - elnodes=elem2d_nodes(:,elem) - lat=sum(coord_nod2D(2,elnodes))/3.0_WP - lon=sum(coord_nod2D(1,elnodes))/3.0_WP - stress_surf(1,elem)=-0.2 *cos(pi*(lat-30.0*rad)/(15.0*rad)) !(8.83*rad)) - ! 40 is the south boundary of the hex box - END DO - end if -#endif - - if (wind==1) then - DO elem=1, myDim_elem2D - elnodes=elem2d_nodes(:,elem) - lat=sum(coord_nod2D(2,elnodes))/3.0_WP - lon=sum(coord_nod2D(1,elnodes))/3.0_WP - !stress_surf(1,elem)=0.1 *cos(pi*(lat-40.0*rad)/(1500000.0/r_earth))* & - ! exp(-((lat-40.0*rad)/(1500000.0/r_earth))**2) - ! 40 is the center of domain - stress_surf(1,elem)=0.1 *cos(pi*(lat-35.0*rad)/(1250000.0/r_earth))* & - exp(-((lat-35.0*rad)/(1250000.0/r_earth))**2)* & - (1.0_WP-0.5_WP*((lat-35.0*rad)/(1250000.0/r_earth))) - ! 35 is the center of domain - END DO - end if - - ! Fix for too low salinity - where (tr_arr(:,:,2)<20.4) tr_arr(:,:,2)=20.4 -end subroutine initial_state_test -! ==================================================================== - -subroutine initial_state_channel_test(mesh) - use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_CONFIG - ! - implicit none - integer :: elem, n, nz, elnodes(3) - integer :: strat, wind, elevation - real(kind=WP) :: lon, lat, a, dst - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! Default values - stress_surf=0.0 - tr_arr(:,:,1)=20.0_WP - Tsurf=tr_arr(1,:,1) - heat_flux=0.0_WP - tr_arr(:,:,2)=35.0_WP - Ssurf=tr_arr(1,:,2) - water_flux=0.0_WP - - strat=1 - wind=0 - elevation=0 - - lat=30.0*rad - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-lat - do nz=1, nlevels(n)-1 - tr_arr(nz,n,1)=25.-0.5e-5*r_earth*dst- 8.2e-3*abs(Z(nz)) -! tr_arr(nz,n,1)=(25.-0.5e-5*r_earth*dst)*exp(Z(nz)/800) - end do - end do - end if - - if (wind==1) then - DO elem=1, myDim_elem2D - call elem_center(elem, lon, lat, mesh) - stress_surf(1,elem)=-0.2 *cos(pi*(lat-30.0*rad)/(15*rad)) - ! 40 is the south boundary of the box - END DO - end if - - Tsurf=tr_arr(1,:,1) - Ssurf=tr_arr(1,:,2) - Tclim=tr_arr(:,:,1) - Sclim=tr_arr(:,:,2) - - ! small perturbation: - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-30.0*rad - do nz=1, nlevels(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)-0.2*sin(2*pi*dst/(15.0*rad))*sin(pi*Z(nz)/1500.0) & - *(sin(8*pi*coord_nod2D(1,n)/(20.0*rad))+ & - 0.5*sin(3*pi*coord_nod2D(1,n)/(20.0*rad))) - end do - end do - end if - - if(elevation==1) then - eta_n=0.01*(coord_nod2D(2,:)-30.0*rad)/(15.0*rad) - end if - - ! relaxation to climatology: - Do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - if(lat>43.5*rad) relax2clim(n)=clim_relax*(1.0-(45*rad-lat)/(1.5*rad)) - if(lat<31.5*rad) relax2clim(n)=clim_relax*(1.0+(30*rad-lat)/(1.5*rad)) - END DO - return - ! advection scheme tests - - dst=45.0*rad-30.0*rad; - DO n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n)-30.0*rad - lon=coord_nod2D(1,n) - eta_n(n)=(1000000./pi)*sin(pi*lat/dst)*sin(2*pi*lon/(20*rad)) - !eta_n(n)=(1000000./pi)*sin(pi*lat/dst)*sin(pi*lon/(10*rad)) - end do - - - Do n=1, myDim_elem2D - UV(1,:,n)=-sum(gradient_sca(4:6,n)*eta_n(elem2D_nodes(:,n))) - UV(2,:,n)=sum(gradient_sca(1:3,n)*eta_n(elem2D_nodes(:,n))) - END DO - - - !Do n=1, elem2D - !call elem_center(n, lon, lat, mesh) - !lat=lat-30.0*rad - !UV(1,:,n)=-(20*rad/dst)*0.1*cos(pi*lat/dst)*sin(2*pi*lon/(20*rad)) - !UV(2,:,n)= 0.2*sin(pi*lat/dst)*cos(2*pi*lon/(20*rad)) - !end do - relax2clim=0. - tr_arr(:,:,1)=20.0 - Tsurf=tr_arr(1,:,1) - surf_relax_T=0. - surf_relax_S=0. - !U_n=-0.3 - !V_n=0. -! Temperature perturbation - do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n)-32.5*rad - lon=coord_nod2D(1,n)-5.0*rad - if (lon>cyclic_length/2) lon=lon-cyclic_length - if (lon<-cyclic_length/2) lon=lon+cyclic_length - dst=sqrt((lat)**2+(lon)**2) - if (dst>1.5*rad) cycle - do nz=1, nlevels_nod2D(n)-1 - !if(abs(Z(nz)+500)<300) then - tr_arr(nz,n,1)=tr_arr(nz,n,1)+1.0*cos(pi*dst/2.0/1.5/rad) !exp(-(dst/(1.5*rad))**2) - !end if - end do - end do -end subroutine initial_state_channel_test -! ==================================================================== -subroutine initial_state_channel_narrow_test(mesh) - use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_CONFIG - ! - implicit none - integer :: elem, n, nz, elnodes(3) - integer :: strat, wind, elevation - real(kind=WP) :: lon, lat, a, dst - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! Default values - stress_surf=0.0 - tr_arr(:,:,1)=20.0_WP - Tsurf=tr_arr(1,:,1) - heat_flux=0.0_WP - tr_arr(:,:,2)=35.0_WP - Ssurf=tr_arr(1,:,2) - water_flux=0.0_WP - - strat=1 - wind=0 - elevation=0 - - lat=30.0*rad - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-lat - do nz=1, nlevels_nod2D(n)-1 - tr_arr(nz,n,1)=25.-0.5e-5*r_earth*dst- 8.2e-3*abs(Z(nz)) -! tr_arr(nz,n,1)=(25.-0.5e-5*r_earth*dst)*exp(Z(nz)/800) - end do - end do - end if - - if (wind==1) then - DO elem=1, myDim_elem2D - call elem_center(elem, lon, lat, mesh) - stress_surf(1,elem)=-0.2 *cos(pi*(lat-30.0*rad)/(10*rad)) - ! 40 is the south boundary of the box - END DO - end if - - Tsurf=tr_arr(1,:,1) - Ssurf=tr_arr(1,:,2) - Tclim=tr_arr(:,:,1) - Sclim=tr_arr(:,:,2) - - ! small perturbation: - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-30.0*rad - do nz=1, nlevels(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)-0.1*sin(pi*dst/(10.0*rad))*sin(pi*Z(nz)/1600.0) & - *(sin(4*pi*coord_nod2D(1,n)/(10.0*rad))+0.5*sin(3*pi*coord_nod2D(1,n)/(10.0*rad))) - end do - end do - end if - - if(elevation==1) then - eta_n=0.01*(coord_nod2D(2,:)-30.0*rad)/(10.0*rad) - end if - - ! relaxation to climatology: - Do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - if(lat>38.5*rad) relax2clim(n)=clim_relax*(1.0-(40*rad-lat)/(1.5*rad)) - if(lat<31.5*rad) relax2clim(n)=clim_relax*(1.0+(30*rad-lat)/(1.5*rad)) - END DO -!T_rhsAB=tr_arr(:,:,1) in case upwind1 -!S_rhsAB=tr_arr(:,:,2) -! Advection experiments: -return - UV(1,:,:)=-0.3 - UV(2,:,:)=0. - - dst=maxval(coord_nod2D(2,:))-30.0*rad; - DO n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n)-30.0*rad - lon=coord_nod2D(1,n) - eta_n(n)=(1000000./pi)*sin(pi*lat/dst)*sin(2*pi*lon/(10*rad)) - !eta_n(n)=(1000000./pi)*sin(pi*lat/dst)*sin(pi*lon/(10*rad)) - end do - - - Do n=1, myDim_elem2D - UV(1,:,n)=-sum(gradient_sca(4:6,n)*eta_n(elem2D_nodes(:,n))) - UV(2,:,n)=sum(gradient_sca(1:3,n)*eta_n(elem2D_nodes(:,n))) - END DO - - - Do n=1, myDim_elem2D - call elem_center(n, lon, lat, mesh) - lat=lat-30.0*rad - UV(1,:,n)=-0.1*(dst/10.0/rad)*cos(pi*lat/dst)*sin(2*pi*lon/(10*rad)) - UV(2,:,n)= 0.2*sin(pi*lat/dst)*cos(2*pi*lon/(10*rad)) - end do - - - - relax2clim=0. - tr_arr(:,:,1)=20.0 - -! Temperature perturbation - do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n)-32.5*rad - lon=coord_nod2D(1,n)-5.0*rad - if (lon>cyclic_length/2) lon=lon-cyclic_length - if (lon<-cyclic_length/2) lon=lon+cyclic_length - dst=sqrt((lat)**2+(lon)**2) - if (dst>1.5*rad) cycle - do nz=1, nlevels_nod2D(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)+1.0*cos(pi*dst/2.0/1.5/rad) !exp(-(dst/(1.5*rad))**2) - end do - end do -end subroutine initial_state_channel_narrow_test -! ================================================================ -subroutine init_fields_na_test(mesh) - use MOD_MESH - use o_PARAM - use o_ARRAYS - use g_PARSUP - ! - implicit none - integer :: n, nz, nd - real(kind=WP) :: maxlat, minlat, rwidth, lat,lon - logical :: c_status - real(kind=WP) :: p0, ss, tt,pr - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - c_status = .false. - - ! =================== - ! Fill the model fields with dummy values - ! =================== - - ! Default values - stress_surf=0.0 - tr_arr(:,:,1)=20.0_WP - Tsurf=tr_arr(1,:,1) - heat_flux=0.0_WP - tr_arr(:,:,2)=35.0_WP - Ssurf=tr_arr(1,:,2) - water_flux=0.0_WP - - ! =================== - ! Initialize T, S from files - ! =================== - - !call get_TS_mean('gur', c_status) - - ! =================== - ! If database contains in situ - ! temperature, transform it to - ! potential temperature - ! =================== - if(c_status) then - pr=0. - do n=1,myDim_nod2D+eDim_nod2D - DO nz=1,nlevels_nod2D(n)-1 - tt=tr_arr(nz,n,1) - ss=tr_arr(nz,n,2) - p0=abs(Z(nz)) - call ptheta(ss, tt, p0, pr, tr_arr(nz,n,1)) - END DO - end do - write(*,*) 'In situ temperature is converted to potential temperature' - end if - Tclim=tr_arr(:,:,1) - Sclim=tr_arr(:,:,2) - do n=1, myDim_nod2D+eDim_nod2D - Tsurf(n)=tr_arr(1,n,1) - Ssurf(n)=tr_arr(1,n,2) - end do - - ! ==================== - ! Specify where restoring to - ! climatology is applied - ! ==================== - ! relaxation to climatology: - maxlat=80.0*rad - minlat=-28.0*rad - rwidth=10.0*rad - Do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - if(lat>maxlat-rwidth) then - relax2clim(n)=clim_relax*(cos(pi*0.5*(maxlat-lat)/rwidth))**2 - end if - if(latmaxlat-rwidth) relax2clim(n)=clim_relax*(1.0-(maxlat-lat)/rwidth) - !if(lat15.0).and.(lon<40.0).and.(lat>30.0).and.(lat<40.0)) then - DO nz=1,nlevels_nod2D(n)-1 - if(tr_arr(nz,n,2)<38.0) tr_arr(nz,n,2)=38.0 - END DO - end if - end do -end subroutine init_fields_global_test -! ================================================================ -! ==================================================================== - -subroutine initial_state_channel_dima_test(mesh) - use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - ! - implicit none - integer :: elem, n, nz, elnodes(3) - integer :: strat, wind, elevation - real(kind=WP) :: lon, lat, a, dst - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - -! Default values - stress_surf=0.0 - tr_arr(:,:,1)=20.0_WP - Tsurf=tr_arr(1,:,1) - heat_flux=0.0_WP - tr_arr(:,:,2)=35.0_WP - Ssurf=tr_arr(1,:,2) - water_flux=0.0_WP - - strat=1 - wind=0 - elevation=0 - - lat=30.0*rad - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-lat - do nz=1, nlevels(n)-1 - tr_arr(nz,n,1)=25.-0.5e-5*r_earth*dst- 8.2e-3*abs(Z(nz)) -! tr_arr(nz,n,1)=(25.-0.5e-5*r_earth*dst)*exp(Z(nz)/800) - end do - end do - end if - - if (wind==1) then - DO elem=1, myDim_elem2D - call elem_center(elem, lon, lat, mesh) - stress_surf(1,elem)=-0.2 *cos(pi*(lat-30.0*rad)/(15*rad)) - ! 40 is the south boundary of the box - END DO - end if - - Tsurf=tr_arr(1,:,1) - Ssurf=tr_arr(1,:,2) - Tclim=tr_arr(:,:,1) - Sclim=tr_arr(:,:,2) - - ! small perturbation: - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-30.0*rad - do nz=1, nlevels(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)-0.1*sin(pi*dst/(15.0*rad))*sin(pi*Z(nz)/1500.0) & - *(sin(8*pi*coord_nod2D(1,n)/(40.0*rad))+sin(5*pi*coord_nod2D(1,n)/(40.0*rad))) - end do - end do - end if - - if(elevation==1) then - eta_n=0.01*(coord_nod2D(2,:)-30.0*rad)/(15.0*rad) - end if - - ! relaxation to climatology: - Do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - if(lat>43.5*rad) relax2clim(n)=clim_relax*(1.0-(45*rad-lat)/(1.5*rad)) - if(lat<31.5*rad) relax2clim(n)=clim_relax*(1.0+(30*rad-lat)/(1.5*rad)) - END DO -end subroutine initial_state_channel_dima_test -! ==================================================================== -subroutine ice_init_fields_test(mesh) -! -! Simple initialization for a box model to test the dynamical part. -! No thermodinamics is initialized here -! -use mod_mesh -use i_arrays -use i_param -use o_param -use g_PARSUP -use o_ARRAYS -use g_CONFIG -use g_comm_auto - -IMPLICIT NONE -real(kind=WP) :: xmin, xmax, ymin, ymax, Lx, Ly, meanf -integer :: n, elnodes(3) -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - - coriolis=1.4e-4 ! redefines Coriolis - coriolis_node=1.4e-4 - ! Set initial thickness and area coverage: - m_ice=2.0 - m_snow=0.0 - u_ice=0.0 - v_ice=0.0 - stress_atmice_x=0.0 - stress_atmice_y=0.0 - ! a_ice is defined later - - - ! Set ocean velocity (stationary in time): - xmin=0.0_WP*rad - xmax=20.0_WP*rad !10.0_WP*rad - ymin=30._WP*rad !30._WP*rad - ymax=45.0_WP*rad !40.0_WP*rad - Lx=xmax-xmin - Ly=ymax-ymin - - DO n=1, myDim_nod2D+eDim_nod2D - a_ice(n)=(coord_nod2d(1,n)-xmin)/Lx - END DO - - DO n=1, myDim_nod2D+eDim_nod2D - U_w(n)=0.1*(2*(coord_nod2d(2,n)-ymin)-Ly)/Ly - V_w(n)=-0.1*(2*(coord_nod2d(1,n)-xmin)-Lx)/Lx - END DO - m_ice=m_ice*a_ice - - ! Elevation computed approximately, from the geostrophy: - meanf= 1.4e-4*r_earth !2*omega*sin(yc)*r_earth - DO n=1, myDim_nod2d+eDim_nod2D - elevation(n)=-0.1*meanf/g *((coord_nod2d(2,n)-ymin)**2/Ly- & - (coord_nod2d(2,n)-ymin)+ & - (coord_nod2d(1,n)-xmin)**2/Lx -& - (coord_nod2d(1,n)-xmin)) - END DO -end subroutine ice_init_fields_test -! ============================================================================= -Subroutine ice_update_forcing_test(step, mesh) -! -! Here only simple wind variability is introduced -! -use mod_mesh -use i_arrays -use i_param -use o_param -use i_therm_param -use g_PARSUP -use g_forcing_arrays -USE g_CONFIG -IMPLICIT NONE -real(kind=WP) :: xmin, xm, ym, ymin, Lx, Ly, td, cdwin -integer :: step, n, elnodes(3) -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - cdwin=0.00225_WP - ! Set wind velocity (stationary in time): - xmin=0.0_WP*rad - Lx=20.0_WP*rad-xmin - ymin=30.0_WP*rad - Ly=45.0_WP*rad-ymin - td=4*3600*24.0_WP - - DO n=1, myDim_nod2D+eDim_nod2D - xm=coord_nod2d(1,n) - ym=coord_nod2d(2,n) - u_wind(n)=5.0+(sin(2*pi*step*dt/td)-3.0)*sin(2*pi*(xm-xmin)/Lx) & - *sin(pi*(ym-ymin)/Ly) - - v_wind(n)=5.0+(sin(2*pi*step*dt/td)-3.0)*sin(2*pi*(ym-ymin)/Ly) & - *sin(pi*(xm-xmin)/Lx) - END DO - ! wind to stress: - - stress_atmice_x = rhoair*cdwin*sqrt(u_wind**2+v_wind**2)*u_wind - stress_atmice_y = rhoair*cdwin*sqrt(u_wind**2+v_wind**2)*v_wind -end subroutine ice_update_forcing_test -! -!============================================================================== -! Simple initialization for tests for GM with the real geometry -! ============================================================================ -subroutine ini_global_ocean(mesh) - use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - USE g_ROTATE_grid - ! - implicit none - integer :: n, nz - real(kind=WP) :: minlat,maxlat, lon, lat, val - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - tr_arr(:,:,1)=20.0_WP - tr_arr(:,:,2)=34.0_WP - - - call r2g(lon, maxlat, coord_nod2D(1,1), coord_nod2D(2,1)) - call r2g(lon, minlat, coord_nod2D(1,1), coord_nod2D(2,1)) - DO n=2,myDim_nod2D+eDim_nod2D - call r2g(lon, lat, coord_nod2D(1,n), coord_nod2D(2,n)) - maxlat=max(maxlat, lat) - minlat=min(minlat, lat) - END DO - - call MPI_AllREDUCE(minlat, val, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - minlat=val - call MPI_AllREDUCE(maxlat, val, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - maxlat=val - - ! Stratification - DO n=1, myDim_nod2D+eDim_nod2D - call r2g(lon, lat, coord_nod2D(1,n), coord_nod2D(2,n)) - DO nz=1, nlevels_nod2D(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)-(lat-minlat)/(maxlat-minlat)*2.0_WP - END DO - END DO -end subroutine ini_global_ocean -! ==================================================================== -! -!============================================================================== -! Zero the dynamicsl variables and forcing to allow for debugging of new implementations -! ============================================================================ -subroutine zero_dynamics - use g_parsup - use o_arrays - use g_comm_auto - use o_tracers - use g_forcing_arrays - implicit none - - water_flux =0._WP - real_salt_flux=0._WP - surf_relax_S =0._WP - heat_flux =0._WP - UV =0._WP - Wvel =0._WP -end subroutine zero_dynamics -! ==================================================================== - diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index d2e93315b..34d7ba1ce 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -95,8 +95,7 @@ MODULE o_PARAM integer :: ID integer, allocatable, dimension(:) :: ind2 END TYPE tracer_source3d_type - -integer :: num_tracers=2 +integer :: num_tracers=2 integer, dimension(100) :: tracer_ID = RESHAPE((/0, 1/), (/100/), (/0/)) ! ID for each tracer for treating the initialization and surface boundary condition ! 0=temp, 1=salt etc. type(tracer_source3d_type), & @@ -236,11 +235,8 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: stress_node_surf(:,:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_x(:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_y(:) -real(kind=WP), allocatable :: T_rhs(:,:) real(kind=WP), allocatable :: heat_flux(:), Tsurf(:) real(kind=WP), allocatable :: heat_flux_in(:) !to keep the unmodified (by SW penetration etc.) heat flux -real(kind=WP), allocatable :: S_rhs(:,:) -real(kind=WP), allocatable :: tr_arr(:,:,:),tr_arr_old(:,:,:) real(kind=WP), allocatable :: del_ttf(:,:) real(kind=WP), allocatable :: del_ttf_advhoriz(:,:),del_ttf_advvert(:,:) !!PS ,del_ttf_diff(:,:) @@ -256,8 +252,6 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: MLD1(:), MLD2(:) integer, allocatable :: MLD1_ind(:), MLD2_ind(:) real(kind=WP), allocatable :: ssh_gp(:) -! Passive and age tracers -real(kind=WP), allocatable :: tracer(:,:,:), tracer_rhs(:,:,:) !Tracer gradients&RHS real(kind=WP), allocatable :: ttrhs(:,:) real(kind=WP), allocatable :: tr_xy(:,:,:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index c3f5fe64a..061f0a05e 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -1,24 +1,50 @@ -module array_setup_interface +module oce_initial_state_interface interface - subroutine array_setup(mesh) + subroutine oce_initial_state(tracers, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_tracer + type(t_mesh), intent(in) , target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) end subroutine end interface end module -module oce_initial_state_interface +module tracer_init_interface interface - subroutine oce_initial_state(mesh) + subroutine tracer_init(tracers, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_tracer + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target, allocatable :: tracers(:) end subroutine end interface end module +module ocean_setup_interface + interface + subroutine ocean_setup(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target, allocatable :: tracers(:) + end subroutine + end interface +end module +module before_oce_step_interface + interface + subroutine before_oce_step(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target, allocatable :: tracers(:) + end subroutine + end interface +end module + ! ! !_______________________________________________________________________________ -subroutine ocean_setup(mesh) +subroutine ocean_setup(tracers, mesh) USE MOD_MESH +USE MOD_TRACER USE o_PARAM USE g_PARSUP USE o_ARRAYS @@ -30,11 +56,12 @@ subroutine ocean_setup(mesh) use g_cvmix_kpp use g_cvmix_tidal use Toy_Channel_Soufflet -use array_setup_interface use oce_initial_state_interface use oce_adv_tra_fct_interfaces IMPLICIT NONE -type(t_mesh), intent(inout) , target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_tracer), intent(inout), target :: tracers(:) +integer :: n !___setup virt_salt_flux____________________________________________________ ! if the ale thinkness remain unchanged (like in 'linfs' case) the vitrual ! salinity flux need to be used @@ -49,8 +76,7 @@ subroutine ocean_setup(mesh) use_virt_salt=.true. is_nonlinfs = 0.0_WP end if - call array_setup(mesh) - + !___________________________________________________________________________ ! initialize arrays for ALE if (mype==0) then @@ -144,16 +170,20 @@ subroutine ocean_setup(mesh) SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase if (mod(mstep, soufflet_forc_update)==0) then - call initial_state_soufflet(mesh) + call initial_state_soufflet(tracers, mesh) call compute_zonal_mean_ini(mesh) - call compute_zonal_mean(mesh) + call compute_zonal_mean(tracers, mesh) end if END SELECT else - call oce_initial_state(mesh) ! Use it if not running tests + call oce_initial_state(tracers, mesh) ! Use it if not running tests end if - if (.not.r_restart) tr_arr_old=tr_arr + if (.not.r_restart) then + do n=1, num_tracers + tracers(n)%valuesAB=tracers(n)%values + end do + end if !___________________________________________________________________________ ! first time fill up array for hnode & helem @@ -173,10 +203,46 @@ subroutine ocean_setup(mesh) write(*,*) '******************************************************************************' end if end subroutine ocean_setup +!_______________________________________________________________________________ +SUBROUTINE tracer_init(tracers, mesh) +USE MOD_MESH +USE MOD_TRACER +USE g_PARSUP +IMPLICIT NONE +integer :: elem_size, node_size +integer :: n +type(t_mesh), intent(in) , target :: mesh +type(t_tracer), intent(inout) , target, allocatable :: tracers(:) +#include "associate_mesh.h" + +elem_size=myDim_elem2D+eDim_elem2D +node_size=myDim_nod2D+eDim_nod2D + +! ================ +! Temperature (index=1), Salinity (index=2), etc. +! ================ +allocate(tracers(num_tracers)) +do n=1, num_tracers + allocate(tracers(n)%values (nl-1,node_size)) + allocate(tracers(n)%valuesAB(nl-1,node_size)) + tracers(n)%tra_adv_hor = TRIM(tra_adv_hor) + tracers(n)%tra_adv_ver = TRIM(tra_adv_ver) + tracers(n)%tra_adv_lim = TRIM(tra_adv_lim) + tracers(n)%tra_adv_ph = tra_adv_ph + tracers(n)%tra_adv_pv = tra_adv_pv + tracers(n)%smooth_bh_tra = smooth_bh_tra + tracers(n)%gamma0_tra = gamma0_tra + tracers(n)%gamma1_tra = gamma1_tra + tracers(n)%gamma2_tra = gamma2_tra + tracers(n)%values = 0. + tracers(n)%valuesAB = 0. + tracers(n)%ID = n +end do +END SUBROUTINE tracer_init ! ! !_______________________________________________________________________________ -SUBROUTINE array_setup(mesh) +SUBROUTINE arrays_init(mesh) USE MOD_MESH USE o_ARRAYS USE o_PARAM @@ -190,8 +256,7 @@ SUBROUTINE array_setup(mesh) IMPLICIT NONE integer :: elem_size, node_size integer :: n -type(t_mesh), intent(in) , target :: mesh - +type(t_mesh), intent(in) , target :: mesh #include "associate_mesh.h" @@ -223,12 +288,7 @@ SUBROUTINE array_setup(mesh) allocate(Wvel(nl, node_size), hpressure(nl,node_size)) allocate(Wvel_e(nl, node_size), Wvel_i(nl, node_size)) allocate(CFL_z(nl, node_size)) ! vertical CFL criteria -! ================ -! Temperature and salinity -! ================ -allocate(T_rhs(nl-1, node_size)) -allocate(S_rhs(nl-1, node_size)) -allocate(tr_arr(nl-1,node_size,num_tracers),tr_arr_old(nl-1,node_size,num_tracers)) + allocate(del_ttf(nl-1,node_size)) allocate(del_ttf_advhoriz(nl-1,node_size),del_ttf_advvert(nl-1,node_size)) del_ttf = 0.0_WP @@ -383,12 +443,10 @@ SUBROUTINE array_setup(mesh) CFL_z =0.0_WP hpressure=0.0_WP ! - T_rhs=0.0_WP heat_flux=0.0_WP heat_flux_in=0.0_WP Tsurf=0.0_WP - S_rhs=0.0_WP water_flux=0.0_WP relax_salt=0.0_WP virtual_salt=0.0_WP @@ -402,9 +460,6 @@ SUBROUTINE array_setup(mesh) stress_atmoce_x =0.0_WP stress_atmoce_y =0.0_WP - tr_arr=0.0_WP - tr_arr_old=0.0_WP - bvfreq=0.0_WP mixlay_dep=0.0_WP bv_ref=0.0_WP @@ -445,14 +500,15 @@ SUBROUTINE array_setup(mesh) !!PS dum_3d_n = 0.0_WP !!PS dum_2d_e = 0.0_WP !!PS dum_3d_e = 0.0_WP -END SUBROUTINE array_setup +END SUBROUTINE arrays_init ! ! !_______________________________________________________________________________ ! Here the 3D tracers will be initialized. Initialization strategy depends on a tracer ID. ! ID = 0 and 1 are reserved for temperature and salinity -SUBROUTINE oce_initial_state(mesh) +SUBROUTINE oce_initial_state(tracers, mesh) USE MOD_MESH +USE MOD_TRACER USE o_ARRAYS USE g_PARSUP USE g_config @@ -463,7 +519,8 @@ SUBROUTINE oce_initial_state(mesh) implicit none integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) real(kind=WP) :: loc, max_temp, min_temp, max_salt, min_salt #include "associate_mesh.h" @@ -475,12 +532,12 @@ SUBROUTINE oce_initial_state(mesh) ! this must be always done! First two tracers with IDs 0 and 1 are the temperature and salinity. if(mype==0) write(*,*) 'read Temperatur climatology from:', trim(filelist(1)) if(mype==0) write(*,*) 'read Salt climatology from:', trim(filelist(2)) - call do_ic3d(mesh) + call do_ic3d(tracers, mesh) - Tclim=tr_arr(:,:,1) - Sclim=tr_arr(:,:,2) - Tsurf=tr_arr(1,:,1) - Ssurf=tr_arr(1,:,2) + Tclim=tracers(1)%values + Sclim=tracers(2)%values + Tsurf=Tclim(1,:) + Ssurf=Sclim(1,:) relax2clim=0.0_WP ! count the passive tracers which require 3D source (ptracers_restore_total) @@ -504,14 +561,14 @@ SUBROUTINE oce_initial_state(mesh) id=tracer_ID(i) SELECT CASE (id) CASE (101) ! initialize tracer ID=101 - tr_arr(:,:,i)=0.0_WP + tracers(i)%values(:,:)=0.0_WP if (mype==0) then write (i_string, "(I3)") i write (id_string, "(I3)") id write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) end if CASE (301) !Fram Strait 3d restored passive tracer - tr_arr(:,:,i)=0.0_WP + tracers(i)%values(:,:)=0.0_WP rcounter3 =rcounter3+1 counter=0 do k=1, myDim_nod2D+eDim_nod2D @@ -531,7 +588,7 @@ SUBROUTINE oce_initial_state(mesh) ptracers_restore(rcounter3)%ind2(counter)=k end if end do - tr_arr(:,ptracers_restore(rcounter3)%ind2,i)=1. + tracers(i)%values(:,ptracers_restore(rcounter3)%ind2)=1. if (mype==0) then write (i_string, "(I3)") i write (id_string, "(I3)") id @@ -539,7 +596,7 @@ SUBROUTINE oce_initial_state(mesh) end if CASE (302) !Bering Strait 3d restored passive tracer - tr_arr(:,:,i)=0. + tracers(i)%values(:,:)=0.0_WP rcounter3 =rcounter3+1 counter=0 do k=1, myDim_nod2D+eDim_nod2D @@ -559,7 +616,7 @@ SUBROUTINE oce_initial_state(mesh) ptracers_restore(rcounter3)%ind2(counter)=k end if end do - tr_arr(:,ptracers_restore(rcounter3)%ind2,i)=1. + tracers(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP if (mype==0) then write (i_string, "(I3)") i write (id_string, "(I3)") id @@ -567,7 +624,7 @@ SUBROUTINE oce_initial_state(mesh) end if CASE (303) !BSO 3d restored passive tracer - tr_arr(:,:,i)=0. + tracers(i)%values(:,:)=0.0_WP rcounter3 =rcounter3+1 counter=0 do k=1, myDim_nod2D+eDim_nod2D @@ -587,7 +644,7 @@ SUBROUTINE oce_initial_state(mesh) ptracers_restore(rcounter3)%ind2(counter)=k end if end do - tr_arr(:,ptracers_restore(rcounter3)%ind2,i)=1. + tracers(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP if (mype==0) then write (i_string, "(I3)") i write (id_string, "(I3)") id @@ -609,8 +666,9 @@ end subroutine oce_initial_state ! !========================================================================== ! Here we do things (if applicable) before the ocean timestep will be made -SUBROUTINE before_oce_step(mesh) +SUBROUTINE before_oce_step(tracers, mesh) USE MOD_MESH + USE MOD_TRACER USE o_ARRAYS USE g_PARSUP USE g_config @@ -618,7 +676,8 @@ SUBROUTINE before_oce_step(mesh) implicit none integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target, allocatable :: tracers(:) #include "associate_mesh.h" @@ -626,7 +685,7 @@ SUBROUTINE before_oce_step(mesh) SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase if (mod(mstep, soufflet_forc_update)==0) then - call compute_zonal_mean(mesh) + call compute_zonal_mean(tracers, mesh) end if END SELECT end if diff --git a/src/oce_spp.F90 b/src/oce_spp.F90 index 26de0ef8e..df627380e 100644 --- a/src/oce_spp.F90 +++ b/src/oce_spp.F90 @@ -41,7 +41,7 @@ end subroutine cal_rejected_salt ! !---------------------------------------------------------------------------- ! -subroutine app_rejected_salt(mesh) +subroutine app_rejected_salt(ttf, mesh) use g_parsup use o_arrays use mod_mesh @@ -58,7 +58,8 @@ subroutine app_rejected_salt(mesh) data n_distr /5/ data rho_cri /0.4_WP/ !kg/m3 !SH !Duffy1999 - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent (inout) :: ttf(mesh%nl-1,myDim_nod2D+eDim_nod2D) #include "associate_mesh.h" @@ -70,8 +71,7 @@ subroutine app_rejected_salt(mesh) ! 2. in case of non zero salinity of ice (the well accepted value is 5psu) the SSS might become negative nzmin = ulevels_nod2D(row) nzmax = nlevels_nod2D(row) - !!PS if (tr_arr(1,row,2) < 10.0_WP) cycle - if (tr_arr(nzmin,row,2) < 10.0_WP) cycle + if (ttf(nzmin,row) < 10.0_WP) cycle if (geo_coord_nod2D(2,row)>0.0_WP) then !NH kml=1 !!PS spar(1)=0.0_WP @@ -85,18 +85,11 @@ subroutine app_rejected_salt(mesh) spar(k+1)=area(k+1,row)*hnode(k+1,row)*(Z_3d_n(1,row)-Z_3d_n(k+1,row))**n_distr end do - !!PS if (kml>1) then - !!PS tr_arr(1,row,2)=tr_arr(1,row,2)-ice_rejected_salt(row)/area(1,row)/hnode(1,row) - !!PS spar(2:kml)=spar(2:kml)/sum(spar(2:kml)) - !!PS do k=2,kml - !!PS tr_arr(k,row,2)=tr_arr(k,row,2)+ice_rejected_salt(row)*spar(k)/area(k,row)/hnode(k,row) - !!PS end do - !!PS endif if (kml>nzmin) then - tr_arr(nzmin,row,2)=tr_arr(nzmin,row,2)-ice_rejected_salt(row)/areasvol(1,row)/hnode(1,row) + ttf(nzmin,row)=ttf(nzmin,row)-ice_rejected_salt(row)/areasvol(1,row)/hnode(1,row) spar(nzmin+1:kml)=spar(nzmin+1:kml)/sum(spar(nzmin+1:kml)) do k=nzmin+1,kml - tr_arr(k,row,2)=tr_arr(k,row,2)+ice_rejected_salt(row)*spar(k)/areasvol(k,row)/hnode(k,row) + ttf(k,row)=ttf(k,row)+ice_rejected_salt(row)*spar(k)/areasvol(k,row)/hnode(k,row) end do endif endif diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index f8c06fa56..ddc348c5a 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -1,6 +1,7 @@ !============================================================================================ MODULE o_tracers USE MOD_MESH +USE MOD_TRACER IMPLICIT NONE interface @@ -29,7 +30,6 @@ SUBROUTINE tracer_gradient_elements(ttf, mesh) integer :: elem, elnodes(3) integer :: n, nz, nzmin, nzmax - #include "associate_mesh.h" DO elem=1, myDim_elem2D @@ -46,29 +46,29 @@ END SUBROUTINE tracer_gradient_elements ! ! !======================================================================================== -SUBROUTINE init_tracers_AB(tr_num, mesh) +SUBROUTINE init_tracers_AB(tracer, mesh) use g_config, only: flag_debug use g_parsup use o_arrays use g_comm_auto use mod_mesh - + use mod_tracer IMPLICIT NONE integer :: tr_num,n,nz - type(t_mesh), intent(in) , target :: mesh - + type(t_mesh), intent(in) , target :: mesh + type(t_tracer), intent(inout), target :: tracer !filling work arrays del_ttf=0.0_WP !AB interpolation - tr_arr_old(:,:,tr_num)=-(0.5_WP+epsilon)*tr_arr_old(:,:,tr_num)+(1.5_WP+epsilon)*tr_arr(:,:,tr_num) + tracer%valuesAB(:,:)=-(0.5_WP+epsilon)*tracer%valuesAB(:,:)+(1.5_WP+epsilon)*tracer%values(:,:) if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tr_arr_old(:,:,tr_num), mesh) + call tracer_gradient_elements(tracer%valuesAB, mesh) call exchange_elem_begin(tr_xy) if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' - call tracer_gradient_z(tr_arr(:,:,tr_num), mesh) + call tracer_gradient_z(tracer%values, mesh) !WHY NOT AB HERE? DSIDOREN! call exchange_elem_end() ! tr_xy used in fill_up_dn_grad call exchange_nod_begin(tr_z) ! not used in fill_up_dn_grad @@ -77,43 +77,44 @@ SUBROUTINE init_tracers_AB(tr_num, mesh) call exchange_nod_end() ! tr_z halos should have arrived by now. if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tr_arr(:,:,tr_num), mesh) !redefine tr_arr to the current timestep + call tracer_gradient_elements(tracer%values, mesh) !redefine tr_arr to the current timestep call exchange_elem(tr_xy) END SUBROUTINE init_tracers_AB ! ! !======================================================================================== -SUBROUTINE relax_to_clim(tr_num, mesh) - +SUBROUTINE relax_to_clim(tracer, mesh) use g_config,only: dt USE g_PARSUP use o_arrays IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - integer :: tr_num,n,nz, nzmin, nzmax + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracer + integer :: n,nz, nzmin, nzmax + + real(kind=WP), dimension(:,:), pointer :: trarr #include "associate_mesh.h" + trarr=>tracer%values(:,:) - if ((clim_relax>1.0e-8_WP).and.(tr_num==1)) then + if ((clim_relax>1.0e-8_WP).and.(tracer%ID==1)) then DO n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) !!PS tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)=tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)+& !!PS relax2clim(n)*dt*(Tclim(1:nlevels_nod2D(n)-1,n)-tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)) - tr_arr(nzmin:nzmax-1,n,tr_num)=tr_arr(nzmin:nzmax-1,n,tr_num)+& - relax2clim(n)*dt*(Tclim(nzmin:nzmax-1,n)-tr_arr(nzmin:nzmax-1,n,tr_num)) + trarr(nzmin:nzmax-1,n)=trarr(nzmin:nzmax-1,n)+& + relax2clim(n)*dt*(Tclim(nzmin:nzmax-1,n)-trarr(nzmin:nzmax-1,n)) END DO END if - if ((clim_relax>1.0e-8_WP).and.(tr_num==2)) then + if ((clim_relax>1.0e-8_WP).and.(tracer%ID==2)) then DO n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) - !!PS tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)=tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)+& - !!PS relax2clim(n)*dt*(Sclim(1:nlevels_nod2D(n)-1,n)-tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)) - tr_arr(nzmin:nzmax-1,n,tr_num)=tr_arr(nzmin:nzmax-1,n,tr_num)+& - relax2clim(n)*dt*(Sclim(nzmin:nzmax-1,n)-tr_arr(nzmin:nzmax-1,n,tr_num)) + trarr(nzmin:nzmax-1,n)=trarr(nzmin:nzmax-1,n)+& + relax2clim(n)*dt*(Sclim(nzmin:nzmax-1,n)-trarr(nzmin:nzmax-1,n)) END DO END IF END SUBROUTINE relax_to_clim diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index c354f7685..221deb0ca 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -1,5 +1,6 @@ MODULE Toy_Channel_Soufflet use mod_mesh + use mod_tracer USE o_ARRAYS USE o_PARAM USE g_PARSUP @@ -46,7 +47,7 @@ subroutine relax_zonal_vel(mesh) implicit none integer :: elem, nz, nn, nn1 real(kind=WP) :: a, yy, uzon - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh #include "associate_mesh.h" DO elem=1, myDim_elem2D @@ -75,11 +76,13 @@ subroutine relax_zonal_vel(mesh) end subroutine relax_zonal_vel !========================================================================== -subroutine relax_zonal_temp(mesh) +subroutine relax_zonal_temp(tracer, mesh) implicit none integer :: n, nz, nn, nn1 real(kind=WP) :: yy, a, Tzon - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracer + #include "associate_mesh.h" do n=1, myDim_nod2D+eDim_nod2D @@ -97,7 +100,7 @@ subroutine relax_zonal_temp(mesh) end if do nz=1, nlevels_nod2D(n)-1 Tzon=(1.0-a)*ztem(nz,nn)+a*ztem(nz,nn1) - tr_arr(nz,n,1)= tr_arr(nz,n,1)+dt*tau_inv*(Tclim(nz,n)-Tzon) + tracer%values(nz,n)= tracer%values(nz,n)+dt*tau_inv*(Tclim(nz,n)-Tzon) end do end do end subroutine relax_zonal_temp @@ -107,7 +110,8 @@ subroutine compute_zonal_mean_ini(mesh) real(kind=8) :: ymean, Ly integer :: elem, nz, m, elnodes(3) real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + #include "associate_mesh.h" Ly=ysize/r_earth ! The meridional lenght in radians @@ -157,11 +161,13 @@ subroutine compute_zonal_mean_ini(mesh) ! no division by 0 is occurring end subroutine compute_zonal_mean_ini !========================================================================== -subroutine compute_zonal_mean(mesh) +subroutine compute_zonal_mean(tracers, mesh) implicit none integer :: elem, nz, m, elnodes(3) real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + #include "associate_mesh.h" @@ -170,7 +176,7 @@ subroutine compute_zonal_mean(mesh) DO elem=1,myDim_elem2D if(elem2D_nodes(1,elem)>myDim_nod2D) cycle Do nz=1,nlevels(elem)-1 - ztem(nz,bpos(elem))=ztem(nz,bpos(elem))+sum(tr_arr(nz,elem2D_nodes(:,elem),1))/3.0_8 + ztem(nz,bpos(elem))=ztem(nz,bpos(elem))+sum(tracers(1)%values(nz,elem2D_nodes(:,elem)))/3.0_8 zvel(nz,bpos(elem))=zvel(nz,bpos(elem))+UV(1,nz,elem) END DO END DO @@ -217,10 +223,12 @@ subroutine compute_zonal_mean(mesh) end subroutine compute_zonal_mean ! ==================================================================================== -subroutine initial_state_soufflet(mesh) +subroutine initial_state_soufflet(tracers, mesh) ! Profiles Soufflet 2016 (OM) implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers(:) + integer :: n, nz, elnodes(3) real(kind=8) :: dst, yn, Fy, Lx ! real(kind=8) :: Ljet,rhomax,Sb, drho_No, drho_So @@ -231,10 +239,10 @@ subroutine initial_state_soufflet(mesh) dy=ysize/nybins/r_earth ! Default values - stress_surf = 0.0_WP - heat_flux = 0.0_WP - tr_arr(:,:,2) = 35.0_WP - Ssurf = tr_arr(1,:,2) + stress_surf = 0.0_WP + heat_flux = 0.0_WP + tracers(2)%values = 35.0_WP + Ssurf = tracers(2)%values(1,:) water_flux = 0.0_WP relax2clim = 0.0_WP @@ -279,21 +287,21 @@ subroutine initial_state_soufflet(mesh) end if end if do nz=1, nlevels_nod2D(n)-1 - tr_arr(nz, n,1)=rho_So(nz)+(rho_No(nz)-rho_So(nz))*(1.0-Fy) + tracers(1)%values(nz,n)=rho_So(nz)+(rho_No(nz)-rho_So(nz))*(1.0-Fy) end do end do ! ======== ! Make consistent ! ======== - Tsurf=tr_arr(1,:,1) - Tclim=tr_arr(:,:,1) + Tsurf=tracers(1)%values(1,:) + Tclim=tracers(1)%values(:,:) ! ======== ! add small perturbation: do n=1, myDim_nod2D+eDim_nod2D dst=(coord_nod2D(2, n)-lat0)*r_earth do nz=1, nlevels_nod2D(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)-0.1*sin(2*pi*dst/ysize)*exp(2*Z(nz)/zsize) & + tracers(1)%values(nz,n)=tracers(1)%values(nz,n)-0.1*sin(2*pi*dst/ysize)*exp(2*Z(nz)/zsize) & *(sin(8*pi*coord_nod2D(1,n)*r_earth/xsize)+ & 0.5*sin(3*pi*coord_nod2D(1,n)*r_earth/xsize)) end do @@ -301,7 +309,7 @@ subroutine initial_state_soufflet(mesh) ! ======= ! Compute geostrophically balanced flow ! ======= - write(*,*) mype, 'T', maxval(tr_arr(:,:,1)), minval(tr_arr(:,:,1)) + write(*,*) mype, 'T', maxval(tracers(1)%values), minval(tracers(1)%values) ! Redefine Coriolis (to agree with the Soufflet paper) DO n=1,myDim_elem2D elnodes=elem2D_nodes(:,n) @@ -327,18 +335,7 @@ subroutine initial_state_soufflet(mesh) call exchange_elem(UV) allocate(Uclim(nl-1,myDim_elem2D+eDim_elem2D)) - Uclim=UV(1,:,:) - -!!PS tr_arr(:,:,1) = 16.0_WP -!!PS tr_arr(:,:,2) = 35.0_WP -!!PS Ssurf = tr_arr(1,:,2) -!!PS Tsurf = tr_arr(1,:,1) -!!PS Tclim = tr_arr(:,:,1) - -!!PS UV = 0.0_WP -!!PS UV(1,:,:) = 0.01_WP -!!PS Uclim = UV(1,:,:) - + Uclim=UV(1,:,:) write(*,*) mype, 'Vel', maxval(UV(1,:,:)), minval(UV(1,:,:)) END subroutine initial_state_soufflet ! =============================================================================== diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 9e0ad0eab..f7898a162 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,19 +1,32 @@ module write_step_info_interface interface - subroutine write_step_info(istep,outfreq, mesh) + subroutine write_step_info(istep,outfreq,tracers,mesh) use MOD_MESH - integer :: istep,outfreq - type(t_mesh), intent(in) , target :: mesh + use MOD_TRACER + integer :: istep,outfreq + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + end subroutine + end interface +end module +module check_blowup_interface + interface + subroutine check_blowup(istep, tracers, mesh) + use MOD_MESH + use MOD_TRACER + integer :: istep + type(t_tracer), intent(in), target :: tracers(:) + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module - ! ! !=============================================================================== -subroutine write_step_info(istep,outfreq, mesh) +subroutine write_step_info(istep, outfreq, tracers, mesh) use g_config, only: dt, use_ice use MOD_MESH + use MOD_TRACER use o_PARAM use g_PARSUP use o_ARRAYS @@ -32,7 +45,8 @@ subroutine write_step_info(istep,outfreq, mesh) max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av real(kind=WP) :: int_deta , int_dhbar real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) #include "associate_mesh.h" if (mod(istep,outfreq)==0) then @@ -63,8 +77,8 @@ subroutine write_step_info(istep,outfreq, mesh) loc_dhbar = loc_dhbar + areasvol(ulevels_nod2D(n), n)*(hbar(n)-hbar_old(n)) loc_wflux = loc_wflux + areasvol(ulevels_nod2D(n), n)*water_flux(n) !!PS loc_hflux = loc_hflux + area(1, n)*heat_flux(n) -!!PS loc_temp = loc_temp + area(1, n)*sum(tr_arr(:,n,1))/(nlevels_nod2D(n)-1) -!!PS loc_salt = loc_salt + area(1, n)*sum(tr_arr(:,n,2))/(nlevels_nod2D(n)-1) +!!PS loc_temp = loc_temp + area(1, n)*sum(tracers(1)%values(:, n))/(nlevels_nod2D(n)-1) +!!PS loc_salt = loc_salt + area(1, n)*sum(tracers(2)%values(:, n))/(nlevels_nod2D(n)-1) end do !_______________________________________________________________________ @@ -102,9 +116,9 @@ subroutine write_step_info(istep,outfreq, mesh) call MPI_AllREDUCE(loc , min_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc = minval(heat_flux(1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(tr_arr(:,1:myDim_nod2D,1),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) + loc = minval(tracers(1)%values(:,1:myDim_nod2D),MASK=(tracers(2)%values(:,1:myDim_nod2D)/=0.0)) call MPI_AllREDUCE(loc , min_temp , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(tr_arr(:,1:myDim_nod2D,2),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) + loc = minval(tracers(2)%values(:,1:myDim_nod2D),MASK=(tracers(2)%values(:,1:myDim_nod2D)/=0.0)) call MPI_AllREDUCE(loc , min_salt , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc = minval(Wvel(1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) @@ -134,9 +148,9 @@ subroutine write_step_info(istep,outfreq, mesh) call MPI_AllREDUCE(loc , max_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc = maxval(heat_flux(1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(tr_arr(:,1:myDim_nod2D,1),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) + loc = maxval(tracers(1)%values(:,1:myDim_nod2D),MASK=(tracers(2)%values(:,1:myDim_nod2D)/=0.0)) call MPI_AllREDUCE(loc , max_temp , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(tr_arr(:,1:myDim_nod2D,2),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) + loc = maxval(tracers(2)%values(:,1:myDim_nod2D),MASK=(tracers(2)%values(:,1:myDim_nod2D)/=0.0)) call MPI_AllREDUCE(loc , max_salt , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc = maxval(Wvel(1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) @@ -217,9 +231,10 @@ end subroutine write_step_info ! ! !=============================================================================== -subroutine check_blowup(istep, mesh) +subroutine check_blowup(istep, tracers, mesh) use g_config, only: logfile_outfreq, which_ALE use MOD_MESH + use MOD_TRACER use o_PARAM use g_PARSUP use o_ARRAYS @@ -231,9 +246,10 @@ subroutine check_blowup(istep, mesh) use write_step_info_interface implicit none - integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 - integer :: el, elidx - type(t_mesh), intent(in), target :: mesh + integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 + integer :: el, elidx + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) #include "associate_mesh.h" !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then @@ -361,8 +377,8 @@ subroutine check_blowup(istep, mesh) do nz=1,nlevels_nod2D(n)-1 !_______________________________________________________________ ! check temp - if ( (tr_arr(nz, n,1) /= tr_arr(nz, n,1)) .or. & - tr_arr(nz, n,1) < -5.0 .or. tr_arr(nz, n,1)>60) then + if ( (tracers(1)%values(nz, n) /= tracers(1)%values(nz, n)) .or. & + tracers(1)%values(nz, n) < -5.0 .or. tracers(1)%values(nz, n)>60) then found_blowup_loc=1 write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep write(*,*) ' --STOP--> found temperture becomes NaN or <-5.0, >60' @@ -374,10 +390,10 @@ subroutine check_blowup(istep, mesh) write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad write(*,*) 'z=', Z_n(nz) - write(*,*) 'temp(nz, n) = ',tr_arr(nz, n,1) - write(*,*) 'temp(: , n) = ',tr_arr(:, n,1) - write(*,*) 'temp_old(nz,n)= ',tr_arr_old(nz, n,1) - write(*,*) 'temp_old(: ,n)= ',tr_arr_old(:, n,1) + write(*,*) 'temp(nz, n) = ',tracers(1)%values(nz, n) + write(*,*) 'temp(: , n) = ',tracers(1)%values(:, n) + write(*,*) 'temp_old(nz,n)= ',tracers(1)%valuesAB(nz, n) + write(*,*) 'temp_old(: ,n)= ',tracers(1)%valuesAB(:, n) write(*,*) write(*,*) 'hflux = ',heat_flux(n) write(*,*) 'wflux = ',water_flux(n) @@ -412,12 +428,12 @@ subroutine check_blowup(istep, mesh) ! enddo write(*,*) - endif ! --> if ( (tr_arr(nz, n,1) /= tr_arr(nz, n,1)) .or. & ... + endif ! --> if ( (tracers(1)%values(nz, n) /= tracers(1)%values(nz, n)) .or. & ... !_______________________________________________________________ ! check salt - if ( (tr_arr(nz, n,2) /= tr_arr(nz, n,2)) .or. & - tr_arr(nz, n,2) < 0 .or. tr_arr(nz, n,2)>50 ) then + if ( (tracers(2)%values(nz, n) /= tracers(2)%values(nz, n)) .or. & + tracers(2)%values(nz, n) < 0 .or. tracers(2)%values(nz, n)>50 ) then found_blowup_loc=1 write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep write(*,*) ' --STOP--> found salinity becomes NaN or <0, >50' @@ -428,11 +444,11 @@ subroutine check_blowup(istep, mesh) write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad write(*,*) 'z=', Z_n(nz) - write(*,*) 'salt(nz, n) = ',tr_arr(nz, n,2) - write(*,*) 'salt(: , n) = ',tr_arr(:, n,2) + write(*,*) 'salt(nz, n) = ',tracers(2)%values(nz, n) + write(*,*) 'salt(: , n) = ',tracers(2)%values(:, n) write(*,*) - write(*,*) 'temp(nz, n) = ',tr_arr(nz, n,1) - write(*,*) 'temp(: , n) = ',tr_arr(:, n,1) + write(*,*) 'temp(nz, n) = ',tracers(1)%values(nz, n) + write(*,*) 'temp(: , n) = ',tracers(1)%values(:, n) write(*,*) write(*,*) 'hflux = ',heat_flux(n) write(*,*) @@ -466,7 +482,7 @@ subroutine check_blowup(istep, mesh) write(*,*) write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad write(*,*) - endif ! --> if ( (tr_arr(nz, n,2) /= tr_arr(nz, n,2)) .or. & ... + endif ! --> if ( (tracers(2)%values(nz, n) /= tracers(2)%values(nz, n)) .or. & ... end do ! --> do nz=1,nlevels_nod2D(n)-1 end do ! --> do n=1, myDim_nod2d ! ! end if @@ -477,7 +493,7 @@ subroutine check_blowup(istep, mesh) ! moment only over CPU mype==0 call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) if (found_blowup==1) then - call write_step_info(istep,1,mesh) + call write_step_info(istep,1,tracers,mesh) if (mype==0) then call sleep(1) write(*,*) @@ -497,7 +513,7 @@ subroutine check_blowup(istep, mesh) write(*,*) ' _____.,-#%&$@%#&#~,._____' write(*,*) end if - call blowup(istep, mesh) + call blowup(istep, tracers, mesh) if (mype==0) write(*,*) ' --> finished writing blow up file' call par_ex endif From 36c1948553d68f20c87b29a7655caad181d4ac01 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 7 Sep 2021 12:15:42 +0200 Subject: [PATCH 359/909] a never ending story with missing interfaces :) --- src/ice_setup_step.F90 | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index c6c8885aa..981165c80 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -1,3 +1,33 @@ +module ice_array_setup_interface + interface + subroutine ice_array_setup(mesh) + use mod_mesh + use mod_tracer + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface +end module + +module ice_initial_state_interface + interface + subroutine ice_initial_state(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + end subroutine + end interface +end module +module ice_setup_interface + interface + subroutine ice_setup(tracers, mesh) + use mod_mesh + use mod_tracer + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers(:) + end subroutine + end interface +end module ! !_______________________________________________________________________________ ! ice initialization + array allocation + time stepping From 1e0fcb3f21733fe965bb48faa5922d75caa08df6 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 7 Sep 2021 15:13:02 +0200 Subject: [PATCH 360/909] There was a mistake in Soufflet channel stuff (it used to be restored twice per timestep and it led to a bug after refactoring) --- src/ice_setup_step.F90 | 1 - src/oce_ale_tracer.F90 | 5 ++--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 981165c80..baae70e8e 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -316,7 +316,6 @@ subroutine ice_initial_state(tracers, mesh) real(kind=WP), external :: TFrez ! Sea water freeze temperature. #include "associate_mesh.h" - write(*,*) tracers(1)%ID, tracers(2)%ID m_ice =0._WP a_ice =0._WP u_ice =0._WP diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index b075f2f94..872d71fcc 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -142,14 +142,13 @@ subroutine solve_tracers_ale(tracers, mesh) ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' - if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) then + if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then call relax_zonal_temp(tracers(tr_num), mesh) else call relax_to_clim(tracers(tr_num), mesh) end if call exchange_nod(tracers(tr_num)%values(:,:)) end do - !___________________________________________________________________________ do tr_num=1, ptracers_restore_total tracers(ptracers_restore(tr_num)%locid)%values(:,ptracers_restore(tr_num)%ind2)=1.0_WP @@ -290,13 +289,13 @@ subroutine diff_tracers_ale(tracer, mesh) !tr_arr(1:nzmax,n,tr_num)=(hnode(1:nzmax,n)*tr_arr(1:nzmax,n,tr_num)+ & ! del_ttf(1:nzmax,n))/hnode_new(1:nzmax,n) end do + !___________________________________________________________________________ if (i_vert_diff) then ! do vertical diffusion: implicite call diff_ver_part_impl_ale(tracer, mesh) end if - !We DO not set del_ttf to zero because it will not be used in this timestep anymore !init_tracers will set it to zero for the next timestep !init_tracers will set it to zero for the next timestep From 0181d48ec56238f0ab9190f971bbe512725dcfd4 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 8 Sep 2021 09:21:28 +0200 Subject: [PATCH 361/909] add reading depth on element to oce_mesh.F90 depending on namelist.config flag use_depthonelem and where final bottom depth depending on zlevels and partial cells is computed in oce_ale.F90 --- src/oce_ale.F90 | 30 +++-- src/oce_mesh.F90 | 313 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 247 insertions(+), 96 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index d231cbf61..437c09daf 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -200,7 +200,7 @@ subroutine init_bottom_elem_thickness(mesh) use MOD_MESH use g_PARSUP use o_ARRAYS - use g_config,only: use_partial_cell, partial_cell_thresh + use g_config,only: use_partial_cell, partial_cell_thresh, use_depthonelem use g_comm_auto use g_support implicit none @@ -217,10 +217,15 @@ subroutine init_bottom_elem_thickness(mesh) if(use_partial_cell) then !Adjust the thickness of elemental bottom cells do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - ! elemental topographic depth - dd=sum(depth(elnodes))/3.0_WP + !___________________________________________________________________ + if (use_depthonelem) then + dd=depth(elem) + else + elnodes=elem2D_nodes(:,elem) + ! elemental topographic depth + dd=sum(depth(elnodes))/3.0_WP + end if ! number of full depth levels at elem nle=nlevels(elem) @@ -424,7 +429,7 @@ subroutine init_surface_elem_depth(mesh) use MOD_MESH use g_PARSUP use o_ARRAYS - use g_config,only: use_cavity, use_cavity_partial_cell, cavity_partial_cell_thresh + use g_config,only: use_cavity, use_cavity_partial_cell, cavity_partial_cell_thresh, use_cavityonelem use g_comm_auto use g_support implicit none @@ -447,15 +452,20 @@ subroutine init_surface_elem_depth(mesh) ule=ulevels(elem) if (ule==1) cycle - !___________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - !___________________________________________________________________ ! elemental cavity depth if (use_cavity_partial_cell) then - dd=sum(cavity_depth(elnodes))/3.0_WP + + !_______________________________________________________________ + if (use_cavityonelem) then + dd=cavity_depth(elem) + else + elnodes=elem2D_nodes(:,elem) + ! elemental cavity depth + dd=sum(cavity_depth(elnodes))/3.0_WP + end if - !___________________________________________________________________ + !_______________________________________________________________ ! Only apply Surface Partial Cells when the initial full cell surface ! layer thickness is above the treshhold cavity_partial_cell_thresh if (zbar(ule)-zbar(ule+1)<=cavity_partial_cell_thresh) then diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 7d37a8372..84dc3a0cb 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -478,7 +478,6 @@ SUBROUTINE read_mesh(mesh) ! read depth data !============================== ! 0 proc reads header of aux3d.out and broadcasts it between procs - allocate(mesh%depth(myDim_nod2D+eDim_nod2D)) if (mype==0) then !open the file for reading on 0 proc file_name=trim(meshpath)//'aux3d.out' open(fileID, file=file_name) @@ -499,46 +498,113 @@ SUBROUTINE read_mesh(mesh) mesh%Z=0.5_WP*mesh%Z ! 0 proc reads the data in chunks and distributes it between other procs - mesh_check=0 - do nchunk=0, (mesh%nod2D-1)/chunk_size - mapping(1:chunk_size)=0 - do n=1, myDim_nod2D+eDim_nod2D - ipos=(myList_nod2D(n)-1)/chunk_size - if (ipos==nchunk) then - iofs=myList_nod2D(n)-nchunk*chunk_size - mapping(iofs)=n - end if - end do + !______________________________________________________________________________ + ! bottom topography is defined on elements + if (use_depthonelem) then + !___________________________________________________________________________ + ! allocate mesh%depth at elements + allocate(mesh%depth(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) + + !___________________________________________________________________________ + mesh_check=0 + do nchunk=0, (mesh%elem2D-1)/chunk_size + mapping(1:chunk_size)=0 + do n=1, myDim_elem2D+eDim_elem2D+eXDim_elem2D + ipos=(myList_elem2D(n)-1)/chunk_size + if (ipos==nchunk) then + iofs=myList_elem2D(n)-nchunk*chunk_size + mapping(iofs)=n + end if + end do + + k=min(chunk_size, mesh%elem2D-nchunk*chunk_size) + if (mype==0) then + do n=1, k + read(fileID,*) rbuff(n,1) + end do + ! check here if aux3d.out contains depth levels (FESOM2.0) or 3d indices + ! (FESOM1.4) like that check if the proper mesh is loaded. 11000.0 is here + ! the maximum depth on earth in marianen trench + if ( flag_wrongaux3d==0 .and. any(abs(rbuff(1:k,1))>11000.0_WP) ) flag_wrongaux3d=1 + end if + call MPI_BCast(rbuff(1:k,1), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - k=min(chunk_size, mesh%nod2D-nchunk*chunk_size) - if (mype==0) then - do n=1, k - read(fileID,*) rbuff(n,1) - end do - ! check here if aux3d.out contains depth levels (FESOM2.0) or 3d indices - ! (FESOM1.4) like that check if the proper mesh is loaded. 11000.0 is here - ! the maximum depth on earth in marianen trench - if ( flag_wrongaux3d==0 .and. any(abs(rbuff(1:k,1))>11000.0_WP) ) flag_wrongaux3d=1 + do n=1, k + x=rbuff(n,1) + if (x>0) x=-x !deps must be negative! + if (x>mesh%zbar(thers_zbar_lev)) x=mesh%zbar(thers_zbar_lev) !threshold for depth + if (mapping(n)>0) then + mesh_check=mesh_check+1 + mesh%depth(mapping(n))=x + end if + end do ! --> do n=1, k + end do ! --> do nchunk=0, (mesh%elem2D-1)/chunk_size + + !___________________________________________________________________________ + if (mype==0) close(fileID) + + !___________________________________________________________________________ + if (mesh_check/=myDim_elem2D+eDim_elem2D+eXDim_elem2D) then + write(*,*) 'ERROR while reading aux3d.out on mype=', mype + write(*,*) mesh_check, ' values have been read in according to partitioning' + write(*,*) 'it does not equal to myDim_elem2D+eDim_elem2D+eXDim_elem2D = ', myDim_elem2D+eDim_elem2D+eXDim_elem2D end if - call MPI_BCast(rbuff(1:k,1), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + + !______________________________________________________________________________ + ! bottom topography is defined on nodes + else + !___________________________________________________________________________ + ! allocate mesh%depth at nodes + allocate(mesh%depth(myDim_nod2D+eDim_nod2D)) + + !___________________________________________________________________________ + ! fill mesh%depth from file with neighborhood information + mesh_check=0 + do nchunk=0, (mesh%nod2D-1)/chunk_size + mapping(1:chunk_size)=0 + do n=1, myDim_nod2D+eDim_nod2D + ipos=(myList_nod2D(n)-1)/chunk_size + if (ipos==nchunk) then + iofs=myList_nod2D(n)-nchunk*chunk_size + mapping(iofs)=n + end if + end do - do n=1, k - x=rbuff(n,1) - if (x>0) x=-x !deps must be negative! - if (x>mesh%zbar(5)) x=mesh%zbar(5) !threshold for depth - if (mapping(n)>0) then - mesh_check=mesh_check+1 - mesh%depth(mapping(n))=x + k=min(chunk_size, mesh%nod2D-nchunk*chunk_size) + if (mype==0) then + do n=1, k + read(fileID,*) rbuff(n,1) + end do + ! check here if aux3d.out contains depth levels (FESOM2.0) or 3d indices + ! (FESOM1.4) like that check if the proper mesh is loaded. 11000.0 is here + ! the maximum depth on earth in marianen trench + if ( flag_wrongaux3d==0 .and. any(abs(rbuff(1:k,1))>11000.0_WP) ) flag_wrongaux3d=1 end if - end do - end do + call MPI_BCast(rbuff(1:k,1), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - if (mype==0) close(fileID) - if (mesh_check/=myDim_nod2D+eDim_nod2D) then - write(*,*) 'ERROR while reading aux3d.out on mype=', mype - write(*,*) mesh_check, ' values have been read in according to partitioning' - write(*,*) 'it does not equal to myDim_nod2D+eDim_nod2D = ', myDim_nod2D+eDim_nod2D - end if + do n=1, k + x=rbuff(n,1) + if (x>0) x=-x !deps must be negative! + if (x>mesh%zbar(thers_zbar_lev)) x=mesh%zbar(thers_zbar_lev) !threshold for depth + if (mapping(n)>0) then + mesh_check=mesh_check+1 + mesh%depth(mapping(n))=x + end if + end do ! --> do n=1, k + end do ! --> do nchunk=0, (mesh%nod2D-1)/chunk_size + + !___________________________________________________________________________ + if (mype==0) close(fileID) + + !___________________________________________________________________________ + if (mesh_check/=myDim_nod2D+eDim_nod2D) then + write(*,*) 'ERROR while reading aux3d.out on mype=', mype + write(*,*) mesh_check, ' values have been read in according to partitioning' + write(*,*) 'it does not equal to myDim_nod2D+eDim_nod2D = ', myDim_nod2D+eDim_nod2D + end if + end if ! --> if (use_depthonelem) then + + !_______________________________________________________________________________ ! check if the mesh structure of FESOM2.0 and of FESOM1.4 is loaded if ((mype==0) .and. (flag_wrongaux3d==1)) then @@ -913,6 +979,7 @@ subroutine find_levels_cavity(mesh) integer :: nchunk, chunk_size, ipos, iofs, mesh_check integer, allocatable, dimension(:) :: mapping integer, allocatable, dimension(:) :: ibuff + real(kind=WP), allocatable, dimension(:) :: rbuff real(kind=WP) :: t0, t1 logical :: file_exist=.False. integer :: elem, elnodes(3), ule, uln(3), node, j, nz @@ -925,7 +992,6 @@ subroutine find_levels_cavity(mesh) ! allocate arrays, reset pointers !!PS allocate(mesh%cavity_flag_e(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) !!PS allocate(mesh%cavity_flag_n(myDim_nod2D+eDim_nod2D)) - allocate(mesh%cavity_depth(myDim_nod2D+eDim_nod2D)) !___________________________________________________________________________ ! mesh related files will be read in chunks of chunk_size @@ -1108,6 +1174,8 @@ subroutine find_levels_cavity(mesh) print *, achar(27)//'[0m' end if + deallocate(ibuff) + !___________________________________________________________________________ ! Part III: computing cavity flag at nodes and elements !!PS mesh%cavity_flag_e = 0 @@ -1207,69 +1275,142 @@ subroutine find_levels_cavity(mesh) end if end if - ! 0 proc reads the data in chunks and distributes it between other procs - mesh_check=0 - do nchunk=0, (mesh%nod2D-1)/chunk_size + !___________________________________________________________________________ + ! cavity topography is defined on elements + if (use_cavityonelem) then !_______________________________________________________________________ - !create the mapping for the current chunk - mapping(1:chunk_size)=0 - do n=1, myDim_nod2D+eDim_nod2D - ! myList_nod2D(n) contains global vertice index of the local - ! vertice on that CPU - ! ipos is integer, (myList_nod2D(n)-1)/chunk_size always rounds - ! off to integer values - ! --> ipos is an index to which chunk a global vertice on a local CPU - ! belongs - ipos=(myList_nod2D(n)-1)/chunk_size + ! allocate mesh%depth at elements + allocate(mesh%cavity_depth(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) + + !_______________________________________________________________________ + ! fill mesh%cavity_depth from file with neighborhood information + mesh_check=0 + do nchunk=0, (mesh%elem2D-1)/chunk_size + mapping(1:chunk_size)=0 + do n=1, myDim_elem2D+eDim_elem2D+eXDim_elem2D + ipos=(myList_elem2D(n)-1)/chunk_size + if (ipos==nchunk) then + iofs=myList_elem2D(n)-nchunk*chunk_size + mapping(iofs)=n + end if + end do - ! if global vertice chunk index (ipos) lies within the actual chunk - if (ipos==nchunk) then - iofs=myList_nod2D(n)-nchunk*chunk_size - ! connect chunk reduced (iofs) global vertice index with local - ! vertice index n --> mapping(iofs)=n - mapping(iofs)=n + !___________________________________________________________________ + ! read the chunk piece into the buffer --> done only by one + ! CPU (mype==0) + k=min(chunk_size, mesh%elem2D-nchunk*chunk_size) + if (mype==0) then + do n=1, k + read(fileID,*) rbuff(n) + end do end if - end do + + !___________________________________________________________________ + ! broadcast chunk buffer to all other CPUs (k...size of buffer) + call MPI_BCast(rbuff(1:k), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + + !___________________________________________________________________ + ! fill the local arrays + do n=1, k + if (mapping(n)>0) then + mesh_check=mesh_check+1 + mesh%cavity_depth(mapping(n))=rbuff(n) + end if + end do + end do ! --> do nchunk=0, (mesh%elem2D-1)/chunk_size !_______________________________________________________________________ - ! read the chunk piece into the buffer --> done only by one CPU (mype==0) - ! k ... is actual chunk size, considers also possible change in chunk size - ! at the end i.e nod2d=130000, nchunk_0 = 100000, nchunk_1=30000 - k=min(chunk_size, mesh%nod2D-nchunk*chunk_size) - if (mype==0) then - do n=1, k - read(fileID,*) ibuff(n) - end do + if (mype==0) close(fileID) + + !_______________________________________________________________________ + if (mesh_check/=myDim_elem2D+eDim_elem2D+eXDim_elem2D) then + write(*,*) + print *, achar(27)//'[33m' + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: while reading cavity_depth.out on mype=', mype + write(*,*) ' ',mesh_check, ' values have been read in according to partitioning' + write(*,*) ' it does not equal to myDim_elem2D+eDim_elem2D+eXDim_elem2D = ', myDim_elem2D+eDim_elem2D+eXDim_elem2D + write(*,*) '____________________________________________________________________' + print *, achar(27)//'[0m' end if + !___________________________________________________________________________ + ! cavity topography is defined on nodes + else !_______________________________________________________________________ - ! broadcast chunk buffer to all other CPUs (k...size of buffer) - call MPI_BCast(ibuff(1:k), k, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + ! allocate mesh%depth at nodes + allocate(mesh%cavity_depth(myDim_nod2D+eDim_nod2D)) !_______________________________________________________________________ - ! fill the local arrays - do n=1, k - if (mapping(n)>0) then - mesh_check=mesh_check+1 - mesh%cavity_depth(mapping(n))=ibuff(n) + ! fill mesh%cavity_depth from file with neighborhood information + ! 0 proc reads the data in chunks and distributes it between other procs + mesh_check=0 + do nchunk=0, (mesh%nod2D-1)/chunk_size + !___________________________________________________________________ + !create the mapping for the current chunk + mapping(1:chunk_size)=0 + do n=1, myDim_nod2D+eDim_nod2D + ! myList_nod2D(n) contains global vertice index of the local + ! vertice on that CPU + ! ipos is integer, (myList_nod2D(n)-1)/chunk_size always rounds + ! off to integer values + ! --> ipos is an index to which chunk a global vertice on a local CPU + ! belongs + ipos=(myList_nod2D(n)-1)/chunk_size + + ! if global vertice chunk index (ipos) lies within the actual chunk + if (ipos==nchunk) then + iofs=myList_nod2D(n)-nchunk*chunk_size + ! connect chunk reduced (iofs) global vertice index with local + ! vertice index n --> mapping(iofs)=n + mapping(iofs)=n + end if + end do + + !_______________________________________________________________________ + ! read the chunk piece into the buffer --> done only by one CPU (mype==0) + ! k ... is actual chunk size, considers also possible change in chunk size + ! at the end i.e nod2d=130000, nchunk_0 = 100000, nchunk_1=30000 + k=min(chunk_size, mesh%nod2D-nchunk*chunk_size) + if (mype==0) then + do n=1, k + read(fileID,*) rbuff(n) + end do end if - end do - end do ! --> do nchunk=0, (mesh%nod2D-1)/chunk_size - if (mype==0) close(fileID) - if (mesh_check/=myDim_nod2D+eDim_nod2D) then - write(*,*) - print *, achar(27)//'[33m' - write(*,*) '____________________________________________________________________' - write(*,*) ' ERROR: while reading cavity_depth.out on mype=', mype - write(*,*) ' ',mesh_check, ' values have been read in according to partitioning' - write(*,*) ' it does not equal to myDim_nod2D+eDim_nod2D = ', myDim_nod2D+eDim_nod2D - write(*,*) '____________________________________________________________________' - print *, achar(27)//'[0m' - end if + + !___________________________________________________________________ + ! broadcast chunk buffer to all other CPUs (k...size of buffer) + call MPI_BCast(rbuff(1:k), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + + !___________________________________________________________________ + ! fill the local arrays + do n=1, k + if (mapping(n)>0) then + mesh_check=mesh_check+1 + mesh%cavity_depth(mapping(n))=rbuff(n) + end if + end do + end do ! --> do nchunk=0, (mesh%nod2D-1)/chunk_size + + !_______________________________________________________________________ + if (mype==0) close(fileID) + + !_______________________________________________________________________ + if (mesh_check/=myDim_nod2D+eDim_nod2D) then + write(*,*) + print *, achar(27)//'[33m' + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: while reading cavity_depth.out on mype=', mype + write(*,*) ' ',mesh_check, ' values have been read in according to partitioning' + write(*,*) ' it does not equal to myDim_nod2D+eDim_nod2D = ', myDim_nod2D+eDim_nod2D + write(*,*) '____________________________________________________________________' + print *, achar(27)//'[0m' + end if + end if ! --> if (use_cavityonelem) then !___________________________________________________________________________ ! deallocate mapping and buffer array - deallocate(ibuff) + deallocate(rbuff) deallocate(mapping) !___________________________________________________________________________ From 9625297e72fb75b954d305ad5155b38c42ce9917 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 8 Sep 2021 14:52:16 +0200 Subject: [PATCH 362/909] working arrays related to the tracers have been moved to MOD_TRACER. Hence there was no need in O_MESH module anymore. --- src/MOD_TRACER.F90 | 20 ++++++++++++++++ src/fvom_init.F90 | 4 ---- src/gen_bulk_formulae.F90 | 1 - src/gen_halo_exchange.F90 | 41 -------------------------------- src/gen_modules_partitioning.F90 | 1 - src/gen_support.F90 | 1 - src/ice_fct.F90 | 15 +++++------- src/ice_thermo_cpl.F90 | 2 -- src/io_mesh_info.F90 | 1 - src/oce_adv_tra_driver.F90 | 3 +-- src/oce_adv_tra_fct.F90 | 4 ++-- src/oce_adv_tra_hor.F90 | 5 ++-- src/oce_adv_tra_ver.F90 | 1 - src/oce_ale.F90 | 5 +--- src/oce_ale_tracer.F90 | 3 ++- src/oce_dyn.F90 | 1 - src/oce_modules.F90 | 35 --------------------------- src/oce_muscl_adv.F90 | 7 +++--- src/oce_setup_step.F90 | 26 ++++++++++---------- 19 files changed, 50 insertions(+), 126 deletions(-) diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index d26b835f8..67146ccfc 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -1,6 +1,7 @@ !========================================================== MODULE MOD_TRACER USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV IMPLICIT NONE SAVE @@ -14,6 +15,25 @@ MODULE MOD_TRACER real(kind=WP) :: tra_adv_pv = 1. ! a parameter to be used in horizontal advection (for QR4C it is the fraction of fourth-order contribution in the solution) integer :: ID END TYPE T_TRACER + +!auxuary arrays to work with tracers: +real(kind=WP), allocatable :: del_ttf(:,:) +real(kind=WP), allocatable :: del_ttf_advhoriz(:,:),del_ttf_advvert(:,:) +!_______________________________________________________________________________ +! in case ldiag_DVD=.true. --> calculate discrete variance decay (DVD) +real(kind=WP), allocatable :: tr_dvd_horiz(:,:,:), tr_dvd_vert(:,:,:) +! The fct part +real(kind=WP),allocatable,dimension(:,:) :: fct_LO ! Low-order solution +real(kind=WP),allocatable,dimension(:,:) :: adv_flux_hor ! Antidif. horiz. contrib. from edges / backup for iterafive fct scheme +real(kind=WP),allocatable,dimension(:,:) :: adv_flux_ver ! Antidif. vert. fluxes from nodes / backup for iterafive fct scheme + +real(kind=WP),allocatable,dimension(:,:) :: fct_ttf_max,fct_ttf_min +real(kind=WP),allocatable,dimension(:,:) :: fct_plus,fct_minus +! MUSCL type reconstruction +integer,allocatable,dimension(:) :: nn_num, nboundary_lay +integer,allocatable,dimension(:,:) :: nn_pos +integer,allocatable,dimension(:,:) :: edge_up_dn_tri +real(kind=WP),allocatable,dimension(:,:,:) :: edge_up_dn_grad end module MOD_TRACER !========================================================== diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index c5396bdf7..2ff677fc3 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -14,7 +14,6 @@ program MAIN use o_PARAM use MOD_MESH - use o_MESH use g_PARSUP use g_CONFIG use g_rotate_grid @@ -314,7 +313,6 @@ END SUBROUTINE test_tri_ini !> Finds edges. Creates 3 files: edgenum.out, edges.out, edge_tri.out SUBROUTINE find_edges_ini(mesh) USE MOD_MESH -USE o_MESH USE o_PARAM USE g_PARSUP USE g_CONFIG @@ -357,7 +355,6 @@ end subroutine elem_center DO q=1,q1 ne_num(elnodes(q))=ne_num(elnodes(q))+1 if (ne_num(elnodes(q)) > MAX_ADJACENT ) then - print *,'Parameter in o_MESH from ocean_modules.F90, too small.' print *,'Recompile with larger value for MAX_ADJACENT.' stop else @@ -1386,7 +1383,6 @@ subroutine stiff_mat_ini(mesh) num_ne(nod(j)) = num_ne(nod(j)) + 1 if (max(num_ne(nod(i)), num_ne(nod(j))) > MAX_ADJACENT ) then - print *,'Parameter in o_MESH from ocean_modules.F90, too small.' print *,'Recompile with larger value for MAX_ADJACENT.' stop else diff --git a/src/gen_bulk_formulae.F90 b/src/gen_bulk_formulae.F90 index 2704fd974..f87dc5748 100755 --- a/src/gen_bulk_formulae.F90 +++ b/src/gen_bulk_formulae.F90 @@ -325,7 +325,6 @@ subroutine cal_wind_drag_coeff ! Reviewed by ?? !-------------------------------------------------- - use o_mesh use i_arrays use g_forcing_arrays use g_parsup diff --git a/src/gen_halo_exchange.F90 b/src/gen_halo_exchange.F90 index af1d29b84..e15344cae 100755 --- a/src/gen_halo_exchange.F90 +++ b/src/gen_halo_exchange.F90 @@ -71,7 +71,6 @@ END SUBROUTINE exchange_nod2D_i !============================================================================= subroutine exchange_nod2D_i_begin(nod_array2D) - USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -127,7 +126,6 @@ END SUBROUTINE exchange_nod2D ! ======================================================================== subroutine exchange_nod2D_begin(nod_array2D) - USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -182,7 +180,6 @@ END SUBROUTINE exchange_nod2D_2fields ! ======================================================================== subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -246,7 +243,6 @@ END SUBROUTINE exchange_nod2D_3fields ! ======================================================================== subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -315,7 +311,6 @@ END SUBROUTINE exchange_nod3D ! ======================================================================== subroutine exchange_nod3D_begin(nod_array3D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -381,7 +376,6 @@ END SUBROUTINE exchange_nod3D_2fields ! ======================================================================== subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -444,7 +438,6 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D) END SUBROUTINE exchange_nod3D_2fields_begin ! ======================================================================== subroutine exchange_nod3D_n(nod_array3D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -460,7 +453,6 @@ END SUBROUTINE exchange_nod3D_n !================================================= subroutine exchange_nod3D_n_begin(nod_array3D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -548,7 +540,6 @@ END SUBROUTINE exchange_elem_end !nr Not used, no MPI datatype built (yet) ! !!$subroutine exchange_edge3D(edge_array3D) -!!$ use o_MESH !!$ use g_PARSUP !!$ implicit none !!$ @@ -618,7 +609,6 @@ END SUBROUTINE exchange_elem_end !========================================================================== !!$subroutine exchange_edge2D(edge_array2D) -!!$ use o_MESH !!$ use g_PARSUP !!$ implicit none !!$ @@ -660,7 +650,6 @@ subroutine exchange_elem3D(elem_array3D) END SUBROUTINE exchange_elem3D !=========================================== subroutine exchange_elem3D_begin(elem_array3D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -787,7 +776,6 @@ END SUBROUTINE exchange_elem3D_begin !============================================================================= subroutine exchange_elem3D_n(elem_array3D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -803,7 +791,6 @@ subroutine exchange_elem3D_n(elem_array3D) END SUBROUTINE exchange_elem3D_n !============================================================================= subroutine exchange_elem3D_n_begin(elem_array3D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -888,7 +875,6 @@ subroutine exchange_elem3D_n_begin(elem_array3D) END SUBROUTINE exchange_elem3D_n_begin !======================================================================== subroutine exchange_elem2D(elem_array2D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -905,7 +891,6 @@ subroutine exchange_elem2D(elem_array2D) END SUBROUTINE exchange_elem2D !======================================================================== subroutine exchange_elem2D_begin(elem_array2D) -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -972,7 +957,6 @@ END SUBROUTINE exchange_elem2D_begin ! ======================================================================== subroutine exchange_elem2D_i(elem_array2D) !Exchange with ALL(!) the neighbours -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -989,7 +973,6 @@ END SUBROUTINE exchange_elem2D_i !============================================================================= subroutine exchange_elem2D_i_begin(elem_array2D) !Exchange with ALL(!) the neighbours -USE o_MESH USE g_PARSUP IMPLICIT NONE @@ -1037,7 +1020,6 @@ END SUBROUTINE exchange_elem2D_i_begin subroutine broadcast_nod3D(arr3D, arr3Dglobal) ! Distribute the nodal information available on 0 PE to other PEs use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1102,7 +1084,6 @@ end subroutine broadcast_nod3D subroutine broadcast_nod2D(arr2D, arr2Dglobal) ! A 2D version of the previous routine use g_PARSUP -USE o_MESH IMPLICIT NONE real(real64) :: arr2D(:) @@ -1151,7 +1132,6 @@ end subroutine broadcast_nod2D subroutine broadcast_elem3D(arr3D, arr3Dglobal) ! Distribute the elemental information available on 0 PE to other PEs use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1217,7 +1197,6 @@ end subroutine broadcast_elem3D subroutine broadcast_elem2D(arr2D, arr2Dglobal) ! A 2D version of the previous routine use g_PARSUP -USE o_MESH IMPLICIT NONE integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) @@ -1272,7 +1251,6 @@ subroutine gather_nod3D(arr3D, arr3D_global) ! Use only with 3D arrays stored in (vertical, horizontal) way use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1335,7 +1313,6 @@ subroutine gather_real4_nod3D(arr3D, arr3D_global) ! Use only with 3D arrays stored in (vertical, horizontal) way use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1397,7 +1374,6 @@ subroutine gather_int2_nod3D(arr3D, arr3D_global) ! Use only with 3D arrays stored in (vertical, horizontal) way use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1455,7 +1431,6 @@ subroutine gather_nod2D(arr2D, arr2D_global) ! Make nodal information available to master PE use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1510,7 +1485,6 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global) ! Make nodal information available to master PE use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1566,7 +1540,6 @@ subroutine gather_int2_nod2D(arr2D, arr2D_global) ! Make nodal information available to master PE use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1624,7 +1597,6 @@ subroutine gather_elem3D(arr3D, arr3D_global) ! Use only with 3D arrays stored in (vertical, horizontal) way use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1692,7 +1664,6 @@ subroutine gather_real4_elem3D(arr3D, arr3D_global) ! Use only with 3D arrays stored in (vertical, horizontal) way use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1761,7 +1732,6 @@ subroutine gather_int2_elem3D(arr3D, arr3D_global) ! Use only with 3D arrays stored in (vertical, horizontal) way use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1827,7 +1797,6 @@ subroutine gather_elem2D(arr2D, arr2D_global) ! Make element information available to master PE use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1887,7 +1856,6 @@ subroutine gather_real4_elem2D(arr2D, arr2D_global) ! Make element information available to master PE use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -1947,7 +1915,6 @@ subroutine gather_int2_elem2D(arr2D, arr2D_global) ! Make element information available to master PE use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -2010,7 +1977,6 @@ subroutine gather_real8to4_nod3D(arr3D, arr3D_global) ! Use only with 3D arrays stored in (vertical, horizontal) way use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -2075,7 +2041,6 @@ subroutine gather_real8to4_nod2D(arr2D, arr2D_global) ! Make nodal information available to master PE use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -2133,7 +2098,6 @@ subroutine gather_real8to4_elem3D(arr3D, arr3D_global) ! Use only with 3D arrays stored in (vertical, horizontal) way use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -2195,7 +2159,6 @@ subroutine gather_real8to4_elem2D(arr2D, arr2D_global) ! Make element information available to master PE use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -2253,7 +2216,6 @@ end subroutine gather_real8to4_elem2D subroutine gather_elem2D_i(arr2D, arr2D_global) ! Make element information available to master PE use g_PARSUP - use o_MESH IMPLICIT NONE integer :: n @@ -2290,7 +2252,6 @@ subroutine gather_nod2D_i(arr2D, arr2D_global) ! Make nodal information available to master PE use g_PARSUP -USE o_MESH IMPLICIT NONE @@ -2343,7 +2304,6 @@ end subroutine gather_nod2D_i subroutine gather_edg2D(arr2D, arr2Dglobal) ! A 2D version of the previous routine use g_PARSUP -USE o_MESH IMPLICIT NONE real(real64) :: arr2D(:) @@ -2384,7 +2344,6 @@ end subroutine gather_edg2D subroutine gather_edg2D_i(arr2D, arr2Dglobal) ! A 2D version of the previous routine use g_PARSUP -USE o_MESH IMPLICIT NONE integer :: arr2D(:) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 770229964..a348faaaa 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -486,7 +486,6 @@ end subroutine set_par_support !=================================================================== subroutine init_gatherLists - use o_MESH implicit none integer :: n2D, e2D, sum_loc_elem2D diff --git a/src/gen_support.F90 b/src/gen_support.F90 index 114a4db92..ffddaa677 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -260,7 +260,6 @@ end subroutine smooth_elem3D !-------------------------------------------------------------------------------------------- ! subroutine integrate_nod_2D(data, int2D, mesh) - use o_MESH use g_PARSUP use g_comm_auto diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index fb91b370c..3de0e1881 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -181,10 +181,9 @@ subroutine ice_solve_low_order(mesh) ! We add diffusive contribution to the rhs. The diffusion operator ! is implemented as the difference between the consistent and lumped mass ! matrices acting on the field from the previous time step. The consistent - ! mass matrix on the lhs is replaced with the lumped one. - + ! mass matrix on the lhs is replaced with the lumped one. use MOD_MESH - use o_MESH + use MOD_TRACER use i_ARRAYS use i_PARAM use g_PARSUP @@ -237,9 +236,8 @@ end subroutine ice_solve_low_order ! !_______________________________________________________________________________ subroutine ice_solve_high_order(mesh) - use MOD_MESH - use O_MESH + use MOD_TRACER use i_ARRAYS use g_PARSUP use o_PARAM @@ -326,9 +324,8 @@ subroutine ice_fem_fct(tr_array_id, mesh) ! Int. J. Numer. Meth. Fluids, 7 (1987), 1093--1109) as described by Kuzmin and ! Turek. (kuzmin@math.uni-dortmund.de) ! - use MOD_MESH - use O_MESH + use MOD_TRACER use i_arrays use i_param use o_PARAM @@ -634,7 +631,7 @@ end subroutine ice_fem_fct SUBROUTINE ice_mass_matrix_fill(mesh) ! Used in ice_fct inherited from FESOM use MOD_MESH - use O_MESH + use MOD_TRACER use i_PARAM use i_ARRAYS use g_PARSUP @@ -803,7 +800,7 @@ end subroutine ice_TG_rhs_div !_______________________________________________________________________________ subroutine ice_update_for_div(mesh) use MOD_MESH - use O_MESH + use MOD_TRACER use i_Arrays use i_PARAM use g_PARSUP diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 8995e3b9e..87b77bde0 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -22,9 +22,7 @@ subroutine thermodynamics(mesh) #endif use g_parsup, only: myDim_nod2D, eDim_nod2D #ifdef use_cavity - use o_mesh, only: coord_nod2D, ulevels_nod2D #else - use o_mesh, only: coord_nod2D #endif !---- variables from ice_modules.F90 diff --git a/src/io_mesh_info.F90 b/src/io_mesh_info.F90 index 7378332cc..c1f74c08e 100644 --- a/src/io_mesh_info.F90 +++ b/src/io_mesh_info.F90 @@ -421,5 +421,4 @@ subroutine my_close(ncid) call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) if (status .ne. nf_noerr) call handle_err(status) end subroutine my_close - end module io_mesh_info diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 988b6a090..0a0acca5c 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -40,7 +40,7 @@ subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo !=============================================================================== subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v, opth, optv, mesh) use MOD_MESH - use O_MESH + use MOD_TRACER use o_ARRAYS use o_PARAM use g_PARSUP @@ -200,7 +200,6 @@ end subroutine do_oce_adv_tra !=============================================================================== subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo, ttf, lo) use MOD_MESH - use O_MESH use o_ARRAYS use o_PARAM use g_PARSUP diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 62db9a7d3..e5e2525f8 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -24,7 +24,7 @@ subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) !=============================================================================== subroutine oce_adv_tra_fct_init(mesh) use MOD_MESH - use O_MESH + use MOD_TRACER use o_ARRAYS use o_PARAM use g_PARSUP @@ -63,7 +63,7 @@ subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) ! HO ==High-order (3rd/4th order gradient reconstruction method) ! Adds limited fluxes to the LO solution use MOD_MESH - use O_MESH + use MOD_TRACER use o_ARRAYS use o_PARAM use g_PARSUP diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 883c489a5..3243e4013 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -56,7 +56,6 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !=============================================================================== subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) use MOD_MESH - use O_MESH use o_ARRAYS use o_PARAM use g_PARSUP @@ -214,7 +213,7 @@ end subroutine adv_tra_hor_upw1 !=============================================================================== subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero) use MOD_MESH - use O_MESH + use MOD_TRACER use o_ARRAYS use o_PARAM use g_PARSUP @@ -484,7 +483,7 @@ end subroutine adv_tra_hor_muscl !=============================================================================== subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero) use MOD_MESH - use O_MESH + use MOD_TRACER use o_ARRAYS use o_PARAM use g_PARSUP diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 0dffb12ab..7d971a85d 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -82,7 +82,6 @@ subroutine adv_tra_ver_cdiff(ttf, w, do_Xmoment, mesh, flux, init_zero) !=============================================================================== subroutine adv_tra_vert_impl(ttf, w, mesh) use MOD_MESH - use O_MESH use o_PARAM use o_ARRAYS use i_ARRAYS diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 0cb72704c..91ee25ab7 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -566,7 +566,6 @@ subroutine init_thickness_ale(mesh) use g_config,only: dt, which_ale use o_PARAM use MOD_MESH - use O_MESH use g_PARSUP use o_ARRAYS implicit none @@ -811,7 +810,6 @@ end subroutine init_thickness_ale subroutine update_thickness_ale(mesh) use o_PARAM use MOD_MESH - use O_MESH use g_PARSUP use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode @@ -1393,7 +1391,7 @@ subroutine update_stiff_mat_ale(mesh) use g_config,only: dt use o_PARAM use MOD_MESH - use O_MESH + use MOD_TRACER use g_PARSUP use o_ARRAYS ! @@ -1713,7 +1711,6 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use O_MESH use o_ARRAYS use o_PARAM use g_PARSUP diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 872d71fcc..cd6736468 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -840,6 +840,7 @@ subroutine diff_ver_part_redi_expl(mesh) use o_ARRAYS use g_PARSUP use MOD_MESH + use MOD_TRACER USE o_param use g_config use g_comm_auto @@ -914,6 +915,7 @@ subroutine diff_part_hor_redi(mesh) use o_ARRAYS use g_PARSUP use MOD_MESH + use MOD_TRACER use o_param use g_config IMPLICIT NONE @@ -1061,7 +1063,6 @@ SUBROUTINE diff_part_bh(ttf, mesh) use o_ARRAYS use g_PARSUP use MOD_MESH - use O_MESH use o_param use g_config use g_comm_auto diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 6bd7225e9..317041029 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -733,7 +733,6 @@ end subroutine visc_filt_bilapl ! Quadratic in velocity term can be introduced if needed. SUBROUTINE visc_filt_bidiff(mesh) USE MOD_MESH - USE o_MESH USE o_ARRAYS USE o_PARAM USE g_PARSUP diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 34d7ba1ce..0a898244b 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -189,33 +189,6 @@ MODULE o_PARAM use_kpp_nonlclflx END MODULE o_PARAM -!========================================================== - -!========================================================== -MODULE o_MESH -USE o_PARAM -USE, intrinsic :: ISO_FORTRAN_ENV -! All variables used to keep the mesh structure + -! auxiliary variables involved in implementation -! of open boundaries and advection schemes -! -! The fct part -real(kind=WP),allocatable,dimension(:,:) :: fct_LO ! Low-order solution -real(kind=WP),allocatable,dimension(:,:) :: adv_flux_hor ! Antidif. horiz. contrib. from edges / backup for iterafive fct scheme -real(kind=WP),allocatable,dimension(:,:) :: adv_flux_ver ! Antidif. vert. fluxes from nodes / backup for iterafive fct scheme - -real(kind=WP),allocatable,dimension(:,:) :: fct_ttf_max,fct_ttf_min -real(kind=WP),allocatable,dimension(:,:) :: fct_plus,fct_minus -! Quadratic reconstruction part -integer,allocatable,dimension(:) :: nn_num, nboundary_lay -real(kind=WP),allocatable,dimension(:,:,:) :: quad_int_mat, quad_int_coef -integer,allocatable,dimension(:,:) :: nn_pos -! MUSCL type reconstruction -integer,allocatable,dimension(:,:) :: edge_up_dn_tri -real(kind=WP),allocatable,dimension(:,:,:) :: edge_up_dn_grad -end module o_MESH -!========================================================== - !========================================================== MODULE o_ARRAYS USE o_PARAM @@ -237,9 +210,6 @@ MODULE o_ARRAYS REAL(kind=WP), ALLOCATABLE :: stress_atmoce_y(:) real(kind=WP), allocatable :: heat_flux(:), Tsurf(:) real(kind=WP), allocatable :: heat_flux_in(:) !to keep the unmodified (by SW penetration etc.) heat flux -real(kind=WP), allocatable :: del_ttf(:,:) -real(kind=WP), allocatable :: del_ttf_advhoriz(:,:),del_ttf_advvert(:,:) !!PS ,del_ttf_diff(:,:) - real(kind=WP), allocatable :: water_flux(:), Ssurf(:) real(kind=WP), allocatable :: virtual_salt(:), relax_salt(:) real(kind=WP), allocatable :: Tclim(:,:), Sclim(:,:) @@ -339,10 +309,5 @@ MODULE o_ARRAYS real(kind=WP), target, allocatable :: fer_c(:), fer_scal(:), fer_K(:,:), fer_gamma(:,:,:) real(kind=WP), allocatable :: ice_rejected_salt(:) - -!_______________________________________________________________________________ -! in case ldiag_DVD=.true. --> calculate discrete variance decay (DVD) -real(kind=WP), allocatable :: tr_dvd_horiz(:,:,:),tr_dvd_vert(:,:,:) - END MODULE o_ARRAYS !========================================================== diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index c94085117..e3ca565ea 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -26,7 +26,7 @@ subroutine find_up_downwind_triangles(mesh) ! adv_tracer_muscl subroutine muscl_adv_init(mesh) use MOD_MESH - use O_MESH + use MOD_TRACER use o_ARRAYS use o_PARAM use g_PARSUP @@ -123,7 +123,7 @@ end SUBROUTINE muscl_adv_init !_______________________________________________________________________________ SUBROUTINE find_up_downwind_triangles(mesh) USE MOD_MESH -USE O_MESH +USE MOD_TRACER USE o_ARRAYS USE o_PARAM USE g_PARSUP @@ -283,11 +283,10 @@ end SUBROUTINE find_up_downwind_triangles ! !_______________________________________________________________________________ SUBROUTINE fill_up_dn_grad(mesh) - ! ttx, tty elemental gradient of tracer USE o_PARAM USE MOD_MESH -USE O_MESH +USE MOD_TRACER USE o_ARRAYS USE g_PARSUP IMPLICIT NONE diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 061f0a05e..c9d157a30 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -207,6 +207,7 @@ end subroutine ocean_setup SUBROUTINE tracer_init(tracers, mesh) USE MOD_MESH USE MOD_TRACER +USE DIAGNOSTICS, only: ldiag_DVD USE g_PARSUP IMPLICIT NONE integer :: elem_size, node_size @@ -238,6 +239,18 @@ SUBROUTINE tracer_init(tracers, mesh) tracers(n)%valuesAB = 0. tracers(n)%ID = n end do + +allocate(del_ttf(nl-1,node_size)) +allocate(del_ttf_advhoriz(nl-1,node_size),del_ttf_advvert(nl-1,node_size)) +del_ttf = 0.0_WP +del_ttf_advhoriz = 0.0_WP +del_ttf_advvert = 0.0_WP + +if (ldiag_DVD) then + allocate(tr_dvd_horiz(nl-1,node_size,2),tr_dvd_vert(nl-1,node_size,2)) + tr_dvd_horiz = 0.0_WP + tr_dvd_vert = 0.0_WP +end if END SUBROUTINE tracer_init ! ! @@ -288,19 +301,6 @@ SUBROUTINE arrays_init(mesh) allocate(Wvel(nl, node_size), hpressure(nl,node_size)) allocate(Wvel_e(nl, node_size), Wvel_i(nl, node_size)) allocate(CFL_z(nl, node_size)) ! vertical CFL criteria - -allocate(del_ttf(nl-1,node_size)) -allocate(del_ttf_advhoriz(nl-1,node_size),del_ttf_advvert(nl-1,node_size)) -del_ttf = 0.0_WP -del_ttf_advhoriz = 0.0_WP -del_ttf_advvert = 0.0_WP -!!PS allocate(del_ttf_diff(nl-1,node_size)) -if (ldiag_DVD) then - allocate(tr_dvd_horiz(nl-1,node_size,2),tr_dvd_vert(nl-1,node_size,2)) - tr_dvd_horiz = 0.0_WP - tr_dvd_vert = 0.0_WP -end if - allocate(bvfreq(nl,node_size),mixlay_dep(node_size),bv_ref(node_size)) ! ================ ! Ocean forcing arrays From 92f2a9a4491b6ccb884da9b729bda349ca4b9d91 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 9 Sep 2021 10:07:39 +0200 Subject: [PATCH 363/909] removed do_Xmoment ifrom the tracer advection part. DVD diagnostics wont work now! we need to do something about it in the future. maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? --- src/gen_modules_diag.F90 | 6 ++- src/oce_adv_tra_driver.F90 | 26 ++++++------- src/oce_adv_tra_hor.F90 | 78 ++++++++++++++++++-------------------- src/oce_adv_tra_ver.F90 | 41 ++++++++------------ src/oce_ale_tracer.F90 | 2 +- 5 files changed, 69 insertions(+), 84 deletions(-) diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index ef3b50874..4d1219c0e 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -709,7 +709,8 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tracer, mesh) ! numerically induced mixing in ocean models ... del_ttf_advhoriz = 0.0_WP del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(tr_sqr, trAB_sqr, UV, wvel, wvel_i, wvel_e, 1, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) +! maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? +! call do_oce_adv_tra(tr_sqr, trAB_sqr, UV, wvel, wvel_i, wvel_e, 1, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D @@ -758,7 +759,8 @@ subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tracer, mesh) ! numerically induced mixing in ocean models ... del_ttf_advhoriz = 0.0_WP del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(tracer%values, tracer%valuesAB(:,:), UV, wvel, wvel_i, wvel_e, 2, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) +! maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? +! call do_oce_adv_tra(tracer%values, tracer%valuesAB(:,:), UV, wvel, wvel_i, wvel_e, 2, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 0a0acca5c..e094fad93 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -1,6 +1,6 @@ module oce_adv_tra_driver_interfaces interface - subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v, opth, optv, mesh) + subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, dttf_h, dttf_v, opth, optv, mesh) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh @@ -8,7 +8,6 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: ttf (mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: ttfAB (mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) @@ -38,7 +37,7 @@ subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo ! ! !=============================================================================== -subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v, opth, optv, mesh) +subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, dttf_h, dttf_v, opth, optv, mesh) use MOD_MESH use MOD_TRACER use o_ARRAYS @@ -56,7 +55,6 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: ttf (mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: ttfAB(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) @@ -80,7 +78,7 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v ! compute the low order upwind horizontal flux ! init_zero=.true. : zero the horizontal flux before computation ! init_zero=.false. : input flux will be substracted - call adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, adv_flux_hor, init_zero=.true.) + call adv_tra_hor_upw1(ttf, vel, mesh, adv_flux_hor, init_zero=.true.) ! update the LO solution for horizontal contribution fct_LO=0.0_WP @@ -109,7 +107,7 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v ! compute the low order upwind vertical flux (explicit part only) ! zero the input/output flux before computation - call adv_tra_ver_upw1(ttf, we, do_Xmoment, mesh, adv_flux_ver, init_zero=.true.) + call adv_tra_ver_upw1(ttf, we, mesh, adv_flux_ver, init_zero=.true.) ! update the LO solution for vertical contribution do n=1, myDim_nod2D @@ -128,7 +126,7 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v ! zero the input/output flux before computation ! --> compute here low order part of vertical anti diffusive fluxes, ! has to be done on the full vertical velocity w - call adv_tra_ver_upw1(ttf, w, do_Xmoment, mesh, adv_flux_ver, init_zero=.true.) + call adv_tra_ver_upw1(ttf, w, mesh, adv_flux_ver, init_zero=.true.) end if call exchange_nod(fct_LO) @@ -142,11 +140,11 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v SELECT CASE(trim(tra_adv_hor)) CASE('MUSCL') ! compute the untidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above) - call adv_tra_hor_muscl(ttfAB, uv, do_Xmoment, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_muscl(ttfAB, uv, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) CASE('MFCT') - call adv_tra_hor_mfct(ttfAB, uv, do_Xmoment, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_mfct(ttfAB, uv, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_hor_upw1(ttfAB, uv, do_Xmoment, mesh, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_upw1(ttfAB, uv, mesh, adv_flux_hor, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tra_adv_hor), '! Check your namelists!' call par_ex(1) @@ -163,13 +161,13 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v SELECT CASE(trim(tra_adv_ver)) CASE('QR4C') ! compute the untidiffusive vertical flux (init_zero=.false.:input is the LO vertical flux computed above) - call adv_tra_ver_qr4c (ttfAB, pwvel, do_Xmoment, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_qr4c (ttfAB, pwvel, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) CASE('CDIFF') - call adv_tra_ver_cdiff(ttfAB, pwvel, do_Xmoment, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_cdiff(ttfAB, pwvel, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE('PPM') - call adv_tra_vert_ppm (ttfAB, pwvel, do_Xmoment, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_vert_ppm (ttfAB, pwvel, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_ver_upw1 (ttfAB, pwvel, do_Xmoment, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_upw1 (ttfAB, pwvel, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tra_adv_ver), '! Check your namelists!' call par_ex(1) diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 3243e4013..cdec31b1f 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -8,13 +8,12 @@ module oce_adv_tra_hor_interfaces ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) + subroutine adv_tra_hor_upw1(ttf, vel, mesh, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in) , target :: mesh real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - integer, intent(in) :: do_Xmoment real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) logical, optional :: init_zero end subroutine @@ -25,11 +24,10 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero) + subroutine adv_tra_hor_muscl(ttf, vel, mesh, num_ord, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) @@ -38,11 +36,10 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer end subroutine ! a not stable version of MUSCL (reconstruction in the vicinity of bottom topography is not upwind) ! it runs with FCT option only - subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero) + subroutine adv_tra_hor_mfct(ttf, vel, mesh, num_ord, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) @@ -54,7 +51,7 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero ! ! !=============================================================================== -subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) +subroutine adv_tra_hor_upw1(ttf, vel, mesh, flux, init_zero) use MOD_MESH use o_ARRAYS use o_PARAM @@ -63,7 +60,6 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) use g_comm_auto implicit none type(t_mesh), intent(in) , target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) @@ -140,8 +136,8 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) !____________________________________________________________________ ! 1st. low order upwind solution flux(nz, edge)=-0.5_WP*( & - (ttf(nz, enodes(1))**do_Xmoment)*(vflux+abs(vflux))+ & - (ttf(nz, enodes(2))**do_Xmoment)*(vflux-abs(vflux)) & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)) & )-flux(nz, edge) end do @@ -158,8 +154,8 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) !___________________________________________________________ ! 1st. low order upwind solution flux(nz, edge)=-0.5_WP*( & - (ttf(nz, enodes(1))**do_Xmoment)*(vflux+abs(vflux))+ & - (ttf(nz, enodes(2))**do_Xmoment)*(vflux-abs(vflux)))-flux(nz, edge) + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) end do end if @@ -176,8 +172,8 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) +(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) flux(nz, edge)=-0.5_WP*( & - (ttf(nz, enodes(1))**do_Xmoment)*(vflux+abs(vflux))+ & - (ttf(nz, enodes(2))**do_Xmoment)*(vflux-abs(vflux)))-flux(nz, edge) + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) end do !_______________________________________________________________________ @@ -189,8 +185,8 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) !____________________________________________________________________ ! 1st. low order upwind solution flux(nz, edge)=-0.5_WP*( & - (ttf(nz, enodes(1))**do_Xmoment)*(vflux+abs(vflux))+ & - (ttf(nz, enodes(2))**do_Xmoment)*(vflux-abs(vflux)) & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)) & )-flux(nz, edge) end do @@ -203,15 +199,15 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) !_______________________________________________________________ ! 1st. low order upwind solution flux(nz, edge)=-0.5_WP*( & - (ttf(nz, enodes(1))**do_Xmoment)*(vflux+abs(vflux))+ & - (ttf(nz, enodes(2))**do_Xmoment)*(vflux-abs(vflux)))-flux(nz, edge) + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) end do end do end subroutine adv_tra_hor_upw1 ! ! !=============================================================================== -subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero) +subroutine adv_tra_hor_muscl(ttf, vel, mesh, num_ord, flux, init_zero) use MOD_MESH use MOD_TRACER use o_ARRAYS @@ -221,7 +217,6 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer use g_comm_auto implicit none type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) @@ -310,8 +305,8 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer !____________________________________________________________________ ! volume flux across the segments vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -337,8 +332,8 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer !_______________________________________________________________ ! volume flux across the segments vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end if @@ -425,8 +420,8 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer !___________________________________________________________________ ! (1-num_ord) is done with 3rd order upwind - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -449,8 +444,8 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer !____________________________________________________________________ ! volume flux across the segments vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -473,15 +468,15 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer !____________________________________________________________________ ! volume flux across the segments vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end do end subroutine adv_tra_hor_muscl ! ! !=============================================================================== -subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero) +subroutine adv_tra_hor_mfct(ttf, vel, mesh, num_ord, flux, init_zero) use MOD_MESH use MOD_TRACER use o_ARRAYS @@ -491,7 +486,6 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero use g_comm_auto implicit none type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) @@ -576,8 +570,8 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !____________________________________________________________________ ! volume flux across the segments vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -599,8 +593,8 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !___________________________________________________________________ ! volume flux across the segments vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end if @@ -683,8 +677,8 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !___________________________________________________________________ ! (1-num_ord) is done with 3rd order upwind - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -704,8 +698,8 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !____________________________________________________________________ ! volume flux across the segments vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -725,8 +719,8 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !____________________________________________________________________ ! volume flux across the segments vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end do end subroutine adv_tra_hor_mfct diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 7d971a85d..590b98d0f 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -15,11 +15,10 @@ subroutine adv_tra_vert_impl(ttf, w, mesh) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_upw1(ttf, w, do_Xmoment, mesh, flux, init_zero) + subroutine adv_tra_ver_upw1(ttf, w, mesh, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) @@ -31,11 +30,10 @@ subroutine adv_tra_ver_upw1(ttf, w, do_Xmoment, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_qr4c(ttf, w, do_Xmoment, mesh, num_ord, flux, init_zero) + subroutine adv_tra_ver_qr4c(ttf, w, mesh, num_ord, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) @@ -48,12 +46,11 @@ subroutine adv_tra_ver_qr4c(ttf, w, do_Xmoment, mesh, num_ord, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) + subroutine adv_tra_vert_ppm(ttf, w, mesh, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh integer :: n, nz, nl1 - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP) :: tvert(mesh%nl), tv real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) @@ -65,12 +62,11 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_cdiff(ttf, w, do_Xmoment, mesh, flux, init_zero) + subroutine adv_tra_ver_cdiff(ttf, w, mesh, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh integer :: n, nz, nl1 - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP) :: tvert(mesh%nl), tv real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) @@ -227,7 +223,7 @@ end subroutine adv_tra_vert_impl ! ! !=============================================================================== -subroutine adv_tra_ver_upw1(ttf, w, do_Xmoment, mesh, flux, init_zero) +subroutine adv_tra_ver_upw1(ttf, w, mesh, flux, init_zero) use g_config use MOD_MESH use o_ARRAYS @@ -236,7 +232,6 @@ subroutine adv_tra_ver_upw1(ttf, w, do_Xmoment, mesh, flux, init_zero) use g_forcing_arrays implicit none type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP) :: tvert(mesh%nl) integer :: n, nz, nzmax, nzmin real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) @@ -274,15 +269,15 @@ subroutine adv_tra_ver_upw1(ttf, w, do_Xmoment, mesh, flux, init_zero) ! vert. flux at remaining levels do nz=nzmin+1,nzmax-1 flux(nz,n)=-0.5*( & - (ttf(nz ,n)**do_Xmoment)*(W(nz,n)+abs(W(nz,n)))+ & - (ttf(nz-1,n)**do_Xmoment)*(W(nz,n)-abs(W(nz,n))))*area(nz,n)-flux(nz,n) + ttf(nz ,n)*(W(nz,n)+abs(W(nz,n)))+ & + ttf(nz-1,n)*(W(nz,n)-abs(W(nz,n))))*area(nz,n)-flux(nz,n) end do end do end subroutine adv_tra_ver_upw1 ! ! !=============================================================================== -subroutine adv_tra_ver_qr4c(ttf, w, do_Xmoment, mesh, num_ord, flux, init_zero) +subroutine adv_tra_ver_qr4c(ttf, w, mesh, num_ord, flux, init_zero) use g_config use MOD_MESH use o_ARRAYS @@ -291,7 +286,6 @@ subroutine adv_tra_ver_qr4c(ttf, w, do_Xmoment, mesh, num_ord, flux, init_zero) use g_forcing_arrays implicit none type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) @@ -348,16 +342,16 @@ subroutine adv_tra_ver_qr4c(ttf, w, do_Xmoment, mesh, num_ord, flux, init_zero) Tmean1=ttf(nz ,n)+(2*qc+qu)*(zbar_3d_n(nz,n)-Z_3d_n(nz ,n))/3.0_WP Tmean2=ttf(nz-1,n)+(2*qc+qd)*(zbar_3d_n(nz,n)-Z_3d_n(nz-1,n))/3.0_WP - Tmean =(W(nz,n)+abs(W(nz,n)))*(Tmean1**do_Xmoment)+(W(nz,n)-abs(W(nz,n)))*(Tmean2**do_Xmoment) + Tmean =(W(nz,n)+abs(W(nz,n)))*Tmean1+(W(nz,n)-abs(W(nz,n)))*Tmean2 ! flux(nz,n)=-0.5_WP*(num_ord*(Tmean1+Tmean2)*W(nz,n)+(1.0_WP-num_ord)*Tmean)*area(nz,n)-flux(nz,n) - flux(nz,n)=(-0.5_WP*(1.0_WP-num_ord)*Tmean - num_ord*((0.5_WP*(Tmean1+Tmean2))**do_Xmoment)*W(nz,n))*area(nz,n)-flux(nz,n) + flux(nz,n)=(-0.5_WP*(1.0_WP-num_ord)*Tmean - num_ord*(0.5_WP*(Tmean1+Tmean2))*W(nz,n))*area(nz,n)-flux(nz,n) end do end do end subroutine adv_tra_ver_qr4c ! ! !=============================================================================== -subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) +subroutine adv_tra_vert_ppm(ttf, w, mesh, flux, init_zero) use g_config use MOD_MESH use o_ARRAYS @@ -366,7 +360,6 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) use g_forcing_arrays implicit none type(t_mesh), intent(in) , target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: ttf (mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) @@ -514,21 +507,21 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) if (W(nz,n)>0._WP) then x=min(W(nz,n)*dt/dzj, 1._WP) tvert(nz )=(-aL-0.5_WP*x*(aR-aL+(1._WP-2._WP/3._WP*x)*aj)) - tvert(nz )=( tvert(nz)**do_Xmoment ) ! compute 2nd moment for DVD + tvert(nz )=tvert(nz) ! compute 2nd moment for DVD tvert(nz )=tvert(nz)*area(nz,n)*W(nz,n) end if if (W(nz+1,n)<0._WP) then x=min(-W(nz+1,n)*dt/dzj, 1._WP) tvert(nz+1)=(-aR+0.5_WP*x*(aR-aL-(1._WP-2._WP/3._WP*x)*aj)) - tvert(nz+1)=( tvert(nz+1)**do_Xmoment ) ! compute 2nd moment for DVD + tvert(nz+1)=tvert(nz+1) ! compute 2nd moment for DVD tvert(nz+1)=tvert(nz+1)*area(nz+1,n)*W(nz+1,n) end if end do !_______________________________________________________________________ ! Surface flux - tvert(nzmin)= -( tv(nzmin)**do_Xmoment )*W(nzmin,n)*area(nzmin,n) + tvert(nzmin)= -tv(nzmin)*W(nzmin,n)*area(nzmin,n) ! Zero bottom flux tvert(nzmax)=0.0_WP flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) @@ -538,7 +531,7 @@ end subroutine adv_tra_vert_ppm ! ! !=============================================================================== -subroutine adv_tra_ver_cdiff(ttf, w, do_Xmoment, mesh, flux, init_zero) +subroutine adv_tra_ver_cdiff(ttf, w, mesh, flux, init_zero) use g_config use MOD_MESH use o_ARRAYS @@ -547,7 +540,6 @@ subroutine adv_tra_ver_cdiff(ttf, w, do_Xmoment, mesh, flux, init_zero) use g_forcing_arrays implicit none type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) @@ -569,7 +561,7 @@ subroutine adv_tra_ver_cdiff(ttf, w, do_Xmoment, mesh, flux, init_zero) !_______________________________________________________________________ ! Surface flux - tvert(nzmin)= -W(nzmin,n)*(ttf(nzmin,n)**do_Xmoment)*area(nzmin,n) + tvert(nzmin)= -W(nzmin,n)*ttf(nzmin,n)*area(nzmin,n) !_______________________________________________________________________ ! Zero bottom flux @@ -579,7 +571,6 @@ subroutine adv_tra_ver_cdiff(ttf, w, do_Xmoment, mesh, flux, init_zero) ! Other levels do nz=nzmin+1, nzmax tv=0.5_WP*(ttf(nz-1,n)+ttf(nz,n)) - tv=tv**do_Xmoment tvert(nz)= -tv*W(nz,n)*area(nz,n) end do diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index cd6736468..9c91b9bca 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -215,7 +215,7 @@ subroutine adv_tracers_ale(tracer, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... del_ttf_advhoriz = 0.0_WP del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(tracer%values, tracer%valuesAB, UV, wvel, wvel_i, wvel_e, 1, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) + call do_oce_adv_tra(tracer%values, tracer%valuesAB, UV, wvel, wvel_i, wvel_e, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection From d760e823851a481aab9e7c3abda460518c220108 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 9 Sep 2021 10:44:28 +0200 Subject: [PATCH 364/909] a further step: the advection tracer stuff has been adopted for receieving t_tracer as INOUT argument --- src/oce_adv_tra_driver.F90 | 36 ++++++++++++++++++++---------------- src/oce_ale_tracer.F90 | 2 +- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index e094fad93..33afff994 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -1,18 +1,17 @@ module oce_adv_tra_driver_interfaces interface - subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, dttf_h, dttf_v, opth, optv, mesh) + subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) + use MOD_TRACER use MOD_MESH use g_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracer real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: ttf (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: ttfAB (mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: opth, optv end subroutine end interface end module @@ -37,7 +36,7 @@ subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo ! ! !=============================================================================== -subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, dttf_h, dttf_v, opth, optv, mesh) +subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) use MOD_MESH use MOD_TRACER use o_ARRAYS @@ -50,17 +49,16 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, dttf_h, dttf_v, opth, optv use oce_adv_tra_fct_interfaces use oce_tra_adv_flux2dtracer_interface implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracer real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: ttf (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: ttfAB(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: opth, optv real(kind=WP), pointer, dimension (:,:) :: pwvel + real(kind=WP), pointer, dimension (:,:) :: ttf, ttfAB integer :: el(2), enodes(2), nz, n, e integer :: nl12, nu12, nl1, nl2, nu1, nu2, tr_num @@ -68,13 +66,19 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, dttf_h, dttf_v, opth, optv real(kind=WP) :: qc, qu, qd real(kind=WP) :: tvert(mesh%nl), tvert_e(mesh%nl), a, b, c, d, da, db, dg, vflux, Tupw1 real(kind=WP) :: Tmean, Tmean1, Tmean2, num_ord + real(kind=WP) :: opth, optv logical :: do_zero_flux #include "associate_mesh.h" + + ttf => tracer%values + ttfAB => tracer%valuesAB + opth = tracer%tra_adv_ph + optv = tracer%tra_adv_pv !___________________________________________________________________________ ! compute FCT horzontal and vertical low order solution as well as lw order ! part of antidiffusive flux - if (trim(tra_adv_lim)=='FCT') then + if (trim(tracer%tra_adv_lim)=='FCT') then ! compute the low order upwind horizontal flux ! init_zero=.true. : zero the horizontal flux before computation ! init_zero=.false. : input flux will be substracted @@ -133,11 +137,11 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, dttf_h, dttf_v, opth, optv end if do_zero_flux=.true. - if (trim(tra_adv_lim)=='FCT') do_zero_flux=.false. + if (trim(tracer%tra_adv_lim)=='FCT') do_zero_flux=.false. !___________________________________________________________________________ ! do horizontal tracer advection, in case of FCT high order solution - SELECT CASE(trim(tra_adv_hor)) + SELECT CASE(trim(tracer%tra_adv_hor)) CASE('MUSCL') ! compute the untidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above) call adv_tra_hor_muscl(ttfAB, uv, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) @@ -150,7 +154,7 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, dttf_h, dttf_v, opth, optv call par_ex(1) END SELECT - if (trim(tra_adv_lim)=='FCT') then + if (trim(tracer%tra_adv_lim)=='FCT') then pwvel=>w else pwvel=>we @@ -158,7 +162,7 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, dttf_h, dttf_v, opth, optv !___________________________________________________________________________ ! do vertical tracer advection, in case of FCT high order solution - SELECT CASE(trim(tra_adv_ver)) + SELECT CASE(trim(tracer%tra_adv_ver)) CASE('QR4C') ! compute the untidiffusive vertical flux (init_zero=.false.:input is the LO vertical flux computed above) call adv_tra_ver_qr4c (ttfAB, pwvel, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) @@ -184,7 +188,7 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, dttf_h, dttf_v, opth, optv ! write(*,*) '2:', minval(adv_flux_hor), maxval(adv_flux_hor), sum(adv_flux_hor) ! write(*,*) '3:', minval(adv_flux_ver), maxval(adv_flux_ver), sum(adv_flux_ver) !end if - if (trim(tra_adv_lim)=='FCT') then + if (trim(tracer%tra_adv_lim)=='FCT') then !if (mype==0) write(*,*) 'before:', sum(abs(adv_flux_ver)), sum(abs(adv_flux_hor)) call oce_tra_adv_fct(dttf_h, dttf_v, ttf, fct_LO, adv_flux_hor, adv_flux_ver, mesh) !if (mype==0) write(*,*) 'after:', sum(abs(adv_flux_ver)), sum(abs(adv_flux_hor)) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 9c91b9bca..9f51d1f1f 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -215,7 +215,7 @@ subroutine adv_tracers_ale(tracer, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... del_ttf_advhoriz = 0.0_WP del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(tracer%values, tracer%valuesAB, UV, wvel, wvel_i, wvel_e, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) + call do_oce_adv_tra(UV, wvel, wvel_i, wvel_e, del_ttf_advhoriz, del_ttf_advvert, tracer, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection From 704ada193021b18d7a47e8f251dff09115cde07b Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 9 Sep 2021 11:01:51 +0200 Subject: [PATCH 365/909] Soufflet channel shall match the regresison tests now (but for the wrong reason). we need to fix it later --- src/MOD_TRACER.F90 | 2 +- src/oce_ale_tracer.F90 | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index 67146ccfc..f1d095336 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -6,7 +6,7 @@ MODULE MOD_TRACER SAVE TYPE T_TRACER -real(kind=WP), allocatable, dimension(:,:) :: values, valuesAB !instant values & Adams-Bashfort interpolation +real(kind=WP), allocatable, dimension(:,:) :: values, valuesAB ! instant values & Adams-Bashfort interpolation logical :: smooth_bh_tra=.false. real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra logical :: i_vert_diff =.false. diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 9f51d1f1f..2f2528e21 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -142,7 +142,8 @@ subroutine solve_tracers_ale(tracers, mesh) ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' - if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then +! if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then + if ((toy_ocean) .AND. ((TRIM(which_toy)=="soufflet"))) then call relax_zonal_temp(tracers(tr_num), mesh) else call relax_to_clim(tracers(tr_num), mesh) @@ -263,7 +264,7 @@ subroutine diff_tracers_ale(tracer, mesh) call diff_part_hor_redi(mesh) ! seems to be ~9% faster than diff_part_hor !___________________________________________________________________________ ! do vertical diffusion: explicite - if (.not. i_vert_diff) call diff_ver_part_expl_ale(tracer, mesh) + if (.not. tracer%i_vert_diff) call diff_ver_part_expl_ale(tracer, mesh) ! A projection of horizontal Redi diffussivity onto vertical. This par contains horizontal ! derivatives and has to be computed explicitly! if (Redi) call diff_ver_part_redi_expl(mesh) @@ -291,7 +292,7 @@ subroutine diff_tracers_ale(tracer, mesh) end do !___________________________________________________________________________ - if (i_vert_diff) then + if (tracer%i_vert_diff) then ! do vertical diffusion: implicite call diff_ver_part_impl_ale(tracer, mesh) @@ -299,7 +300,7 @@ subroutine diff_tracers_ale(tracer, mesh) !We DO not set del_ttf to zero because it will not be used in this timestep anymore !init_tracers will set it to zero for the next timestep !init_tracers will set it to zero for the next timestep - if (smooth_bh_tra) then + if (tracer%smooth_bh_tra) then call diff_part_bh(tracer, mesh) ! alpply biharmonic diffusion (implemented as filter) end if end subroutine diff_tracers_ale From e967c5ba95ad01b86774c9f57ad06fdff20089a0 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Sun, 12 Sep 2021 19:07:19 +0200 Subject: [PATCH 366/909] Update oce_setup_step.F90 setting i_vert_diff for tracer part during initialisation (was forgotten) --- src/oce_setup_step.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index c9d157a30..39ad69c9d 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -237,6 +237,7 @@ SUBROUTINE tracer_init(tracers, mesh) tracers(n)%gamma2_tra = gamma2_tra tracers(n)%values = 0. tracers(n)%valuesAB = 0. + tracers(n)%i_vert_diff = i_vert_diff tracers(n)%ID = n end do From 920e2cfc08340b4fcd20e993ee168fae5badf8dd Mon Sep 17 00:00:00 2001 From: dsidoren Date: Sun, 12 Sep 2021 19:35:12 +0200 Subject: [PATCH 367/909] Update oce_ale_tracer.F90 trying to make Soufflet channel pass the tests --- src/oce_ale_tracer.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 2f2528e21..8ed8199ee 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -144,7 +144,7 @@ subroutine solve_tracers_ale(tracers, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' ! if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then if ((toy_ocean) .AND. ((TRIM(which_toy)=="soufflet"))) then - call relax_zonal_temp(tracers(tr_num), mesh) + call relax_zonal_temp(tracers(1), mesh) else call relax_to_clim(tracers(tr_num), mesh) end if From 4e2f0a731d13588a1becc51d8995a31dbedeca68 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 13 Sep 2021 11:47:01 +0200 Subject: [PATCH 368/909] completing refactoring for traces. a new namelist appeared: namelist.tra. a part of namelist.oce has been moved inte there! --- config/namelist.oce | 50 +--------------------------- config/namelist.tra | 50 ++++++++++++++++++++++++++++ src/MOD_TRACER.F90 | 20 ++++++++++++ src/gen_ic3d.F90 | 9 +++-- src/gen_model_setup.F90 | 6 ++-- src/io_meandata.F90 | 8 ++--- src/oce_adv_tra_driver.F90 | 4 +-- src/oce_ale_tracer.F90 | 10 +++--- src/oce_modules.F90 | 18 ++-------- src/oce_setup_step.F90 | 67 ++++++++++++++++++++++++++++++-------- 10 files changed, 146 insertions(+), 96 deletions(-) create mode 100644 config/namelist.tra diff --git a/config/namelist.oce b/config/namelist.oce index af71c7741..a69770154 100644 --- a/config/namelist.oce +++ b/config/namelist.oce @@ -15,7 +15,7 @@ visc_option=5 ! 1=Harmonic Leith parameterization; ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) ! 7=Biharmonic flow aware (viscosity depends on velocity differences) ! 8=Dynamic Backscatter -easy_bs_return= 1.5 ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 (easy backscatter) +easy_bs_return= 1.5 ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 (easy backscatter) A_ver= 1.e-4 ! Vertical viscosity, m^2/s scale_area=5.8e9 ! Visc. and diffus. are for an element with scale_area mom_adv=2 ! 1=vector CV, p1 vel, 2=sca. CV, 3=vector inv. @@ -43,51 +43,3 @@ mix_scheme='KPP' ! vertical mixing scheme: KPP, PP Ricr = 0.3 ! critical bulk Richardson Number concv = 1.6 ! constant for pure convection (eqn. 23) (Large 1.5-1.6; MOM default 1.8) / - -&oce_tra -use_momix = .true. ! switch on/off !Monin-Obukhov -> TB04 mixing -momix_lat = -50.0 ! latitidinal treshhold for TB04, =90 --> global -momix_kv = 0.01 ! PP/KPP, mixing coefficient within MO length -use_instabmix = .true. ! enhance convection in case of instable stratification -instabmix_kv = 0.1 -use_windmix = .false. ! enhance mixing trough wind only for PP mixing (for stability) -windmix_kv = 1.e-3 -windmix_nl = 2 - -smooth_bh_tra =.false. ! use biharmonic diffusion (filter implementation) for tracers -gamma0_tra = 0.0005 ! gammaX_tra are analogous to those in the dynamical part -gamma1_tra = 0.0125 -gamma2_tra = 0. - -diff_sh_limit=5.0e-3 ! for KPP, max diff due to shear instability -Kv0_const=.true. -double_diffusion=.false. ! for KPP,dd switch -K_ver=1.0e-5 -K_hor=3000. -surf_relax_T=0.0 -surf_relax_S=1.929e-06 ! 50m/300days 6.43e-07! m/s 10./(180.*86400.) -balance_salt_water =.true. ! balance virtual-salt or freshwater flux or not -clim_relax=0.0 ! 1/s, geometrical information has to be supplied -ref_sss_local=.true. -ref_sss=34. -i_vert_diff =.true. ! true -tra_adv_hor ='MFCT' !'MUSCL', 'UPW1' -tra_adv_ver ='QR4C' !'QR4C', 'CDIFF', 'UPW1' -tra_adv_lim ='FCT' !'FCT', 'NONE' (default) -tra_adv_ph = 1. ! a parameter to be used in horizontal advection (for MUSCL it is the fraction of fourth-order contribution in the solution) -tra_adv_pv = 1. ! a parameter to be used in horizontal advection (for QR4C it is the fraction of fourth-order contribution in the solution) -! Implemented trassers (3d restoring): -! 301 - Fram strait. -! 302 - Bering Strait -! 303 - BSO -num_tracers=2 !number of all tracers -tracer_ID =0,1 !their IDs (0 and 1 are reserved for temperature and salinity) -/ - -&oce_init3d ! initial conditions for tracers -n_ic3d = 2 ! number of tracers to initialize -idlist = 1, 0 ! their IDs (0 is temperature, 1 is salinity, etc.). The reading order is defined here! -filelist = 'phc3.0_winter.nc', 'phc3.0_winter.nc' ! list of files in ClimateDataPath to read (one file per tracer), same order as idlist -varlist = 'salt', 'temp' ! variables to read from specified files -t_insitu = .true. ! if T is insitu it will be converted to potential after reading it -/ diff --git a/config/namelist.tra b/config/namelist.tra new file mode 100644 index 000000000..81a1c3435 --- /dev/null +++ b/config/namelist.tra @@ -0,0 +1,50 @@ +&tracer_listsize +num_tracers=100 !number of tracers to allocate. shallbe large or equal to the number of streams in &nml_list +/ + +&tracer_list +! ID | tra_adv_hor | tra_adv_ver | tra_adv_lim | tra_adv_ph | tra_adv_pv +nml_tracer_list = +1 , 'MFCT', 'QR4C', 'FCT ', 1., 1., !ID=1=Temperature +2 , 'MFCT', 'QR4C', 'FCT ', 1., 1., !ID=2=Salinity +!101, 'UPW1', 'UPW1', 'NON ', 0., 0. !ID=X=Whatever +/ + +&tracer_init3d ! initial conditions for tracers +n_ic3d = 2 ! number of tracers to initialize +idlist = 2, 1 ! their IDs (0 is temperature, 1 is salinity, etc.). The reading order is defined here! +filelist = 'phc3.0_winter.nc', 'phc3.0_winter.nc' ! list of files in ClimateDataPath to read (one file per tracer), same order as idlist +varlist = 'salt', 'temp' ! variables to read from specified files +t_insitu = .true. ! if T is insitu it will be converted to potential after reading it +/ + +&tracer_general +! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). +smooth_bh_tra =.false. ! use biharmonic diffusion (filter implementation) for tracers +gamma0_tra = 0.0005 ! gammaX_tra are analogous to those in the dynamical part +gamma1_tra = 0.0125 +gamma2_tra = 0. +i_vert_diff =.true. +/ + +&tracer_phys +use_momix = .true. ! switch on/off !Monin-Obukhov -> TB04 mixing +momix_lat = -50.0 ! latitidinal treshhold for TB04, =90 --> global +momix_kv = 0.01 ! PP/KPP, mixing coefficient within MO length +use_instabmix = .true. ! enhance convection in case of instable stratification +instabmix_kv = 0.1 +use_windmix = .false. ! enhance mixing trough wind only for PP mixing (for stability) +windmix_kv = 1.e-3 +windmix_nl = 2 +diff_sh_limit=5.0e-3 ! for KPP, max diff due to shear instability +Kv0_const=.true. +double_diffusion=.false. ! for KPP,dd switch +K_ver=1.0e-5 +K_hor=3000. +surf_relax_T=0.0 +surf_relax_S=1.929e-06 ! 50m/300days 6.43e-07! m/s 10./(180.*86400.) +balance_salt_water =.true. ! balance virtual-salt or freshwater flux or not +clim_relax=0.0 ! 1/s, geometrical information has to be supplied +ref_sss_local=.true. +ref_sss=34. +/ diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index f1d095336..b72af3ba7 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -15,6 +15,15 @@ MODULE MOD_TRACER real(kind=WP) :: tra_adv_pv = 1. ! a parameter to be used in horizontal advection (for QR4C it is the fraction of fourth-order contribution in the solution) integer :: ID END TYPE T_TRACER +integer :: num_tracers=2 + +! general options for all tracers (can be moved to T_TRACER is needed) +! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). +logical :: smooth_bh_tra = .false. +real(kind=WP) :: gamma0_tra = 0.0005 +real(kind=WP) :: gamma1_tra = 0.0125 +real(kind=WP) :: gamma2_tra = 0. +logical :: i_vert_diff = .true. !auxuary arrays to work with tracers: real(kind=WP), allocatable :: del_ttf(:,:) @@ -34,6 +43,17 @@ MODULE MOD_TRACER integer,allocatable,dimension(:,:) :: nn_pos integer,allocatable,dimension(:,:) :: edge_up_dn_tri real(kind=WP),allocatable,dimension(:,:,:) :: edge_up_dn_grad + +! auxury type for reading namelist.tra +type nml_tracer_list_type + INTEGER :: ID =-1 + CHARACTER(len=4) :: adv_hor ='NONE' + CHARACTER(len=4) :: adv_ver ='NONE' + CHARACTER(len=4) :: adv_lim ='NONE' + REAL(kind=WP) :: adv_ph =1. + REAL(kind=WP) :: adv_pv =1. +end type + end module MOD_TRACER !========================================================== diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index eac828d2a..b9df852a8 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -25,7 +25,7 @@ MODULE g_ic3d include 'netcdf.inc' public do_ic3d, & ! read and apply 3D initial conditions - n_ic3d, idlist, filelist, varlist, oce_init3d, & ! to be read from the namelist + n_ic3d, idlist, filelist, varlist, tracer_init3d, & ! to be read from the namelist t_insitu private @@ -40,7 +40,7 @@ MODULE g_ic3d character(MAX_PATH), save, dimension(ic_max) :: filelist character(50), save, dimension(ic_max) :: varlist - namelist / oce_init3d / n_ic3d, idlist, filelist, varlist, t_insitu + namelist / tracer_init3d / n_ic3d, idlist, filelist, varlist, t_insitu character(MAX_PATH), save :: filename character(50), save :: varname @@ -232,8 +232,7 @@ SUBROUTINE nc_ic3d_ini(mesh) warn = 0 if (mype==0) then - write(*,*) 'reading input tracer file for tracer ID= ', tracer_ID(current_tracer) - write(*,*) 'input file: ', trim(filename) + write(*,*) 'reading ', trim(filename) write(*,*) 'variable : ', trim(varname) end if @@ -486,7 +485,7 @@ SUBROUTINE do_ic3d(tracers, mesh) filename=trim(ClimateDataPath)//trim(filelist(n)) varname =trim(varlist(n)) DO current_tracer=1, num_tracers - if (tracer_ID(current_tracer)==idlist(n)) then + if (tracers(current_tracer)%ID==idlist(n)) then ! read initial conditions for current tracer call nc_ic3d_ini(mesh) ! get first coeficients for time inerpolation on model grid for all datas diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index 755b6abf7..9feea3c49 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -56,8 +56,10 @@ subroutine read_namelist nmlfile ='namelist.oce' ! name of ocean namelist file open (20,file=nmlfile) read (20,NML=oce_dyn) - read (20,NML=oce_tra) - read (20,NML=oce_init3d) + close (20) + + nmlfile ='namelist.tra' ! name of ocean namelist file + read (20,NML=tracer_phys) close (20) nmlfile ='namelist.forcing' ! name of forcing namelist file diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index c9cf93384..c67dc9dd4 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -294,7 +294,7 @@ subroutine ini_mean_io(tracers, mesh) call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'salt', 'salinity', 'psu', tracers(2)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('otracers ') do j=3, num_tracers - write (id_string, "(I3.3)") tracer_id(j) + write (id_string, "(I3.3)") tracers(j)%ID call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'tra_'//id_string, 'pasive tracer ID='//id_string, 'n/a', tracers(j)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) end do CASE ('slope_x ') @@ -647,9 +647,9 @@ subroutine create_new_file(entry, mesh) call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'ClimateDataPath', len_trim(ClimateDataPath), trim(ClimateDataPath)), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'which_ALE', len_trim(which_ALE), trim(which_ALE)), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'mix_scheme', len_trim(mix_scheme), trim(mix_scheme)), __LINE__) - call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_hor', len_trim(tra_adv_hor), trim(tra_adv_hor)), __LINE__) - call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_ver', len_trim(tra_adv_ver), trim(tra_adv_ver)), __LINE__) - call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_lim', len_trim(tra_adv_lim), trim(tra_adv_lim)), __LINE__) +! call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_hor', len_trim(tra_adv_hor), trim(tra_adv_hor)), __LINE__) +! call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_ver', len_trim(tra_adv_ver), trim(tra_adv_ver)), __LINE__) +! call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_lim', len_trim(tra_adv_lim), trim(tra_adv_lim)), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell', NF_INT, 1, use_partial_cell), __LINE__) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 33afff994..63e3b371a 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -150,7 +150,7 @@ subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) CASE('UPW1') call adv_tra_hor_upw1(ttfAB, uv, mesh, adv_flux_hor, init_zero=do_zero_flux) CASE DEFAULT !unknown - if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tra_adv_hor), '! Check your namelists!' + if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracer%tra_adv_hor), '! Check your namelists!' call par_ex(1) END SELECT @@ -173,7 +173,7 @@ subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) CASE('UPW1') call adv_tra_ver_upw1 (ttfAB, pwvel, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE DEFAULT !unknown - if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tra_adv_ver), '! Check your namelists!' + if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracer%tra_adv_ver), '! Check your namelists!' call par_ex(1) ! --> be aware the vertical implicite part in case without FCT is done in ! oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, mesh) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 2f2528e21..e7cc3e5c2 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -96,7 +96,7 @@ subroutine solve_tracers_ale(tracers, mesh) subroutine solve_tracers_ale(tracers, mesh) use g_config use g_parsup - use o_PARAM, only: num_tracers, SPP, Fer_GM + use o_PARAM, only: SPP, Fer_GM use o_arrays use mod_mesh use mod_tracer @@ -144,7 +144,7 @@ subroutine solve_tracers_ale(tracers, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' ! if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then if ((toy_ocean) .AND. ((TRIM(which_toy)=="soufflet"))) then - call relax_zonal_temp(tracers(tr_num), mesh) + call relax_zonal_temp(tracers(1), mesh) else call relax_to_clim(tracers(tr_num), mesh) end if @@ -334,11 +334,11 @@ subroutine diff_ver_part_expl_ale(tracer, mesh) ul1=ulevels_nod2D(n) vd_flux=0._WP - if (tr_num==1) then + if (tracer%ID==1) then flux = -heat_flux(n)/vcpw rdata = Tsurf(n) rlx = surf_relax_T - elseif (tr_num==2) then + elseif (tracer%ID==2) then flux = virtual_salt(n)+relax_salt(n)- real_salt_flux(n)*is_nonlinfs else flux = 0._WP @@ -409,7 +409,7 @@ subroutine diff_ver_part_impl_ale(tracer, mesh) #include "associate_mesh.h" trarr=>tracer%values(:,:) !___________________________________________________________________________ - if ((trim(tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. + if ((trim(tracer%tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. if (Redi) isredi=1._WP dt_inv=1.0_WP/dt diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 0a898244b..08035d708 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -81,10 +81,7 @@ MODULE o_PARAM ! elevation and divergence real(kind=WP) :: epsilon=0.1_WP ! AB2 offset ! Tracers -logical :: i_vert_diff= .true. logical :: i_vert_visc= .true. -character(20) :: tra_adv_ver, tra_adv_hor, tra_adv_lim -real(kind=WP) :: tra_adv_ph, tra_adv_pv logical :: w_split =.false. real(kind=WP) :: w_max_cfl=1.e-5_WP @@ -95,9 +92,7 @@ MODULE o_PARAM integer :: ID integer, allocatable, dimension(:) :: ind2 END TYPE tracer_source3d_type -integer :: num_tracers=2 -integer, dimension(100) :: tracer_ID = RESHAPE((/0, 1/), (/100/), (/0/)) ! ID for each tracer for treating the initialization and surface boundary condition - ! 0=temp, 1=salt etc. + type(tracer_source3d_type), & allocatable, dimension(:) :: ptracers_restore integer :: ptracers_restore_total=0 @@ -133,11 +128,6 @@ MODULE o_PARAM real(kind=WP) :: windmix_kv = 1.e-3 integer :: windmix_nl = 2 -! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). -logical :: smooth_bh_tra = .false. -real(kind=WP) :: gamma0_tra = 0.0005 -real(kind=WP) :: gamma1_tra = 0.0125 -real(kind=WP) :: gamma2_tra = 0. !_______________________________________________________________________________ ! use non-constant reference density if .false. density_ref=density_0 logical :: use_density_ref = .false. @@ -179,13 +169,11 @@ MODULE o_PARAM K_back, c_back, uke_scaling, uke_scaling_factor, smooth_back, smooth_dis, & smooth_back_tend, rosb_dis - NAMELIST /oce_tra/ diff_sh_limit, Kv0_const, double_diffusion, K_ver, K_hor, surf_relax_T, surf_relax_S, & - balance_salt_water, clim_relax, ref_sss_local, ref_sss, i_vert_diff, tra_adv_ver, tra_adv_hor, & - tra_adv_lim, tra_adv_ph, tra_adv_pv, num_tracers, tracer_ID, & + NAMELIST /tracer_phys/ diff_sh_limit, Kv0_const, double_diffusion, K_ver, K_hor, surf_relax_T, surf_relax_S, & + balance_salt_water, clim_relax, ref_sss_local, ref_sss, & use_momix, momix_lat, momix_kv, & use_instabmix, instabmix_kv, & use_windmix, windmix_kv, windmix_nl, & - smooth_bh_tra, gamma0_tra, gamma1_tra, gamma2_tra, & use_kpp_nonlclflx END MODULE o_PARAM diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index c9d157a30..5da467b97 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -209,13 +209,50 @@ SUBROUTINE tracer_init(tracers, mesh) USE MOD_TRACER USE DIAGNOSTICS, only: ldiag_DVD USE g_PARSUP +USE g_ic3d IMPLICIT NONE -integer :: elem_size, node_size -integer :: n -type(t_mesh), intent(in) , target :: mesh -type(t_tracer), intent(inout) , target, allocatable :: tracers(:) +integer :: elem_size, node_size +integer, save :: nm_unit = 104 ! unit to open namelist file, skip 100-102 for cray +integer :: iost +integer :: n + +type(t_mesh), intent(in) , target :: mesh +type(t_tracer), intent(inout), target, allocatable :: tracers(:) +type(nml_tracer_list_type), target, allocatable :: nml_tracer_list(:) + +namelist /tracer_listsize/ num_tracers +namelist /tracer_list / nml_tracer_list +namelist /tracer_general / smooth_bh_tra, gamma0_tra, gamma1_tra, gamma2_tra, i_vert_diff + #include "associate_mesh.h" +! OPEN and read namelist for I/O +open( unit=nm_unit, file='namelist.tra', form='formatted', access='sequential', status='old', iostat=iost ) +if (iost == 0) then + if (mype==0) WRITE(*,*) ' file : ', 'namelist.tra',' open ok' +else + if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.tra',' ; iostat=',iost + call par_ex + stop +end if + +READ(nm_unit, nml=tracer_listsize, iostat=iost) +allocate(nml_tracer_list(num_tracers)) +READ(nm_unit, nml=tracer_list, iostat=iost) +read (nm_unit, nml=tracer_init3d, iostat=iost) +READ(nm_unit, nml=tracer_general, iostat=iost) +close(nm_unit) + +do n=1, num_tracers + if (nml_tracer_list(n)%id==-1) then + if (mype==0) write(*,*) 'number of tracers will be changed from ', num_tracers, ' to ', n-1, '!' + num_tracers=n-1 + EXIT + end if +end do + +if (mype==0) write(*,*) 'total number of tracers is: ', num_tracers + elem_size=myDim_elem2D+eDim_elem2D node_size=myDim_nod2D+eDim_nod2D @@ -226,18 +263,19 @@ SUBROUTINE tracer_init(tracers, mesh) do n=1, num_tracers allocate(tracers(n)%values (nl-1,node_size)) allocate(tracers(n)%valuesAB(nl-1,node_size)) - tracers(n)%tra_adv_hor = TRIM(tra_adv_hor) - tracers(n)%tra_adv_ver = TRIM(tra_adv_ver) - tracers(n)%tra_adv_lim = TRIM(tra_adv_lim) - tracers(n)%tra_adv_ph = tra_adv_ph - tracers(n)%tra_adv_pv = tra_adv_pv + tracers(n)%ID = nml_tracer_list(n)%id + tracers(n)%tra_adv_hor = TRIM(nml_tracer_list(n)%adv_hor) + tracers(n)%tra_adv_ver = TRIM(nml_tracer_list(n)%adv_ver) + tracers(n)%tra_adv_lim = TRIM(nml_tracer_list(n)%adv_lim) + tracers(n)%tra_adv_ph = nml_tracer_list(n)%adv_ph + tracers(n)%tra_adv_pv = nml_tracer_list(n)%adv_pv tracers(n)%smooth_bh_tra = smooth_bh_tra tracers(n)%gamma0_tra = gamma0_tra tracers(n)%gamma1_tra = gamma1_tra tracers(n)%gamma2_tra = gamma2_tra tracers(n)%values = 0. tracers(n)%valuesAB = 0. - tracers(n)%ID = n + tracers(n)%i_vert_diff = i_vert_diff end do allocate(del_ttf(nl-1,node_size)) @@ -257,6 +295,7 @@ END SUBROUTINE tracer_init !_______________________________________________________________________________ SUBROUTINE arrays_init(mesh) USE MOD_MESH +USE MOD_TRACER, only : num_tracers USE o_ARRAYS USE o_PARAM USE g_PARSUP @@ -526,7 +565,7 @@ SUBROUTINE oce_initial_state(tracers, mesh) #include "associate_mesh.h" if (mype==0) write(*,*) num_tracers, ' tracers will be used in FESOM' - if (mype==0) write(*,*) 'tracer IDs are: ', tracer_ID(1:num_tracers) + if (mype==0) write(*,*) 'tracer IDs are: ', tracers(1:num_tracers)%ID ! ! read ocean state ! this must be always done! First two tracers with IDs 0 and 1 are the temperature and salinity. @@ -543,7 +582,7 @@ SUBROUTINE oce_initial_state(tracers, mesh) ! count the passive tracers which require 3D source (ptracers_restore_total) ptracers_restore_total=0 DO i=3, num_tracers - id=tracer_ID(i) + id=tracers(i)%ID SELECT CASE (id) CASE (301) ptracers_restore_total=ptracers_restore_total+1 @@ -558,7 +597,7 @@ SUBROUTINE oce_initial_state(tracers, mesh) rcounter3=0 ! counter for tracers with 3D source DO i=3, num_tracers - id=tracer_ID(i) + id=tracers(i)%ID SELECT CASE (id) CASE (101) ! initialize tracer ID=101 tracers(i)%values(:,:)=0.0_WP @@ -676,7 +715,7 @@ SUBROUTINE before_oce_step(tracers, mesh) implicit none integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(inout), target, allocatable :: tracers(:) #include "associate_mesh.h" From a3427b05d4555155b892078d306842dacd93b364 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 13 Sep 2021 12:00:07 +0200 Subject: [PATCH 369/909] namelist unit for tracer_phys was missing --- src/gen_model_setup.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index 9feea3c49..8226de04d 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -59,6 +59,7 @@ subroutine read_namelist close (20) nmlfile ='namelist.tra' ! name of ocean namelist file + open (20,file=nmlfile) read (20,NML=tracer_phys) close (20) From 650d79fb52bf4d0ffe9437765161e737ae5cf288 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 13 Sep 2021 13:30:36 +0200 Subject: [PATCH 370/909] added trcer interface to Icepack. shall work! --- src/fvom_main.F90 | 2 +- src/icepack_drivers/icedrv_advection.F90 | 14 +++++------ src/icepack_drivers/icedrv_init.F90 | 30 ++++++++++++++---------- src/icepack_drivers/icedrv_main.F90 | 6 +++-- src/icepack_drivers/icedrv_transfer.F90 | 2 +- 5 files changed, 30 insertions(+), 24 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 3f85b7e95..c7ac60153 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -135,7 +135,7 @@ program main if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' call set_icepack call alloc_icepack - call init_icepack(mesh) + call init_icepack(tracers(1), mesh) if (mype==0) write(*,*) 'Icepack: setup complete' #endif diff --git a/src/icepack_drivers/icedrv_advection.F90 b/src/icepack_drivers/icedrv_advection.F90 index c15b3e47c..b52bccf97 100644 --- a/src/icepack_drivers/icedrv_advection.F90 +++ b/src/icepack_drivers/icedrv_advection.F90 @@ -100,7 +100,7 @@ end subroutine tg_rhs_icepack module subroutine init_advection_icepack(mesh) use o_param - use o_mesh + use mod_tracer use g_parsup use mod_mesh @@ -139,7 +139,7 @@ end subroutine init_advection_icepack subroutine fill_mass_matrix_icepack(mesh) use mod_mesh - use o_mesh + use mod_tracer use i_param use g_parsup @@ -215,7 +215,7 @@ subroutine solve_low_order_icepack(mesh, trc) ! mass matrix on the lhs is replaced with the lumped one. use mod_mesh - use o_mesh + use mod_tracer use i_param use g_parsup @@ -253,7 +253,7 @@ end subroutine solve_low_order_icepack subroutine solve_high_order_icepack(mesh, trc) use mod_mesh - use o_mesh + use mod_tracer use i_param use g_parsup @@ -309,7 +309,7 @@ subroutine fem_fct_icepack(mesh, trc) ! Turek. (kuzmin@math.uni-dortmund.de) use mod_mesh - use o_mesh + use mod_tracer use o_param use i_param use g_parsup @@ -460,7 +460,7 @@ end subroutine fem_fct_icepack subroutine tg_rhs_div_icepack(mesh, trc) use mod_mesh - use o_mesh + use mod_tracer use o_param use i_param use g_parsup @@ -529,7 +529,7 @@ end subroutine tg_rhs_div_icepack subroutine update_for_div_icepack(mesh, trc) use mod_mesh - use o_mesh + use mod_tracer use o_param use i_param use g_parsup diff --git a/src/icepack_drivers/icedrv_init.F90 b/src/icepack_drivers/icedrv_init.F90 index aaea17469..b2b821ed8 100644 --- a/src/icepack_drivers/icedrv_init.F90 +++ b/src/icepack_drivers/icedrv_init.F90 @@ -27,10 +27,10 @@ contains - subroutine init_state() + subroutine init_state(tracer) use icepack_intfc, only: icepack_aggregate - + use mod_tracer implicit none integer (kind=int_kind) :: & @@ -49,7 +49,8 @@ subroutine init_state() nt_ipnd, nt_aero, nt_fsd character(len=*), parameter :: subname='(init_state)' - + type(t_tracer), intent(in), target :: tracer + !----------------------------------------------------------------- ! query Icepack values !----------------------------------------------------------------- @@ -199,7 +200,7 @@ subroutine init_state() ! Set state variables !----------------------------------------------------------------- - call init_state_var() + call init_state_var(tracer) end subroutine init_state @@ -909,7 +910,7 @@ end subroutine init_faero !======================================================================= - module subroutine init_icepack(mesh) + module subroutine init_icepack(tracer, mesh) use icepack_intfc, only: icepack_init_itd use icepack_intfc, only: icepack_init_itd_hist @@ -917,6 +918,7 @@ module subroutine init_icepack(mesh) use icepack_intfc, only: icepack_init_fsd_bounds use icepack_intfc, only: icepack_warnings_flush use mod_mesh + use mod_tracer implicit none @@ -926,8 +928,8 @@ module subroutine init_icepack(mesh) tr_fsd, & ! from icepack wave_spec ! from icepack character(len=*), parameter :: subname='(icedrv_initialize)' - type(t_mesh), intent(in), target :: mesh - + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracer call icepack_query_parameters(wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_aero_out=tr_aero) call icepack_query_tracer_flags(tr_zaero_out=tr_zaero) @@ -979,7 +981,7 @@ module subroutine init_icepack(mesh) call init_fsd call fesom_to_icepack(mesh) - call init_state ! initialize the ice state + call init_state(tracer) ! initialize the ice state call init_history_therm ! initialize thermo history variables if (tr_fsd .and. wave_spec) call init_wave_spec ! wave spectrum in ice @@ -996,15 +998,17 @@ end subroutine init_icepack !======================================================================= - subroutine init_state_var () + subroutine init_state_var (tracer) use icepack_intfc, only: icepack_init_fsd use icepack_intfc, only: icepack_aggregate - use o_arrays, only: tr_arr + use mod_tracer implicit none ! local variables - + type(t_tracer), intent(in), target :: tracer + real(kind=WP), dimension(:,:), pointer :: tr_arr + integer (kind=int_kind) :: & i , & ! horizontal indices k , & ! ice layer index @@ -1033,7 +1037,7 @@ subroutine init_state_var () character(len=char_len_long), parameter :: ice_ic='default' character(len=*), parameter :: subname='(set_state_var)' - + tr_arr=>tracer%values(:,:) !----------------------------------------------------------------- ! query Icepack values !----------------------------------------------------------------- @@ -1103,7 +1107,7 @@ subroutine init_state_var () enddo do i = 1, nx - if (tr_arr(1,i,1) < 0.0_dbl_kind) then ! + if (tr_arr(1,i) < 0.0_dbl_kind) then ! do n = 1, ncat ! ice volume, snow volume aicen(i,n) = ainit(n) diff --git a/src/icepack_drivers/icedrv_main.F90 b/src/icepack_drivers/icedrv_main.F90 index 2daf1ea91..ea06fa808 100644 --- a/src/icepack_drivers/icedrv_main.F90 +++ b/src/icepack_drivers/icedrv_main.F90 @@ -788,10 +788,12 @@ module subroutine init_history_bgc() end subroutine init_history_bgc ! Initialize all - module subroutine init_icepack(mesh) + module subroutine init_icepack(tracer, mesh) use mod_mesh + use mod_tracer implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracer end subroutine init_icepack ! Copy variables from fesom to icepack diff --git a/src/icepack_drivers/icedrv_transfer.F90 b/src/icepack_drivers/icedrv_transfer.F90 index 6ba70afa6..80e51d97a 100644 --- a/src/icepack_drivers/icedrv_transfer.F90 +++ b/src/icepack_drivers/icedrv_transfer.F90 @@ -31,7 +31,7 @@ module subroutine fesom_to_icepack(mesh) use g_config, only: dt use o_param, only: mstep use mod_mesh - use o_mesh + use mod_tracer use g_parsup use g_clock From ad3821b52ea504c22acb7d8c47664290f23d86e9 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 13 Sep 2021 13:41:37 +0200 Subject: [PATCH 371/909] mesh partitioning has been modified wo work after with refactored tracers --- src/fvom_init.F90 | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 2ff677fc3..774365f8b 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -323,19 +323,19 @@ SUBROUTINE find_edges_ini(mesh) subroutine elem_center(elem, x, y, mesh) USE MOD_MESH USE g_CONFIG - integer, intent(in) :: elem - real(kind=WP), intent(out) :: x, y - type(t_mesh), intent(in), target :: mesh + integer, intent(in) :: elem + real(kind=WP), intent(out) :: x, y + type(t_mesh), intent(in), target :: mesh end subroutine elem_center end interface -integer, allocatable :: aux1(:), ne_num(:), ne_pos(:,:) +integer, allocatable :: aux1(:), ne_num(:), ne_pos(:,:), nn_num(:), nn_pos(:,:) integer :: counter, counter_in, n, k, q integer :: elem, elem1, elems(2), q1, q2 integer :: elnodes(4), ed(2), flag, eledges(4) integer :: temp(100), node real(kind=WP) :: xc(2), xe(2), ax(3), amin -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" ! ==================== ! (a) find edges. To make the procedure fast From cbaae0bd3be154ec08c748cef5f018eb590b7815 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 13 Sep 2021 14:00:42 +0200 Subject: [PATCH 372/909] adv_tra_hor_* used the horizontal velocities through the "use module" instead of taking the locally passed arrays --- src/MOD_MESH.F90 | 2 +- src/oce_adv_tra_driver.F90 | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index 0e275067a..d07087acd 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -9,7 +9,7 @@ MODULE MOD_MESH TYPE SPARSE_MATRIX integer :: nza integer :: dim - real(kind=WP), allocatable, dimension(:) :: values + real(kind=WP), allocatable, dimension(:) :: values integer(int32), allocatable, dimension(:) :: colind integer(int32), allocatable, dimension(:) :: rowptr integer(int32), allocatable, dimension(:) :: colind_loc diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 63e3b371a..e49abae65 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -144,11 +144,11 @@ subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) SELECT CASE(trim(tracer%tra_adv_hor)) CASE('MUSCL') ! compute the untidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above) - call adv_tra_hor_muscl(ttfAB, uv, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_muscl(ttfAB, vel, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) CASE('MFCT') - call adv_tra_hor_mfct(ttfAB, uv, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_mfct(ttfAB, vel, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_hor_upw1(ttfAB, uv, mesh, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_upw1(ttfAB, vel, mesh, adv_flux_hor, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracer%tra_adv_hor), '! Check your namelists!' call par_ex(1) From 42d0f8789de4f36457d5ebc3ee377ac9952df88d Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 14 Sep 2021 17:24:58 +0200 Subject: [PATCH 373/909] User-defined derived-type Input/Output for type t_mesh has been implemented. "write(fileID)" mesh and "read(fileID) mesh" is possible! --- src/MOD_MESH.F90 | 159 +++++++++++++++++++++++++++++++- src/MOD_READ_BINARY_ARRAYS.F90 | 105 +++++++++++++++++++++ src/MOD_WRITE_BINARY_ARRAYS.F90 | 147 +++++++++++++++++++++++++++++ 3 files changed, 410 insertions(+), 1 deletion(-) create mode 100644 src/MOD_READ_BINARY_ARRAYS.F90 create mode 100644 src/MOD_WRITE_BINARY_ARRAYS.F90 diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index d07087acd..e8059aca2 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -1,6 +1,8 @@ !========================================================== MODULE MOD_MESH USE O_PARAM +USE MOD_WRITE_BINARY_ARRAYYS +USE MOD_READ_BINARY_ARRAYYS USE, intrinsic :: ISO_FORTRAN_ENV IMPLICIT NONE SAVE @@ -91,8 +93,163 @@ MODULE MOD_MESH integer, allocatable, dimension(:) :: ind_south, ind_north !#endif - character(:), allocatable :: representative_checksum +character(:), allocatable :: representative_checksum + +contains + procedure write_t_mesh + procedure read_t_mesh + generic :: write(unformatted) => write_t_mesh + generic :: read(unformatted) => read_t_mesh END TYPE T_MESH + +contains + +! Unformatted writing for t_mesh +subroutine write_t_mesh(mesh, unit, iostat, iomsg) + class(t_mesh), intent(in) :: mesh + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i, j, k + integer :: s1, s2, s3 + ! write records (giving sizes for the allocation for arrays) + write(unit, iostat=iostat, iomsg=iomsg) mesh%nod2D + write(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_area + write(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_areawithcav + write(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D + write(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D_in + write(unit, iostat=iostat, iomsg=iomsg) mesh%elem2D + call write_bin_array(mesh%elem2D_nodes, unit, iostat, iomsg) + call write_bin_array(mesh%edges, unit, iostat, iomsg) + call write_bin_array(mesh%edge_tri, unit, iostat, iomsg) + call write_bin_array(mesh%elem_edges, unit, iostat, iomsg) + call write_bin_array(mesh%elem_area, unit, iostat, iomsg) + call write_bin_array(mesh%edge_dxdy, unit, iostat, iomsg) + + call write_bin_array(mesh%edge_cross_dxdy, unit, iostat, iomsg) + call write_bin_array(mesh%elem_cos, unit, iostat, iomsg) + call write_bin_array(mesh%metric_factor, unit, iostat, iomsg) + call write_bin_array(mesh%elem_neighbors, unit, iostat, iomsg) + call write_bin_array(mesh%nod_in_elem2D, unit, iostat, iomsg) + call write_bin_array(mesh%x_corners, unit, iostat, iomsg) + call write_bin_array(mesh%y_corners, unit, iostat, iomsg) + call write_bin_array(mesh%nod_in_elem2D_num, unit, iostat, iomsg) + call write_bin_array(mesh%depth, unit, iostat, iomsg) + call write_bin_array(mesh%gradient_vec, unit, iostat, iomsg) + call write_bin_array(mesh%gradient_sca, unit, iostat, iomsg) + call write_bin_array(mesh%bc_index_nod2D, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) mesh%nl + + call write_bin_array(mesh%zbar, unit, iostat, iomsg) + call write_bin_array(mesh%Z, unit, iostat, iomsg) + call write_bin_array(mesh%elem_depth, unit, iostat, iomsg) + call write_bin_array(mesh%ulevels, unit, iostat, iomsg) + call write_bin_array(mesh%ulevels_nod2D, unit, iostat, iomsg) + call write_bin_array(mesh%ulevels_nod2D_max, unit, iostat, iomsg) + call write_bin_array(mesh%nlevels, unit, iostat, iomsg) + call write_bin_array(mesh%nlevels_nod2D, unit, iostat, iomsg) + call write_bin_array(mesh%nlevels_nod2D_min, unit, iostat, iomsg) + call write_bin_array(mesh%area, unit, iostat, iomsg) + call write_bin_array(mesh%area_inv, unit, iostat, iomsg) + call write_bin_array(mesh%areasvol, unit, iostat, iomsg) + call write_bin_array(mesh%areasvol_inv, unit, iostat, iomsg) + call write_bin_array(mesh%mesh_resolution, unit, iostat, iomsg) + + call write_bin_array(mesh%cavity_flag_n, unit, iostat, iomsg) + call write_bin_array(mesh%cavity_flag_e, unit, iostat, iomsg) + call write_bin_array(mesh%cavity_depth, unit, iostat, iomsg) + call write_bin_array(mesh%cavity_nrst_cavlpnt_xyz, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%dim + write(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%nza + + call write_bin_array(mesh%ssh_stiff%rowptr, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%colind, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%values, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%colind_loc, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%rowptr_loc, unit, iostat, iomsg) + + call write_bin_array(mesh%lump2d_south, unit, iostat, iomsg) + call write_bin_array(mesh%lump2d_north, unit, iostat, iomsg) + call write_bin_array(mesh%ind_south, unit, iostat, iomsg) + call write_bin_array(mesh%ind_north, unit, iostat, iomsg) +! call write_bin_array(mesh%representative_checksum, unit, iostat, iomsg) +end subroutine write_t_mesh + +! Unformatted reading for t_mesh +subroutine read_t_mesh(mesh, unit, iostat, iomsg) + class(t_mesh), intent(inout) :: mesh + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i, j, k + integer :: s1, s2, s3 + ! write records (giving sizes for the allocation for arrays) + read(unit, iostat=iostat, iomsg=iomsg) mesh%nod2D + read(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_area + read(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_areawithcav + read(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D + read(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D_in + read(unit, iostat=iostat, iomsg=iomsg) mesh%elem2D + + call read_bin_array(mesh%elem2D_nodes, unit, iostat, iomsg) + call read_bin_array(mesh%edges, unit, iostat, iomsg) + call read_bin_array(mesh%edge_tri, unit, iostat, iomsg) + call read_bin_array(mesh%elem_edges, unit, iostat, iomsg) + call read_bin_array(mesh%elem_area, unit, iostat, iomsg) + call read_bin_array(mesh%edge_dxdy, unit, iostat, iomsg) + + call read_bin_array(mesh%edge_cross_dxdy, unit, iostat, iomsg) + call read_bin_array(mesh%elem_cos, unit, iostat, iomsg) + call read_bin_array(mesh%metric_factor, unit, iostat, iomsg) + call read_bin_array(mesh%elem_neighbors, unit, iostat, iomsg) + call read_bin_array(mesh%nod_in_elem2D, unit, iostat, iomsg) + call read_bin_array(mesh%x_corners, unit, iostat, iomsg) + call read_bin_array(mesh%y_corners, unit, iostat, iomsg) + call read_bin_array(mesh%nod_in_elem2D_num, unit, iostat, iomsg) + call read_bin_array(mesh%depth, unit, iostat, iomsg) + call read_bin_array(mesh%gradient_vec, unit, iostat, iomsg) + call read_bin_array(mesh%gradient_sca, unit, iostat, iomsg) + call read_bin_array(mesh%bc_index_nod2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) mesh%nl + + call read_bin_array(mesh%zbar, unit, iostat, iomsg) + call read_bin_array(mesh%Z, unit, iostat, iomsg) + call read_bin_array(mesh%elem_depth, unit, iostat, iomsg) + call read_bin_array(mesh%ulevels, unit, iostat, iomsg) + call read_bin_array(mesh%ulevels_nod2D, unit, iostat, iomsg) + call read_bin_array(mesh%ulevels_nod2D_max, unit, iostat, iomsg) + call read_bin_array(mesh%nlevels, unit, iostat, iomsg) + call read_bin_array(mesh%nlevels_nod2D, unit, iostat, iomsg) + call read_bin_array(mesh%nlevels_nod2D_min, unit, iostat, iomsg) + call read_bin_array(mesh%area, unit, iostat, iomsg) + call read_bin_array(mesh%area_inv, unit, iostat, iomsg) + call read_bin_array(mesh%areasvol, unit, iostat, iomsg) + call read_bin_array(mesh%areasvol_inv, unit, iostat, iomsg) + call read_bin_array(mesh%mesh_resolution, unit, iostat, iomsg) + + call read_bin_array(mesh%cavity_flag_n, unit, iostat, iomsg) + call read_bin_array(mesh%cavity_flag_e, unit, iostat, iomsg) + call read_bin_array(mesh%cavity_depth, unit, iostat, iomsg) + call read_bin_array(mesh%cavity_nrst_cavlpnt_xyz, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%dim + read(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%nza + + call read_bin_array(mesh%ssh_stiff%rowptr, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%colind, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%values, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%colind_loc, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%rowptr_loc, unit, iostat, iomsg) + + call read_bin_array(mesh%lump2d_south, unit, iostat, iomsg) + call read_bin_array(mesh%lump2d_north, unit, iostat, iomsg) + call read_bin_array(mesh%ind_south, unit, iostat, iomsg) + call read_bin_array(mesh%ind_north, unit, iostat, iomsg) +! call read_bin_array(mesh%representative_checksum, unit, iostat, iomsg) +end subroutine read_t_mesh end module MOD_MESH !========================================================== diff --git a/src/MOD_READ_BINARY_ARRAYS.F90 b/src/MOD_READ_BINARY_ARRAYS.F90 new file mode 100644 index 000000000..fa64e57d5 --- /dev/null +++ b/src/MOD_READ_BINARY_ARRAYS.F90 @@ -0,0 +1,105 @@ +!========================================================== +! +!------------------------------------------------------------------------------------------ +! useful interface (read_bin_array) for reading arbitary binary arrays into an opened file +MODULE MOD_READ_BINARY_ARRAYYS +use o_PARAM +private +public :: read_bin_array +INTERFACE read_bin_array + MODULE PROCEDURE read1d_real, read1d_int, read1d_char, read2d_real, read2d_int, read3d_real, read3d_int +END INTERFACE +contains +subroutine read1d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(inout), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + allocate(arr(s1)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_real + +subroutine read1d_int(arr, unit, iostat, iomsg) + integer, intent(inout), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + allocate(arr(s1)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_int + +subroutine read1d_char(arr, unit, iostat, iomsg) + character, intent(inout), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + allocate(arr(s1)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_char + +subroutine read2d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(inout), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2 + if ((s1==0) .or. (s2==0)) return + allocate(arr(s1, s2)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) +end subroutine read2d_real + +subroutine read2d_int(arr, unit, iostat, iomsg) + integer, intent(inout), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2 + if ((s1==0) .or. (s2==0)) return + allocate(arr(s1, s2)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) +end subroutine read2d_int + +subroutine read3d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(inout), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + if ((s1==0) .or. (s2==0) .or. (s3==0)) return + allocate(arr(s1,s2,s3)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) +end subroutine read3d_real + +subroutine read3d_int(arr, unit, iostat, iomsg) + integer, intent(inout), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + if ((s1==0) .or. (s2==0) .or. (s3==0)) return + allocate(arr(s1,s2,s3)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) +end subroutine read3d_int +end module MOD_READ_BINARY_ARRAYYS +!========================================================== + diff --git a/src/MOD_WRITE_BINARY_ARRAYS.F90 b/src/MOD_WRITE_BINARY_ARRAYS.F90 new file mode 100644 index 000000000..ad6449f0b --- /dev/null +++ b/src/MOD_WRITE_BINARY_ARRAYS.F90 @@ -0,0 +1,147 @@ +!========================================================== +! +!------------------------------------------------------------------------------------------ +! useful interface (write_bin_array) for writing arbitary binary arrays into an opened file +MODULE MOD_WRITE_BINARY_ARRAYYS +use o_PARAM +private +public :: write_bin_array +INTERFACE write_bin_array + MODULE PROCEDURE write1d_real, write1d_int, write1d_char, write2d_real, write2d_int, write3d_real, write3d_int +END INTERFACE +contains + +subroutine write1d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_real + +subroutine write1d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_int + +subroutine write1d_char(arr, unit, iostat, iomsg) + character, intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_char + +subroutine write2d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) + else + s1=0 + s2=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + end if +end subroutine write2d_real + +subroutine write2d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) + else + s1=0 + s2=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + end if +end subroutine write2d_int + + +subroutine write3d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + s3=size(arr, 3) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) + else + s1=0 + s2=0 + s3=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + end if +end subroutine write3d_real + +subroutine write3d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + s3=size(arr, 3) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) + else + s1=0 + s2=0 + s3=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + end if +end subroutine write3d_int +end module MOD_WRITE_BINARY_ARRAYYS +!========================================================== + From fd7e91c3333dfa17eb388366d397b59b3f3b8224 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 16 Sep 2021 17:02:26 +0200 Subject: [PATCH 374/909] ../mesh_part/CMakeLists.txt fixed for missing files --- mesh_part/CMakeLists.txt | 2 +- src/MOD_MESH.F90 | 4 ++-- src/MOD_READ_BINARY_ARRAYS.F90 | 4 ++-- src/MOD_WRITE_BINARY_ARRAYS.F90 | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/mesh_part/CMakeLists.txt b/mesh_part/CMakeLists.txt index 81bbe0cc4..afe0c5e58 100644 --- a/mesh_part/CMakeLists.txt +++ b/mesh_part/CMakeLists.txt @@ -4,7 +4,7 @@ project(fesom_ini C Fortran) # get our source files set(src_home ${CMAKE_CURRENT_LIST_DIR}/../src) -set(sources_Fortran ${src_home}/MOD_MESH.F90 ${src_home}/oce_modules.F90 ${src_home}/gen_modules_config.F90 ${src_home}/gen_modules_partitioning.F90 ${src_home}/gen_modules_rotate_grid.F90 ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90) +set(sources_Fortran ${src_home}/MOD_MESH.F90 ${src_home}/oce_modules.F90 ${src_home}/gen_modules_config.F90 ${src_home}/gen_modules_partitioning.F90 ${src_home}/gen_modules_rotate_grid.F90 ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90 ${src_home}/MOD_READ_BINARY_ARRAYS.F90 ${src_home}/MOD_WRITE_BINARY_ARRAYS.F90) set(sources_C ${src_home}/fort_part.c) diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index e8059aca2..2cc979756 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -1,8 +1,8 @@ !========================================================== MODULE MOD_MESH USE O_PARAM -USE MOD_WRITE_BINARY_ARRAYYS -USE MOD_READ_BINARY_ARRAYYS +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS USE, intrinsic :: ISO_FORTRAN_ENV IMPLICIT NONE SAVE diff --git a/src/MOD_READ_BINARY_ARRAYS.F90 b/src/MOD_READ_BINARY_ARRAYS.F90 index fa64e57d5..95f71c584 100644 --- a/src/MOD_READ_BINARY_ARRAYS.F90 +++ b/src/MOD_READ_BINARY_ARRAYS.F90 @@ -2,7 +2,7 @@ ! !------------------------------------------------------------------------------------------ ! useful interface (read_bin_array) for reading arbitary binary arrays into an opened file -MODULE MOD_READ_BINARY_ARRAYYS +MODULE MOD_READ_BINARY_ARRAYS use o_PARAM private public :: read_bin_array @@ -100,6 +100,6 @@ subroutine read3d_int(arr, unit, iostat, iomsg) allocate(arr(s1,s2,s3)) read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) end subroutine read3d_int -end module MOD_READ_BINARY_ARRAYYS +end module MOD_READ_BINARY_ARRAYS !========================================================== diff --git a/src/MOD_WRITE_BINARY_ARRAYS.F90 b/src/MOD_WRITE_BINARY_ARRAYS.F90 index ad6449f0b..c76d39fb9 100644 --- a/src/MOD_WRITE_BINARY_ARRAYS.F90 +++ b/src/MOD_WRITE_BINARY_ARRAYS.F90 @@ -2,7 +2,7 @@ ! !------------------------------------------------------------------------------------------ ! useful interface (write_bin_array) for writing arbitary binary arrays into an opened file -MODULE MOD_WRITE_BINARY_ARRAYYS +MODULE MOD_WRITE_BINARY_ARRAYS use o_PARAM private public :: write_bin_array @@ -142,6 +142,6 @@ subroutine write3d_int(arr, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 end if end subroutine write3d_int -end module MOD_WRITE_BINARY_ARRAYYS +end module MOD_WRITE_BINARY_ARRAYS !========================================================== From 67dd2ec15ed778e287557db4bbee994ad3b4f86e Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Thu, 16 Sep 2021 17:19:23 +0200 Subject: [PATCH 375/909] Remove comments, they confuse gfortran (#164) * remove comments, they confuse gfortran * Update fesom2.1.yml * fix test_souf experiment * Update fesom2_icepack.yml --- .github/workflows/fesom2.1.yml | 2 +- .github/workflows/fesom2_icepack.yml | 2 +- config/namelist.tra | 7 +++---- setups/test_souf/setup.yml | 5 ++++- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.github/workflows/fesom2.1.yml b/.github/workflows/fesom2.1.yml index 4facc60cc..f5c5de5e7 100644 --- a/.github/workflows/fesom2.1.yml +++ b/.github/workflows/fesom2.1.yml @@ -12,7 +12,7 @@ jobs: # Containers must run in Linux based operating systems runs-on: ubuntu-latest # Docker Hub image that `container-job` executes in - container: koldunovn/fesom2_test:fesom2.1 + container: koldunovn/fesom2_test:f2.1_tracers # Service containers to run with `gfortran_ubuntu` steps: diff --git a/.github/workflows/fesom2_icepack.yml b/.github/workflows/fesom2_icepack.yml index aeb50481f..941e5a075 100644 --- a/.github/workflows/fesom2_icepack.yml +++ b/.github/workflows/fesom2_icepack.yml @@ -12,7 +12,7 @@ jobs: # Containers must run in Linux based operating systems runs-on: ubuntu-latest # Docker Hub image that `container-job` executes in - container: koldunovn/fesom2_test:fesom2.1 + container: koldunovn/fesom2_test:f2.1_tracers # Service containers to run with `gfortran_ubuntu` steps: diff --git a/config/namelist.tra b/config/namelist.tra index 81a1c3435..3a03b1262 100644 --- a/config/namelist.tra +++ b/config/namelist.tra @@ -3,11 +3,10 @@ num_tracers=100 !number of tracers to allocate. shallbe large or equal to the nu / &tracer_list -! ID | tra_adv_hor | tra_adv_ver | tra_adv_lim | tra_adv_ph | tra_adv_pv nml_tracer_list = -1 , 'MFCT', 'QR4C', 'FCT ', 1., 1., !ID=1=Temperature -2 , 'MFCT', 'QR4C', 'FCT ', 1., 1., !ID=2=Salinity -!101, 'UPW1', 'UPW1', 'NON ', 0., 0. !ID=X=Whatever +1 , 'MFCT', 'QR4C', 'FCT ', 1., 1., +2 , 'MFCT', 'QR4C', 'FCT ', 1., 1., +!101, 'UPW1', 'UPW1', 'NON ', 0., 0. / &tracer_init3d ! initial conditions for tracers diff --git a/setups/test_souf/setup.yml b/setups/test_souf/setup.yml index f076d102b..4c4bd67cd 100644 --- a/setups/test_souf/setup.yml +++ b/setups/test_souf/setup.yml @@ -42,12 +42,15 @@ namelist.oce: Fer_GM: False Redi: False mix_scheme: "PP" - oce_tra: + +namelist.tra: + tracer_phys: use_momix: False K_hor: 10 surf_relax_S: 0.0 balance_salt_water: False + namelist.ice: ice_dyn: whichEVP: 0 From f3594dfc391b88d64149cd6bcfa64e6ac436efbf Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 17 Sep 2021 16:15:48 +0200 Subject: [PATCH 376/909] update on tracer refactoring. t_tracer type contains data array (t_tracer%data(n), n=1, t_tracer%num_tracers) and the work array (t_tracer%work). A generic write(unformatted) has been hard coded into MOD_TRACER and MOD_MESH. It is now possible to dump these derived types to create dwarfs! --- mesh_part/CMakeLists.txt | 2 +- src/MOD_MESH.F90 | 16 +- src/MOD_READ_BINARY_ARRAYS.F90 | 4 +- src/MOD_TRACER.F90 | 191 +++++++++++++++++++++-- src/MOD_WRITE_BINARY_ARRAYS.F90 | 4 +- src/associate_mesh.h | 7 +- src/cavity_param.F90 | 14 +- src/fvom_main.F90 | 44 +++++- src/gen_forcing_couple.F90 | 8 +- src/gen_ic3d.F90 | 46 +++--- src/gen_modules_cvmix_kpp.F90 | 6 +- src/gen_modules_diag.F90 | 75 ++++----- src/ice_oce_coupling.F90 | 16 +- src/ice_setup_step.F90 | 11 +- src/icepack_drivers/icedrv_advection.F90 | 9 +- src/icepack_drivers/icedrv_init.F90 | 10 +- src/icepack_drivers/icedrv_main.F90 | 4 +- src/io_blowup.F90 | 10 +- src/io_meandata.F90 | 26 +-- src/io_restart.F90 | 10 +- src/oce_adv_tra_driver.F90 | 115 +++++++------- src/oce_adv_tra_fct.F90 | 58 ++++--- src/oce_adv_tra_hor.F90 | 19 ++- src/oce_adv_tra_ver.F90 | 20 +-- src/oce_ale.F90 | 8 +- src/oce_ale_mixing_kpp.F90 | 21 +-- src/oce_ale_pressure_bv.F90 | 54 +++---- src/oce_ale_tracer.F90 | 187 ++++++++++++---------- src/oce_muscl_adv.F90 | 120 +++++++------- src/oce_setup_step.F90 | 132 ++++++++-------- src/oce_tracer_mod.F90 | 33 ++-- src/toy_channel_soufflet.F90 | 44 +++--- src/write_step_info.F90 | 48 +++--- 33 files changed, 811 insertions(+), 561 deletions(-) diff --git a/mesh_part/CMakeLists.txt b/mesh_part/CMakeLists.txt index 81bbe0cc4..afe0c5e58 100644 --- a/mesh_part/CMakeLists.txt +++ b/mesh_part/CMakeLists.txt @@ -4,7 +4,7 @@ project(fesom_ini C Fortran) # get our source files set(src_home ${CMAKE_CURRENT_LIST_DIR}/../src) -set(sources_Fortran ${src_home}/MOD_MESH.F90 ${src_home}/oce_modules.F90 ${src_home}/gen_modules_config.F90 ${src_home}/gen_modules_partitioning.F90 ${src_home}/gen_modules_rotate_grid.F90 ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90) +set(sources_Fortran ${src_home}/MOD_MESH.F90 ${src_home}/oce_modules.F90 ${src_home}/gen_modules_config.F90 ${src_home}/gen_modules_partitioning.F90 ${src_home}/gen_modules_rotate_grid.F90 ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90 ${src_home}/MOD_READ_BINARY_ARRAYS.F90 ${src_home}/MOD_WRITE_BINARY_ARRAYS.F90) set(sources_C ${src_home}/fort_part.c) diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index e8059aca2..9946013c5 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -1,8 +1,8 @@ !========================================================== MODULE MOD_MESH USE O_PARAM -USE MOD_WRITE_BINARY_ARRAYYS -USE MOD_READ_BINARY_ARRAYYS +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS USE, intrinsic :: ISO_FORTRAN_ENV IMPLICIT NONE SAVE @@ -91,7 +91,11 @@ MODULE MOD_MESH !#if defined (__oasis) real(kind=WP), allocatable, dimension(:) :: lump2d_south, lump2d_north integer, allocatable, dimension(:) :: ind_south, ind_north -!#endif +!#endif + +integer :: nn_size +integer, allocatable, dimension(:) :: nn_num +integer, allocatable, dimension(:,:) :: nn_pos character(:), allocatable :: representative_checksum @@ -174,6 +178,9 @@ subroutine write_t_mesh(mesh, unit, iostat, iomsg) call write_bin_array(mesh%lump2d_north, unit, iostat, iomsg) call write_bin_array(mesh%ind_south, unit, iostat, iomsg) call write_bin_array(mesh%ind_north, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) mesh%nn_size + call write_bin_array(mesh%nn_num, unit, iostat, iomsg) + call write_bin_array(mesh%nn_pos, unit, iostat, iomsg) ! call write_bin_array(mesh%representative_checksum, unit, iostat, iomsg) end subroutine write_t_mesh @@ -248,6 +255,9 @@ subroutine read_t_mesh(mesh, unit, iostat, iomsg) call read_bin_array(mesh%lump2d_north, unit, iostat, iomsg) call read_bin_array(mesh%ind_south, unit, iostat, iomsg) call read_bin_array(mesh%ind_north, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) mesh%nn_size + call read_bin_array(mesh%nn_num, unit, iostat, iomsg) + call read_bin_array(mesh%nn_pos, unit, iostat, iomsg) ! call read_bin_array(mesh%representative_checksum, unit, iostat, iomsg) end subroutine read_t_mesh end module MOD_MESH diff --git a/src/MOD_READ_BINARY_ARRAYS.F90 b/src/MOD_READ_BINARY_ARRAYS.F90 index fa64e57d5..95f71c584 100644 --- a/src/MOD_READ_BINARY_ARRAYS.F90 +++ b/src/MOD_READ_BINARY_ARRAYS.F90 @@ -2,7 +2,7 @@ ! !------------------------------------------------------------------------------------------ ! useful interface (read_bin_array) for reading arbitary binary arrays into an opened file -MODULE MOD_READ_BINARY_ARRAYYS +MODULE MOD_READ_BINARY_ARRAYS use o_PARAM private public :: read_bin_array @@ -100,6 +100,6 @@ subroutine read3d_int(arr, unit, iostat, iomsg) allocate(arr(s1,s2,s3)) read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) end subroutine read3d_int -end module MOD_READ_BINARY_ARRAYYS +end module MOD_READ_BINARY_ARRAYS !========================================================== diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index b72af3ba7..cfd439ea9 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -2,10 +2,12 @@ MODULE MOD_TRACER USE O_PARAM USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS IMPLICIT NONE SAVE -TYPE T_TRACER +TYPE T_TRACER_DATA real(kind=WP), allocatable, dimension(:,:) :: values, valuesAB ! instant values & Adams-Bashfort interpolation logical :: smooth_bh_tra=.false. real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra @@ -14,17 +16,16 @@ MODULE MOD_TRACER real(kind=WP) :: tra_adv_ph = 1. ! a parameter to be used in horizontal advection (for MUSCL it is the fraction of fourth-order contribution in the solution) real(kind=WP) :: tra_adv_pv = 1. ! a parameter to be used in horizontal advection (for QR4C it is the fraction of fourth-order contribution in the solution) integer :: ID -END TYPE T_TRACER -integer :: num_tracers=2 -! general options for all tracers (can be moved to T_TRACER is needed) -! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). -logical :: smooth_bh_tra = .false. -real(kind=WP) :: gamma0_tra = 0.0005 -real(kind=WP) :: gamma1_tra = 0.0125 -real(kind=WP) :: gamma2_tra = 0. -logical :: i_vert_diff = .true. +contains + procedure WRITE_T_TRACER_DATA + procedure READ_T_TRACER_DATA + generic :: write(unformatted) => WRITE_T_TRACER_DATA + generic :: read(unformatted) => READ_T_TRACER_DATA +END TYPE T_TRACER_DATA + +TYPE T_TRACER_WORK !auxuary arrays to work with tracers: real(kind=WP), allocatable :: del_ttf(:,:) real(kind=WP), allocatable :: del_ttf_advhoriz(:,:),del_ttf_advvert(:,:) @@ -39,21 +40,183 @@ MODULE MOD_TRACER real(kind=WP),allocatable,dimension(:,:) :: fct_ttf_max,fct_ttf_min real(kind=WP),allocatable,dimension(:,:) :: fct_plus,fct_minus ! MUSCL type reconstruction -integer,allocatable,dimension(:) :: nn_num, nboundary_lay -integer,allocatable,dimension(:,:) :: nn_pos +integer,allocatable,dimension(:) :: nboundary_lay integer,allocatable,dimension(:,:) :: edge_up_dn_tri real(kind=WP),allocatable,dimension(:,:,:) :: edge_up_dn_grad +contains + procedure WRITE_T_TRACER_WORK + procedure READ_T_TRACER_WORK + generic :: write(unformatted) => WRITE_T_TRACER_WORK + generic :: read(unformatted) => READ_T_TRACER_WORK +END TYPE T_TRACER_WORK + ! auxury type for reading namelist.tra -type nml_tracer_list_type +TYPE NML_TRACER_LIST_TYPE INTEGER :: ID =-1 CHARACTER(len=4) :: adv_hor ='NONE' CHARACTER(len=4) :: adv_ver ='NONE' CHARACTER(len=4) :: adv_lim ='NONE' REAL(kind=WP) :: adv_ph =1. REAL(kind=WP) :: adv_pv =1. -end type +END TYPE NML_TRACER_LIST_TYPE + +TYPE T_TRACER +! total number of tracers: +integer :: num_tracers=2 +type(t_tracer_data), allocatable :: data(:) +type(t_tracer_work) :: work +! general options for all tracers (can be moved to T_TRACER is needed) +! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). +logical :: smooth_bh_tra = .false. +real(kind=WP) :: gamma0_tra = 0.0005 +real(kind=WP) :: gamma1_tra = 0.0125 +real(kind=WP) :: gamma2_tra = 0. +logical :: i_vert_diff = .true. + +contains +procedure WRITE_T_TRACER +procedure READ_T_TRACER +generic :: write(unformatted) => WRITE_T_TRACER +generic :: read(unformatted) => READ_T_TRACER +END TYPE T_TRACER + +contains + +! Unformatted writing for T_TRACER_DATA +subroutine WRITE_T_TRACER_DATA(tdata, unit, iostat, iomsg) + class(T_TRACER_DATA), intent(in) :: tdata + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call write_bin_array(tdata%values, unit, iostat, iomsg) + call write_bin_array(tdata%valuesAB, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tdata%smooth_bh_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma0_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma1_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma2_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%i_vert_diff + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_hor + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ver + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_lim + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ph + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_pv + write(unit, iostat=iostat, iomsg=iomsg) tdata%ID +end subroutine WRITE_T_TRACER_DATA + +! Unformatted reading for T_TRACER_DATA +subroutine READ_T_TRACER_DATA(tdata, unit, iostat, iomsg) + class(T_TRACER_DATA), intent(inout) :: tdata + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call read_bin_array(tdata%values, unit, iostat, iomsg) + call read_bin_array(tdata%valuesAB, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) tdata%smooth_bh_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma0_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma1_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma2_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%i_vert_diff + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_hor + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ver + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_lim + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ph + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_pv + read(unit, iostat=iostat, iomsg=iomsg) tdata%ID +end subroutine READ_T_TRACER_DATA + +! Unformatted writing for T_TRACER_WORK +subroutine WRITE_T_TRACER_WORK(twork, unit, iostat, iomsg) + class(T_TRACER_WORK), intent(in) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call write_bin_array(twork%del_ttf, unit, iostat, iomsg) + call write_bin_array(twork%del_ttf_advhoriz, unit, iostat, iomsg) + call write_bin_array(twork%del_ttf_advvert, unit, iostat, iomsg) + call write_bin_array(twork%tr_dvd_horiz, unit, iostat, iomsg) + call write_bin_array(twork%tr_dvd_vert, unit, iostat, iomsg) + call write_bin_array(twork%fct_LO, unit, iostat, iomsg) + call write_bin_array(twork%adv_flux_hor, unit, iostat, iomsg) + call write_bin_array(twork%adv_flux_ver, unit, iostat, iomsg) + call write_bin_array(twork%fct_ttf_max, unit, iostat, iomsg) + call write_bin_array(twork%fct_ttf_min, unit, iostat, iomsg) + call write_bin_array(twork%fct_plus, unit, iostat, iomsg) + call write_bin_array(twork%fct_minus, unit, iostat, iomsg) + call write_bin_array(twork%nboundary_lay, unit, iostat, iomsg) + call write_bin_array(twork%edge_up_dn_tri, unit, iostat, iomsg) + call write_bin_array(twork%edge_up_dn_grad, unit, iostat, iomsg) +end subroutine WRITE_T_TRACER_WORK + +! Unformatted reading for T_TRACER_WORK +subroutine READ_T_TRACER_WORK(twork, unit, iostat, iomsg) + class(T_TRACER_WORK), intent(inout) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call read_bin_array(twork%del_ttf, unit, iostat, iomsg) + call read_bin_array(twork%del_ttf_advhoriz, unit, iostat, iomsg) + call read_bin_array(twork%del_ttf_advvert, unit, iostat, iomsg) + call read_bin_array(twork%tr_dvd_horiz, unit, iostat, iomsg) + call read_bin_array(twork%tr_dvd_vert, unit, iostat, iomsg) + call read_bin_array(twork%fct_LO, unit, iostat, iomsg) + call read_bin_array(twork%adv_flux_hor, unit, iostat, iomsg) + call read_bin_array(twork%adv_flux_ver, unit, iostat, iomsg) + call read_bin_array(twork%fct_ttf_max, unit, iostat, iomsg) + call read_bin_array(twork%fct_ttf_min, unit, iostat, iomsg) + call read_bin_array(twork%fct_plus, unit, iostat, iomsg) + call read_bin_array(twork%fct_minus, unit, iostat, iomsg) + call read_bin_array(twork%nboundary_lay, unit, iostat, iomsg) + call read_bin_array(twork%edge_up_dn_tri, unit, iostat, iomsg) + call read_bin_array(twork%edge_up_dn_grad, unit, iostat, iomsg) +end subroutine READ_T_TRACER_WORK + +! Unformatted writing for T_TRACER +subroutine WRITE_T_TRACER(tracer, unit, iostat, iomsg) + class(T_TRACER), intent(in) :: tracer + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + + write(unit, iostat=iostat, iomsg=iomsg) tracer%num_tracers + do i=1, tracer%num_tracers + write(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) + end do + write(unit, iostat=iostat, iomsg=iomsg) tracer%work + write(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff +end subroutine WRITE_T_TRACER + +! Unformatted reading for T_TRACER +subroutine READ_T_TRACER(tracer, unit, iostat, iomsg) + class(T_TRACER), intent(inout) :: tracer + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + read(unit, iostat=iostat, iomsg=iomsg) tracer%num_tracers + write(*,*) 'number of tracers to read: ', tracer%num_tracers + allocate(tracer%data(tracer%num_tracers)) + do i=1, tracer%num_tracers + read(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) + write(*,*) 'tracer info:', tracer%data(i)%ID, TRIM(tracer%data(i)%tra_adv_hor), TRIM(tracer%data(i)%tra_adv_ver), TRIM(tracer%data(i)%tra_adv_lim) + end do + read(unit, iostat=iostat, iomsg=iomsg) tracer%work + read(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff +end subroutine READ_T_TRACER end module MOD_TRACER !========================================================== diff --git a/src/MOD_WRITE_BINARY_ARRAYS.F90 b/src/MOD_WRITE_BINARY_ARRAYS.F90 index ad6449f0b..c76d39fb9 100644 --- a/src/MOD_WRITE_BINARY_ARRAYS.F90 +++ b/src/MOD_WRITE_BINARY_ARRAYS.F90 @@ -2,7 +2,7 @@ ! !------------------------------------------------------------------------------------------ ! useful interface (write_bin_array) for writing arbitary binary arrays into an opened file -MODULE MOD_WRITE_BINARY_ARRAYYS +MODULE MOD_WRITE_BINARY_ARRAYS use o_PARAM private public :: write_bin_array @@ -142,6 +142,6 @@ subroutine write3d_int(arr, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 end if end subroutine write3d_int -end module MOD_WRITE_BINARY_ARRAYYS +end module MOD_WRITE_BINARY_ARRAYS !========================================================== diff --git a/src/associate_mesh.h b/src/associate_mesh.h index 26c197ec6..233036c96 100644 --- a/src/associate_mesh.h +++ b/src/associate_mesh.h @@ -5,6 +5,7 @@ integer , pointer :: edge2D_in real(kind=WP) , pointer :: ocean_area real(kind=WP) , pointer :: ocean_areawithcav integer , pointer :: nl +integer , pointer :: nn_size real(kind=WP), dimension(:,:), pointer :: coord_nod2D, geo_coord_nod2D integer, dimension(:,:) , pointer :: elem2D_nodes integer, dimension(:,:) , pointer :: edges @@ -31,6 +32,8 @@ type(sparse_matrix) , pointer :: ssh_stiff integer, dimension(:) , pointer :: cavity_flag_n, cavity_flag_e real(kind=WP), dimension(:) , pointer :: cavity_depth integer, dimension(:) , pointer :: ulevels, ulevels_nod2D, ulevels_nod2D_max +integer, dimension(:) , pointer :: nn_num +integer, dimension(:,:), pointer :: nn_pos nod2D => mesh%nod2D elem2D => mesh%elem2D @@ -39,7 +42,7 @@ edge2D_in => mesh%edge2D_in ocean_area => mesh%ocean_area ocean_areawithcav => mesh%ocean_areawithcav nl => mesh%nl - +nn_size => mesh%nn_size !!$coord_nod2D => mesh%coord_nod2D !!$geo_coord_nod2D => mesh%geo_coord_nod2D !!$elem2D_nodes => mesh%elem2D_nodes @@ -122,3 +125,5 @@ cavity_depth(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_depth ulevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%ulevels ulevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D ulevels_nod2D_max(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D_max +nn_num(1:myDim_nod2D) => mesh%nn_num +nn_pos(1:mesh%nn_size, 1:myDim_nod2D) => mesh%nn_pos diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index a5503a4c2..51dba031b 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -4,7 +4,7 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) use mod_mesh use mod_tracer type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module @@ -141,7 +141,7 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) !___________________________________________________________________________ type(t_mesh), intent(inout), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers real (kind=WP) :: temp,sal,tin,zice real (kind=WP) :: rhow, rhor, rho real (kind=WP) :: gats1, gats2, gas, gat @@ -189,8 +189,8 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) if(nzmin==1) cycle ! if no cavity skip that node !_______________________________________________________________________ - temp = tracers(1)%values(nzmin,node) - sal = tracers(2)%values(nzmin,node) + temp = tracers%data(1)%values(nzmin,node) + sal = tracers%data(2)%values(nzmin,node) zice = Z_3d_n(nzmin, node) !(<0) !_______________________________________________________________________ @@ -327,7 +327,7 @@ subroutine cavity_heat_water_fluxes_2eq(tracers, mesh) implicit none type(t_mesh), intent(inout) , target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers integer :: node, nzmin real(kind=WP) :: gama, L, aux real(kind=WP) :: c2, c3, c4, c5, c6 @@ -350,8 +350,8 @@ subroutine cavity_heat_water_fluxes_2eq(tracers, mesh) do node=1,myDim_nod2D nzmin = ulevels_nod2D(node) if(nzmin==1) cycle - t_i = tracers(1)%values(nzmin,node) - s_i = tracers(2)%values(nzmin,node) + t_i = tracers%data(1)%values(nzmin,node) + s_i = tracers%data(2)%values(nzmin,node) t_fz = c3*(s_i**(3./2.)) + c4*(s_i**2) + c5*s_i + c6*abs(Z_3d_n(nzmin,node)) heat_flux(node)=vcpw*gama*(t_i - t_fz) ! Hunter2006 used cpw=3974J/Kg (*rhowat) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index c7ac60153..a0ead3093 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -52,8 +52,14 @@ program main real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) real(kind=real32) :: runtime_alltimesteps + type(t_mesh), target, save :: mesh -type(t_tracer), allocatable, target, save :: tracers(:) +type(t_tracer), target, save :: tracers + + +character(LEN=256) :: dump_filename +type(t_mesh), target, save :: mesh_copy +type(t_tracer), target, save :: tracers_copy character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt integer mpi_version_len @@ -105,15 +111,16 @@ program main call check_mesh_consistency(mesh) if (mype==0) t2=MPI_Wtime() - call tracer_init(tracers, mesh) ! allocate array of ocean tracers (derived type "t_tracer") - call arrays_init(mesh) ! allocate other arrays (to be refactured same as tracers in the future) - call ocean_setup(tracers, mesh) ! + call tracer_init(tracers, mesh) ! allocate array of ocean tracers (derived type "t_tracer") + call arrays_init(tracers%num_tracers, mesh) ! allocate other arrays (to be refactured same as tracers in the future) + call ocean_setup(tracers, mesh) if (mype==0) then write(*,*) 'FESOM ocean_setup... complete' t3=MPI_Wtime() endif call forcing_setup(mesh) + if (mype==0) t4=MPI_Wtime() if (use_ice) then call ice_setup(tracers, mesh) @@ -135,10 +142,9 @@ program main if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' call set_icepack call alloc_icepack - call init_icepack(tracers(1), mesh) + call init_icepack(tracers%data(1), mesh) if (mype==0) write(*,*) 'Icepack: setup complete' #endif - call clock_newyear ! check if it is a new year if (mype==0) t6=MPI_Wtime() !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ @@ -150,7 +156,6 @@ program main ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, mesh) call restart(0, .false., r_restart, tracers, mesh) ! istep, l_write, l_read if (mype==0) t7=MPI_Wtime() - ! store grid information into netcdf file if (.not. r_restart) call write_mesh_info(mesh) @@ -159,7 +164,6 @@ program main if (r_restart) then call restart_thickness_ale(mesh) end if - if (mype==0) then t8=MPI_Wtime() @@ -182,6 +186,30 @@ program main write(*,*) '============================================' endif + + write (dump_filename, "(A7,I7.7)") "t_mesh.", mype + open (mype+300, file=trim(dump_filename), status='replace', form="unformatted") + write (mype+300) mesh + close (mype+300) + + open (mype+300, file=trim(dump_filename), status='old', form="unformatted") + read (mype+300) mesh_copy + close (mype+300) + + write (dump_filename, "(A9,I7.7)") "t_tracer.", mype + open (mype+300, file=trim(dump_filename), status='replace', form="unformatted") + write (mype+300) tracers + close (mype+300) + + open (mype+300, file=trim(dump_filename), status='old', form="unformatted") + read (mype+300) tracers_copy + close (mype+300) + +call par_ex +stop +! +! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value + !===================== ! Time stepping !===================== diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index a508b7c25..1817052fd 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -43,7 +43,7 @@ subroutine update_atm_forcing(istep, tracers, mesh) use mod_mesh use mod_tracer integer, intent(in) :: istep - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers type(t_mesh), intent(in), target :: mesh end subroutine end interface @@ -76,7 +76,7 @@ subroutine update_atm_forcing(istep, tracers, mesh) implicit none type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers integer :: i, istep,itime,n2,n,nz,k,elem real(kind=WP) :: i_coef, aux real(kind=WP) :: dux, dvy,tx,ty,tvol @@ -114,7 +114,7 @@ subroutine update_atm_forcing(istep, tracers, mesh) #if defined (__oifs) ! AWI-CM3 outgoing state vectors do n=1,myDim_nod2D+eDim_nod2D - exchange(n)=tracers(1)%values(1, n)+tmelt ! sea surface temperature [K] + exchange(n)=tracers%data(1)%values(1, n)+tmelt ! sea surface temperature [K] end do elseif (i.eq.2) then exchange(:) = a_ice(:) ! ice concentation [%] @@ -129,7 +129,7 @@ subroutine update_atm_forcing(istep, tracers, mesh) #else ! AWI-CM2 outgoing state vectors do n=1,myDim_nod2D+eDim_nod2D - exchange(n)=tracers(1)%values(1, n) ! sea surface temperature [°C] + exchange(n)=tracers%data(1)%values(1, n) ! sea surface temperature [°C] end do elseif (i.eq.2) then exchange(:) = m_ice(:) ! ice thickness [m] diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index b9df852a8..dc1fd43c3 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -324,13 +324,13 @@ SUBROUTINE getcoeffld(tracers, mesh) integer :: ierror ! return error code type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers #include "associate_mesh.h" ALLOCATE(ncdata(nc_Nlon,nc_Nlat,nc_Ndepth), data1d(nc_Ndepth)) ncdata=0.0_WP data1d=0.0_WP - tracers(current_tracer)%values(:,:)=dummy + tracers%data(current_tracer)%values(:,:)=dummy !open NETCDF file on 0 core if (mype==0) then iost = nf_open(filename,NF_NOWRITE,ncid) @@ -424,11 +424,11 @@ SUBROUTINE getcoeffld(tracers, mesh) cf_a = (d2 - d1)/ delta_d ! value of interpolated OB data on Z from model cf_b = d1 - cf_a * nc_depth(d_indx) - !!PS tracers(current_tracer)%values(k,ii) = -cf_a * Z_3d_n(k,ii) + cf_b - tracers(current_tracer)%values(k,ii) = -cf_a * aux_z + cf_b + !!PS tracers%data(current_tracer)%values(k,ii) = -cf_a * Z_3d_n(k,ii) + cf_b + tracers%data(current_tracer)%values(k,ii) = -cf_a * aux_z + cf_b end if elseif (d_indx==0) then - tracers(current_tracer)%values(k,ii)=data1d(1) + tracers%data(current_tracer)%values(k,ii)=data1d(1) end if enddo !___________________________________________________________________ @@ -448,10 +448,10 @@ SUBROUTINE getcoeffld(tracers, mesh) cf_a = (d2 - d1)/ delta_d ! value of interpolated OB data on Z from model cf_b = d1 - cf_a * nc_depth(d_indx) - tracers(current_tracer)%values(k,ii) = -cf_a * Z_3d_n(k,ii) + cf_b + tracers%data(current_tracer)%values(k,ii) = -cf_a * Z_3d_n(k,ii) + cf_b end if elseif (d_indx==0) then - tracers(current_tracer)%values(k,ii)=data1d(1) + tracers%data(current_tracer)%values(k,ii)=data1d(1) end if enddo end if ! --> if (use_cavity) then @@ -476,7 +476,7 @@ SUBROUTINE do_ic3d(tracers, mesh) real(kind=WP) :: locTmax, locTmin, locSmax, locSmin, glo type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers if (mype==0) write(*,*) "Start: Initial conditions for tracers" @@ -484,16 +484,16 @@ SUBROUTINE do_ic3d(tracers, mesh) DO n=1, n_ic3d filename=trim(ClimateDataPath)//trim(filelist(n)) varname =trim(varlist(n)) - DO current_tracer=1, num_tracers - if (tracers(current_tracer)%ID==idlist(n)) then + DO current_tracer=1, tracers%num_tracers + if (tracers%data(current_tracer)%ID==idlist(n)) then ! read initial conditions for current tracer call nc_ic3d_ini(mesh) ! get first coeficients for time inerpolation on model grid for all datas call getcoeffld(tracers, mesh) call nc_end ! deallocate arrqays associated with netcdf file - call extrap_nod(tracers(current_tracer)%values(:,:), mesh) + call extrap_nod(tracers%data(current_tracer)%values(:,:), mesh) exit - elseif (current_tracer==num_tracers) then + elseif (current_tracer==tracers%num_tracers) then if (mype==0) write(*,*) "idlist contains tracer which is not listed in tracer_id!" if (mype==0) write(*,*) "check your namelists!" call par_ex @@ -503,11 +503,11 @@ SUBROUTINE do_ic3d(tracers, mesh) END DO DEALLOCATE(bilin_indx_i, bilin_indx_j) - do current_tracer=1, num_tracers + do current_tracer=1, tracers%num_tracers !_________________________________________________________________________ ! set remaining dummy values from bottom topography to 0.0_WP - where (tracers(current_tracer)%values > 0.9_WP*dummy) - tracers(current_tracer)%values=0.0_WP + where (tracers%data(current_tracer)%values > 0.9_WP*dummy) + tracers%data(current_tracer)%values=0.0_WP end where !_________________________________________________________________________ @@ -515,15 +515,15 @@ SUBROUTINE do_ic3d(tracers, mesh) ! initialisation do n=1,myDim_nod2d + eDim_nod2D ! ensure cavity is zero - if (use_cavity) tracers(current_tracer)%values(1:mesh%ulevels_nod2D(n)-1,n)=0.0_WP + if (use_cavity) tracers%data(current_tracer)%values(1:mesh%ulevels_nod2D(n)-1,n)=0.0_WP ! ensure bottom is zero - tracers(current_tracer)%values(mesh%nlevels_nod2D(n):mesh%nl-1,n)=0.0_WP + tracers%data(current_tracer)%values(mesh%nlevels_nod2D(n):mesh%nl-1,n)=0.0_WP end do end do !_________________________________________________________________________ ! convert temperature from Kelvin --> °C - where (tracers(1)%values(:,:) > 100._WP) - tracers(1)%values(:,:) = tracers(1)%values(:,:)-273.15_WP + where (tracers%data(1)%values(:,:) > 100._WP) + tracers%data(1)%values(:,:) = tracers%data(1)%values(:,:)-273.15_WP end where !_________________________________________________________________________ @@ -539,10 +539,10 @@ SUBROUTINE do_ic3d(tracers, mesh) locSmax = locTmax locSmin = locTmin do n=1,myDim_nod2d - locTmax = max(locTmax,maxval(tracers(1)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) - locTmin = min(locTmin,minval(tracers(1)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) - locSmax = max(locSmax,maxval(tracers(2)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) - locSmin = min(locSmin,minval(tracers(2)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) + locTmax = max(locTmax,maxval(tracers%data(1)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) + locTmin = min(locTmin,minval(tracers%data(1)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) + locSmax = max(locSmax,maxval(tracers%data(2)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) + locSmin = min(locSmin,minval(tracers%data(2)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) end do call MPI_AllREDUCE(locTmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) if (mype==0) write(*,*) ' |-> gobal max init. temp. =', glo diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 41da7e75e..69dba70b0 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -344,7 +344,7 @@ end subroutine init_cvmix_kpp ! calculate PP vertrical mixing coefficients from CVMIX library subroutine calc_cvmix_kpp(tracers, mesh) type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers integer :: node, elem, nz, nln, nun, elnodes(3), aux_nz real(kind=WP) :: vshear2, dz2, aux, aux_wm(mesh%nl), aux_ws(mesh%nl) real(kind=WP) :: aux_coeff, sigma, stable @@ -356,8 +356,8 @@ subroutine calc_cvmix_kpp(tracers, mesh) real(kind=WP) :: sfc_rhopot, sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2 real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" - temp=>tracers(1)%values(:,:) - salt=>tracers(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) !_______________________________________________________________________ kpp_Av = 0.0_WP kpp_Kv = 0.0_WP diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 4d1219c0e..a79dcf106 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -389,7 +389,7 @@ subroutine diag_densMOC(mode, tracers, mesh) implicit none integer, intent(in) :: mode type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers integer :: nz, snz, elem, nzmax, nzmin, elnodes(3), is, ie, pos integer :: e, edge, enodes(2), eelems(2) real(kind=WP) :: div, deltaX, deltaY, locz @@ -402,8 +402,8 @@ subroutine diag_densMOC(mode, tracers, mesh) real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" - temp=>tracers(1)%values(:,:) - salt=>tracers(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) if (firstcall_s) then !allocate the stuff at the first call allocate(std_dens_UVDZ(2,std_dens_N, myDim_elem2D)) @@ -642,7 +642,7 @@ subroutine compute_diagnostics(mode, tracers, mesh) integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) real(kind=WP) :: val type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers !1. solver diagnostic if (ldiag_solver) call diag_solver(mode, mesh) !2. compute curl(stress_surf) @@ -654,7 +654,7 @@ subroutine compute_diagnostics(mode, tracers, mesh) !5. print integrated temperature if (ldiag_salt3d) then if (mod(mstep,logfile_outfreq)==0) then - call integrate_nod(tracers(2)%values(:,:), val, mesh) + call integrate_nod(tracers%data(2)%values(:,:), val, mesh) if (mype==0) then write(*,*) 'total integral of salinity at timestep :', mstep, val end if @@ -676,13 +676,14 @@ end subroutine compute_diagnostics ! in a coastal model application ... ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... -subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tracer, mesh) +subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, tracers, mesh) use o_arrays use g_PARSUP use oce_adv_tra_driver_interfaces implicit none - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracer + integer, intent(in) :: tr_num + type(t_tracer), intent(inout), target :: tracers + type(t_mesh), intent(in), target :: mesh integer :: node, nz, nzmin, nzmax real(kind=WP) :: tr_sqr(mesh%nl-1,myDim_nod2D+eDim_nod2D), trAB_sqr(mesh%nl-1,myDim_nod2D+eDim_nod2D) @@ -698,8 +699,8 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tracer, mesh) nzmax = nlevels_nod2D(node)-1 nzmin = ulevels_nod2D(node) do nz = nzmin, nzmax - tr_sqr(nz,node) = tracer%values (nz,node)**2 - trAB_sqr(nz,node) = tracer%valuesAB(nz,node)**2 + tr_sqr(nz,node) = tracers%data(tr_num)%values (nz,node)**2 + trAB_sqr(nz,node) = tracers%data(tr_num)%valuesAB(nz,node)**2 end do end do @@ -707,10 +708,10 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tracer, mesh) ! calculate horizintal and vertical advection for squared tracer (2nd moments) ! see Burchard and Rennau, 2008, Comparative quantification of physically and ! numerically induced mixing in ocean models ... - del_ttf_advhoriz = 0.0_WP - del_ttf_advvert = 0.0_WP + tracers%work%del_ttf_advhoriz = 0.0_WP + tracers%work%del_ttf_advvert = 0.0_WP ! maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? -! call do_oce_adv_tra(tr_sqr, trAB_sqr, UV, wvel, wvel_i, wvel_e, 1, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) +! call do_oce_adv_tra(tr_sqr, trAB_sqr, UV, wvel, wvel_i, wvel_e, 1, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D @@ -729,8 +730,8 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tracer, mesh) ! --> split it up in DVD contribution from horizontal and vertical ! advection since for the horizontal advection Adams Bashfort tracer ! are used and for the vertical the normal tracer values. - tr_dvd_horiz(nz,node,tracer%ID) = hnode(nz,node)/hnode_new(nz,node)*trAB_sqr(nz,node) - del_ttf_advhoriz(nz,node)/hnode_new(nz,node) - tr_dvd_vert(nz,node,tracer%ID) = hnode(nz,node)/hnode_new(nz,node)*tr_sqr( nz,node) - del_ttf_advvert( nz,node)/hnode_new(nz,node) + tracers%work%tr_dvd_horiz(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*trAB_sqr(nz,node) - tracers%work%del_ttf_advhoriz(nz,node)/hnode_new(nz,node) + tracers%work%tr_dvd_vert (nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*tr_sqr( nz,node) - tracers%work%del_ttf_advvert( nz,node)/hnode_new(nz,node) end do end do end subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008 @@ -743,24 +744,25 @@ end subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008 ! see: ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... -subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tracer, mesh) +subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers, mesh) use o_arrays use g_PARSUP use oce_adv_tra_driver_interfaces implicit none - integer :: node, nz, nzmin, nzmax - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracer + integer :: node, nz, nzmin, nzmax + integer, intent(in) :: tr_num + type(t_tracer), intent(inout), target :: tracers + type(t_mesh), intent(in), target :: mesh #include "associate_mesh.h" !___________________________________________________________________________ ! calculate horizintal and vertical advection for squared tracer (2nd moments) ! see Burchard and Rennau, 2008, Comparative quantification of physically and ! numerically induced mixing in ocean models ... - del_ttf_advhoriz = 0.0_WP - del_ttf_advvert = 0.0_WP + tracers%work%del_ttf_advhoriz = 0.0_WP + tracers%work%del_ttf_advvert = 0.0_WP ! maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? -! call do_oce_adv_tra(tracer%values, tracer%valuesAB(:,:), UV, wvel, wvel_i, wvel_e, 2, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) +! call do_oce_adv_tra(tracers%data(tr_num)%values, tracers%data(tr_num)%valuesAB(:,:), UV, wvel, wvel_i, wvel_e, 2, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D @@ -783,10 +785,10 @@ subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tracer, mesh) ! --> split it up in DVD contribution from horizontal and vertical ! advection since for the horizontal advection Adams Bashfort tracer ! are used and for the vertical the normal tracer values. - tr_dvd_horiz(nz,node,tracer%ID) = hnode(nz,node)/hnode_new(nz,node)*(tracer%valuesAB(nz,node)**2) & - - del_ttf_advhoriz(nz,node)/hnode_new(nz,node) - tr_dvd_vert(nz,node,tracer%ID) = hnode(nz,node)/hnode_new(nz,node)*(tracer%values (nz,node)**2) & - - del_ttf_advvert( nz,node)/hnode_new(nz,node) + tracers%work%tr_dvd_horiz(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*(tracers%data(tr_num)%valuesAB(nz,node)**2) & + - tracers%work%del_ttf_advhoriz(nz,node)/hnode_new(nz,node) + tracers%work%tr_dvd_vert(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*(tracers%data(tr_num)%values (nz,node)**2) & + - tracers%work%del_ttf_advvert( nz,node)/hnode_new(nz,node) end do end do end subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014 @@ -801,15 +803,16 @@ end subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014 ! in a coastal model application ... ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... -subroutine compute_diag_dvd(tracer, mesh) +subroutine compute_diag_dvd(tr_num, tracers, mesh) use g_config, only: dt use o_arrays use g_PARSUP implicit none integer :: node, nz, nzmin, nzmax - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracer + integer, intent(in) :: tr_num + type(t_tracer), intent(inout), target :: tracers + type(t_mesh), intent(in), target :: mesh #include "associate_mesh.h" !___________________________________________________________________________ @@ -827,16 +830,16 @@ subroutine compute_diag_dvd(tracer, mesh) ! | ! v ! now add this part - ! --> tr_dvd_horiz contains already the expected target second moments + ! --> tracers%work%tr_dvd_horiz contains already the expected target second moments ! from subroutine compute_diag_dvd_2ndmoment - tr_dvd_horiz(nz,node,tracer%ID) = (tr_dvd_horiz(nz,node,tracer%ID) & - -( hnode(nz,node)/hnode_new(nz,node)*tracer%valuesAB(nz,node) & - -del_ttf_advhoriz(nz,node)/hnode_new(nz,node) & + tracers%work%tr_dvd_horiz(nz,node,tr_num) = (tracers%work%tr_dvd_horiz(nz,node,tr_num) & + -( hnode(nz,node)/hnode_new(nz,node)*tracers%data(tr_num)%valuesAB(nz,node) & + -tracers%work%del_ttf_advhoriz(nz,node)/hnode_new(nz,node) & )**2 & )/dt - tr_dvd_vert(nz,node,tracer%ID) = (tr_dvd_vert(nz,node,tracer%ID) & - -( hnode(nz,node)/hnode_new(nz,node)*tracer%values (nz,node) & - -del_ttf_advvert( nz,node)/hnode_new(nz,node) & + tracers%work%tr_dvd_vert(nz,node,tr_num) = (tracers%work%tr_dvd_vert(nz,node,tr_num) & + -( hnode(nz,node)/hnode_new(nz,node)*tracers%data(tr_num)%values (nz,node) & + -tracers%work%del_ttf_advvert( nz,node)/hnode_new(nz,node) & )**2 & )/dt end do diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index a5eabfae5..0b641a7fa 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -4,7 +4,7 @@ subroutine ocean2ice(tracers, mesh) use mod_mesh use mod_tracer type(t_mesh), intent(in) , target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module @@ -15,7 +15,7 @@ subroutine oce_fluxes(tracers, mesh) use mod_mesh use mod_tracer type(t_mesh), intent(in) , target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module @@ -115,13 +115,13 @@ subroutine ocean2ice(tracers, mesh) implicit none type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers integer :: n, elem, k real(kind=WP) :: uw, vw, vol real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" - temp=>tracers(1)%values(:,:) - salt=>tracers(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) ! the arrays in the ice model are renamed @@ -199,14 +199,14 @@ subroutine oce_fluxes(tracers, mesh) use cavity_heat_water_fluxes_3eq_interface implicit none type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers integer :: n, elem, elnodes(3),n1 real(kind=WP) :: rsss, net real(kind=WP), allocatable :: flux(:) real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" - temp=>tracers(1)%values(:,:) - salt=>tracers(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) allocate(flux(myDim_nod2D+eDim_nod2D)) flux = 0.0_WP diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index baae70e8e..c6e069af6 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -14,7 +14,7 @@ subroutine ice_initial_state(tracers, mesh) use mod_mesh use mod_tracer type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module @@ -24,7 +24,7 @@ subroutine ice_setup(tracers, mesh) use mod_mesh use mod_tracer type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module @@ -43,7 +43,7 @@ subroutine ice_setup(tracers, mesh) use ice_initial_state_interface implicit none type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers ! ================ DO not change ice_dt=real(ice_ave_steps,WP)*dt @@ -51,7 +51,6 @@ subroutine ice_setup(tracers, mesh) Tevp_inv=3.0_WP/ice_dt Clim_evp=Clim_evp*(evp_rheol_steps/ice_dt)**2/Tevp_inv ! This is combination ! it always enters - ! ================ call ice_array_setup(mesh) call ice_fct_init(mesh) @@ -310,7 +309,7 @@ subroutine ice_initial_state(tracers, mesh) implicit none ! type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers integer :: i character(MAX_PATH) :: filename real(kind=WP), external :: TFrez ! Sea water freeze temperature. @@ -333,7 +332,7 @@ subroutine ice_initial_state(tracers, mesh) endif !_______________________________________________________________________ - if (tracers(1)%values(1,i)< 0.0_WP) then + if (tracers%data(1)%values(1,i)< 0.0_WP) then if (geo_coord_nod2D(2,i)>0._WP) then m_ice(i) = 1.0_WP m_snow(i)= 0.1_WP diff --git a/src/icepack_drivers/icedrv_advection.F90 b/src/icepack_drivers/icedrv_advection.F90 index b52bccf97..35d4a8adf 100644 --- a/src/icepack_drivers/icedrv_advection.F90 +++ b/src/icepack_drivers/icedrv_advection.F90 @@ -100,13 +100,14 @@ end subroutine tg_rhs_icepack module subroutine init_advection_icepack(mesh) use o_param - use mod_tracer use g_parsup use mod_mesh implicit none type(t_mesh), intent(in), target :: mesh + +#include "../associate_mesh.h" ! Initialization of arrays necessary to implement FCT algorithm allocate(trl(nx)) ! low-order solutions @@ -139,7 +140,6 @@ end subroutine init_advection_icepack subroutine fill_mass_matrix_icepack(mesh) use mod_mesh - use mod_tracer use i_param use g_parsup @@ -215,7 +215,6 @@ subroutine solve_low_order_icepack(mesh, trc) ! mass matrix on the lhs is replaced with the lumped one. use mod_mesh - use mod_tracer use i_param use g_parsup @@ -253,7 +252,6 @@ end subroutine solve_low_order_icepack subroutine solve_high_order_icepack(mesh, trc) use mod_mesh - use mod_tracer use i_param use g_parsup @@ -309,7 +307,6 @@ subroutine fem_fct_icepack(mesh, trc) ! Turek. (kuzmin@math.uni-dortmund.de) use mod_mesh - use mod_tracer use o_param use i_param use g_parsup @@ -460,7 +457,6 @@ end subroutine fem_fct_icepack subroutine tg_rhs_div_icepack(mesh, trc) use mod_mesh - use mod_tracer use o_param use i_param use g_parsup @@ -529,7 +525,6 @@ end subroutine tg_rhs_div_icepack subroutine update_for_div_icepack(mesh, trc) use mod_mesh - use mod_tracer use o_param use i_param use g_parsup diff --git a/src/icepack_drivers/icedrv_init.F90 b/src/icepack_drivers/icedrv_init.F90 index b2b821ed8..5c3e77fc7 100644 --- a/src/icepack_drivers/icedrv_init.F90 +++ b/src/icepack_drivers/icedrv_init.F90 @@ -49,7 +49,7 @@ subroutine init_state(tracer) nt_ipnd, nt_aero, nt_fsd character(len=*), parameter :: subname='(init_state)' - type(t_tracer), intent(in), target :: tracer + type(t_tracer_data), intent(in), target :: tracer !----------------------------------------------------------------- ! query Icepack values @@ -928,8 +928,8 @@ module subroutine init_icepack(tracer, mesh) tr_fsd, & ! from icepack wave_spec ! from icepack character(len=*), parameter :: subname='(icedrv_initialize)' - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracer + type(t_mesh), intent(in), target :: mesh + type(t_tracer_data), intent(in), target :: tracer call icepack_query_parameters(wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_aero_out=tr_aero) call icepack_query_tracer_flags(tr_zaero_out=tr_zaero) @@ -1006,8 +1006,8 @@ subroutine init_state_var (tracer) implicit none ! local variables - type(t_tracer), intent(in), target :: tracer - real(kind=WP), dimension(:,:), pointer :: tr_arr + type(t_tracer_data), intent(in), target :: tracer + real(kind=WP), dimension(:,:), pointer :: tr_arr integer (kind=int_kind) :: & i , & ! horizontal indices diff --git a/src/icepack_drivers/icedrv_main.F90 b/src/icepack_drivers/icedrv_main.F90 index ea06fa808..7dfa9700b 100644 --- a/src/icepack_drivers/icedrv_main.F90 +++ b/src/icepack_drivers/icedrv_main.F90 @@ -792,8 +792,8 @@ module subroutine init_icepack(tracer, mesh) use mod_mesh use mod_tracer implicit none - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracer + type(t_mesh), intent(in), target :: mesh + type(t_tracer_data), intent(in), target :: tracer end subroutine init_icepack ! Copy variables from fesom to icepack diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 107e5247b..4d033719a 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -66,7 +66,7 @@ MODULE io_BLOWUP subroutine ini_blowup_io(year, tracers, mesh) implicit none integer, intent(in) :: year - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers type(t_mesh), intent(in), target :: mesh integer :: ncid, j integer :: varid @@ -117,7 +117,7 @@ subroutine ini_blowup_io(year, tracers, mesh) !!PS call def_variable(bid, 'pgf_y' , (/nl-1, elem2D/) , 'meridional pressure gradient force', '???', pgf_y(:,:)); !!PS call def_variable(bid, 'density_m_rho0' , (/nl-1, nod2D/) , 'density minus rho0', '???', density_m_rho0(:,:)); - do j=1,num_tracers + do j=1, tracers%num_tracers SELECT CASE (j) CASE(1) trname='temp' @@ -132,9 +132,9 @@ subroutine ini_blowup_io(year, tracers, mesh) write(longname,'(A15,i1)') 'passive tracer ', j units='none' END SELECT - call def_variable(bid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tracers(j)%values(:,:)); + call def_variable(bid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%values(:,:)); !!PS longname=trim(longname)//', Adams–Bashforth' -!!PS call def_variable(bid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers(j)%valuesAB(:,:)(:,:)); +!!PS call def_variable(bid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%valuesAB(:,:)(:,:)); end do call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel); call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel_e); @@ -171,7 +171,7 @@ end subroutine ini_blowup_io subroutine blowup(istep, tracers, mesh) implicit none type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers integer :: istep ctime=timeold+(dayold-1.)*86400 diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index c67dc9dd4..bf85bf83e 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -109,7 +109,7 @@ subroutine ini_mean_io(tracers, mesh) character(len=10) :: id_string type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers namelist /nml_listsize/ io_listsize namelist /nml_list / io_list @@ -142,9 +142,9 @@ subroutine ini_mean_io(tracers, mesh) SELECT CASE (trim(io_list(i)%id)) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!2D streams!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! CASE ('sst ') - call def_stream(nod2D, myDim_nod2D, 'sst', 'sea surface temperature', 'C', tracers(1)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'sst', 'sea surface temperature', 'C', tracers%data(1)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('sss ') - call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tracers(2)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tracers%data(2)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('ssh ') call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('vve_5 ') @@ -289,13 +289,13 @@ subroutine ini_mean_io(tracers, mesh) !>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 3D streams <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< !___________________________________________________________________________________________________________________________________ CASE ('temp ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'temp', 'temperature', 'C', tracers(1)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'temp', 'temperature', 'C', tracers%data(1)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('salt ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'salt', 'salinity', 'psu', tracers(2)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'salt', 'salinity', 'psu', tracers%data(2)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) CASE ('otracers ') - do j=3, num_tracers - write (id_string, "(I3.3)") tracers(j)%ID - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'tra_'//id_string, 'pasive tracer ID='//id_string, 'n/a', tracers(j)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + do j=3, tracers%num_tracers + write (id_string, "(I3.3)") tracers%data(j)%ID + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'tra_'//id_string, 'pasive tracer ID='//id_string, 'n/a', tracers%data(j)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) end do CASE ('slope_x ') call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_x', 'neutral slope X', 'none', slope_tapered(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) @@ -504,10 +504,10 @@ subroutine ini_mean_io(tracers, mesh) !___________________________________________________________________________ if (ldiag_dvd) then - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_h', 'horiz. dvd of temperature', '°C/s' , tr_dvd_horiz(:,:,1), 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_v', 'vert. dvd of temperature' , '°C/s' , tr_dvd_vert(:,:,1) , 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_h', 'horiz. dvd of salinity' , 'psu/s', tr_dvd_horiz(:,:,2), 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_v', 'vert. dvd of salinity' , 'psu/s', tr_dvd_vert(:,:,2) , 1, 'm', i_real4, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_h', 'horiz. dvd of temperature', '°C/s' , tracers%work%tr_dvd_horiz(:,:,1), 1, 'm', i_real4, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_v', 'vert. dvd of temperature' , '°C/s' , tracers%work%tr_dvd_vert(:,:,1) , 1, 'm', i_real4, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_h', 'horiz. dvd of salinity' , 'psu/s', tracers%work%tr_dvd_horiz(:,:,2), 1, 'm', i_real4, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_v', 'vert. dvd of salinity' , 'psu/s', tracers%work%tr_dvd_vert(:,:,2) , 1, 'm', i_real4, mesh) end if !___________________________________________________________________________ @@ -816,7 +816,7 @@ subroutine output(istep, tracers, mesh) logical :: do_output type(Meandata), pointer :: entry type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers character(:), allocatable :: filepath real(real64) :: rtime !timestamp of the record diff --git a/src/io_restart.F90 b/src/io_restart.F90 index d130b8f65..9d49f6648 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -89,7 +89,7 @@ subroutine ini_ocean_io(year, tracers, mesh) character(500) :: trname, units character(4) :: cyear type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers #include "associate_mesh.h" write(cyear,'(i4)') year @@ -132,7 +132,7 @@ subroutine ini_ocean_io(year, tracers, mesh) call def_variable(oid, 'uke_rhs', (/nl-1, elem2D/), 'unresolved kinetic energy rhs', 'm2/s2', uke_rhs(:,:)); endif - do j=1,num_tracers + do j=1, tracers%num_tracers SELECT CASE (j) CASE(1) trname='temp' @@ -147,9 +147,9 @@ subroutine ini_ocean_io(year, tracers, mesh) write(longname,'(A15,i1)') 'passive tracer ', j units='none' END SELECT - call def_variable(oid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tracers(j)%values(:,:)); + call def_variable(oid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%values(:,:)); longname=trim(longname)//', Adams–Bashforth' - call def_variable(oid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers(j)%valuesAB(:,:)); + call def_variable(oid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%valuesAB(:,:)); end do call def_variable(oid, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel); call def_variable(oid, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e); @@ -214,7 +214,7 @@ subroutine restart(istep, l_write, l_read, tracers, mesh) logical :: is_restart integer :: mpierr type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then call ini_ocean_io(yearnew, tracers, mesh) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index e49abae65..536859a71 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -1,17 +1,16 @@ module oce_adv_tra_driver_interfaces interface - subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) - use MOD_TRACER + subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) use MOD_MESH + use MOD_TRACER use g_PARSUP + integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracer - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) + type(t_tracer), intent(inout), target :: tracers + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) + real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) end subroutine end interface end module @@ -19,7 +18,7 @@ subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) module oce_tra_adv_flux2dtracer_interface interface subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo, ttf, lo) - !update the solution for vertical and horizontal flux contributions + !update the solution for vertical and horizontal flux contributions use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh @@ -36,7 +35,7 @@ subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo ! ! !=============================================================================== -subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) +subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) use MOD_MESH use MOD_TRACER use o_ARRAYS @@ -49,19 +48,25 @@ subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) use oce_adv_tra_fct_interfaces use oce_tra_adv_flux2dtracer_interface implicit none + integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracer - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), pointer, dimension (:,:) :: pwvel - real(kind=WP), pointer, dimension (:,:) :: ttf, ttfAB - + type(t_tracer), intent(inout), target :: tracers + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) + real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) + + real(kind=WP), pointer, dimension (:,:) :: pwvel + real(kind=WP), pointer, dimension (:,:) :: ttf, ttfAB, fct_LO + real(kind=WP), pointer, dimension (:,:) :: adv_flux_hor, adv_flux_ver, dttf_h, dttf_v + real(kind=WP), pointer, dimension (:,:) :: fct_ttf_min, fct_ttf_max + real(kind=WP), pointer, dimension (:,:) :: fct_plus, fct_minus + + integer, pointer, dimension (:) :: nboundary_lay + real(kind=WP), pointer, dimension (:,:,:) :: edge_up_dn_grad + integer :: el(2), enodes(2), nz, n, e - integer :: nl12, nu12, nl1, nl2, nu1, nu2, tr_num + integer :: nl12, nu12, nl1, nl2, nu1, nu2 real(kind=WP) :: cLO, cHO, deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: qc, qu, qd real(kind=WP) :: tvert(mesh%nl), tvert_e(mesh%nl), a, b, c, d, da, db, dg, vflux, Tupw1 @@ -70,19 +75,29 @@ subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) logical :: do_zero_flux #include "associate_mesh.h" - - ttf => tracer%values - ttfAB => tracer%valuesAB - opth = tracer%tra_adv_ph - optv = tracer%tra_adv_pv + ttf => tracers%data(tr_num)%values + ttfAB => tracers%data(tr_num)%valuesAB + opth = tracers%data(tr_num)%tra_adv_ph + optv = tracers%data(tr_num)%tra_adv_pv + fct_LO => tracers%work%fct_LO + adv_flux_ver => tracers%work%adv_flux_ver + adv_flux_hor => tracers%work%adv_flux_hor + edge_up_dn_grad => tracers%work%edge_up_dn_grad + nboundary_lay => tracers%work%nboundary_lay + fct_ttf_min => tracers%work%fct_ttf_min + fct_ttf_max => tracers%work%fct_ttf_max + fct_plus => tracers%work%fct_plus + fct_minus => tracers%work%fct_minus + dttf_h => tracers%work%del_ttf_advhoriz + dttf_v => tracers%work%del_ttf_advvert !___________________________________________________________________________ ! compute FCT horzontal and vertical low order solution as well as lw order ! part of antidiffusive flux - if (trim(tracer%tra_adv_lim)=='FCT') then + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then ! compute the low order upwind horizontal flux ! init_zero=.true. : zero the horizontal flux before computation ! init_zero=.false. : input flux will be substracted - call adv_tra_hor_upw1(ttf, vel, mesh, adv_flux_hor, init_zero=.true.) + call adv_tra_hor_upw1(vel, ttf, mesh, adv_flux_hor, init_zero=.true.) ! update the LO solution for horizontal contribution fct_LO=0.0_WP @@ -111,8 +126,7 @@ subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) ! compute the low order upwind vertical flux (explicit part only) ! zero the input/output flux before computation - call adv_tra_ver_upw1(ttf, we, mesh, adv_flux_ver, init_zero=.true.) - + call adv_tra_ver_upw1(we, ttf, mesh, adv_flux_ver, init_zero=.true.) ! update the LO solution for vertical contribution do n=1, myDim_nod2D nu1 = ulevels_nod2D(n) @@ -125,36 +139,36 @@ subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) if (w_split) then !wvel/=wvel_e ! update for implicit contribution (w_split option) - call adv_tra_vert_impl(fct_LO, wi, mesh) + call adv_tra_vert_impl(wi, fct_LO, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation ! --> compute here low order part of vertical anti diffusive fluxes, ! has to be done on the full vertical velocity w - call adv_tra_ver_upw1(ttf, w, mesh, adv_flux_ver, init_zero=.true.) + call adv_tra_ver_upw1(w, ttf, mesh, adv_flux_ver, init_zero=.true.) end if call exchange_nod(fct_LO) end if do_zero_flux=.true. - if (trim(tracer%tra_adv_lim)=='FCT') do_zero_flux=.false. + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') do_zero_flux=.false. !___________________________________________________________________________ ! do horizontal tracer advection, in case of FCT high order solution - SELECT CASE(trim(tracer%tra_adv_hor)) + SELECT CASE(trim(tracers%data(tr_num)%tra_adv_hor)) CASE('MUSCL') ! compute the untidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above) - call adv_tra_hor_muscl(ttfAB, vel, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_muscl(uv, ttfAB, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, init_zero=do_zero_flux) CASE('MFCT') - call adv_tra_hor_mfct(ttfAB, vel, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_mfct(uv, ttfAB, mesh, opth, adv_flux_hor, edge_up_dn_grad, init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_hor_upw1(ttfAB, vel, mesh, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_upw1(uv, ttfAB, mesh, adv_flux_hor, init_zero=do_zero_flux) CASE DEFAULT !unknown - if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracer%tra_adv_hor), '! Check your namelists!' + if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracers%data(tr_num)%tra_adv_hor), '! Check your namelists!' call par_ex(1) END SELECT - if (trim(tracer%tra_adv_lim)=='FCT') then + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then pwvel=>w else pwvel=>we @@ -162,7 +176,7 @@ subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) !___________________________________________________________________________ ! do vertical tracer advection, in case of FCT high order solution - SELECT CASE(trim(tracer%tra_adv_ver)) + SELECT CASE(trim(tracers%data(tr_num)%tra_adv_ver)) CASE('QR4C') ! compute the untidiffusive vertical flux (init_zero=.false.:input is the LO vertical flux computed above) call adv_tra_ver_qr4c (ttfAB, pwvel, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) @@ -171,27 +185,18 @@ subroutine do_oce_adv_tra(vel, w, wi, we, dttf_h, dttf_v, tracer, mesh) CASE('PPM') call adv_tra_vert_ppm (ttfAB, pwvel, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_ver_upw1 (ttfAB, pwvel, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_upw1 (ttfAB, pwvel, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE DEFAULT !unknown - if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracer%tra_adv_ver), '! Check your namelists!' + if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!' call par_ex(1) ! --> be aware the vertical implicite part in case without FCT is done in ! oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, mesh) ! for do_wimpl=.true. - END SELECT - + END SELECT !___________________________________________________________________________ ! -!if (mype==0) then -! write(*,*) 'check new:' -! write(*,*) '1:', minval(fct_LO), maxval(fct_LO), sum(fct_LO) -! write(*,*) '2:', minval(adv_flux_hor), maxval(adv_flux_hor), sum(adv_flux_hor) -! write(*,*) '3:', minval(adv_flux_ver), maxval(adv_flux_ver), sum(adv_flux_ver) -!end if - if (trim(tracer%tra_adv_lim)=='FCT') then -!if (mype==0) write(*,*) 'before:', sum(abs(adv_flux_ver)), sum(abs(adv_flux_hor)) - call oce_tra_adv_fct(dttf_h, dttf_v, ttf, fct_LO, adv_flux_hor, adv_flux_ver, mesh) -!if (mype==0) write(*,*) 'after:', sum(abs(adv_flux_ver)), sum(abs(adv_flux_hor)) + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then + call oce_tra_adv_fct(ttf, fct_LO, adv_flux_hor, adv_flux_ver, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, mesh) call oce_tra_adv_flux2dtracer(dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, mesh, use_lo=.TRUE., ttf=ttf, lo=fct_LO) else call oce_tra_adv_flux2dtracer(dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, mesh) diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index e5e2525f8..3b9b95003 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -1,61 +1,66 @@ module oce_adv_tra_fct_interfaces interface - subroutine oce_adv_tra_fct_init(mesh) + subroutine oce_adv_tra_fct_init(twork, mesh) use MOD_MESH + use MOD_TRACER use g_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer_work), intent(inout), target :: twork end subroutine - subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) + subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, mesh) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: lo (mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, myDim_edge2D) real(kind=WP), intent(inout) :: adf_v(mesh%nl, myDim_nod2D) + real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, myDim_edge2D) + real(kind=WP), intent(inout) :: fct_minus(mesh%nl, myDim_nod2D) end subroutine end interface end module ! ! !=============================================================================== -subroutine oce_adv_tra_fct_init(mesh) +subroutine oce_adv_tra_fct_init(twork, mesh) use MOD_MESH use MOD_TRACER use o_ARRAYS use o_PARAM use g_PARSUP implicit none - integer :: my_size - type(t_mesh), intent(in) , target :: mesh - + integer :: my_size + type(t_mesh), intent(in) , target :: mesh + type(t_tracer_work), intent(inout), target :: twork #include "associate_mesh.h" my_size=myDim_nod2D+eDim_nod2D - allocate(fct_LO(nl-1, my_size)) ! Low-order solution - allocate(adv_flux_hor(nl-1,myDim_edge2D)) ! antidiffusive hor. contributions / from edges - allocate(adv_flux_ver(nl, myDim_nod2D)) ! antidiffusive ver. fluxes / from nodes + allocate(twork%fct_LO(nl-1, my_size)) ! Low-order solution + allocate(twork%adv_flux_hor(nl-1,myDim_edge2D)) ! antidiffusive hor. contributions / from edges + allocate(twork%adv_flux_ver(nl, myDim_nod2D)) ! antidiffusive ver. fluxes / from nodes - allocate(fct_ttf_max(nl-1, my_size),fct_ttf_min(nl-1, my_size)) - allocate(fct_plus(nl-1, my_size),fct_minus(nl-1, my_size)) + allocate(twork%fct_ttf_max(nl-1, my_size),twork%fct_ttf_min(nl-1, my_size)) + allocate(twork%fct_plus(nl-1, my_size), twork%fct_minus(nl-1, my_size)) ! Initialize with zeros: - fct_LO=0.0_WP - adv_flux_hor=0.0_WP - adv_flux_ver=0.0_WP - fct_ttf_max=0.0_WP - fct_ttf_min=0.0_WP - fct_plus=0.0_WP - fct_minus=0.0_WP + twork%fct_LO=0.0_WP + twork%adv_flux_hor=0.0_WP + twork%adv_flux_ver=0.0_WP + twork%fct_ttf_max=0.0_WP + twork%fct_ttf_min=0.0_WP + twork%fct_plus=0.0_WP + twork%fct_minus=0.0_WP if (mype==0) write(*,*) 'FCT is initialized' end subroutine oce_adv_tra_fct_init + ! ! !=============================================================================== -subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) +subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, mesh) ! ! 3D Flux Corrected Transport scheme ! Limits antidiffusive fluxes==the difference in flux HO-LO @@ -71,12 +76,15 @@ subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) use g_comm_auto implicit none type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: lo (mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(inout) :: adf_v(mesh%nl, myDim_nod2D) + real(kind=WP), intent(inout) :: adf_v(mesh%nl, myDim_nod2D) + real(kind=WP), intent(inout) :: fct_plus (mesh%nl-1, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(inout) :: fct_minus(mesh%nl-1, myDim_nod2D+eDim_nod2D) + integer :: n, nz, k, elem, enodes(3), num, el(2), nl1, nl2, nu1, nu2, nl12, nu12, edge real(kind=WP) :: flux, ae,tvert_max(mesh%nl-1),tvert_min(mesh%nl-1) real(kind=WP) :: flux_eps=1e-16 diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index cdec31b1f..3225d5e9d 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -8,8 +8,9 @@ module oce_adv_tra_hor_interfaces ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_upw1(ttf, vel, mesh, flux, init_zero) + subroutine adv_tra_hor_upw1(vel, ttf, mesh, flux, init_zero) use MOD_MESH + use MOD_TRACER use g_PARSUP type(t_mesh), intent(in) , target :: mesh real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) @@ -24,7 +25,7 @@ subroutine adv_tra_hor_upw1(ttf, vel, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_muscl(ttf, vel, mesh, num_ord, flux, init_zero) + subroutine adv_tra_hor_muscl(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh @@ -32,11 +33,13 @@ subroutine adv_tra_hor_muscl(ttf, vel, mesh, num_ord, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) + integer, intent(in) :: nboundary_lay(myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, myDim_edge2D) logical, optional :: init_zero end subroutine ! a not stable version of MUSCL (reconstruction in the vicinity of bottom topography is not upwind) ! it runs with FCT option only - subroutine adv_tra_hor_mfct(ttf, vel, mesh, num_ord, flux, init_zero) + subroutine adv_tra_hor_mfct(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh @@ -44,6 +47,7 @@ subroutine adv_tra_hor_mfct(ttf, vel, mesh, num_ord, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, myDim_edge2D) logical, optional :: init_zero end subroutine end interface @@ -51,7 +55,7 @@ subroutine adv_tra_hor_mfct(ttf, vel, mesh, num_ord, flux, init_zero) ! ! !=============================================================================== -subroutine adv_tra_hor_upw1(ttf, vel, mesh, flux, init_zero) +subroutine adv_tra_hor_upw1(vel, ttf, mesh, flux, init_zero) use MOD_MESH use o_ARRAYS use o_PARAM @@ -207,7 +211,7 @@ end subroutine adv_tra_hor_upw1 ! ! !=============================================================================== -subroutine adv_tra_hor_muscl(ttf, vel, mesh, num_ord, flux, init_zero) +subroutine adv_tra_hor_muscl(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) use MOD_MESH use MOD_TRACER use o_ARRAYS @@ -221,6 +225,8 @@ subroutine adv_tra_hor_muscl(ttf, vel, mesh, num_ord, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) + integer, intent(in) :: nboundary_lay(myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, myDim_edge2D) logical, optional :: init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: Tmean1, Tmean2, cHO @@ -476,7 +482,7 @@ end subroutine adv_tra_hor_muscl ! ! !=============================================================================== -subroutine adv_tra_hor_mfct(ttf, vel, mesh, num_ord, flux, init_zero) + subroutine adv_tra_hor_mfct(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, init_zero) use MOD_MESH use MOD_TRACER use o_ARRAYS @@ -490,6 +496,7 @@ subroutine adv_tra_hor_mfct(ttf, vel, mesh, num_ord, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, myDim_edge2D) logical, optional :: init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: Tmean1, Tmean2, cHO diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 590b98d0f..3a84c509a 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -2,7 +2,7 @@ module oce_adv_tra_ver_interfaces interface ! implicit 1st order upwind vertical advection with to solve for fct_LO ! updates the input tracer ttf - subroutine adv_tra_vert_impl(ttf, w, mesh) + subroutine adv_tra_vert_impl(w, ttf, mesh) use mod_mesh use g_PARSUP type(t_mesh), intent(in), target :: mesh @@ -15,7 +15,7 @@ subroutine adv_tra_vert_impl(ttf, w, mesh) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_upw1(ttf, w, mesh, flux, init_zero) + subroutine adv_tra_ver_upw1(w, ttf, mesh, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh @@ -30,7 +30,7 @@ subroutine adv_tra_ver_upw1(ttf, w, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_qr4c(ttf, w, mesh, num_ord, flux, init_zero) + subroutine adv_tra_ver_qr4c(w, ttf, mesh, num_ord, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh @@ -46,7 +46,7 @@ subroutine adv_tra_ver_qr4c(ttf, w, mesh, num_ord, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_vert_ppm(ttf, w, mesh, flux, init_zero) + subroutine adv_tra_vert_ppm(w, ttf, mesh, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh @@ -62,7 +62,7 @@ subroutine adv_tra_vert_ppm(ttf, w, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_cdiff(ttf, w, mesh, flux, init_zero) + subroutine adv_tra_ver_cdiff(w, ttf, mesh, flux, init_zero) use MOD_MESH use g_PARSUP type(t_mesh), intent(in), target :: mesh @@ -76,7 +76,7 @@ subroutine adv_tra_ver_cdiff(ttf, w, mesh, flux, init_zero) end interface end module !=============================================================================== -subroutine adv_tra_vert_impl(ttf, w, mesh) +subroutine adv_tra_vert_impl(w, ttf, mesh) use MOD_MESH use o_PARAM use o_ARRAYS @@ -223,7 +223,7 @@ end subroutine adv_tra_vert_impl ! ! !=============================================================================== -subroutine adv_tra_ver_upw1(ttf, w, mesh, flux, init_zero) +subroutine adv_tra_ver_upw1(w, ttf, mesh, flux, init_zero) use g_config use MOD_MESH use o_ARRAYS @@ -277,7 +277,7 @@ end subroutine adv_tra_ver_upw1 ! ! !=============================================================================== -subroutine adv_tra_ver_qr4c(ttf, w, mesh, num_ord, flux, init_zero) +subroutine adv_tra_ver_qr4c(w, ttf, mesh, num_ord, flux, init_zero) use g_config use MOD_MESH use o_ARRAYS @@ -351,7 +351,7 @@ end subroutine adv_tra_ver_qr4c ! ! !=============================================================================== -subroutine adv_tra_vert_ppm(ttf, w, mesh, flux, init_zero) +subroutine adv_tra_vert_ppm(w, ttf, mesh, flux, init_zero) use g_config use MOD_MESH use o_ARRAYS @@ -531,7 +531,7 @@ end subroutine adv_tra_vert_ppm ! ! !=============================================================================== -subroutine adv_tra_ver_cdiff(ttf, w, mesh, flux, init_zero) +subroutine adv_tra_ver_cdiff(w, ttf, mesh, flux, init_zero) use g_config use MOD_MESH use o_ARRAYS diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 91ee25ab7..fcad0cd4a 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -63,7 +63,7 @@ subroutine oce_timestep_ale(n, tracers, mesh) use mod_mesh use mod_tracer integer, intent(in) :: n - type(t_tracer), intent(inout), target, allocatable :: tracers(:) + type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh end subroutine end interface @@ -2564,7 +2564,7 @@ subroutine oce_timestep_ale(n, tracers, mesh) real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: n, node type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers #include "associate_mesh.h" t0=MPI_Wtime() @@ -2590,10 +2590,10 @@ subroutine oce_timestep_ale(n, tracers, mesh) !___________________________________________________________________________ ! calculate alpha and beta ! it will be used for KPP, Redi, GM etc. Shall we keep it on in general case? - call sw_alpha_beta(tracers(1)%values, tracers(2)%values, mesh) + call sw_alpha_beta(tracers%data(1)%values, tracers%data(2)%values, mesh) ! computes the xy gradient of a neutral surface; will be used by Redi, GM etc. - call compute_sigma_xy(tracers(1)%values,tracers(2)%values, mesh) + call compute_sigma_xy(tracers%data(1)%values,tracers%data(2)%values, mesh) ! compute both: neutral slope and tapered neutral slope. Can be later combined with compute_sigma_xy ! will be primarily used for computing Redi diffusivities. etc? diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 0ec774515..731e8cd1d 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -247,7 +247,7 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, mesh) ! Allocate arrays under oce_setup_step.F90 ! ******************************************************************* type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers integer :: node, kn, elem, elnodes(3) integer :: nz, ns, j, q, lay, lay_mi, nzmin, nzmax real(KIND=WP) :: smftu, smftv, aux, vol @@ -260,7 +260,7 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, mesh) real(kind=WP) :: rho_surf, rho_insitu real(KIND=WP), dimension(mesh%nl, myDim_elem2D+eDim_elem2D), intent(inout) :: viscAE!for momentum (elements) real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D) :: viscA !for momentum (nodes) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D, num_tracers), intent(inout) :: diffK !for T and S + real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D, tracers%num_tracers), intent(inout) :: diffK !for T and S #include "associate_mesh.h" @@ -334,13 +334,13 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, mesh) ! Surface buoyancy forcing (eqns. A2c & A2d & A3b & A3d) Bo(node) = -g * ( sw_alpha(nzmin,node) * heat_flux(node) / vcpw & !heat_flux & water_flux: positive up - + sw_beta (nzmin,node) * water_flux(node) * tracers(2)%values(nzmin,node)) + + sw_beta (nzmin,node) * water_flux(node) * tracers%data(2)%values(nzmin,node)) END DO ! compute interior mixing coefficients everywhere, due to constant ! internal wave activity, static instability, and local shear ! instability. - CALL ri_iwmix(viscA, diffK, mesh) + CALL ri_iwmix(viscA, diffK, tracers, mesh) ! add double diffusion IF (double_diffusion) then CALL ddmix(diffK, tracers, mesh) @@ -718,9 +718,10 @@ END SUBROUTINE wscale ! visc = viscosity coefficient (m**2/s) ! diff = diffusion coefficient (m**2/s) ! - subroutine ri_iwmix(viscA, diffK, mesh) + subroutine ri_iwmix(viscA, diffK, tracers, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers integer :: node, nz, mr, nzmin, nzmax real(KIND=WP) , parameter :: Riinfty = 0.8_WP ! local Richardson Number limit for shear instability (LMD 1994 uses 0.7) real(KIND=WP) :: ri_prev, tmp @@ -728,7 +729,7 @@ subroutine ri_iwmix(viscA, diffK, mesh) real(KIND=WP) :: dz_inv, shear, aux, dep, lat, Kv0_b real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D ), intent(inout) :: viscA !for momentum (nodes) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D ,num_tracers), intent(inout) :: diffK !for T and S + real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D ,tracers%num_tracers), intent(inout) :: diffK !for T and S ! Put them under the namelist.oce logical :: smooth_richardson_number = .false. @@ -847,7 +848,7 @@ subroutine ddmix(diffK, tracers, mesh) IMPLICIT NONE type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers real(KIND=WP), parameter :: Rrho0 = 1.9_WP ! limit for double diffusive density ratio real(KIND=WP), parameter :: dsfmax = 1.e-4_WP ! (m^2/s) max diffusivity in case of salt fingering real(KIND=WP), parameter :: viscosity_molecular = 1.5e-6_WP ! (m^2/s) @@ -867,8 +868,8 @@ subroutine ddmix(diffK, tracers, mesh) DO nz=nzmin+1,nzmax-1 ! alphaDT and betaDS @Z - alphaDT = sw_alpha(nz-1,node) * tracers(1)%values(nz-1,node) - betaDS = sw_beta (nz-1,node) * tracers(2)%values(nz-1,node) + alphaDT = sw_alpha(nz-1,node) * tracers%data(1)%values(nz-1,node) + betaDS = sw_beta (nz-1,node) * tracers%data(2)%values(nz-1,node) IF (alphaDT > betaDS .and. betaDS > 0.0_WP) THEN diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 8297799c8..3ef70833c 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -33,7 +33,7 @@ module pressure_force_4_linfs_nemo_interface subroutine pressure_force_4_linfs_nemo(tracers, mesh) use mod_mesh use mod_tracer - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers type(t_mesh), intent(in), target :: mesh end subroutine end interface @@ -51,7 +51,7 @@ module pressure_force_4_linfs_easypgf_interface subroutine pressure_force_4_linfs_easypgf(tracers, mesh) use mod_mesh use mod_tracer - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers type(t_mesh), intent(in), target :: mesh end subroutine end interface @@ -85,7 +85,7 @@ module pressure_force_4_zxxxx_easypgf_interface subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) use mod_mesh use mod_tracer - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers type(t_mesh), intent(in), target :: mesh end subroutine end interface @@ -112,7 +112,7 @@ subroutine insitu2pot(tracers, mesh) use mod_mesh use mod_tracer type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module @@ -122,7 +122,7 @@ subroutine pressure_bv(tracers, mesh) use mod_mesh use mod_tracer type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module @@ -131,7 +131,7 @@ module pressure_force_4_linfs_interface subroutine pressure_force_4_linfs(tracers, mesh) use mod_mesh use mod_tracer - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers type(t_mesh), intent(in), target :: mesh end subroutine end interface @@ -141,7 +141,7 @@ module pressure_force_4_zxxxx_interface subroutine pressure_force_4_zxxxx(tracers, mesh) use mod_mesh use mod_tracer - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers type(t_mesh), intent(in), target :: mesh end subroutine end interface @@ -166,7 +166,7 @@ subroutine pressure_bv(tracers, mesh) use density_linear_interface IMPLICIT NONE type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers real(kind=WP) :: dz_inv, bv, a, rho_up, rho_dn, t, s integer :: node, nz, nl1, nzmax, nzmin real(kind=WP) :: rhopot(mesh%nl), bulk_0(mesh%nl), bulk_pz(mesh%nl), bulk_pz2(mesh%nl), rho(mesh%nl), dbsfc1(mesh%nl), db_max @@ -176,8 +176,8 @@ subroutine pressure_bv(tracers, mesh) real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" - temp=>tracers(1)%values(:,:) - salt=>tracers(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) smallvalue=1.0e-20 buoyancy_crit=0.0003_WP mixing_kpp = (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) ! NR Evaluate string comparison outside the loop. It is expensive. @@ -433,10 +433,10 @@ subroutine pressure_force_4_linfs(tracers, mesh) use pressure_force_4_linfs_easypgf_interface implicit none type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers real(kind=WP), dimension(:,:), pointer :: temp, salt - temp=>tracers(1)%values(:,:) - salt=>tracers(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ ! calculate pressure gradient force (PGF) for linfs with full cells if ( .not. use_partial_cell .and. .not. use_cavity_partial_cell) then @@ -551,11 +551,11 @@ subroutine pressure_force_4_linfs_nemo(tracers, mesh) dZn, dZn_i, dh, dval, mean_e_rho,dZn_rho_grad(2) real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" - temp=>tracers(1)%values(:,:) - salt=>tracers(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ ! loop over triangular elemments do elem=1, myDim_elem2D @@ -978,11 +978,11 @@ subroutine pressure_force_4_linfs_easypgf(tracers, mesh) real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" - temp=>tracers(1)%values(:,:) - salt=>tracers(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ ! loop over triangular elemments @@ -1736,7 +1736,7 @@ subroutine pressure_force_4_zxxxx(tracers, mesh) use pressure_force_4_zxxxx_easypgf_interface implicit none type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers !___________________________________________________________________________ if (trim(which_pgf)=='shchepetkin') then @@ -2205,11 +2205,11 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" - temp=>tracers(1)%values(:,:) - salt=>tracers(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ ! loop over triangular elemments do elem=1, myDim_elem2D @@ -3025,7 +3025,7 @@ end subroutine compute_neutral_slope ! !=============================================================================== !converts insitu temperature to a potential one -! tracers(1)%values will be modified! +! tracers%data(1)%values will be modified! subroutine insitu2pot(tracers, mesh) use mod_mesh use mod_tracer @@ -3038,12 +3038,12 @@ subroutine insitu2pot(tracers, mesh) real(kind=WP) :: pp, pr, tt, ss integer :: n, nz, nzmin,nzmax type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_mesh.h" - temp=>tracers(1)%values(:,:) - salt=>tracers(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) ! Convert in situ temperature into potential temperature pr=0.0_WP do n=1,myDim_nod2d+eDim_nod2D diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index e7cc3e5c2..c68565152 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1,17 +1,20 @@ module diff_part_hor_redi_interface interface - subroutine diff_part_hor_redi(mesh) + subroutine diff_part_hor_redi(tr_num, tracer, mesh) use mod_mesh use mod_tracer + integer, intent(in), target :: tr_num + type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module adv_tracers_ale_interface interface - subroutine adv_tracers_ale(tracer, mesh) + subroutine adv_tracers_ale(tr_num, tracer, mesh) use mod_mesh use mod_tracer + integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh end subroutine @@ -19,9 +22,10 @@ subroutine adv_tracers_ale(tracer, mesh) end module module diff_ver_part_expl_ale_interface interface - subroutine diff_ver_part_expl_ale(tracer, mesh) + subroutine diff_ver_part_expl_ale(tr_num, tracer, mesh) use mod_mesh use mod_tracer + integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh end subroutine @@ -29,18 +33,21 @@ subroutine diff_ver_part_expl_ale(tracer, mesh) end module module diff_ver_part_redi_expl_interface interface - subroutine diff_ver_part_redi_expl(mesh) + subroutine diff_ver_part_redi_expl(tr_num, tracer, mesh) use mod_mesh use mod_tracer + integer, intent(in), target :: tr_num + type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module diff_ver_part_impl_ale_interface interface - subroutine diff_ver_part_impl_ale(tracer, mesh) + subroutine diff_ver_part_impl_ale(tr_num, tracer, mesh) use mod_mesh use mod_tracer + integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh end subroutine @@ -48,9 +55,10 @@ subroutine diff_ver_part_impl_ale(tracer, mesh) end module module diff_tracers_ale_interface interface - subroutine diff_tracers_ale(tracer, mesh) + subroutine diff_tracers_ale(tr_num, tracer, mesh) use mod_mesh use mod_tracer + integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh end subroutine @@ -69,10 +77,11 @@ function bc_surface(n, id, sval, mesh) end module module diff_part_bh_interface interface - subroutine diff_part_bh(tracer, mesh) + subroutine diff_part_bh(tr_num, tracer, mesh) use g_PARSUP use mod_mesh use mod_tracer + integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh end subroutine @@ -83,8 +92,8 @@ module solve_tracers_ale_interface subroutine solve_tracers_ale(tracers, mesh) use g_PARSUP use mod_mesh - use mod_tracer - type(t_tracer), intent(inout), target :: tracers(:) + use mod_tracer + type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh end subroutine end interface @@ -107,14 +116,14 @@ subroutine solve_tracers_ale(tracers, mesh) use diff_tracers_ale_interface implicit none - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh integer :: tr_num, node, nzmax, nzmin #include "associate_mesh.h" !___________________________________________________________________________ if (SPP) call cal_rejected_salt(mesh) - if (SPP) call app_rejected_salt(tracers(2)%values, mesh) + if (SPP) call app_rejected_salt(tracers%data(2)%values, mesh) !___________________________________________________________________________ ! update 3D velocities with the bolus velocities: ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 @@ -126,35 +135,32 @@ subroutine solve_tracers_ale(tracers, mesh) end if !___________________________________________________________________________ ! loop over all tracers - do tr_num=1, num_tracers + do tr_num=1, tracers%num_tracers ! do tracer AB (Adams-Bashfort) interpolation only for advectiv part ! needed if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call init_tracers_AB'//achar(27)//'[0m' - call init_tracers_AB(tracers(tr_num), mesh) - + call init_tracers_AB(tr_num, tracers, mesh) ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' - call adv_tracers_ale(tracers(tr_num), mesh) - + call adv_tracers_ale(tr_num, tracers, mesh) ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' - call diff_tracers_ale(tracers(tr_num), mesh) - + call diff_tracers_ale(tr_num, tracers, mesh) ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' ! if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then if ((toy_ocean) .AND. ((TRIM(which_toy)=="soufflet"))) then - call relax_zonal_temp(tracers(1), mesh) + call relax_zonal_temp(tracers%data(1), mesh) else - call relax_to_clim(tracers(tr_num), mesh) + call relax_to_clim(tr_num, tracers, mesh) end if - call exchange_nod(tracers(tr_num)%values(:,:)) + call exchange_nod(tracers%data(tr_num)%values(:,:)) end do !___________________________________________________________________________ do tr_num=1, ptracers_restore_total - tracers(ptracers_restore(tr_num)%locid)%values(:,ptracers_restore(tr_num)%ind2)=1.0_WP + tracers%data(ptracers_restore(tr_num)%locid)%values(:,ptracers_restore(tr_num)%ind2)=1.0_WP end do - + !___________________________________________________________________________ ! subtract the the bolus velocities back from 3D velocities: if (Fer_GM) then @@ -170,19 +176,19 @@ subroutine solve_tracers_ale(tracers, mesh) do node=1,myDim_nod2D+eDim_nod2D nzmax=nlevels_nod2D(node)-1 nzmin=ulevels_nod2D(node) - where (tracers(2)%values(nzmin:nzmax,node) > 45._WP) - tracers(2)%values(nzmin:nzmax,node)=45._WP + where (tracers%data(2)%values(nzmin:nzmax,node) > 45._WP) + tracers%data(2)%values(nzmin:nzmax,node)=45._WP end where - where (tracers(2)%values(nzmin:nzmax,node) < 3._WP ) - tracers(2)%values(nzmin:nzmax,node) = 3._WP + where (tracers%data(2)%values(nzmin:nzmax,node) < 3._WP ) + tracers%data(2)%values(nzmin:nzmax,node) = 3._WP end where end do end subroutine solve_tracers_ale ! ! !=============================================================================== -subroutine adv_tracers_ale(tracer, mesh) +subroutine adv_tracers_ale(tr_num, tracers, mesh) use g_config, only: flag_debug use g_parsup use mod_mesh @@ -194,9 +200,10 @@ subroutine adv_tracers_ale(tracer, mesh) ! use adv_tracers_vert_ppm_ale_interface use oce_adv_tra_driver_interfaces implicit none - integer :: tr_num, node, nz + integer :: node, nz + integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracer + type(t_tracer), intent(inout), target :: tracers ! del_ttf ... initialised and setted to zero in call init_tracers_AB(tr_num) ! --> del_ttf ... equivalent to R_T^n in Danilov etal FESOM2: "from finite element ! to finite volume". At the end R_T^n should contain all advection therms and @@ -208,32 +215,32 @@ subroutine adv_tracers_ale(tracer, mesh) ! of discret variance decay if (ldiag_DVD .and. tr_num <= 2) then if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd_2ndmoment'//achar(27)//'[0m' - call compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tracer,mesh) + call compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers,mesh) end if !___________________________________________________________________________ ! horizontal ale tracer advection ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... - del_ttf_advhoriz = 0.0_WP - del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(UV, wvel, wvel_i, wvel_e, del_ttf_advhoriz, del_ttf_advvert, tracer, mesh) + tracers%work%del_ttf_advhoriz = 0.0_WP + tracers%work%del_ttf_advvert = 0.0_WP + call do_oce_adv_tra(UV, wvel, wvel_i, wvel_e, tr_num, tracers, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection - del_ttf=del_ttf+del_ttf_advhoriz+del_ttf_advvert + tracers%work%del_ttf=tracers%work%del_ttf+tracers%work%del_ttf_advhoriz+tracers%work%del_ttf_advvert !___________________________________________________________________________ ! compute discrete variance decay after Burchard and Rennau 2008 if (ldiag_DVD .and. tr_num <= 2) then if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd'//achar(27)//'[0m' - call compute_diag_dvd(tracer, mesh) + call compute_diag_dvd(tr_num, tracers, mesh) end if end subroutine adv_tracers_ale ! ! !=============================================================================== -subroutine diff_tracers_ale(tracer, mesh) +subroutine diff_tracers_ale(tr_num, tracers, mesh) use mod_mesh use mod_tracer use g_PARSUP @@ -246,28 +253,32 @@ subroutine diff_tracers_ale(tracer, mesh) use diff_part_bh_interface implicit none - integer :: n, nzmax, nzmin - type(t_tracer), intent(inout), target :: tracer + integer :: n, nzmax, nzmin + integer, intent(in), target :: tr_num + type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh + real(kind=WP), pointer :: del_ttf(:,:) #include "associate_mesh.h" + + del_ttf => tracers%work%del_ttf !___________________________________________________________________________ ! convert tr_arr_old(:,:,tr_num)=ttr_n-0.5 --> prepare to calc ttr_n+0.5 ! eliminate AB (adams bashfort) interpolates tracer, which is only needed for ! tracer advection. For diffusion only need tracer from previouse time step - tracer%valuesAB(:,:)=tracer%values(:,:) !DS: check that this is the right place! + tracers%data(tr_num)%valuesAB(:,:)=tracers%data(tr_num)%values(:,:) !DS: check that this is the right place! !___________________________________________________________________________ ! do horizontal diffusiion ! write there also horizontal diffusion rhs to del_ttf which is equal the R_T^n ! in danilovs srcipt ! includes Redi diffusivity if Redi=.true. - call diff_part_hor_redi(mesh) ! seems to be ~9% faster than diff_part_hor + call diff_part_hor_redi(tr_num, tracers, mesh) ! seems to be ~9% faster than diff_part_hor !___________________________________________________________________________ ! do vertical diffusion: explicite - if (.not. tracer%i_vert_diff) call diff_ver_part_expl_ale(tracer, mesh) + if (.not. tracers%i_vert_diff) call diff_ver_part_expl_ale(tr_num, tracers, mesh) ! A projection of horizontal Redi diffussivity onto vertical. This par contains horizontal ! derivatives and has to be computed explicitly! - if (Redi) call diff_ver_part_redi_expl(mesh) + if (Redi) call diff_ver_part_redi_expl(tr_num, tracers, mesh) !___________________________________________________________________________ ! Update tracers --> calculate T* see Danilov etal "FESOM2 from finite elements @@ -281,9 +292,9 @@ subroutine diff_tracers_ale(tracer, mesh) !!PS tr_arr(1:nzmax,n,tr_num)=tr_arr(1:nzmax,n,tr_num)+ & !!PS del_ttf(1:nzmax,n)/hnode_new(1:nzmax,n) - del_ttf(nzmin:nzmax,n)=del_ttf(nzmin:nzmax,n)+tracer%values(nzmin:nzmax,n)* & + del_ttf(nzmin:nzmax,n)=del_ttf(nzmin:nzmax,n)+tracers%data(tr_num)%values(nzmin:nzmax,n)* & (hnode(nzmin:nzmax,n)-hnode_new(nzmin:nzmax,n)) - tracer%values(nzmin:nzmax,n)=tracer%values(nzmin:nzmax,n)+ & + tracers%data(tr_num)%values(nzmin:nzmax,n)=tracers%data(tr_num)%values(nzmin:nzmax,n)+ & del_ttf(nzmin:nzmax,n)/hnode_new(nzmin:nzmax,n) ! WHY NOT ??? --> whats advantage of above --> tested it --> the upper ! equation has a 30% smaller nummerical drift @@ -292,23 +303,23 @@ subroutine diff_tracers_ale(tracer, mesh) end do !___________________________________________________________________________ - if (tracer%i_vert_diff) then + if (tracers%i_vert_diff) then ! do vertical diffusion: implicite - call diff_ver_part_impl_ale(tracer, mesh) + call diff_ver_part_impl_ale(tr_num, tracers, mesh) end if !We DO not set del_ttf to zero because it will not be used in this timestep anymore !init_tracers will set it to zero for the next timestep !init_tracers will set it to zero for the next timestep - if (tracer%smooth_bh_tra) then - call diff_part_bh(tracer, mesh) ! alpply biharmonic diffusion (implemented as filter) + if (tracers%smooth_bh_tra) then + call diff_part_bh(tr_num, tracers, mesh) ! alpply biharmonic diffusion (implemented as filter) end if end subroutine diff_tracers_ale ! ! !=============================================================================== !Vertical diffusive flux(explicit scheme): -subroutine diff_ver_part_expl_ale(tracer, mesh) +subroutine diff_ver_part_expl_ale(tr_num, tracers, mesh) use o_ARRAYS use g_forcing_arrays use MOD_MESH @@ -317,15 +328,20 @@ subroutine diff_ver_part_expl_ale(tracer, mesh) use g_config,only: dt implicit none - type(t_tracer), intent(inout), target :: tracer + integer, intent(in), target :: tr_num + type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh real(kind=WP) :: vd_flux(mesh%nl-1) real(kind=WP) :: rdata,flux,rlx - integer :: nz,nl1,ul1, tr_num,n + integer :: nz,nl1,ul1,n real(kind=WP) :: zinv1,Ty + real(kind=WP), pointer :: del_ttf(:,:) + #include "associate_mesh.h" + del_ttf => tracers%work%del_ttf + Ty = 0.0_WP !___________________________________________________________________________ @@ -334,11 +350,11 @@ subroutine diff_ver_part_expl_ale(tracer, mesh) ul1=ulevels_nod2D(n) vd_flux=0._WP - if (tracer%ID==1) then + if (tracers%data(tr_num)%ID==1) then flux = -heat_flux(n)/vcpw rdata = Tsurf(n) rlx = surf_relax_T - elseif (tracer%ID==2) then + elseif (tracers%data(tr_num)%ID==2) then flux = virtual_salt(n)+relax_salt(n)- real_salt_flux(n)*is_nonlinfs else flux = 0._WP @@ -361,7 +377,7 @@ subroutine diff_ver_part_expl_ale(tracer, mesh) ! Ty= Kd(4,nz-1,n)*(Z_3d_n(nz-1,n)-zbar_3d_n(nz,n))*zinv1 *neutral_slope(3,nz-1,n)**2 + & ! Kd(4,nz,n)*(zbar_3d_n(nz,n)-Z_3d_n(nz,n))*zinv1 *neutral_slope(3,nz,n)**2 - vd_flux(nz) = (Kv(nz,n)+Ty)*(tracer%values(nz-1,n)-tracer%values(nz,n))*zinv1*area(nz,n) + vd_flux(nz) = (Kv(nz,n)+Ty)*(tracers%data(tr_num)%values(nz-1,n)-tracers%data(tr_num)%values(nz,n))*zinv1*area(nz,n) end do @@ -378,7 +394,7 @@ end subroutine diff_ver_part_expl_ale ! !=============================================================================== ! vertical diffusivity augmented with Redi contribution [vertical flux of K(3,3)*d_zT] -subroutine diff_ver_part_impl_ale(tracer, mesh) +subroutine diff_ver_part_impl_ale(tr_num, tracers, mesh) use MOD_MESH use MOD_TRACER use o_PARAM @@ -392,9 +408,9 @@ subroutine diff_ver_part_impl_ale(tracer, mesh) use bc_surface_interface implicit none - type(t_tracer), intent(inout), target :: tracer + integer, intent(in), target :: tr_num + type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh -!!PS real(kind=WP) :: bc_surface real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) integer :: nz, n, nzmax,nzmin @@ -407,9 +423,9 @@ subroutine diff_ver_part_impl_ale(tracer, mesh) real(kind=WP), dimension(:,:), pointer :: trarr #include "associate_mesh.h" - trarr=>tracer%values(:,:) + trarr=>tracers%data(tr_num)%values(:,:) !___________________________________________________________________________ - if ((trim(tracer%tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. + if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. if (Redi) isredi=1._WP dt_inv=1.0_WP/dt @@ -667,15 +683,15 @@ subroutine diff_ver_part_impl_ale(tracer, mesh) ! mixing or windmixing which are to much for nonlocal ! transports and lead to instability of the model if (use_kpp_nonlclflx) then - if (tracer%ID==2) then + if (tracers%data(tr_num)%ID==2) then rsss=ref_sss - if (ref_sss_local) rsss=tracer%values(1,n) + if (ref_sss_local) rsss=tracers%data(tr_num)%values(1,n) end if !___________________________________________________________________ ! use fesom1.4 KPP if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then - if (tracer%ID==1) then ! temperature + if (tracers%data(tr_num)%ID==1) then ! temperature ! --> no fluxes to the top out of the surface, no fluxes ! downwards out of the bottom !___surface_________________________________________________ @@ -696,7 +712,7 @@ subroutine diff_ver_part_impl_ale(tracer, mesh) +( MIN(ghats(nz ,n)*blmc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & ) * heat_flux(n) / vcpw * dt - elseif (tracer%ID==2) then ! salinity + elseif (tracers%data(tr_num)%ID==2) then ! salinity ! --> no fluxes to the top out of the surface, no fluxes ! downwards out of the bottom !___surface_________________________________________________ @@ -720,7 +736,7 @@ subroutine diff_ver_part_impl_ale(tracer, mesh) !___________________________________________________________________ ! use cvmix KPP elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then - if (tracer%ID==1) then ! temperature + if (tracers%data(tr_num)%ID==1) then ! temperature !___surface_________________________________________________ nz = nzmin tr(nz)=tr(nz) & @@ -739,7 +755,7 @@ subroutine diff_ver_part_impl_ale(tracer, mesh) +( MIN(kpp_nonlcltranspT(nz ,n)*kpp_oblmixc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & ) * heat_flux(n) / vcpw * dt - elseif (tracer%ID==2) then ! salinity + elseif (tracers%data(tr_num)%ID==2) then ! salinity !___surface_________________________________________________ nz = nzmin tr(nz)=tr(nz) & @@ -763,7 +779,7 @@ subroutine diff_ver_part_impl_ale(tracer, mesh) !_______________________________________________________________________ ! case of activated shortwave penetration into the ocean, ad 3d contribution - if (use_sw_pene .and. tracer%ID==1) then + if (use_sw_pene .and. tracers%data(tr_num)%ID==1) then do nz=nzmin, nzmax-1 zinv=1.0_WP*dt !/(zbar(nz)-zbar(nz+1)) ale! !!PS tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n) * ( area(nz+1,n)/areasvol(nz,n)) ) * zinv @@ -783,7 +799,7 @@ subroutine diff_ver_part_impl_ale(tracer, mesh) ! (BUT CHECK!) | | | | ! v (+) v (+) ! - tr(nzmin)= tr(nzmin)+bc_surface(n, tracer%ID, trarr(mesh%ulevels_nod2D(n),n), mesh) + tr(nzmin)= tr(nzmin)+bc_surface(n, tracers%data(tr_num)%ID, trarr(mesh%ulevels_nod2D(n),n), mesh) !_______________________________________________________________________ ! The forward sweep algorithm to solve the three-diagonal matrix @@ -837,7 +853,7 @@ end subroutine diff_ver_part_impl_ale ! ! !=============================================================================== -subroutine diff_ver_part_redi_expl(mesh) +subroutine diff_ver_part_redi_expl(tr_num, tracers, mesh) use o_ARRAYS use g_PARSUP use MOD_MESH @@ -846,14 +862,19 @@ subroutine diff_ver_part_redi_expl(mesh) use g_config use g_comm_auto IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + integer, intent(in), target :: tr_num + type(t_tracer), intent(inout), target :: tracers + type(t_mesh), intent(in), target :: mesh integer :: elem,k integer :: n2,nl1,ul1,nl2,nz,n real(kind=WP) :: Tx, Ty real(kind=WP) :: tr_xynodes(2,mesh%nl-1,myDim_nod2D+eDim_nod2D), vd_flux(mesh%nl) + real(kind=WP), pointer :: del_ttf(:,:) #include "associate_mesh.h" + del_ttf => tracers%work%del_ttf + do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 ul1=ulevels_nod2D(n) @@ -912,7 +933,7 @@ end subroutine diff_ver_part_redi_expl ! ! !=============================================================================== -subroutine diff_part_hor_redi(mesh) +subroutine diff_part_hor_redi(tr_num, tracers, mesh) use o_ARRAYS use g_PARSUP use MOD_MESH @@ -920,16 +941,21 @@ subroutine diff_part_hor_redi(mesh) use o_param use g_config IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + integer, intent(in), target :: tr_num + type(t_tracer), intent(inout), target :: tracers + type(t_mesh), intent(in), target :: mesh real(kind=WP) :: deltaX1,deltaY1,deltaX2,deltaY2 integer :: edge integer :: n2,nl1,ul1,nl2,ul2,nl12,ul12,nz,el(2),elnodes(3),n,enodes(2) real(kind=WP) :: c, Fx, Fy,Tx, Ty, Tx_z, Ty_z, SxTz, SyTz, Tz(2) real(kind=WP) :: rhs1(mesh%nl-1), rhs2(mesh%nl-1), Kh, dz real(kind=WP) :: isredi=0._WP + real(kind=WP), pointer :: del_ttf(:,:) #include "associate_mesh.h" + del_ttf => tracers%work%del_ttf + if (Redi) isredi=1._WP do edge=1, myDim_edge2D rhs1=0.0_WP @@ -1060,22 +1086,25 @@ end subroutine diff_part_hor_redi ! ! !=============================================================================== -SUBROUTINE diff_part_bh(ttf, mesh) +SUBROUTINE diff_part_bh(tr_num, tracers, mesh) use o_ARRAYS use g_PARSUP use MOD_MESH + use MOD_TRACER use o_param use g_config use g_comm_auto IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout), target :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP) :: u1, v1, len, vi, tt, ww - integer :: nz, ed, el(2), en(2), k, elem, nl1, ul1 - real(kind=WP), allocatable :: temporary_ttf(:,:) - + integer, intent(in), target :: tr_num + type(t_tracer), intent(inout), target :: tracers + type(t_mesh), intent(in), target :: mesh + real(kind=WP) :: u1, v1, len, vi, tt, ww + integer :: nz, ed, el(2), en(2), k, elem, nl1, ul1 + real(kind=WP), allocatable :: temporary_ttf(:,:) + real(kind=WP), pointer :: ttf(:,:) #include "associate_mesh.h" + ttf => tracers%data(tr_num)%values ed=myDim_nod2D+eDim_nod2D allocate(temporary_ttf(nl-1, ed)) diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index e3ca565ea..29970add2 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -1,8 +1,10 @@ module find_up_downwind_triangles_interface interface - subroutine find_up_downwind_triangles(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine find_up_downwind_triangles(twork, mesh) + use MOD_MESH + use MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_tracer_work), intent(inout), target :: twork end subroutine end interface end module @@ -24,7 +26,7 @@ subroutine find_up_downwind_triangles(mesh) ! find_up_downwind_triangles ! fill_up_dn_grad ! adv_tracer_muscl -subroutine muscl_adv_init(mesh) +subroutine muscl_adv_init(twork, mesh) use MOD_MESH use MOD_TRACER use o_ARRAYS @@ -34,18 +36,20 @@ subroutine muscl_adv_init(mesh) use g_config use find_up_downwind_triangles_interface IMPLICIT NONE - integer :: n, k, n1, n2, n_num + integer :: n, k, n1, n2 integer :: nz - type(t_mesh), intent(in) , target :: mesh + + type(t_mesh), intent(inout), target :: mesh + type(t_tracer_work), intent(inout), target :: twork #include "associate_mesh.h" !___________________________________________________________________________ ! find upwind and downwind triangle for each local edge - call find_up_downwind_triangles(mesh) + call find_up_downwind_triangles(twork, mesh) !___________________________________________________________________________ - n_num=0 + nn_size=0 do n=1, myDim_nod2D ! get number of neighbouring nodes from sparse stiffness matrix ! stiffnes matrix filled up in subroutine init_stiff_mat_ale @@ -56,11 +60,13 @@ subroutine muscl_adv_init(mesh) ! --> SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) gives maximum number of ! neighbouring nodes within a single row of the sparse matrix k=SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) - if(k>n_num) n_num=k ! nnum maximum number of neighbouring nodes + if(k>nn_size) nn_size=k ! nnum maximum number of neighbouring nodes end do !___________________________________________________________________________ - allocate(nn_num(myDim_nod2D), nn_pos(n_num,myDim_nod2D)) + allocate(mesh%nn_num(myDim_nod2D), mesh%nn_pos(nn_size,myDim_nod2D)) + nn_num(1:myDim_nod2D) => mesh%nn_num + nn_pos(1:nn_size, 1:myDim_nod2D) => mesh%nn_pos ! These are the same arrays that we also use in quadratic reconstruction !MOVE IT TO SOMEWHERE ELSE do n=1,myDim_nod2d @@ -71,8 +77,8 @@ subroutine muscl_adv_init(mesh) end do !___________________________________________________________________________ - allocate(nboundary_lay(myDim_nod2D+eDim_nod2D)) !node n becomes a boundary node after layer nboundary_lay(n) - nboundary_lay=nl-1 + allocate(twork%nboundary_lay(myDim_nod2D+eDim_nod2D)) !node n becomes a boundary node after layer twork%nboundary_lay(n) + twork%nboundary_lay=nl-1 do n=1, myDim_edge2D ! n1 and n2 are local indices n1=edges(1,n) @@ -91,37 +97,22 @@ subroutine muscl_adv_init(mesh) if (any(edge_tri(:,n)<=0)) then ! this edge nodes is already at the surface at the boundary ... - ! later here ...sign(1, nboundary_lay(enodes(1))-nz) for nz=1 must be negativ - ! thats why here nboundary_lay(edges(:,n))=0 - nboundary_lay(edges(:,n))=0 + ! later here ...sign(1, twork%nboundary_lay(enodes(1))-nz) for nz=1 must be negativ + ! thats why here twork%nboundary_lay(edges(:,n))=0 + twork%nboundary_lay(edges(:,n))=0 else ! this edge nodes become boundary edge with increasing depth due to bottom topography - ! at the depth nboundary_lay the edge (edgepoints) still has two valid ocean triangles + ! at the depth twork%nboundary_lay the edge (edgepoints) still has two valid ocean triangles ! below that depth, edge becomes boundary edge - nboundary_lay(edges(1,n))=min(nboundary_lay(edges(1,n)), minval(nlevels(edge_tri(:,n)))-1) - nboundary_lay(edges(2,n))=min(nboundary_lay(edges(2,n)), minval(nlevels(edge_tri(:,n)))-1) + twork%nboundary_lay(edges(1,n))=min(twork%nboundary_lay(edges(1,n)), minval(nlevels(edge_tri(:,n)))-1) + twork%nboundary_lay(edges(2,n))=min(twork%nboundary_lay(edges(2,n)), minval(nlevels(edge_tri(:,n)))-1) end if end do - -!!PS !___________________________________________________________________________ -!!PS --> is transfered to oce_mesh.F90 --> subroutine find_levels_min_e2n(mesh) -!!PS --> can be deleted here! -!!PS allocate(mesh%nlevels_nod2D_min(myDim_nod2D+eDim_nod2D)) -!!PS allocate(mesh%ulevels_nod2D_min(myDim_nod2D+eDim_nod2D)) -!!PS do n=1, myDim_nod2d -!!PS k=nod_in_elem2D_num(n) -!!PS ! minimum depth in neigbouring elements around node n -!!PS mesh%nlevels_nod2D_min(n)=minval(nlevels(nod_in_elem2D(1:k, n))) -!!PS mesh%ulevels_nod2D_max(n)=maxval(ulevels(nod_in_elem2D(1:k, n))) -!!PS end do -!!PS call exchange_nod(mesh%nlevels_nod2D_min) -!!PS call exchange_nod(mesh%ulevels_nod2D_min) - end SUBROUTINE muscl_adv_init ! ! !_______________________________________________________________________________ -SUBROUTINE find_up_downwind_triangles(mesh) +SUBROUTINE find_up_downwind_triangles(twork, mesh) USE MOD_MESH USE MOD_TRACER USE o_ARRAYS @@ -135,12 +126,13 @@ SUBROUTINE find_up_downwind_triangles(mesh) real(kind=WP), allocatable :: coord_elem(:, :,:), temp(:) integer, allocatable :: temp_i(:), e_nodes(:,:) -type(t_mesh), intent(in) , target :: mesh +type(t_mesh), intent(in) , target :: mesh +type(t_tracer_work), intent(inout), target :: twork #include "associate_mesh.h" -allocate(edge_up_dn_tri(2,myDim_edge2D)) -allocate(edge_up_dn_grad(4,nl-1,myDim_edge2D)) -edge_up_dn_tri=0 +allocate(twork%edge_up_dn_tri(2,myDim_edge2D)) +allocate(twork%edge_up_dn_grad(4,nl-1,myDim_edge2D)) +twork%edge_up_dn_tri=0 ! ===== ! In order that this procedure works, we need to know nodes and their coordinates ! on the extended set of elements (not only my, but myDim+eDim+eXDim) @@ -208,15 +200,15 @@ SUBROUTINE find_up_downwind_triangles(mesh) ! Since b and c are the sides of triangle, |ab|0.0_WP).and.(ax>0.0_WP).and.(axab)) then - edge_up_dn_tri(1,n)=elem + twork%edge_up_dn_tri(1,n)=elem cycle endif if((ab==ax).or.(ax==0.0_WP)) then - edge_up_dn_tri(1,n)=elem + twork%edge_up_dn_tri(1,n)=elem cycle endif END DO @@ -251,15 +243,15 @@ SUBROUTINE find_up_downwind_triangles(mesh) ! Since b and c are the sides of triangle, |ab|0.0_WP).and.(ax>0.0_WP).and.(axab)) then - edge_up_dn_tri(2,n)=elem + twork%edge_up_dn_tri(2,n)=elem cycle endif if((ab==ax).or.(ax==0.0)) then - edge_up_dn_tri(2,n)=elem + twork%edge_up_dn_tri(2,n)=elem cycle endif END DO @@ -270,19 +262,19 @@ SUBROUTINE find_up_downwind_triangles(mesh) ! Count the number of 'good' edges: k=0 DO n=1,myDim_edge2D - if((edge_up_dn_tri(1,n).ne.0).and.(edge_up_dn_tri(2,n).ne.0)) k=k+1 + if((twork%edge_up_dn_tri(1,n).ne.0).and.(twork%edge_up_dn_tri(2,n).ne.0)) k=k+1 END DO deallocate(e_nodes, coord_elem) -edge_up_dn_grad=0.0_WP +twork%edge_up_dn_grad=0.0_WP end SUBROUTINE find_up_downwind_triangles ! ! !_______________________________________________________________________________ -SUBROUTINE fill_up_dn_grad(mesh) +SUBROUTINE fill_up_dn_grad(twork, mesh) ! ttx, tty elemental gradient of tracer USE o_PARAM USE MOD_MESH @@ -292,8 +284,8 @@ SUBROUTINE fill_up_dn_grad(mesh) IMPLICIT NONE integer :: n, nz, elem, k, edge, ednodes(2), nzmin, nzmax real(kind=WP) :: tvol, tx, ty -type(t_mesh), intent(in) , target :: mesh - +type(t_mesh), intent(in), target :: mesh +type(t_tracer_work), intent(inout), target :: twork #include "associate_mesh.h" !___________________________________________________________________________ @@ -302,7 +294,7 @@ SUBROUTINE fill_up_dn_grad(mesh) ednodes=edges(:,edge) !_______________________________________________________________________ ! case when edge has upwind and downwind triangle on the surface - if((edge_up_dn_tri(1,edge).ne.0.0_WP).and.(edge_up_dn_tri(2,edge).ne.0.0_WP)) then + if((twork%edge_up_dn_tri(1,edge).ne.0.0_WP).and.(twork%edge_up_dn_tri(2,edge).ne.0.0_WP)) then nzmin = maxval(ulevels_nod2D_max(ednodes)) nzmax = minval(nlevels_nod2D_min(ednodes)) @@ -323,8 +315,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(1,nz,edge)=tx/tvol - edge_up_dn_grad(3,nz,edge)=ty/tvol + twork%edge_up_dn_grad(1,nz,edge)=tx/tvol + twork%edge_up_dn_grad(3,nz,edge)=ty/tvol END DO !___________________________________________________________________ @@ -344,8 +336,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(2,nz,edge)=tx/tvol - edge_up_dn_grad(4,nz,edge)=ty/tvol + twork%edge_up_dn_grad(2,nz,edge)=tx/tvol + twork%edge_up_dn_grad(4,nz,edge)=ty/tvol END DO !___________________________________________________________________ @@ -353,9 +345,9 @@ SUBROUTINE fill_up_dn_grad(mesh) !!PS DO nz=1, minval(nlevels_nod2D_min(ednodes))-1 DO nz=nzmin, nzmax-1 ! tracer gradx for upwind and downwind tri - edge_up_dn_grad(1:2,nz,edge)=tr_xy(1,nz,edge_up_dn_tri(:,edge)) + twork%edge_up_dn_grad(1:2,nz,edge)=tr_xy(1,nz,twork%edge_up_dn_tri(:,edge)) ! tracer grady for upwind and downwind tri - edge_up_dn_grad(3:4,nz,edge)=tr_xy(2,nz,edge_up_dn_tri(:,edge)) + twork%edge_up_dn_grad(3:4,nz,edge)=tr_xy(2,nz,twork%edge_up_dn_tri(:,edge)) END DO !___________________________________________________________________ @@ -376,8 +368,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(1,nz,edge)=tx/tvol - edge_up_dn_grad(3,nz,edge)=ty/tvol + twork%edge_up_dn_grad(1,nz,edge)=tx/tvol + twork%edge_up_dn_grad(3,nz,edge)=ty/tvol END DO !___________________________________________________________________ ! loop over not shared depth levels of edge node 2 (ednodes(2)) @@ -397,8 +389,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(2,nz,edge)=tx/tvol - edge_up_dn_grad(4,nz,edge)=ty/tvol + twork%edge_up_dn_grad(2,nz,edge)=tx/tvol + twork%edge_up_dn_grad(4,nz,edge)=ty/tvol END DO !_______________________________________________________________________ ! case when edge either upwind or downwind triangle on the surface @@ -420,8 +412,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(1,nz,edge)=tx/tvol - edge_up_dn_grad(3,nz,edge)=ty/tvol + twork%edge_up_dn_grad(1,nz,edge)=tx/tvol + twork%edge_up_dn_grad(3,nz,edge)=ty/tvol END DO nzmin = ulevels_nod2D(ednodes(2)) nzmax = nlevels_nod2D(ednodes(2)) @@ -438,8 +430,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(2,nz,edge)=tx/tvol - edge_up_dn_grad(4,nz,edge)=ty/tvol + twork%edge_up_dn_grad(2,nz,edge)=tx/tvol + twork%edge_up_dn_grad(4,nz,edge)=ty/tvol END DO end if END DO diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 5da467b97..ba3ec8ac7 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -4,7 +4,7 @@ subroutine oce_initial_state(tracers, mesh) use mod_mesh use mod_tracer type(t_mesh), intent(in) , target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module @@ -13,8 +13,8 @@ module tracer_init_interface subroutine tracer_init(tracers, mesh) use mod_mesh use mod_tracer - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target, allocatable :: tracers(:) + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module @@ -23,8 +23,8 @@ module ocean_setup_interface subroutine ocean_setup(tracers, mesh) use mod_mesh use mod_tracer - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target, allocatable :: tracers(:) + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module @@ -33,8 +33,8 @@ module before_oce_step_interface subroutine before_oce_step(tracers, mesh) use mod_mesh use mod_tracer - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target, allocatable :: tracers(:) + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module @@ -60,7 +60,7 @@ subroutine ocean_setup(tracers, mesh) use oce_adv_tra_fct_interfaces IMPLICIT NONE type(t_mesh), intent(inout), target :: mesh -type(t_tracer), intent(inout), target :: tracers(:) +type(t_tracer), intent(inout), target :: tracers integer :: n !___setup virt_salt_flux____________________________________________________ ! if the ale thinkness remain unchanged (like in 'linfs' case) the vitrual @@ -160,8 +160,8 @@ subroutine ocean_setup(tracers, mesh) !if(open_boundary) call set_open_boundary !TODO - call oce_adv_tra_fct_init(mesh) - call muscl_adv_init(mesh) !!PS test + call oce_adv_tra_fct_init(tracers%work, mesh) + call muscl_adv_init(tracers%work, mesh) !!PS test !===================== ! Initialize fields ! A user-defined routine has to be called here! @@ -180,8 +180,8 @@ subroutine ocean_setup(tracers, mesh) end if if (.not.r_restart) then - do n=1, num_tracers - tracers(n)%valuesAB=tracers(n)%values + do n=1, tracers%num_tracers + tracers%data(n)%valuesAB=tracers%data(n)%values end do end if @@ -216,8 +216,12 @@ SUBROUTINE tracer_init(tracers, mesh) integer :: iost integer :: n +integer :: num_tracers +logical :: i_vert_diff, smooth_bh_tra +real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra + type(t_mesh), intent(in) , target :: mesh -type(t_tracer), intent(inout), target, allocatable :: tracers(:) +type(t_tracer), intent(inout), target :: tracers type(nml_tracer_list_type), target, allocatable :: nml_tracer_list(:) namelist /tracer_listsize/ num_tracers @@ -256,46 +260,45 @@ SUBROUTINE tracer_init(tracers, mesh) elem_size=myDim_elem2D+eDim_elem2D node_size=myDim_nod2D+eDim_nod2D +tracers%num_tracers=num_tracers + ! ================ ! Temperature (index=1), Salinity (index=2), etc. ! ================ -allocate(tracers(num_tracers)) -do n=1, num_tracers - allocate(tracers(n)%values (nl-1,node_size)) - allocate(tracers(n)%valuesAB(nl-1,node_size)) - tracers(n)%ID = nml_tracer_list(n)%id - tracers(n)%tra_adv_hor = TRIM(nml_tracer_list(n)%adv_hor) - tracers(n)%tra_adv_ver = TRIM(nml_tracer_list(n)%adv_ver) - tracers(n)%tra_adv_lim = TRIM(nml_tracer_list(n)%adv_lim) - tracers(n)%tra_adv_ph = nml_tracer_list(n)%adv_ph - tracers(n)%tra_adv_pv = nml_tracer_list(n)%adv_pv - tracers(n)%smooth_bh_tra = smooth_bh_tra - tracers(n)%gamma0_tra = gamma0_tra - tracers(n)%gamma1_tra = gamma1_tra - tracers(n)%gamma2_tra = gamma2_tra - tracers(n)%values = 0. - tracers(n)%valuesAB = 0. - tracers(n)%i_vert_diff = i_vert_diff +allocate(tracers%data(num_tracers)) +do n=1, tracers%num_tracers + allocate(tracers%data(n)%values (nl-1,node_size)) + allocate(tracers%data(n)%valuesAB(nl-1,node_size)) + tracers%data(n)%ID = nml_tracer_list(n)%id + tracers%data(n)%tra_adv_hor = TRIM(nml_tracer_list(n)%adv_hor) + tracers%data(n)%tra_adv_ver = TRIM(nml_tracer_list(n)%adv_ver) + tracers%data(n)%tra_adv_lim = TRIM(nml_tracer_list(n)%adv_lim) + tracers%data(n)%tra_adv_ph = nml_tracer_list(n)%adv_ph + tracers%data(n)%tra_adv_pv = nml_tracer_list(n)%adv_pv + tracers%data(n)%smooth_bh_tra = smooth_bh_tra + tracers%data(n)%gamma0_tra = gamma0_tra + tracers%data(n)%gamma1_tra = gamma1_tra + tracers%data(n)%gamma2_tra = gamma2_tra + tracers%data(n)%values = 0. + tracers%data(n)%valuesAB = 0. + tracers%data(n)%i_vert_diff = i_vert_diff end do - -allocate(del_ttf(nl-1,node_size)) -allocate(del_ttf_advhoriz(nl-1,node_size),del_ttf_advvert(nl-1,node_size)) -del_ttf = 0.0_WP -del_ttf_advhoriz = 0.0_WP -del_ttf_advvert = 0.0_WP - +allocate(tracers%work%del_ttf(nl-1,node_size)) +allocate(tracers%work%del_ttf_advhoriz(nl-1,node_size),tracers%work%del_ttf_advvert(nl-1,node_size)) +tracers%work%del_ttf = 0.0_WP +tracers%work%del_ttf_advhoriz = 0.0_WP +tracers%work%del_ttf_advvert = 0.0_WP if (ldiag_DVD) then - allocate(tr_dvd_horiz(nl-1,node_size,2),tr_dvd_vert(nl-1,node_size,2)) - tr_dvd_horiz = 0.0_WP - tr_dvd_vert = 0.0_WP -end if + allocate(tracers%work%tr_dvd_horiz(nl-1,node_size,2),tracers%work%tr_dvd_vert(nl-1,node_size,2)) + tracers%work%tr_dvd_horiz = 0.0_WP + tracers%work%tr_dvd_vert = 0.0_WP +end if END SUBROUTINE tracer_init ! ! !_______________________________________________________________________________ -SUBROUTINE arrays_init(mesh) +SUBROUTINE arrays_init(num_tracers, mesh) USE MOD_MESH -USE MOD_TRACER, only : num_tracers USE o_ARRAYS USE o_PARAM USE g_PARSUP @@ -306,9 +309,10 @@ SUBROUTINE arrays_init(mesh) USE g_forcing_param, only: use_virt_salt use diagnostics, only: ldiag_dMOC, ldiag_DVD IMPLICIT NONE -integer :: elem_size, node_size -integer :: n -type(t_mesh), intent(in) , target :: mesh +integer :: elem_size, node_size +integer :: n +integer, intent(in) :: num_tracers +type(t_mesh), intent(in), target :: mesh #include "associate_mesh.h" @@ -380,7 +384,7 @@ SUBROUTINE arrays_init(mesh) Av=0.0_WP Kv=0.0_WP if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then - allocate(Kv_double(nl,node_size,num_tracers)) + allocate(Kv_double(nl,node_size, num_tracers)) Kv_double=0.0_WP !!PS call oce_mixing_kpp_init ! Setup constants, allocate arrays and construct look up table end if @@ -559,13 +563,13 @@ SUBROUTINE oce_initial_state(tracers, mesh) integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string type(t_mesh), intent(in) , target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers real(kind=WP) :: loc, max_temp, min_temp, max_salt, min_salt #include "associate_mesh.h" - if (mype==0) write(*,*) num_tracers, ' tracers will be used in FESOM' - if (mype==0) write(*,*) 'tracer IDs are: ', tracers(1:num_tracers)%ID + if (mype==0) write(*,*) tracers%num_tracers, ' tracers will be used in FESOM' + if (mype==0) write(*,*) 'tracer IDs are: ', tracers%data(1:tracers%num_tracers)%ID ! ! read ocean state ! this must be always done! First two tracers with IDs 0 and 1 are the temperature and salinity. @@ -573,16 +577,16 @@ SUBROUTINE oce_initial_state(tracers, mesh) if(mype==0) write(*,*) 'read Salt climatology from:', trim(filelist(2)) call do_ic3d(tracers, mesh) - Tclim=tracers(1)%values - Sclim=tracers(2)%values + Tclim=tracers%data(1)%values + Sclim=tracers%data(2)%values Tsurf=Tclim(1,:) Ssurf=Sclim(1,:) relax2clim=0.0_WP ! count the passive tracers which require 3D source (ptracers_restore_total) ptracers_restore_total=0 - DO i=3, num_tracers - id=tracers(i)%ID + DO i=3, tracers%num_tracers + id=tracers%data(i)%ID SELECT CASE (id) CASE (301) ptracers_restore_total=ptracers_restore_total+1 @@ -596,18 +600,18 @@ SUBROUTINE oce_initial_state(tracers, mesh) allocate(ptracers_restore(ptracers_restore_total)) rcounter3=0 ! counter for tracers with 3D source - DO i=3, num_tracers - id=tracers(i)%ID + DO i=3, tracers%num_tracers + id=tracers%data(i)%ID SELECT CASE (id) CASE (101) ! initialize tracer ID=101 - tracers(i)%values(:,:)=0.0_WP + tracers%data(i)%values(:,:)=0.0_WP if (mype==0) then write (i_string, "(I3)") i write (id_string, "(I3)") id write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) end if CASE (301) !Fram Strait 3d restored passive tracer - tracers(i)%values(:,:)=0.0_WP + tracers%data(i)%values(:,:)=0.0_WP rcounter3 =rcounter3+1 counter=0 do k=1, myDim_nod2D+eDim_nod2D @@ -627,7 +631,7 @@ SUBROUTINE oce_initial_state(tracers, mesh) ptracers_restore(rcounter3)%ind2(counter)=k end if end do - tracers(i)%values(:,ptracers_restore(rcounter3)%ind2)=1. + tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=1. if (mype==0) then write (i_string, "(I3)") i write (id_string, "(I3)") id @@ -635,7 +639,7 @@ SUBROUTINE oce_initial_state(tracers, mesh) end if CASE (302) !Bering Strait 3d restored passive tracer - tracers(i)%values(:,:)=0.0_WP + tracers%data(i)%values(:,:)=0.0_WP rcounter3 =rcounter3+1 counter=0 do k=1, myDim_nod2D+eDim_nod2D @@ -655,7 +659,7 @@ SUBROUTINE oce_initial_state(tracers, mesh) ptracers_restore(rcounter3)%ind2(counter)=k end if end do - tracers(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP + tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP if (mype==0) then write (i_string, "(I3)") i write (id_string, "(I3)") id @@ -663,7 +667,7 @@ SUBROUTINE oce_initial_state(tracers, mesh) end if CASE (303) !BSO 3d restored passive tracer - tracers(i)%values(:,:)=0.0_WP + tracers%data(i)%values(:,:)=0.0_WP rcounter3 =rcounter3+1 counter=0 do k=1, myDim_nod2D+eDim_nod2D @@ -683,7 +687,7 @@ SUBROUTINE oce_initial_state(tracers, mesh) ptracers_restore(rcounter3)%ind2(counter)=k end if end do - tracers(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP + tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP if (mype==0) then write (i_string, "(I3)") i write (id_string, "(I3)") id @@ -716,7 +720,7 @@ SUBROUTINE before_oce_step(tracers, mesh) integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target, allocatable :: tracers(:) + type(t_tracer), intent(inout), target :: tracers #include "associate_mesh.h" diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index ddc348c5a..e3ee059b0 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -46,7 +46,7 @@ END SUBROUTINE tracer_gradient_elements ! ! !======================================================================================== -SUBROUTINE init_tracers_AB(tracer, mesh) +SUBROUTINE init_tracers_AB(tr_num, tracers, mesh) use g_config, only: flag_debug use g_parsup use o_arrays @@ -54,52 +54,53 @@ SUBROUTINE init_tracers_AB(tracer, mesh) use mod_mesh use mod_tracer IMPLICIT NONE - integer :: tr_num,n,nz + integer, intent(in) :: tr_num type(t_mesh), intent(in) , target :: mesh - type(t_tracer), intent(inout), target :: tracer + type(t_tracer), intent(inout), target :: tracers + integer :: n,nz !filling work arrays - del_ttf=0.0_WP + tracers%work%del_ttf=0.0_WP !AB interpolation - tracer%valuesAB(:,:)=-(0.5_WP+epsilon)*tracer%valuesAB(:,:)+(1.5_WP+epsilon)*tracer%values(:,:) + tracers%data(tr_num)%valuesAB(:,:)=-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:,:)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:,:) if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tracer%valuesAB, mesh) + call tracer_gradient_elements(tracers%data(tr_num)%valuesAB, mesh) call exchange_elem_begin(tr_xy) if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' - call tracer_gradient_z(tracer%values, mesh) !WHY NOT AB HERE? DSIDOREN! + call tracer_gradient_z(tracers%data(tr_num)%values, mesh) !WHY NOT AB HERE? DSIDOREN! call exchange_elem_end() ! tr_xy used in fill_up_dn_grad call exchange_nod_begin(tr_z) ! not used in fill_up_dn_grad if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call fill_up_dn_grad'//achar(27)//'[0m' - call fill_up_dn_grad(mesh) + call fill_up_dn_grad(tracers%work, mesh) call exchange_nod_end() ! tr_z halos should have arrived by now. if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tracer%values, mesh) !redefine tr_arr to the current timestep + call tracer_gradient_elements(tracers%data(tr_num)%values, mesh) !redefine tr_arr to the current timestep call exchange_elem(tr_xy) END SUBROUTINE init_tracers_AB ! ! !======================================================================================== -SUBROUTINE relax_to_clim(tracer, mesh) +SUBROUTINE relax_to_clim(tr_num, tracers, mesh) use g_config,only: dt USE g_PARSUP use o_arrays IMPLICIT NONE + integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracer - integer :: n,nz, nzmin, nzmax - + type(t_tracer), intent(inout), target :: tracers + integer :: n,nz, nzmin, nzmax real(kind=WP), dimension(:,:), pointer :: trarr #include "associate_mesh.h" - trarr=>tracer%values(:,:) + trarr=>tracers%data(tr_num)%values(:,:) - if ((clim_relax>1.0e-8_WP).and.(tracer%ID==1)) then + if ((clim_relax>1.0e-8_WP).and.(tracers%data(tr_num)%ID==1)) then DO n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) @@ -109,7 +110,7 @@ SUBROUTINE relax_to_clim(tracer, mesh) relax2clim(n)*dt*(Tclim(nzmin:nzmax-1,n)-trarr(nzmin:nzmax-1,n)) END DO END if - if ((clim_relax>1.0e-8_WP).and.(tracer%ID==2)) then + if ((clim_relax>1.0e-8_WP).and.(tracers%data(tr_num)%ID==2)) then DO n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index 221deb0ca..3f799eeca 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -76,12 +76,12 @@ subroutine relax_zonal_vel(mesh) end subroutine relax_zonal_vel !========================================================================== -subroutine relax_zonal_temp(tracer, mesh) +subroutine relax_zonal_temp(tdata, mesh) implicit none - integer :: n, nz, nn, nn1 - real(kind=WP) :: yy, a, Tzon - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracer + integer :: n, nz, nn, nn1 + real(kind=WP) :: yy, a, Tzon + type(t_mesh), intent(in), target :: mesh + type(t_tracer_data), intent(inout), target :: tdata #include "associate_mesh.h" @@ -100,7 +100,7 @@ subroutine relax_zonal_temp(tracer, mesh) end if do nz=1, nlevels_nod2D(n)-1 Tzon=(1.0-a)*ztem(nz,nn)+a*ztem(nz,nn1) - tracer%values(nz,n)= tracer%values(nz,n)+dt*tau_inv*(Tclim(nz,n)-Tzon) + tdata%values(nz,n)= tdata%values(nz,n)+dt*tau_inv*(Tclim(nz,n)-Tzon) end do end do end subroutine relax_zonal_temp @@ -163,10 +163,10 @@ end subroutine compute_zonal_mean_ini !========================================================================== subroutine compute_zonal_mean(tracers, mesh) implicit none - integer :: elem, nz, m, elnodes(3) - real(kind=8), allocatable :: zvel1D(:), znum1D(:) + integer :: elem, nz, m, elnodes(3) + real(kind=8), allocatable :: zvel1D(:), znum1D(:) type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers #include "associate_mesh.h" @@ -176,7 +176,7 @@ subroutine compute_zonal_mean(tracers, mesh) DO elem=1,myDim_elem2D if(elem2D_nodes(1,elem)>myDim_nod2D) cycle Do nz=1,nlevels(elem)-1 - ztem(nz,bpos(elem))=ztem(nz,bpos(elem))+sum(tracers(1)%values(nz,elem2D_nodes(:,elem)))/3.0_8 + ztem(nz,bpos(elem))=ztem(nz,bpos(elem))+sum(tracers%data(1)%values(nz,elem2D_nodes(:,elem)))/3.0_8 zvel(nz,bpos(elem))=zvel(nz,bpos(elem))+UV(1,nz,elem) END DO END DO @@ -227,7 +227,7 @@ subroutine initial_state_soufflet(tracers, mesh) ! Profiles Soufflet 2016 (OM) implicit none type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers(:) + type(t_tracer), intent(inout), target :: tracers integer :: n, nz, elnodes(3) real(kind=8) :: dst, yn, Fy, Lx @@ -239,12 +239,12 @@ subroutine initial_state_soufflet(tracers, mesh) dy=ysize/nybins/r_earth ! Default values - stress_surf = 0.0_WP - heat_flux = 0.0_WP - tracers(2)%values = 35.0_WP - Ssurf = tracers(2)%values(1,:) - water_flux = 0.0_WP - relax2clim = 0.0_WP + stress_surf = 0.0_WP + heat_flux = 0.0_WP + tracers%data(2)%values = 35.0_WP + Ssurf = tracers%data(2)%values(1,:) + water_flux = 0.0_WP + relax2clim = 0.0_WP ! Have to set density_0=1028._WP in oce_modules.F90 ! ======== @@ -287,21 +287,21 @@ subroutine initial_state_soufflet(tracers, mesh) end if end if do nz=1, nlevels_nod2D(n)-1 - tracers(1)%values(nz,n)=rho_So(nz)+(rho_No(nz)-rho_So(nz))*(1.0-Fy) + tracers%data(1)%values(nz,n)=rho_So(nz)+(rho_No(nz)-rho_So(nz))*(1.0-Fy) end do end do ! ======== ! Make consistent ! ======== - Tsurf=tracers(1)%values(1,:) - Tclim=tracers(1)%values(:,:) + Tsurf=tracers%data(1)%values(1,:) + Tclim=tracers%data(1)%values(:,:) ! ======== ! add small perturbation: do n=1, myDim_nod2D+eDim_nod2D dst=(coord_nod2D(2, n)-lat0)*r_earth do nz=1, nlevels_nod2D(n)-1 - tracers(1)%values(nz,n)=tracers(1)%values(nz,n)-0.1*sin(2*pi*dst/ysize)*exp(2*Z(nz)/zsize) & + tracers%data(1)%values(nz,n)=tracers%data(1)%values(nz,n)-0.1*sin(2*pi*dst/ysize)*exp(2*Z(nz)/zsize) & *(sin(8*pi*coord_nod2D(1,n)*r_earth/xsize)+ & 0.5*sin(3*pi*coord_nod2D(1,n)*r_earth/xsize)) end do @@ -309,7 +309,7 @@ subroutine initial_state_soufflet(tracers, mesh) ! ======= ! Compute geostrophically balanced flow ! ======= - write(*,*) mype, 'T', maxval(tracers(1)%values), minval(tracers(1)%values) + write(*,*) mype, 'T', maxval(tracers%data(1)%values), minval(tracers%data(1)%values) ! Redefine Coriolis (to agree with the Soufflet paper) DO n=1,myDim_elem2D elnodes=elem2D_nodes(:,n) diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index f7898a162..68b7960cf 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -5,7 +5,7 @@ subroutine write_step_info(istep,outfreq,tracers,mesh) use MOD_TRACER integer :: istep,outfreq type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module @@ -15,7 +15,7 @@ subroutine check_blowup(istep, tracers, mesh) use MOD_MESH use MOD_TRACER integer :: istep - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers type(t_mesh), intent(in), target :: mesh end subroutine end interface @@ -46,7 +46,7 @@ subroutine write_step_info(istep, outfreq, tracers, mesh) real(kind=WP) :: int_deta , int_dhbar real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers #include "associate_mesh.h" if (mod(istep,outfreq)==0) then @@ -77,8 +77,8 @@ subroutine write_step_info(istep, outfreq, tracers, mesh) loc_dhbar = loc_dhbar + areasvol(ulevels_nod2D(n), n)*(hbar(n)-hbar_old(n)) loc_wflux = loc_wflux + areasvol(ulevels_nod2D(n), n)*water_flux(n) !!PS loc_hflux = loc_hflux + area(1, n)*heat_flux(n) -!!PS loc_temp = loc_temp + area(1, n)*sum(tracers(1)%values(:, n))/(nlevels_nod2D(n)-1) -!!PS loc_salt = loc_salt + area(1, n)*sum(tracers(2)%values(:, n))/(nlevels_nod2D(n)-1) +!!PS loc_temp = loc_temp + area(1, n)*sum(tracers%data(1)%values(:, n))/(nlevels_nod2D(n)-1) +!!PS loc_salt = loc_salt + area(1, n)*sum(tracers%data(2)%values(:, n))/(nlevels_nod2D(n)-1) end do !_______________________________________________________________________ @@ -116,9 +116,9 @@ subroutine write_step_info(istep, outfreq, tracers, mesh) call MPI_AllREDUCE(loc , min_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc = minval(heat_flux(1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(tracers(1)%values(:,1:myDim_nod2D),MASK=(tracers(2)%values(:,1:myDim_nod2D)/=0.0)) + loc = minval(tracers%data(1)%values(:,1:myDim_nod2D),MASK=(tracers%data(2)%values(:,1:myDim_nod2D)/=0.0)) call MPI_AllREDUCE(loc , min_temp , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(tracers(2)%values(:,1:myDim_nod2D),MASK=(tracers(2)%values(:,1:myDim_nod2D)/=0.0)) + loc = minval(tracers%data(2)%values(:,1:myDim_nod2D),MASK=(tracers%data(2)%values(:,1:myDim_nod2D)/=0.0)) call MPI_AllREDUCE(loc , min_salt , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc = minval(Wvel(1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) @@ -148,9 +148,9 @@ subroutine write_step_info(istep, outfreq, tracers, mesh) call MPI_AllREDUCE(loc , max_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc = maxval(heat_flux(1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(tracers(1)%values(:,1:myDim_nod2D),MASK=(tracers(2)%values(:,1:myDim_nod2D)/=0.0)) + loc = maxval(tracers%data(1)%values(:,1:myDim_nod2D),MASK=(tracers%data(2)%values(:,1:myDim_nod2D)/=0.0)) call MPI_AllREDUCE(loc , max_temp , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(tracers(2)%values(:,1:myDim_nod2D),MASK=(tracers(2)%values(:,1:myDim_nod2D)/=0.0)) + loc = maxval(tracers%data(2)%values(:,1:myDim_nod2D),MASK=(tracers%data(2)%values(:,1:myDim_nod2D)/=0.0)) call MPI_AllREDUCE(loc , max_salt , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc = maxval(Wvel(1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) @@ -249,7 +249,7 @@ subroutine check_blowup(istep, tracers, mesh) integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 integer :: el, elidx type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers(:) + type(t_tracer), intent(in), target :: tracers #include "associate_mesh.h" !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then @@ -377,8 +377,8 @@ subroutine check_blowup(istep, tracers, mesh) do nz=1,nlevels_nod2D(n)-1 !_______________________________________________________________ ! check temp - if ( (tracers(1)%values(nz, n) /= tracers(1)%values(nz, n)) .or. & - tracers(1)%values(nz, n) < -5.0 .or. tracers(1)%values(nz, n)>60) then + if ( (tracers%data(1)%values(nz, n) /= tracers%data(1)%values(nz, n)) .or. & + tracers%data(1)%values(nz, n) < -5.0 .or. tracers%data(1)%values(nz, n)>60) then found_blowup_loc=1 write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep write(*,*) ' --STOP--> found temperture becomes NaN or <-5.0, >60' @@ -390,10 +390,10 @@ subroutine check_blowup(istep, tracers, mesh) write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad write(*,*) 'z=', Z_n(nz) - write(*,*) 'temp(nz, n) = ',tracers(1)%values(nz, n) - write(*,*) 'temp(: , n) = ',tracers(1)%values(:, n) - write(*,*) 'temp_old(nz,n)= ',tracers(1)%valuesAB(nz, n) - write(*,*) 'temp_old(: ,n)= ',tracers(1)%valuesAB(:, n) + write(*,*) 'temp(nz, n) = ',tracers%data(1)%values(nz, n) + write(*,*) 'temp(: , n) = ',tracers%data(1)%values(:, n) + write(*,*) 'temp_old(nz,n)= ',tracers%data(1)%valuesAB(nz, n) + write(*,*) 'temp_old(: ,n)= ',tracers%data(1)%valuesAB(:, n) write(*,*) write(*,*) 'hflux = ',heat_flux(n) write(*,*) 'wflux = ',water_flux(n) @@ -428,12 +428,12 @@ subroutine check_blowup(istep, tracers, mesh) ! enddo write(*,*) - endif ! --> if ( (tracers(1)%values(nz, n) /= tracers(1)%values(nz, n)) .or. & ... + endif ! --> if ( (tracers%data(1)%values(nz, n) /= tracers%data(1)%values(nz, n)) .or. & ... !_______________________________________________________________ ! check salt - if ( (tracers(2)%values(nz, n) /= tracers(2)%values(nz, n)) .or. & - tracers(2)%values(nz, n) < 0 .or. tracers(2)%values(nz, n)>50 ) then + if ( (tracers%data(2)%values(nz, n) /= tracers%data(2)%values(nz, n)) .or. & + tracers%data(2)%values(nz, n) < 0 .or. tracers%data(2)%values(nz, n)>50 ) then found_blowup_loc=1 write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep write(*,*) ' --STOP--> found salinity becomes NaN or <0, >50' @@ -444,11 +444,11 @@ subroutine check_blowup(istep, tracers, mesh) write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad write(*,*) 'z=', Z_n(nz) - write(*,*) 'salt(nz, n) = ',tracers(2)%values(nz, n) - write(*,*) 'salt(: , n) = ',tracers(2)%values(:, n) + write(*,*) 'salt(nz, n) = ',tracers%data(2)%values(nz, n) + write(*,*) 'salt(: , n) = ',tracers%data(2)%values(:, n) write(*,*) - write(*,*) 'temp(nz, n) = ',tracers(1)%values(nz, n) - write(*,*) 'temp(: , n) = ',tracers(1)%values(:, n) + write(*,*) 'temp(nz, n) = ',tracers%data(1)%values(nz, n) + write(*,*) 'temp(: , n) = ',tracers%data(1)%values(:, n) write(*,*) write(*,*) 'hflux = ',heat_flux(n) write(*,*) @@ -482,7 +482,7 @@ subroutine check_blowup(istep, tracers, mesh) write(*,*) write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad write(*,*) - endif ! --> if ( (tracers(2)%values(nz, n) /= tracers(2)%values(nz, n)) .or. & ... + endif ! --> if ( (tracers%data(2)%values(nz, n) /= tracers%data(2)%values(nz, n)) .or. & ... end do ! --> do nz=1,nlevels_nod2D(n)-1 end do ! --> do n=1, myDim_nod2d ! ! end if From 9be920ab4446c8c98f45b185f2a2cd8d9d7a7de1 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 17 Sep 2021 16:22:28 +0200 Subject: [PATCH 377/909] I forgot to remove the STOP statement in from the previous commit --- src/fvom_main.F90 | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index a0ead3093..dbffdf80a 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -187,26 +187,26 @@ program main endif - write (dump_filename, "(A7,I7.7)") "t_mesh.", mype - open (mype+300, file=trim(dump_filename), status='replace', form="unformatted") - write (mype+300) mesh - close (mype+300) - - open (mype+300, file=trim(dump_filename), status='old', form="unformatted") - read (mype+300) mesh_copy - close (mype+300) +! write (dump_filename, "(A7,I7.7)") "t_mesh.", mype +! open (mype+300, file=trim(dump_filename), status='replace', form="unformatted") +! write (mype+300) mesh +! close (mype+300) + +! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") +! read (mype+300) mesh_copy +! close (mype+300) - write (dump_filename, "(A9,I7.7)") "t_tracer.", mype - open (mype+300, file=trim(dump_filename), status='replace', form="unformatted") - write (mype+300) tracers - close (mype+300) +! write (dump_filename, "(A9,I7.7)") "t_tracer.", mype +! open (mype+300, file=trim(dump_filename), status='replace', form="unformatted") +! write (mype+300) tracers +! close (mype+300) - open (mype+300, file=trim(dump_filename), status='old', form="unformatted") - read (mype+300) tracers_copy - close (mype+300) +! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") +! read (mype+300) tracers_copy +! close (mype+300) -call par_ex -stop +!call par_ex +!stop ! ! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value From 78f19bb6ce20d08596d5f1d715500d0dd1c209b0 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 20 Sep 2021 09:22:16 +0200 Subject: [PATCH 378/909] fixed a stupid bug during refactoring in the vertical advection part --- src/oce_adv_tra_driver.F90 | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 536859a71..fe03742c6 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -136,7 +136,6 @@ subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n) end do end do - if (w_split) then !wvel/=wvel_e ! update for implicit contribution (w_split option) call adv_tra_vert_impl(wi, fct_LO, mesh) @@ -173,26 +172,25 @@ subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) else pwvel=>we end if - !___________________________________________________________________________ ! do vertical tracer advection, in case of FCT high order solution SELECT CASE(trim(tracers%data(tr_num)%tra_adv_ver)) CASE('QR4C') ! compute the untidiffusive vertical flux (init_zero=.false.:input is the LO vertical flux computed above) - call adv_tra_ver_qr4c (ttfAB, pwvel, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_qr4c (pwvel, ttfAB, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) CASE('CDIFF') - call adv_tra_ver_cdiff(ttfAB, pwvel, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_cdiff(pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE('PPM') - call adv_tra_vert_ppm (ttfAB, pwvel, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_vert_ppm (pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_ver_upw1 (ttfAB, pwvel, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_upw1 (pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!' call par_ex(1) ! --> be aware the vertical implicite part in case without FCT is done in ! oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, mesh) ! for do_wimpl=.true. - END SELECT + END SELECT !___________________________________________________________________________ ! if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then From e8023369b93ab7c3fee9dbbcce84c4223803aa93 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 20 Sep 2021 13:47:37 +0200 Subject: [PATCH 379/909] 1. all ALE arrays have been moved into the MOD_MESH. makes sence since! 2. not necesary dependencies in the advection routines have been removed. DT shall be an input parameter although --- src/MOD_MESH.F90 | 62 ++++++++++++++++++++++++++++++++++++++ src/MOD_TRACER.F90 | 4 +-- src/associate_mesh.h | 35 +++++++++++++++++++++ src/cavity_param.F90 | 9 ++---- src/gen_modules_diag.F90 | 4 +-- src/io_mesh_info.F90 | 1 - src/oce_adv_tra_driver.F90 | 40 ++++++++++++------------ src/oce_adv_tra_fct.F90 | 36 +++++++++++----------- src/oce_adv_tra_hor.F90 | 10 ------ src/oce_adv_tra_ver.F90 | 41 +++++++++++-------------- src/oce_ale.F90 | 55 +++++++++++++++++++++------------ src/oce_ale_tracer.F90 | 10 +++--- src/oce_fer_gm.F90 | 6 ++-- src/oce_modules.F90 | 34 --------------------- 14 files changed, 204 insertions(+), 143 deletions(-) diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index 9946013c5..f41e42fc4 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -97,6 +97,36 @@ MODULE MOD_MESH integer, allocatable, dimension(:) :: nn_num integer, allocatable, dimension(:,:) :: nn_pos +!_______________________________________________________________________________ +! Arrays added for ALE implementation: +! --> layer thinkness at node and depthlayer for t=n and t=n+1 +real(kind=WP), allocatable,dimension(:,:) :: hnode, hnode_new, zbar_3d_n, Z_3d_n + +! --> layer thinkness at elements, interpolated from hnode +real(kind=WP), allocatable,dimension(:,:) :: helem + +! --> thinkness of bottom elem (important for partial cells) +real(kind=WP), allocatable,dimension(:) :: bottom_elem_thickness +real(kind=WP), allocatable,dimension(:) :: bottom_node_thickness + +! --> The increment of total fluid depth on elements. It is used to update the matrix +real(kind=WP), allocatable,dimension(:) :: dhe + +! --> hbar, hbar_old: correspond to the elevation, but on semi-integer time steps. +real(kind=WP), allocatable,dimension(:) :: hbar, hbar_old + +! --> auxiliary array to store depth of layers and depth of mid level due to changing +! layer thinkness at every node +real(kind=WP), allocatable,dimension(:) :: zbar_n, Z_n + +! new bottom depth at node and element due to partial cells +real(kind=WP), allocatable,dimension(:) :: zbar_n_bot +real(kind=WP), allocatable,dimension(:) :: zbar_e_bot + +! new depth of cavity-ocean interface at node and element due to partial cells +real(kind=WP), allocatable,dimension(:) :: zbar_n_srf +real(kind=WP), allocatable,dimension(:) :: zbar_e_srf + character(:), allocatable :: representative_checksum contains @@ -181,6 +211,22 @@ subroutine write_t_mesh(mesh, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) mesh%nn_size call write_bin_array(mesh%nn_num, unit, iostat, iomsg) call write_bin_array(mesh%nn_pos, unit, iostat, iomsg) + call write_bin_array(mesh%hnode, unit, iostat, iomsg) + call write_bin_array(mesh%hnode_new, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_3d_n, unit, iostat, iomsg) + call write_bin_array(mesh%Z_3d_n, unit, iostat, iomsg) + call write_bin_array(mesh%helem, unit, iostat, iomsg) + call write_bin_array(mesh%bottom_elem_thickness, unit, iostat, iomsg) + call write_bin_array(mesh%bottom_node_thickness, unit, iostat, iomsg) + call write_bin_array(mesh%dhe, unit, iostat, iomsg) + call write_bin_array(mesh%hbar, unit, iostat, iomsg) + call write_bin_array(mesh%hbar_old, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_n, unit, iostat, iomsg) + call write_bin_array(mesh%Z_n, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_n_bot, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_e_bot, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg) ! call write_bin_array(mesh%representative_checksum, unit, iostat, iomsg) end subroutine write_t_mesh @@ -258,6 +304,22 @@ subroutine read_t_mesh(mesh, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) mesh%nn_size call read_bin_array(mesh%nn_num, unit, iostat, iomsg) call read_bin_array(mesh%nn_pos, unit, iostat, iomsg) + call read_bin_array(mesh%hnode, unit, iostat, iomsg) + call read_bin_array(mesh%hnode_new, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_3d_n, unit, iostat, iomsg) + call read_bin_array(mesh%Z_3d_n, unit, iostat, iomsg) + call read_bin_array(mesh%helem, unit, iostat, iomsg) + call read_bin_array(mesh%bottom_elem_thickness, unit, iostat, iomsg) + call read_bin_array(mesh%bottom_node_thickness, unit, iostat, iomsg) + call read_bin_array(mesh%dhe, unit, iostat, iomsg) + call read_bin_array(mesh%hbar, unit, iostat, iomsg) + call read_bin_array(mesh%hbar_old, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n, unit, iostat, iomsg) + call read_bin_array(mesh%Z_n, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n_bot, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_e_bot, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg) ! call read_bin_array(mesh%representative_checksum, unit, iostat, iomsg) end subroutine read_t_mesh end module MOD_MESH diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index cfd439ea9..35e35020f 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -204,11 +204,11 @@ subroutine READ_T_TRACER(tracer, unit, iostat, iomsg) integer :: i read(unit, iostat=iostat, iomsg=iomsg) tracer%num_tracers - write(*,*) 'number of tracers to read: ', tracer%num_tracers +! write(*,*) 'number of tracers to read: ', tracer%num_tracers allocate(tracer%data(tracer%num_tracers)) do i=1, tracer%num_tracers read(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) - write(*,*) 'tracer info:', tracer%data(i)%ID, TRIM(tracer%data(i)%tra_adv_hor), TRIM(tracer%data(i)%tra_adv_ver), TRIM(tracer%data(i)%tra_adv_lim) +! write(*,*) 'tracer info:', tracer%data(i)%ID, TRIM(tracer%data(i)%tra_adv_hor), TRIM(tracer%data(i)%tra_adv_ver), TRIM(tracer%data(i)%tra_adv_lim) end do read(unit, iostat=iostat, iomsg=iomsg) tracer%work read(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra diff --git a/src/associate_mesh.h b/src/associate_mesh.h index 233036c96..9edc60383 100644 --- a/src/associate_mesh.h +++ b/src/associate_mesh.h @@ -35,6 +35,23 @@ integer, dimension(:) , pointer :: ulevels, ulevels_nod2D, ulevels_nod2D_ integer, dimension(:) , pointer :: nn_num integer, dimension(:,:), pointer :: nn_pos +real(kind=WP), dimension(:,:), pointer :: hnode +real(kind=WP), dimension(:,:), pointer :: hnode_new +real(kind=WP), dimension(:,:), pointer :: zbar_3d_n +real(kind=WP), dimension(:,:), pointer :: Z_3d_n +real(kind=WP), dimension(:,:), pointer :: helem +real(kind=WP), dimension(:) , pointer :: bottom_elem_thickness +real(kind=WP), dimension(:) , pointer :: bottom_node_thickness +real(kind=WP), dimension(:) , pointer :: dhe +real(kind=WP), dimension(:) , pointer :: hbar +real(kind=WP), dimension(:) , pointer :: hbar_old +real(kind=WP), dimension(:) , pointer :: zbar_n +real(kind=WP), dimension(:) , pointer :: Z_n +real(kind=WP), dimension(:) , pointer :: zbar_n_bot +real(kind=WP), dimension(:) , pointer :: zbar_e_bot +real(kind=WP), dimension(:) , pointer :: zbar_n_srf +real(kind=WP), dimension(:) , pointer :: zbar_e_srf + nod2D => mesh%nod2D elem2D => mesh%elem2D edge2D => mesh%edge2D @@ -127,3 +144,21 @@ ulevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D ulevels_nod2D_max(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D_max nn_num(1:myDim_nod2D) => mesh%nn_num nn_pos(1:mesh%nn_size, 1:myDim_nod2D) => mesh%nn_pos +hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode +hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new +zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n +Z_3d_n(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%Z_3d_n +helem(1:mesh%nl-1, 1:myDim_elem2D) => mesh%helem +bottom_elem_thickness(1:myDim_elem2D) => mesh%bottom_elem_thickness +bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_thickness +dhe(1:myDim_elem2D) => mesh%dhe +hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar +hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old +zbar_n(1:mesh%nl) => mesh%zbar_n +Z_n(1:mesh%nl-1) => mesh%Z_n +zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot +zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot +zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf +zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf + + diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 51dba031b..946996802 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -18,7 +18,6 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) subroutine compute_nrst_pnt2cavline(mesh) use MOD_MESH use o_PARAM , only: WP - use o_ARRAYS, only: Z_3d_n use g_PARSUP implicit none @@ -134,11 +133,10 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) use MOD_MESH use MOD_TRACER use o_PARAM , only: density_0, WP - use o_ARRAYS, only: heat_flux, water_flux, Z_3d_n, Unode, density_m_rho0,density_ref + use o_ARRAYS, only: heat_flux, water_flux, Unode, density_m_rho0,density_ref use i_ARRAYS, only: net_heat_flux, fresh_wa_flux use g_PARSUP implicit none - !___________________________________________________________________________ type(t_mesh), intent(inout), target :: mesh type(t_tracer), intent(in), target :: tracers @@ -148,8 +146,7 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) real (kind=WP) :: ep1,ep2,ep3,ep4,ep5,ep31 real (kind=WP) :: ex1,ex2,ex3,ex4,ex5,ex6 real (kind=WP) :: vt1,sr1,sr2,sf1,sf2,tf1,tf2,tf,sf,seta,re - integer :: node, nzmax, nzmin - + integer :: node, nzmax, nzmin !___________________________________________________________________________ real(kind=WP),parameter :: rp = 0. !reference pressure real(kind=WP),parameter :: a = -0.0575 !Foldvik&Kvinge (1974) @@ -321,7 +318,7 @@ subroutine cavity_heat_water_fluxes_2eq(tracers, mesh) use MOD_MESH use MOD_TRACER use o_PARAM , only: WP - use o_ARRAYS, only: heat_flux, water_flux, Z_3d_n + use o_ARRAYS, only: heat_flux, water_flux use i_ARRAYS, only: net_heat_flux, fresh_wa_flux use g_PARSUP implicit none diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index a79dcf106..560dc3e16 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -711,7 +711,7 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, tracers, mesh) tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP ! maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? -! call do_oce_adv_tra(tr_sqr, trAB_sqr, UV, wvel, wvel_i, wvel_e, 1, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) +! call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_sqr, trAB_sqr, 1, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D @@ -762,7 +762,7 @@ subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers, mesh) tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP ! maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? -! call do_oce_adv_tra(tracers%data(tr_num)%values, tracers%data(tr_num)%valuesAB(:,:), UV, wvel, wvel_i, wvel_e, 2, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) +! call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tracers%data(tr_num)%values, tracers%data(tr_num)%valuesAB(:,:), 2, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D diff --git a/src/io_mesh_info.F90 b/src/io_mesh_info.F90 index c1f74c08e..5deb51530 100644 --- a/src/io_mesh_info.F90 +++ b/src/io_mesh_info.F90 @@ -3,7 +3,6 @@ module io_mesh_info use MOD_MESH use g_config use g_comm_auto -use o_ARRAYS use o_PARAM implicit none diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index fe03742c6..7b58b63f2 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -1,9 +1,10 @@ module oce_adv_tra_driver_interfaces interface - subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) + subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, mesh) use MOD_MESH use MOD_TRACER use g_PARSUP + real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(inout), target :: tracers @@ -17,10 +18,11 @@ subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) module oce_tra_adv_flux2dtracer_interface interface - subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo, ttf, lo) + subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo, ttf, lo) !update the solution for vertical and horizontal flux contributions use MOD_MESH use g_PARSUP + real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) @@ -35,19 +37,17 @@ subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo ! ! !=============================================================================== -subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) +subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, mesh) use MOD_MESH use MOD_TRACER - use o_ARRAYS - use o_PARAM use g_PARSUP - use g_CONFIG use g_comm_auto use oce_adv_tra_hor_interfaces use oce_adv_tra_ver_interfaces use oce_adv_tra_fct_interfaces use oce_tra_adv_flux2dtracer_interface implicit none + real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(inout), target :: tracers @@ -138,7 +138,7 @@ subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) end do if (w_split) then !wvel/=wvel_e ! update for implicit contribution (w_split option) - call adv_tra_vert_impl(wi, fct_LO, mesh) + call adv_tra_vert_impl(dt, wi, fct_LO, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation ! --> compute here low order part of vertical anti diffusive fluxes, @@ -157,11 +157,11 @@ subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) SELECT CASE(trim(tracers%data(tr_num)%tra_adv_hor)) CASE('MUSCL') ! compute the untidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above) - call adv_tra_hor_muscl(uv, ttfAB, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, init_zero=do_zero_flux) + call adv_tra_hor_muscl(vel, ttfAB, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, init_zero=do_zero_flux) CASE('MFCT') - call adv_tra_hor_mfct(uv, ttfAB, mesh, opth, adv_flux_hor, edge_up_dn_grad, init_zero=do_zero_flux) + call adv_tra_hor_mfct(vel, ttfAB, mesh, opth, adv_flux_hor, edge_up_dn_grad, init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_hor_upw1(uv, ttfAB, mesh, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_upw1(vel, ttfAB, mesh, adv_flux_hor, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracers%data(tr_num)%tra_adv_hor), '! Check your namelists!' call par_ex(1) @@ -177,13 +177,13 @@ subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) SELECT CASE(trim(tracers%data(tr_num)%tra_adv_ver)) CASE('QR4C') ! compute the untidiffusive vertical flux (init_zero=.false.:input is the LO vertical flux computed above) - call adv_tra_ver_qr4c (pwvel, ttfAB, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_qr4c ( pwvel, ttfAB, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) CASE('CDIFF') - call adv_tra_ver_cdiff(pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_cdiff( pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE('PPM') - call adv_tra_vert_ppm (pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_vert_ppm(dt, pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_ver_upw1 (pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_upw1 ( pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!' call par_ex(1) @@ -194,23 +194,23 @@ subroutine do_oce_adv_tra(vel, w, wi, we, tr_num, tracers, mesh) !___________________________________________________________________________ ! if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then - call oce_tra_adv_fct(ttf, fct_LO, adv_flux_hor, adv_flux_ver, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, mesh) - call oce_tra_adv_flux2dtracer(dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, mesh, use_lo=.TRUE., ttf=ttf, lo=fct_LO) + !edge_up_dn_grad will be used as an auxuary array here + call oce_tra_adv_fct(dt, ttf, fct_LO, adv_flux_hor, adv_flux_ver, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, edge_up_dn_grad, mesh) + call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, mesh, use_lo=.TRUE., ttf=ttf, lo=fct_LO) else - call oce_tra_adv_flux2dtracer(dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, mesh) + call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, mesh) end if end subroutine do_oce_adv_tra ! ! !=============================================================================== -subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo, ttf, lo) +subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo, ttf, lo) use MOD_MESH use o_ARRAYS - use o_PARAM use g_PARSUP - use g_CONFIG use g_comm_auto implicit none + real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 3b9b95003..ec8314e09 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -8,9 +8,10 @@ subroutine oce_adv_tra_fct_init(twork, mesh) type(t_tracer_work), intent(inout), target :: twork end subroutine - subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, mesh) + subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, mesh) use MOD_MESH use g_PARSUP + real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, myDim_nod2D+eDim_nod2D) @@ -20,6 +21,7 @@ subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_ real(kind=WP), intent(inout) :: adf_v(mesh%nl, myDim_nod2D) real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, myDim_edge2D) real(kind=WP), intent(inout) :: fct_minus(mesh%nl, myDim_nod2D) + real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array end subroutine end interface end module @@ -29,8 +31,6 @@ subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_ subroutine oce_adv_tra_fct_init(twork, mesh) use MOD_MESH use MOD_TRACER - use o_ARRAYS - use o_PARAM use g_PARSUP implicit none integer :: my_size @@ -60,7 +60,7 @@ end subroutine oce_adv_tra_fct_init ! ! !=============================================================================== -subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, mesh) +subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, mesh) ! ! 3D Flux Corrected Transport scheme ! Limits antidiffusive fluxes==the difference in flux HO-LO @@ -69,12 +69,10 @@ subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_ ! Adds limited fluxes to the LO solution use MOD_MESH use MOD_TRACER - use o_ARRAYS - use o_PARAM use g_PARSUP - use g_CONFIG use g_comm_auto implicit none + real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, myDim_nod2D+eDim_nod2D) @@ -84,7 +82,7 @@ subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_ real(kind=WP), intent(inout) :: adf_v(mesh%nl, myDim_nod2D) real(kind=WP), intent(inout) :: fct_plus (mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(inout) :: fct_minus(mesh%nl-1, myDim_nod2D+eDim_nod2D) - + real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array, let us use twork%edge_up_dn_grad(1:4, 1:NL-2, 1:myDim_edge2D) to save space integer :: n, nz, k, elem, enodes(3), num, el(2), nl1, nl2, nu1, nu2, nl12, nu12, edge real(kind=WP) :: flux, ae,tvert_max(mesh%nl-1),tvert_min(mesh%nl-1) real(kind=WP) :: flux_eps=1e-16 @@ -112,19 +110,19 @@ subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_ !___________________________________________________________________________ ! a2. Admissible increments on elements ! (only layers below the first and above the last layer) - ! look for max, min bounds for each element --> UV_rhs here auxilary array + ! look for max, min bounds for each element --> AUX here auxilary array do elem=1, myDim_elem2D enodes=elem2D_nodes(:,elem) nu1 = ulevels(elem) nl1 = nlevels(elem) do nz=nu1, nl1-1 - UV_rhs(1,nz,elem)=maxval(fct_ttf_max(nz,enodes)) - UV_rhs(2,nz,elem)=minval(fct_ttf_min(nz,enodes)) + AUX(1,nz,elem)=maxval(fct_ttf_max(nz,enodes)) + AUX(2,nz,elem)=minval(fct_ttf_min(nz,enodes)) end do if (nl1<=nl-1) then do nz=nl1,nl-1 - UV_rhs(1,nz,elem)=-bignumber - UV_rhs(2,nz,elem)= bignumber + AUX(1,nz,elem)=-bignumber + AUX(2,nz,elem)= bignumber end do endif end do ! --> do elem=1, myDim_elem2D @@ -146,8 +144,8 @@ subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_ ! vertical layer ! nod_in_elem2D --> elem indices of which node n is surrounded ! nod_in_elem2D_num --> max number of surrounded elem - tvert_max(nz)= maxval(UV_rhs(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(UV_rhs(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) end do !___________________________________________________________________ @@ -178,8 +176,8 @@ subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_ nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) do nz=nu1,nl1-1 - tvert_max(nz)= maxval(UV_rhs(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(UV_rhs(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) end do do nz=nu1+1, nl1-2 tvert_max(nz)=max(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) @@ -200,8 +198,8 @@ subroutine oce_tra_adv_fct(ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_ nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) do nz=nu1, nl1-1 - tvert_max(nz)= maxval(UV_rhs(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(UV_rhs(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) end do do nz=nu1+1, nl1-2 tvert_max(nz)=min(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 3225d5e9d..df6e91dcf 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -57,10 +57,7 @@ subroutine adv_tra_hor_mfct(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, !=============================================================================== subroutine adv_tra_hor_upw1(vel, ttf, mesh, flux, init_zero) use MOD_MESH - use o_ARRAYS - use o_PARAM use g_PARSUP - use g_CONFIG use g_comm_auto implicit none type(t_mesh), intent(in) , target :: mesh @@ -75,7 +72,6 @@ subroutine adv_tra_hor_upw1(vel, ttf, mesh, flux, init_zero) #include "associate_mesh.h" - if (present(init_zero))then if (init_zero) flux=0.0_WP else @@ -214,10 +210,7 @@ end subroutine adv_tra_hor_upw1 subroutine adv_tra_hor_muscl(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) use MOD_MESH use MOD_TRACER - use o_ARRAYS - use o_PARAM use g_PARSUP - use g_CONFIG use g_comm_auto implicit none type(t_mesh), intent(in), target :: mesh @@ -485,10 +478,7 @@ end subroutine adv_tra_hor_muscl subroutine adv_tra_hor_mfct(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, init_zero) use MOD_MESH use MOD_TRACER - use o_ARRAYS - use o_PARAM use g_PARSUP - use g_CONFIG use g_comm_auto implicit none type(t_mesh), intent(in), target :: mesh diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 3a84c509a..7f7270635 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -2,9 +2,10 @@ module oce_adv_tra_ver_interfaces interface ! implicit 1st order upwind vertical advection with to solve for fct_LO ! updates the input tracer ttf - subroutine adv_tra_vert_impl(w, ttf, mesh) + subroutine adv_tra_vert_impl(dt, w, ttf, mesh) use mod_mesh use g_PARSUP + real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(inout) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) @@ -46,9 +47,10 @@ subroutine adv_tra_ver_qr4c(w, ttf, mesh, num_ord, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_vert_ppm(w, ttf, mesh, flux, init_zero) + subroutine adv_tra_vert_ppm(dt, w, ttf, mesh, flux, init_zero) use MOD_MESH use g_PARSUP + real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh integer :: n, nz, nl1 real(kind=WP) :: tvert(mesh%nl), tv @@ -76,17 +78,14 @@ subroutine adv_tra_ver_cdiff(w, ttf, mesh, flux, init_zero) end interface end module !=============================================================================== -subroutine adv_tra_vert_impl(w, ttf, mesh) +subroutine adv_tra_vert_impl(dt, w, ttf, mesh) use MOD_MESH - use o_PARAM - use o_ARRAYS - use i_ARRAYS + use MOD_TRACER use g_PARSUP - use g_CONFIG - use g_forcing_arrays - use o_mixing_KPP_mod !for ghats _GO_ - + use g_comm_auto + implicit none + real(kind=WP), intent(in) , target :: dt type(t_mesh), intent(in) , target :: mesh real(kind=WP), intent(inout) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) @@ -224,12 +223,11 @@ end subroutine adv_tra_vert_impl ! !=============================================================================== subroutine adv_tra_ver_upw1(w, ttf, mesh, flux, init_zero) - use g_config use MOD_MESH - use o_ARRAYS - use o_PARAM + use MOD_TRACER use g_PARSUP - use g_forcing_arrays + use g_comm_auto + implicit none type(t_mesh), intent(in), target :: mesh real(kind=WP) :: tvert(mesh%nl) @@ -351,14 +349,13 @@ end subroutine adv_tra_ver_qr4c ! ! !=============================================================================== -subroutine adv_tra_vert_ppm(w, ttf, mesh, flux, init_zero) - use g_config +subroutine adv_tra_vert_ppm(dt, w, ttf, mesh, flux, init_zero) use MOD_MESH - use o_ARRAYS - use o_PARAM + use MOD_TRACER use g_PARSUP - use g_forcing_arrays + use g_comm_auto implicit none + real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in) , target :: mesh real(kind=WP), intent(in) :: ttf (mesh%nl-1, myDim_nod2D+eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) @@ -532,12 +529,10 @@ end subroutine adv_tra_vert_ppm ! !=============================================================================== subroutine adv_tra_ver_cdiff(w, ttf, mesh, flux, init_zero) - use g_config use MOD_MESH - use o_ARRAYS - use o_PARAM + use MOD_TRACER use g_PARSUP - use g_forcing_arrays + use g_comm_auto implicit none type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index fcad0cd4a..51c4d72b2 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -100,47 +100,65 @@ subroutine init_ale(mesh) use oce_ale_interfaces Implicit NONE - integer :: n, nzmax, nzmin, elnodes(3), elem - type(t_mesh), intent(in) , target :: mesh + integer :: n, nzmax, nzmin, elnodes(3), elem + type(t_mesh), intent(inout), target :: mesh #include "associate_mesh.h" !___allocate________________________________________________________________ ! hnode and hnode_new: layer thicknesses at nodes. - allocate(hnode(1:nl-1, myDim_nod2D+eDim_nod2D)) - allocate(hnode_new(1:nl-1, myDim_nod2D+eDim_nod2D)) + allocate(mesh%hnode(1:nl-1, myDim_nod2D+eDim_nod2D)) + allocate(mesh%hnode_new(1:nl-1, myDim_nod2D+eDim_nod2D)) ! ssh_rhs_old: auxiliary array to store an intermediate part of the rhs computations. allocate(ssh_rhs_old(myDim_nod2D+eDim_nod2D)) ! hbar, hbar_old: correspond to the elevation, but on semi-integer time steps. - allocate(hbar(myDim_nod2D+eDim_nod2D)) - allocate(hbar_old(myDim_nod2D+eDim_nod2D)) + allocate(mesh%hbar(myDim_nod2D+eDim_nod2D)) + allocate(mesh%hbar_old(myDim_nod2D+eDim_nod2D)) ! helem: layer thickness at elements. It is interpolated from hnode. - allocate(helem(1:nl-1, myDim_elem2D)) + allocate(mesh%helem(1:nl-1, myDim_elem2D)) ! dhe: The increment of total fluid depth on elements. It is used to update the matrix ! of the ssh operator. - allocate(dhe(myDim_elem2D)) + allocate(mesh%dhe(myDim_elem2D)) ! zbar_n: depth of layers due to ale thinkness variactions at ervery node n - allocate(zbar_n(nl)) - allocate(zbar_3d_n(nl,myDim_nod2D+eDim_nod2D)) + allocate(mesh%zbar_n(nl)) + allocate(mesh%zbar_3d_n(nl,myDim_nod2D+eDim_nod2D)) ! Z_n: mid depth of layers due to ale thinkness variactions at ervery node n - allocate(Z_n(nl-1)) - allocate(Z_3d_n(nl-1,myDim_nod2D+eDim_nod2D)) + allocate(mesh%Z_n(nl-1)) + allocate(mesh%Z_3d_n(nl-1,myDim_nod2D+eDim_nod2D)) ! bottom_elem_tickness: changed bottom layer thinkness due to partial cells - allocate(bottom_elem_thickness(myDim_elem2D)) - allocate(zbar_e_bot(myDim_elem2D+eDim_elem2D)) - allocate(zbar_e_srf(myDim_elem2D+eDim_elem2D)) + allocate(mesh%bottom_elem_thickness(myDim_elem2D)) + allocate(mesh%zbar_e_bot(myDim_elem2D+eDim_elem2D)) + allocate(mesh%zbar_e_srf(myDim_elem2D+eDim_elem2D)) ! also change bottom thickness at nodes due to partial cell --> bottom ! thickness at nodes is the volume weighted mean of sorounding elemental ! thicknesses - allocate(bottom_node_thickness(myDim_nod2D+eDim_nod2D)) - allocate(zbar_n_bot(myDim_nod2D+eDim_nod2D)) - allocate(zbar_n_srf(myDim_nod2D+eDim_nod2D)) + allocate(mesh%bottom_node_thickness(myDim_nod2D+eDim_nod2D)) + allocate(mesh%zbar_n_bot(myDim_nod2D+eDim_nod2D)) + allocate(mesh%zbar_n_srf(myDim_nod2D+eDim_nod2D)) + + ! reassociate after the allocation (no pointer exists before) + hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode + hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new + zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n + Z_3d_n(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%Z_3d_n + helem(1:mesh%nl-1, 1:myDim_elem2D) => mesh%helem + bottom_elem_thickness(1:myDim_elem2D) => mesh%bottom_elem_thickness + bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_thickness + dhe(1:myDim_elem2D) => mesh%dhe + hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar + hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old + zbar_n(1:mesh%nl) => mesh%zbar_n + Z_n(1:mesh%nl-1) => mesh%Z_n + zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot + zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot + zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf + zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf !___initialize______________________________________________________________ hbar = 0.0_WP @@ -1108,7 +1126,6 @@ subroutine init_stiff_mat_ale(mesh) use o_PARAM use MOD_MESH use g_PARSUP - use o_ARRAYS, only:zbar_e_bot, zbar_e_srf use g_CONFIG implicit none diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index c68565152..233b3f17d 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -11,9 +11,10 @@ subroutine diff_part_hor_redi(tr_num, tracer, mesh) end module module adv_tracers_ale_interface interface - subroutine adv_tracers_ale(tr_num, tracer, mesh) + subroutine adv_tracers_ale(dt, tr_num, tracer, mesh) use mod_mesh use mod_tracer + real(kind=WP), intent(in), target :: dt integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh @@ -142,7 +143,7 @@ subroutine solve_tracers_ale(tracers, mesh) call init_tracers_AB(tr_num, tracers, mesh) ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' - call adv_tracers_ale(tr_num, tracers, mesh) + call adv_tracers_ale(dt, tr_num, tracers, mesh) ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' call diff_tracers_ale(tr_num, tracers, mesh) @@ -188,7 +189,7 @@ end subroutine solve_tracers_ale ! ! !=============================================================================== -subroutine adv_tracers_ale(tr_num, tracers, mesh) +subroutine adv_tracers_ale(dt, tr_num, tracers, mesh) use g_config, only: flag_debug use g_parsup use mod_mesh @@ -200,6 +201,7 @@ subroutine adv_tracers_ale(tr_num, tracers, mesh) ! use adv_tracers_vert_ppm_ale_interface use oce_adv_tra_driver_interfaces implicit none + real(kind=WP), intent(in), target :: dt integer :: node, nz integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh @@ -223,7 +225,7 @@ subroutine adv_tracers_ale(tr_num, tracers, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(UV, wvel, wvel_i, wvel_e, tr_num, tracers, mesh) + call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index a0cd0f863..f83eb04b3 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -8,7 +8,7 @@ subroutine fer_solve_Gamma(mesh) USE MOD_MESH USE o_PARAM - USE o_ARRAYS, ONLY: sigma_xy, fer_gamma, bvfreq, fer_c, fer_K, zbar_n, Z_n, hnode_new, zbar_n_bot + USE o_ARRAYS, ONLY: sigma_xy, fer_gamma, bvfreq, fer_c, fer_K USE g_PARSUP USE g_CONFIG use g_comm_auto @@ -125,7 +125,7 @@ END subroutine fer_solve_Gamma subroutine fer_gamma2vel(mesh) USE MOD_MESH USE o_PARAM - USE o_ARRAYS, ONLY: fer_gamma, fer_uv, helem + USE o_ARRAYS, ONLY: fer_gamma, fer_uv USE g_PARSUP USE g_CONFIG use g_comm_auto @@ -159,7 +159,7 @@ end subroutine fer_gamma2vel subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi USE MOD_MESH USE o_PARAM - USE o_ARRAYS, ONLY: fer_c, fer_k, fer_scal, Ki, bvfreq, MLD1_ind, neutral_slope, coriolis_node, hnode_new, Z_3d_n + USE o_ARRAYS, ONLY: fer_c, fer_k, fer_scal, Ki, bvfreq, MLD1_ind, neutral_slope, coriolis_node USE g_PARSUP USE g_CONFIG use g_comm_auto diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 08035d708..3576ef01f 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -236,42 +236,8 @@ MODULE o_ARRAYS !Isoneutral diffusivities (or xy diffusivities if Redi=.false) real(kind=WP), allocatable :: Ki(:,:) -!_______________________________________________________________________________ -! Arrays added for ALE implementation: -! --> layer thinkness at node and depthlayer for t=n and t=n+1 -real(kind=WP), allocatable,dimension(:,:) :: hnode, hnode_new, zbar_3d_n, Z_3d_n - -! --> layer thinkness at elements, interpolated from hnode -real(kind=WP), allocatable,dimension(:,:) :: helem - -! --> thinkness of bottom elem (important for partial cells) -real(kind=WP), allocatable,dimension(:) :: bottom_elem_thickness -real(kind=WP), allocatable,dimension(:) :: bottom_node_thickness - -! --> The increment of total fluid depth on elements. It is used to update the matrix -real(kind=WP), allocatable,dimension(:) :: dhe - -! --> hbar, hbar_old: correspond to the elevation, but on semi-integer time steps. -real(kind=WP), allocatable,dimension(:) :: hbar, hbar_old - ! --> auxiliary array to store an intermediate part of the rhs computations. real(kind=WP), allocatable,dimension(:) :: ssh_rhs_old !, ssh_rhs_old2 !PS - -! --> auxiliary array to store depth of layers and depth of mid level due to changing -! layer thinkness at every node -real(kind=WP), allocatable,dimension(:) :: zbar_n, Z_n - -! new bottom depth at node and element due to partial cells -real(kind=WP), allocatable,dimension(:) :: zbar_n_bot -real(kind=WP), allocatable,dimension(:) :: zbar_e_bot - -! new depth of cavity-ocean interface at node and element due to partial cells -real(kind=WP), allocatable,dimension(:) :: zbar_n_srf -real(kind=WP), allocatable,dimension(:) :: zbar_e_srf - -! --> multiplication factor for surface boundary condition in -! diff_ver_part_impl_ale(tr_num) between linfs -->=0.0 and noninfs -! (zlevel,zstar...) --> = 1.0 real(kind=WP) :: is_nonlinfs !_______________________________________________________________________________ From f8c3b020c70a0579d8606940d1e537a66c5f7813 Mon Sep 17 00:00:00 2001 From: Claudia Wekerle Date: Thu, 30 Sep 2021 13:16:01 +0200 Subject: [PATCH 380/909] fix bug with reading runoff and SSS --- src/gen_surface_forcing.F90 | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index bfd057638..a58c698ca 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -1099,13 +1099,15 @@ SUBROUTINE sbc_do(mesh) !========================================================================== ! prepare a flag which checks whether to update monthly data (SSS, river runoff) - update_monthly_flag=((day_in_month==num_day_in_month(fleapyear,month) .and. timenew==86400._WP)) + update_monthly_flag=( (day_in_month==num_day_in_month(fleapyear,month) .and. + timenew==86400._WP) .or. mstep==1 ) ! read in SSS for applying SSS restoring if (surf_relax_S > 0._WP) then if (sss_data_source=='CORE1' .or. sss_data_source=='CORE2') then if (update_monthly_flag) then - i=month+1 + i=month + if (mstep > 1) i=i+1 if (i > 12) i=1 if (mype==0) write(*,*) 'Updating SSS restoring data for month ', i call read_other_NetCDF(nm_sss_data_file, 'SALT', i, Ssurf, .true., mesh) @@ -1119,7 +1121,8 @@ SUBROUTINE sbc_do(mesh) if(update_monthly_flag) then if(runoff_climatology) then !climatology monthly mean - i=month+1 + i=month + if (mstep > 1) i=i+1 if (i > 12) i=1 if (mype==0) write(*,*) 'Updating monthly climatology runoff for month ', i filename=trim(nm_runoff_file) @@ -1130,8 +1133,8 @@ SUBROUTINE sbc_do(mesh) else !monthly data - - i=month+1 + i=month + if (mstep > 1) i=i+1 if (i > 12) i=1 if (mype==0) write(*,*) 'Updating monthly runoff for month ', i filename=trim(nm_runoff_file)//cyearnew//'.nc' From d0e8899571d8ec8adb2a4057018dcce3366d73d7 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Thu, 30 Sep 2021 22:06:12 +0200 Subject: [PATCH 381/909] Update gen_surface_forcing.F90 @koldunovn you were right regarding the line splitting --- src/gen_surface_forcing.F90 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index a58c698ca..ad22831e6 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -1099,8 +1099,7 @@ SUBROUTINE sbc_do(mesh) !========================================================================== ! prepare a flag which checks whether to update monthly data (SSS, river runoff) - update_monthly_flag=( (day_in_month==num_day_in_month(fleapyear,month) .and. - timenew==86400._WP) .or. mstep==1 ) + update_monthly_flag=( (day_in_month==num_day_in_month(fleapyear,month) .AND. timenew==86400._WP) .OR. mstep==1 ) ! read in SSS for applying SSS restoring if (surf_relax_S > 0._WP) then From b457327c89bafa8444a3db6c619f30b9d4411b24 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 5 Oct 2021 16:10:21 +0200 Subject: [PATCH 382/909] Update setup.yml Update tests --- setups/test_pi/setup.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/setups/test_pi/setup.yml b/setups/test_pi/setup.yml index e7f20c760..b38c480f0 100644 --- a/setups/test_pi/setup.yml +++ b/setups/test_pi/setup.yml @@ -61,12 +61,12 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.26911274194532003 - salt: 23.9440531023692 - temp: 1.7017743034836539 - sst: 8.532529081624512 - u: -0.0014065854610620704 - v: 0.00014195144238082126 + a_ice: 0.2691276443855294 + salt: 23.944024712806094 + temp: 1.701768707848739 + sst: 8.531522995932146 + u: -0.001407225233294229 + v: 0.00014182969591235959 From 15850b0d4525bc00e603bed75432c933ca6f3401 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 5 Oct 2021 18:08:56 +0200 Subject: [PATCH 383/909] Update fesom2.1.yml Actions are not triggered for whatever reason, try again. --- .github/workflows/fesom2.1.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/fesom2.1.yml b/.github/workflows/fesom2.1.yml index 4facc60cc..6f50d8c61 100644 --- a/.github/workflows/fesom2.1.yml +++ b/.github/workflows/fesom2.1.yml @@ -1,7 +1,7 @@ name: FESOM2 main test -# Controls when the action will run. Triggers the workflow on push or pull request. +# Controls when the action will run. Triggers the workflow on push or pull request. on: [push, pull_request] From 14044ef2e87c6ed044cb6b5ea547192a4c361aaf Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 5 Oct 2021 18:18:01 +0200 Subject: [PATCH 384/909] Update of the icepack tests --- setups/test_pi_icepack/setup.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/setups/test_pi_icepack/setup.yml b/setups/test_pi_icepack/setup.yml index 9180b3d59..b7a18cf82 100644 --- a/setups/test_pi_icepack/setup.yml +++ b/setups/test_pi_icepack/setup.yml @@ -73,13 +73,13 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.3059942958760058 - salt: 23.866224273520945 - temp: 1.7172059436119271 - sst: 8.725966058658427 - u: -0.0014448488412238854 - v: 0.00018596541127645607 - aicen: 0.061198859175201174 + a_ice: 0.30599570824298994 + salt: 23.866195774787034 + temp: 1.717206693389919 + sst: 8.725991935766256 + u: -0.0014448974204450153 + v: 0.00018600030457097512 + aicen: 0.06119914164859799 From 648fe24f475e664cf1b11b371ad04e0d069d3ae8 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 5 Oct 2021 18:19:19 +0200 Subject: [PATCH 385/909] Trying to trigger actions again --- .github/workflows/fesom2_icepack.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/fesom2_icepack.yml b/.github/workflows/fesom2_icepack.yml index aeb50481f..d5681da06 100644 --- a/.github/workflows/fesom2_icepack.yml +++ b/.github/workflows/fesom2_icepack.yml @@ -1,7 +1,7 @@ name: FESOM2_icepack -# Controls when the action will run. Triggers the workflow on push or pull request. +# Controls when the action will run. Triggers the workflow on push or pull request. on: [push, pull_request] From 16c7e140b14ac818d6e878060fca80a3a5cab709 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 7 Oct 2021 20:27:02 +0200 Subject: [PATCH 386/909] continue refactoring. the new derived type t_partit replaces the old module g_PARSUP. a lot of changes since it was used nearly everywhere. the model compiles so far. will have to make it run. --- src/CMakeLists.txt | 2 +- src/MOD_MESH.F90 | 2 + src/MOD_PARTIT.F90 | 189 ++++ src/MOD_READ_BINARY_ARRAYS.F90 | 15 +- src/MOD_TRACER.F90 | 6 + src/MOD_WRITE_BINARY_ARRAYS.F90 | 15 +- src/associate_mesh_ass.h | 68 ++ src/associate_mesh_def.h | 52 ++ src/associate_mesh_ini.h | 71 -- src/associate_part_ass.h | 63 ++ src/associate_part_def.h | 39 + src/cavity_param.F90 | 90 +- src/cpl_driver.F90 | 43 +- src/fvom_main.F90 | 102 ++- src/gen_bulk_formulae.F90 | 34 +- src/gen_comm.F90 | 69 +- src/gen_events.F90 | 10 +- src/gen_forcing_couple.F90 | 209 +++-- src/gen_forcing_init.F90 | 32 +- src/gen_halo_exchange.F90 | 1264 +++++++++++++-------------- src/gen_ic3d.F90 | 270 +++--- src/gen_interpolation.F90 | 31 +- src/gen_model_setup.F90 | 43 +- src/gen_modules_clock.F90 | 11 +- src/gen_modules_cvmix_idemix.F90 | 37 +- src/gen_modules_cvmix_kpp.F90 | 36 +- src/gen_modules_cvmix_pp.F90 | 32 +- src/gen_modules_cvmix_tidal.F90 | 31 +- src/gen_modules_cvmix_tke.F90 | 34 +- src/gen_modules_diag.F90 | 166 ++-- src/gen_modules_gpot.F90 | 22 +- src/gen_modules_partitioning.F90 | 269 +++--- src/gen_modules_read_NetCDF.F90 | 50 +- src/gen_modules_rotate_grid.F90 | 2 - src/gen_support.F90 | 107 ++- src/gen_surface_forcing.F90 | 204 ++--- src/ice_EVP.F90 | 127 +-- src/ice_fct.F90 | 182 ++-- src/ice_maEVP.F90 | 157 ++-- src/ice_oce_coupling.F90 | 70 +- src/ice_setup_step.F90 | 121 +-- src/ice_thermo_oce.F90 | 32 +- src/io_blowup.F90 | 100 ++- src/io_gather.F90 | 88 +- src/io_meandata.F90 | 496 +++++------ src/io_mesh_info.F90 | 304 +++---- src/io_netcdf_workaround_module.F90 | 11 +- src/io_restart.F90 | 167 ++-- src/oce_adv_tra_driver.F90 | 121 +-- src/oce_adv_tra_fct.F90 | 84 +- src/oce_adv_tra_hor.F90 | 97 +- src/oce_adv_tra_ver.F90 | 133 +-- src/oce_ale.F90 | 394 +++++---- src/oce_ale_mixing_kpp.F90 | 132 +-- src/oce_ale_mixing_pp.F90 | 18 +- src/oce_ale_pressure_bv.F90 | 688 ++++++++------- src/oce_ale_tracer.F90 | 185 ++-- src/oce_ale_vel_rhs.F90 | 37 +- src/oce_dyn.F90 | 317 ++++--- src/oce_fer_gm.F90 | 46 +- src/oce_local.F90 | 28 +- src/oce_mesh.F90 | 300 ++++--- src/oce_mo_conv.F90 | 15 +- src/oce_muscl_adv.F90 | 40 +- src/oce_setup_step.F90 | 137 +-- src/oce_shortwave_pene.F90 | 12 +- src/oce_spp.F90 | 28 +- src/oce_tracer_mod.F90 | 103 ++- src/oce_vel_rhs_vinv.F90 | 40 +- src/toy_channel_soufflet.F90 | 75 +- src/write_step_info.F90 | 56 +- 71 files changed, 4940 insertions(+), 3721 deletions(-) create mode 100644 src/MOD_PARTIT.F90 create mode 100644 src/associate_mesh_ass.h create mode 100644 src/associate_mesh_def.h delete mode 100644 src/associate_mesh_ini.h create mode 100644 src/associate_part_ass.h create mode 100644 src/associate_part_def.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 5a0417889..fd13d5d2e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -29,7 +29,7 @@ add_custom_command(OUTPUT 5303B6F4_E4F4_45B2_A6E5_8E2B9FB5CDC4 ${FESOM_GENERATED #if(${FESOM_STANDALONE}) # list(REMOVE_ITEM sources_Fortran ${src_home}/cpl_driver.F90) #endif() -list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90) +list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90) list(REMOVE_ITEM sources_C ${src_home}/fort_part.c) # depends on the metis library diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index f41e42fc4..4eb0c23e1 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -140,6 +140,7 @@ MODULE MOD_MESH ! Unformatted writing for t_mesh subroutine write_t_mesh(mesh, unit, iostat, iomsg) + IMPLICIT NONE class(t_mesh), intent(in) :: mesh integer, intent(in) :: unit integer, intent(out) :: iostat @@ -232,6 +233,7 @@ end subroutine write_t_mesh ! Unformatted reading for t_mesh subroutine read_t_mesh(mesh, unit, iostat, iomsg) + IMPLICIT NONE class(t_mesh), intent(inout) :: mesh integer, intent(in) :: unit integer, intent(out) :: iostat diff --git a/src/MOD_PARTIT.F90 b/src/MOD_PARTIT.F90 new file mode 100644 index 000000000..bd3b7dec2 --- /dev/null +++ b/src/MOD_PARTIT.F90 @@ -0,0 +1,189 @@ +!========================================================== +! Variables to organize parallel work +module MOD_PARTIT +USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE +include 'mpif.h' +integer, parameter :: MAX_LAENDERECK=16 +integer, parameter :: MAX_NEIGHBOR_PARTITIONS=32 + + +type com_struct + integer :: rPEnum ! the number of PE I receive info from + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: rPE ! their list + integer, dimension(MAX_NEIGHBOR_PARTITIONS+1) :: rptr ! allocatables to the list of nodes + integer, dimension(:), allocatable :: rlist ! the list of nodes + integer :: sPEnum ! send part + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sPE + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sptr + integer, dimension(:), allocatable :: slist + integer, dimension(:), allocatable :: req ! request for MPI_Wait + integer :: nreq ! number of requests for MPI_Wait + ! (to combine halo exchange of several fields) + contains + procedure WRITE_T_COM_STRUCT + procedure READ_T_COM_STRUCT + generic :: write(unformatted) => WRITE_T_COM_STRUCT + generic :: read(unformatted) => READ_T_COM_STRUCT +end type com_struct + +TYPE T_PARTIT + integer :: MPI_COMM_FESOM ! FESOM communicator (for ocean only runs if often a copy of MPI_COMM_WORLD) + + type(com_struct) :: com_nod2D + type(com_struct) :: com_elem2D + type(com_struct) :: com_elem2D_full + + ! MPI Datatypes for interface exchange + ! Element fields (2D; 2D integer; 3D with nl-1 or nl levels, 1 - 4 values) + ! small halo and / or full halo + !!! s(r)_mpitype_* are constructed during the runtime ans shall not be dumped!!! + integer, allocatable :: s_mpitype_elem2D(:,:), r_mpitype_elem2D(:,:) + integer, allocatable :: s_mpitype_elem2D_full_i(:), r_mpitype_elem2D_full_i(:) + integer, allocatable :: s_mpitype_elem2D_full(:,:), r_mpitype_elem2D_full(:,:) + integer, allocatable :: s_mpitype_elem3D(:,:,:), r_mpitype_elem3D(:,:,:) + integer, allocatable :: s_mpitype_elem3D_full(:,:,:),r_mpitype_elem3D_full(:,:,:) + + ! Nodal fields (2D; 2D integer; 3D with nl-1 or nl levels, one, two, or three values) + integer, allocatable :: s_mpitype_nod2D(:), r_mpitype_nod2D(:) + integer, allocatable :: s_mpitype_nod2D_i(:), r_mpitype_nod2D_i(:) + integer, allocatable :: s_mpitype_nod3D(:,:,:), r_mpitype_nod3D(:,:,:) + + ! general MPI part + integer :: MPIERR + integer :: npes + integer :: mype + integer :: maxPEnum=100 + integer, allocatable, dimension(:) :: part + + ! Mesh partition + integer :: myDim_nod2D, eDim_nod2D + integer, allocatable, dimension(:) :: myList_nod2D + integer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D + integer, allocatable, dimension(:) :: myList_elem2D + integer :: myDim_edge2D, eDim_edge2D + integer, allocatable, dimension(:) :: myList_edge2D + + integer :: pe_status = 0 ! if /=0 then something is wrong + !!! remPtr_* are constructed during the runtime ans shall not be dumped!!! + integer, allocatable :: remPtr_nod2D(:), remList_nod2D(:) + integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) + + logical :: elem_full_flag + contains + procedure WRITE_T_PARTIT + procedure READ_T_PARTIT + generic :: write(unformatted) => WRITE_T_PARTIT + generic :: read(unformatted) => READ_T_PARTIT +END TYPE T_PARTIT +contains + +! Unformatted writing for COM_STRUCT TYPE +subroutine WRITE_T_COM_STRUCT(tstruct, unit, iostat, iomsg) + IMPLICIT NONE + class(COM_STRUCT), intent(in) :: tstruct + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + write(unit, iostat=iostat, iomsg=iomsg) tstruct%rPEnum + call write1d_int_static(tstruct%rPE, unit, iostat, iomsg) + call write1d_int_static(tstruct%rptr, unit, iostat, iomsg) + call write_bin_array(tstruct%rlist, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tstruct%sPEnum + call write1d_int_static(tstruct%sPE, unit, iostat, iomsg) + call write1d_int_static(tstruct%sptr, unit, iostat, iomsg) + call write_bin_array(tstruct%slist, unit, iostat, iomsg) + ! req is constructed during the runtime + ! call write_bin_array(tstruct%req, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tstruct%nreq +end subroutine WRITE_T_COM_STRUCT + +subroutine READ_T_COM_STRUCT(tstruct, unit, iostat, iomsg) + IMPLICIT NONE + class(COM_STRUCT), intent(inout) :: tstruct + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + read(unit, iostat=iostat, iomsg=iomsg) tstruct%rPEnum + call read1d_int_static(tstruct%rPE, unit, iostat, iomsg) + call read1d_int_static(tstruct%rptr, unit, iostat, iomsg) + call read_bin_array(tstruct%rlist, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) tstruct%sPEnum + call read1d_int_static(tstruct%sPE, unit, iostat, iomsg) + call read1d_int_static(tstruct%sptr, unit, iostat, iomsg) + call read_bin_array(tstruct%slist, unit, iostat, iomsg) +! req is constructed during the runtime +! call read_bin_array(tstruct%req, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) tstruct%nreq +end subroutine READ_T_COM_STRUCT + +! Unformatted writing for T_PARTIT +subroutine WRITE_T_PARTIT(partit, unit, iostat, iomsg) + IMPLICIT NONE + class(T_PARTIT), intent(in) :: partit + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + write(unit, iostat=iostat, iomsg=iomsg) partit%com_nod2D + write(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D + write(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D_full + + write(unit, iostat=iostat, iomsg=iomsg) partit%npes + write(unit, iostat=iostat, iomsg=iomsg) partit%mype + write(unit, iostat=iostat, iomsg=iomsg) partit%maxPEnum + call write_bin_array(partit%part, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_nod2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_nod2D + call write_bin_array(partit%myList_nod2D, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_elem2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_elem2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eXDim_elem2D + call write_bin_array(partit%myList_elem2D, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_edge2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_edge2D + call write_bin_array(partit%myList_edge2D, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) partit%pe_status +end subroutine WRITE_T_PARTIT +! Unformatted reading for T_PARTIT +subroutine READ_T_PARTIT(partit, unit, iostat, iomsg) + IMPLICIT NONE + class(T_PARTIT), intent(inout) :: partit + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + read(unit, iostat=iostat, iomsg=iomsg) partit%com_nod2D + read(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D_full + + read(unit, iostat=iostat, iomsg=iomsg) partit%npes + read(unit, iostat=iostat, iomsg=iomsg) partit%mype + read(unit, iostat=iostat, iomsg=iomsg) partit%maxPEnum + call read_bin_array(partit%part, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_nod2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_nod2D + call read_bin_array(partit%myList_nod2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eXDim_elem2D + call read_bin_array(partit%myList_elem2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_edge2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_edge2D + call read_bin_array(partit%myList_edge2D, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) partit%pe_status +end subroutine READ_T_PARTIT + +end module MOD_PARTIT diff --git a/src/MOD_READ_BINARY_ARRAYS.F90 b/src/MOD_READ_BINARY_ARRAYS.F90 index 95f71c584..87f0b2389 100644 --- a/src/MOD_READ_BINARY_ARRAYS.F90 +++ b/src/MOD_READ_BINARY_ARRAYS.F90 @@ -5,7 +5,7 @@ MODULE MOD_READ_BINARY_ARRAYS use o_PARAM private -public :: read_bin_array +public :: read_bin_array, read1d_int_static INTERFACE read_bin_array MODULE PROCEDURE read1d_real, read1d_int, read1d_char, read2d_real, read2d_int, read3d_real, read3d_int END INTERFACE @@ -49,6 +49,19 @@ subroutine read1d_char(arr, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) end subroutine read1d_char +subroutine read1d_int_static(arr, unit, iostat, iomsg) + IMPLICIT NONE + integer, intent(inout) :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_int_static + subroutine read2d_real(arr, unit, iostat, iomsg) real(kind=WP), intent(inout), allocatable :: arr(:,:) integer, intent(in) :: unit diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index 35e35020f..8e8247830 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -85,6 +85,7 @@ MODULE MOD_TRACER ! Unformatted writing for T_TRACER_DATA subroutine WRITE_T_TRACER_DATA(tdata, unit, iostat, iomsg) + IMPLICIT NONE class(T_TRACER_DATA), intent(in) :: tdata integer, intent(in) :: unit integer, intent(out) :: iostat @@ -107,6 +108,7 @@ end subroutine WRITE_T_TRACER_DATA ! Unformatted reading for T_TRACER_DATA subroutine READ_T_TRACER_DATA(tdata, unit, iostat, iomsg) + IMPLICIT NONE class(T_TRACER_DATA), intent(inout) :: tdata integer, intent(in) :: unit integer, intent(out) :: iostat @@ -129,6 +131,7 @@ end subroutine READ_T_TRACER_DATA ! Unformatted writing for T_TRACER_WORK subroutine WRITE_T_TRACER_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE class(T_TRACER_WORK), intent(in) :: twork integer, intent(in) :: unit integer, intent(out) :: iostat @@ -153,6 +156,7 @@ end subroutine WRITE_T_TRACER_WORK ! Unformatted reading for T_TRACER_WORK subroutine READ_T_TRACER_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE class(T_TRACER_WORK), intent(inout) :: twork integer, intent(in) :: unit integer, intent(out) :: iostat @@ -177,6 +181,7 @@ end subroutine READ_T_TRACER_WORK ! Unformatted writing for T_TRACER subroutine WRITE_T_TRACER(tracer, unit, iostat, iomsg) + IMPLICIT NONE class(T_TRACER), intent(in) :: tracer integer, intent(in) :: unit integer, intent(out) :: iostat @@ -197,6 +202,7 @@ end subroutine WRITE_T_TRACER ! Unformatted reading for T_TRACER subroutine READ_T_TRACER(tracer, unit, iostat, iomsg) + IMPLICIT NONE class(T_TRACER), intent(inout) :: tracer integer, intent(in) :: unit integer, intent(out) :: iostat diff --git a/src/MOD_WRITE_BINARY_ARRAYS.F90 b/src/MOD_WRITE_BINARY_ARRAYS.F90 index c76d39fb9..4f03b5cea 100644 --- a/src/MOD_WRITE_BINARY_ARRAYS.F90 +++ b/src/MOD_WRITE_BINARY_ARRAYS.F90 @@ -5,7 +5,7 @@ MODULE MOD_WRITE_BINARY_ARRAYS use o_PARAM private -public :: write_bin_array +public :: write_bin_array, write1d_int_static INTERFACE write_bin_array MODULE PROCEDURE write1d_real, write1d_int, write1d_char, write2d_real, write2d_int, write3d_real, write3d_int END INTERFACE @@ -62,6 +62,19 @@ subroutine write1d_char(arr, unit, iostat, iomsg) end if end subroutine write1d_char +subroutine write1d_int_static(arr, unit, iostat, iomsg) + IMPLICIT NONE + integer, intent(in) :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine write1d_int_static + subroutine write2d_real(arr, unit, iostat, iomsg) real(kind=WP), intent(in), allocatable :: arr(:,:) integer, intent(in) :: unit diff --git a/src/associate_mesh_ass.h b/src/associate_mesh_ass.h new file mode 100644 index 000000000..591aef4a4 --- /dev/null +++ b/src/associate_mesh_ass.h @@ -0,0 +1,68 @@ +nod2D => mesh%nod2D +elem2D => mesh%elem2D +edge2D => mesh%edge2D +edge2D_in => mesh%edge2D_in +ocean_area => mesh%ocean_area +nl => mesh%nl +coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D +geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D +elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes +edges(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edges +edge_tri(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_tri +elem_edges(1:3,1:myDim_elem2D) => mesh%elem_edges +elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_area +edge_dxdy(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_dxdy +edge_cross_dxdy(1:4,1:myDim_edge2D+eDim_edge2D) => mesh%edge_cross_dxdy +elem_cos(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_cos +metric_factor(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%metric_factor +elem_neighbors(1:3,1:myDim_elem2D) => mesh%elem_neighbors +nod_in_elem2D => mesh%nod_in_elem2D ! (maxval(rmax),myDim_nod2D+eDim_nod2D) +x_corners => mesh%x_corners ! (myDim_nod2D, maxval(rmax)) +y_corners => mesh%y_corners ! (myDim_nod2D, maxval(rmax)) +nod_in_elem2D_num(1:myDim_nod2D+eDim_nod2D) => mesh%nod_in_elem2D_num +depth(1:myDim_nod2D+eDim_nod2D) => mesh%depth +gradient_vec(1:6,1:myDim_elem2D) => mesh%gradient_vec +gradient_sca(1:6,1:myDim_elem2D) => mesh%gradient_sca +bc_index_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%bc_index_nod2D +zbar(1:mesh%nl) => mesh%zbar +Z(1:mesh%nl-1) => mesh%Z +elem_depth => mesh%elem_depth ! never used, not even allocated +nlevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%nlevels +nlevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D +nlevels_nod2D_min(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D_min +area(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area +areasvol(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol +area_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area_inv +areasvol_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol_inv +mesh_resolution(1:myDim_nod2d+eDim_nod2D) => mesh%mesh_resolution +ssh_stiff => mesh%ssh_stiff +lump2d_north(1:myDim_nod2d) => mesh%lump2d_north +lump2d_south(1:myDim_nod2d) => mesh%lump2d_south +cavity_flag_n(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_flag_n +cavity_flag_e(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%cavity_flag_e +!!$cavity_lev_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_lev_nod2D +!!$cavity_lev_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%cavity_lev_elem2D +cavity_depth(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_depth +ulevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%ulevels +ulevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D +ulevels_nod2D_max(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D_max +nn_num(1:myDim_nod2D) => mesh%nn_num +nn_pos(1:mesh%nn_size, 1:myDim_nod2D) => mesh%nn_pos +hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode +hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new +zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n +Z_3d_n(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%Z_3d_n +helem(1:mesh%nl-1, 1:myDim_elem2D) => mesh%helem +bottom_elem_thickness(1:myDim_elem2D) => mesh%bottom_elem_thickness +bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_thickness +dhe(1:myDim_elem2D) => mesh%dhe +hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar +hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old +zbar_n(1:mesh%nl) => mesh%zbar_n +Z_n(1:mesh%nl-1) => mesh%Z_n +zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot +zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot +zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf +zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf + + diff --git a/src/associate_mesh_def.h b/src/associate_mesh_def.h new file mode 100644 index 000000000..cf146d70b --- /dev/null +++ b/src/associate_mesh_def.h @@ -0,0 +1,52 @@ +integer , pointer :: nod2D +integer , pointer :: elem2D +integer , pointer :: edge2D +integer , pointer :: edge2D_in +real(kind=WP) , pointer :: ocean_area +real(kind=WP) , pointer :: ocean_areawithcav +integer , pointer :: nl +integer , pointer :: nn_size +real(kind=WP), dimension(:,:), pointer :: coord_nod2D, geo_coord_nod2D +integer, dimension(:,:) , pointer :: elem2D_nodes +integer, dimension(:,:) , pointer :: edges +integer, dimension(:,:) , pointer :: edge_tri +integer, dimension(:,:) , pointer :: elem_edges +real(kind=WP), dimension(:) , pointer :: elem_area +real(kind=WP), dimension(:,:), pointer :: edge_dxdy, edge_cross_dxdy +real(kind=WP), dimension(:) , pointer :: elem_cos, metric_factor +integer, dimension(:,:), pointer :: elem_neighbors +integer, dimension(:,:), pointer :: nod_in_elem2D +real(kind=WP), dimension(:,:), pointer :: x_corners, y_corners +integer, dimension(:) , pointer :: nod_in_elem2D_num +real(kind=WP), dimension(:) , pointer :: depth +real(kind=WP), dimension(:,:), pointer :: gradient_vec +real(kind=WP), dimension(:,:), pointer :: gradient_sca +integer, dimension(:) , pointer :: bc_index_nod2D +real(kind=WP), dimension(:) , pointer :: zbar, Z, elem_depth +integer, dimension(:) , pointer :: nlevels, nlevels_nod2D, nlevels_nod2D_min +real(kind=WP), dimension(:,:), pointer :: area, area_inv, areasvol, areasvol_inv +real(kind=WP), dimension(:) , pointer :: mesh_resolution +real(kind=WP), dimension(:) , pointer :: lump2d_north, lump2d_south +type(sparse_matrix) , pointer :: ssh_stiff +integer, dimension(:) , pointer :: cavity_flag_n, cavity_flag_e +real(kind=WP), dimension(:) , pointer :: cavity_depth +integer, dimension(:) , pointer :: ulevels, ulevels_nod2D, ulevels_nod2D_max +integer, dimension(:) , pointer :: nn_num +integer, dimension(:,:), pointer :: nn_pos + +real(kind=WP), dimension(:,:), pointer :: hnode +real(kind=WP), dimension(:,:), pointer :: hnode_new +real(kind=WP), dimension(:,:), pointer :: zbar_3d_n +real(kind=WP), dimension(:,:), pointer :: Z_3d_n +real(kind=WP), dimension(:,:), pointer :: helem +real(kind=WP), dimension(:) , pointer :: bottom_elem_thickness +real(kind=WP), dimension(:) , pointer :: bottom_node_thickness +real(kind=WP), dimension(:) , pointer :: dhe +real(kind=WP), dimension(:) , pointer :: hbar +real(kind=WP), dimension(:) , pointer :: hbar_old +real(kind=WP), dimension(:) , pointer :: zbar_n +real(kind=WP), dimension(:) , pointer :: Z_n +real(kind=WP), dimension(:) , pointer :: zbar_n_bot +real(kind=WP), dimension(:) , pointer :: zbar_e_bot +real(kind=WP), dimension(:) , pointer :: zbar_n_srf +real(kind=WP), dimension(:) , pointer :: zbar_e_srf diff --git a/src/associate_mesh_ini.h b/src/associate_mesh_ini.h deleted file mode 100644 index 2a89de07a..000000000 --- a/src/associate_mesh_ini.h +++ /dev/null @@ -1,71 +0,0 @@ -integer , pointer :: nod2D -integer , pointer :: elem2D -integer , pointer :: edge2D -integer , pointer :: edge2D_in -real(kind=WP) , pointer :: ocean_area -integer , pointer :: nl -real(kind=WP), dimension(:,:), pointer :: coord_nod2D, geo_coord_nod2D -integer, dimension(:,:) , pointer :: elem2D_nodes -integer, dimension(:,:) , pointer :: edges -integer, dimension(:,:) , pointer :: edge_tri -integer, dimension(:,:) , pointer :: elem_edges -real(kind=WP), dimension(:) , pointer :: elem_area -real(kind=WP), dimension(:,:), pointer :: edge_dxdy, edge_cross_dxdy -real(kind=WP), dimension(:) , pointer :: elem_cos, metric_factor -integer, dimension(:,:), pointer :: elem_neighbors -integer, dimension(:,:), pointer :: nod_in_elem2D -real(kind=WP), dimension(:,:), pointer :: x_corners, y_corners -integer, dimension(:) , pointer :: nod_in_elem2D_num -real(kind=WP), dimension(:) , pointer :: depth -real(kind=WP), dimension(:,:), pointer :: gradient_vec -real(kind=WP), dimension(:,:), pointer :: gradient_sca -integer, dimension(:) , pointer :: bc_index_nod2D -real(kind=WP), dimension(:) , pointer :: zbar, Z, elem_depth -integer, dimension(:) , pointer :: nlevels, nlevels_nod2D -real(kind=WP), dimension(:,:), pointer :: area, area_inv -real(kind=WP), dimension(:) , pointer :: mesh_resolution -integer, dimension(:) , pointer :: cavity_flag, ulevels_nod2D, ulevels -real(kind=WP), dimension(:) , pointer :: cavity_depth -type(sparse_matrix) , pointer :: ssh_stiff - -nod2D => mesh%nod2D -elem2D => mesh%elem2D -edge2D => mesh%edge2D -edge2D_in => mesh%edge2D_in -ocean_area => mesh%ocean_area -nl => mesh%nl - -coord_nod2D => mesh%coord_nod2D -geo_coord_nod2D => mesh%geo_coord_nod2D -elem2D_nodes => mesh%elem2D_nodes -edges => mesh%edges -edge_tri => mesh%edge_tri -elem_edges => mesh%elem_edges -elem_area => mesh%elem_area -edge_dxdy => mesh%edge_dxdy -edge_cross_dxdy => mesh%edge_cross_dxdy -elem_cos => mesh%elem_cos -metric_factor => mesh%metric_factor -elem_neighbors => mesh%elem_neighbors -nod_in_elem2D => mesh%nod_in_elem2D -x_corners => mesh%x_corners -y_corners => mesh%y_corners -nod_in_elem2D_num => mesh%nod_in_elem2D_num -depth => mesh%depth -gradient_vec => mesh%gradient_vec -gradient_sca => mesh%gradient_sca -bc_index_nod2D => mesh%bc_index_nod2D -zbar => mesh%zbar -Z => mesh%Z -elem_depth => mesh%elem_depth -nlevels => mesh%nlevels -nlevels_nod2D => mesh%nlevels_nod2D -area => mesh%area -area_inv => mesh%area_inv -mesh_resolution => mesh%mesh_resolution -ssh_stiff => mesh%ssh_stiff -!!$cavity_flag_n => mesh%cavity_flag_n -!!$cavity_flag_e => mesh%cavity_flag_e -ulevels_nod2D => mesh%ulevels_nod2D -ulevels => mesh%ulevels -cavity_depth => mesh%cavity_depth diff --git a/src/associate_part_ass.h b/src/associate_part_ass.h new file mode 100644 index 000000000..33d9f27d0 --- /dev/null +++ b/src/associate_part_ass.h @@ -0,0 +1,63 @@ +MPI_COMM_FESOM => partit%MPI_COMM_FESOM +com_nod2D => partit%com_nod2D +com_elem2D => partit%com_elem2D +com_elem2D_full => partit%com_elem2D_full +myDim_nod2D => partit%myDim_nod2D +eDim_nod2D => partit%eDim_nod2D +myDim_elem2D => partit%myDim_elem2D +eDim_elem2D => partit%eDim_elem2D +eXDim_elem2D => partit%eXDim_elem2D +myDim_edge2D => partit%myDim_edge2D +eDim_edge2D => partit%eDim_edge2D +pe_status => partit%pe_status +elem_full_flag => partit%elem_full_flag +MPIERR => partit%MPIERR +npes => partit%npes +mype => partit%mype +maxPEnum => partit%maxPEnum + +myList_nod2D (1:myDim_nod2D +eDim_nod2D) => partit%myList_nod2D +myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => partit%myList_elem2D +myList_edge2D(1:myDim_edge2D+eDim_edge2D) => partit%myList_edge2D + +lb=lbound(partit%s_mpitype_elem3D, 2) +ub=ubound(partit%s_mpitype_elem3D, 2) + +if (allocated(partit%remPtr_nod2D)) then + remPtr_nod2D (1:npes) => partit%remPtr_nod2D + remList_nod2D (1:remPtr_nod2D(npes)) => partit%remList_nod2D +end if + +if (allocated(partit%remPtr_elem2D)) then +remPtr_elem2D (1:npes) => partit%remPtr_elem2D +remList_elem2D(1:remPtr_elem2D(npes)) => partit%remList_elem2D +end if + +s_mpitype_elem2D(1:com_elem2D%sPEnum, 1:4) => partit%s_mpitype_elem2D +r_mpitype_elem2D(1:com_elem2D%rPEnum, 1:4) => partit%r_mpitype_elem2D + +s_mpitype_elem2D_full_i(1:com_elem2D_full%sPEnum) => partit%s_mpitype_elem2D_full_i +r_mpitype_elem2D_full_i(1:com_elem2D_full%rPEnum) => partit%r_mpitype_elem2D_full_i + +s_mpitype_elem2D_full(1:com_elem2D_full%sPEnum, 1:4) => partit%s_mpitype_elem2D_full +r_mpitype_elem2D_full(1:com_elem2D_full%rPEnum, 1:4) => partit%r_mpitype_elem2D_full + +s_mpitype_elem3D(1:com_elem2D%sPEnum, lb:ub, 1:4) => partit%s_mpitype_elem3D +r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D + +s_mpitype_elem3D_full(1:com_elem2D_full%sPEnum, lb:ub, 1:4) => partit%s_mpitype_elem3D_full +r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D_full + +r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D +r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D_full + +s_mpitype_nod2D(1:com_nod2D%sPEnum) => partit%s_mpitype_nod2D +r_mpitype_nod2D(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D + +s_mpitype_nod2D_i(1:com_nod2D%sPEnum) => partit%s_mpitype_nod2D_i +r_mpitype_nod2D_i(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D_i + +s_mpitype_nod3D(1:com_nod2D%sPEnum, lb:ub, 1:3) => partit%s_mpitype_nod3D +r_mpitype_nod3D(1:com_nod2D%rPEnum, lb:ub, 1:3) => partit%r_mpitype_nod3D + +part(1:npes+1) => partit%part diff --git a/src/associate_part_def.h b/src/associate_part_def.h new file mode 100644 index 000000000..42145248e --- /dev/null +++ b/src/associate_part_def.h @@ -0,0 +1,39 @@ + + integer, pointer :: MPI_COMM_FESOM ! FESOM communicator (for ocean only runs if often a copy of MPI_COMM_WORLD) + type(com_struct), pointer :: com_nod2D + type(com_struct), pointer :: com_elem2D + type(com_struct), pointer :: com_elem2D_full + integer :: ub, lb ! to work with r(s)_mpitype_elem3D(nod3D) + + integer, dimension(:), pointer :: s_mpitype_edge2D, r_mpitype_edge2D + integer, dimension(:,:), pointer :: s_mpitype_elem2D, r_mpitype_elem2D + integer, dimension(:), pointer :: s_mpitype_elem2D_full_i, r_mpitype_elem2D_full_i + integer, dimension(:,:), pointer :: s_mpitype_elem2D_full, r_mpitype_elem2D_full + integer, dimension(:,:,:), pointer :: s_mpitype_elem3D, r_mpitype_elem3D + integer, dimension(:,:,:), pointer :: s_mpitype_elem3D_full, r_mpitype_elem3D_full + + integer, dimension(:), pointer :: s_mpitype_nod2D, r_mpitype_nod2D + integer, dimension(:), pointer :: s_mpitype_nod2D_i, r_mpitype_nod2D_i + integer, dimension(:,:,:), pointer :: s_mpitype_nod3D, r_mpitype_nod3D + + integer, pointer :: MPIERR + integer, pointer :: npes + integer, pointer :: mype + integer, pointer :: maxPEnum + + integer, dimension(:), pointer :: part + + ! Mesh partition + integer, pointer :: myDim_nod2D, eDim_nod2D + integer, dimension(:), pointer :: myList_nod2D + integer, pointer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D + integer, dimension(:), pointer :: myList_elem2D + integer, pointer :: myDim_edge2D, eDim_edge2D + integer, dimension(:), pointer :: myList_edge2D + + integer, pointer :: pe_status + + integer, dimension(:), pointer :: remPtr_nod2D(:), remList_nod2D(:) + integer, dimension(:), pointer :: remPtr_elem2D(:), remList_elem2D(:) + + logical, pointer :: elem_full_flag diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 946996802..24c91bfb5 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -1,10 +1,12 @@ module cavity_heat_water_fluxes_3eq_interface interface - subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) + subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) use mod_mesh + use mod_partit use mod_tracer - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module @@ -15,19 +17,23 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) ! that have at least one cavity nodes as nearest neighbour. ! Than compute for all cavity points (ulevels_nod2D>1), which is the closest ! cavity line point to that point --> use their coordinates and depth -subroutine compute_nrst_pnt2cavline(mesh) +subroutine compute_nrst_pnt2cavline(partit, mesh) use MOD_MESH + use MOD_PARTIT use o_PARAM , only: WP - use g_PARSUP implicit none - type(t_mesh), intent(inout) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh integer :: node, kk, elnodes(3), gnode, aux_idx integer, allocatable, dimension(:) :: cavl_idx, lcl_cavl_idx real(kind=WP), allocatable, dimension(:) :: cavl_lon, cavl_lat, cavl_dep,lcl_cavl_lon, lcl_cavl_lat, lcl_cavl_dep real(kind=WP) :: aux_x, aux_y, aux_d, aux_dmin -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ if (mype==0) write(*,*) ' --> compute cavity line ' @@ -129,16 +135,17 @@ end subroutine compute_nrst_pnt2cavline ! adjusted for use in FESOM by Ralph Timmermann, 16.02.2011 ! Reviewed by ? ! adapted by P. SCholz for FESOM2.0 -subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) +subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_PARAM , only: density_0, WP use o_ARRAYS, only: heat_flux, water_flux, Unode, density_m_rho0,density_ref use i_ARRAYS, only: net_heat_flux, fresh_wa_flux - use g_PARSUP implicit none !___________________________________________________________________________ - type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(in), target :: tracers real (kind=WP) :: temp,sal,tin,zice real (kind=WP) :: rhow, rhor, rho @@ -178,7 +185,10 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, mesh) ! oomw= -30. ! oofw= -2.5 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ do node=1,myDim_nod2D !+eDim_nod2D @@ -314,23 +324,27 @@ end subroutine cavity_heat_water_fluxes_3eq ! Compute the heat and freshwater fluxes under ice cavity using simple 2equ. ! Coded by Adriana Huerta-Casas ! Reviewed by Qiang Wang -subroutine cavity_heat_water_fluxes_2eq(tracers, mesh) +subroutine cavity_heat_water_fluxes_2eq(tracers, partit, mesh) use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_PARAM , only: WP use o_ARRAYS, only: heat_flux, water_flux use i_ARRAYS, only: net_heat_flux, fresh_wa_flux - use g_PARSUP implicit none - type(t_mesh), intent(inout) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(in), target :: tracers integer :: node, nzmin real(kind=WP) :: gama, L, aux real(kind=WP) :: c2, c3, c4, c5, c6 real(kind=WP) :: t_i, s_i, p, t_fz -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! parameter for computing heat and water fluxes @@ -364,20 +378,24 @@ end subroutine cavity_heat_water_fluxes_2eq !_______________________________________________________________________________ ! Compute the momentum fluxes under ice cavity ! Moved to this separated routine by Qiang, 20.1.2012 -subroutine cavity_momentum_fluxes(mesh) +subroutine cavity_momentum_fluxes(partit, mesh) use MOD_MESH + use MOD_PARTIT use o_PARAM , only: density_0, C_d, WP use o_ARRAYS, only: UV, Unode, stress_surf, stress_node_surf - use i_ARRAYS, only: u_w, v_w - use g_PARSUP + use i_ARRAYS, only: u_w, v_w implicit none !___________________________________________________________________________ - type(t_mesh), intent(inout) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh integer :: elem, elnodes(3), nzmin, node real(kind=WP) :: aux -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ do elem=1,myDim_elem2D @@ -409,15 +427,19 @@ end subroutine cavity_momentum_fluxes ! ! !_______________________________________________________________________________ -subroutine cavity_ice_clean_vel(mesh) +subroutine cavity_ice_clean_vel(partit, mesh) use MOD_MESH + use MOD_PARTIT use i_ARRAYS, only: U_ice, V_ice - use g_PARSUP implicit none - type(t_mesh), intent(inout) , target :: mesh - integer :: node + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + integer :: node -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" do node=1,myDim_nod2d+eDim_nod2d if(ulevels_nod2D(node)>1) then @@ -429,15 +451,19 @@ end subroutine cavity_ice_clean_vel ! ! !_______________________________________________________________________________ -subroutine cavity_ice_clean_ma(mesh) +subroutine cavity_ice_clean_ma(partit, mesh) use MOD_MESH + use MOD_PARTIT use i_ARRAYS, only: m_ice, m_snow, a_ice - use g_PARSUP implicit none - type(t_mesh), intent(inout) , target :: mesh - integer :: node + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + integer :: node -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" do node=1,myDim_nod2d+eDim_nod2d if(ulevels_nod2D(node)>1) then @@ -469,7 +495,7 @@ end subroutine dist_on_earth ! [oC] (TIN) bezogen auf den in-situ Druck[dbar] (PRES) mit Hilfe ! eines Iterationsverfahrens aus. subroutine potit(salz,pt,pres,rfpres,tin) - use o_PARAM , only: WP + use o_PARAM , only: WP integer iter real(kind=WP) :: salz,pt,pres,rfpres,tin real(kind=WP) :: epsi, pt1,ptd,pttmpr @@ -502,7 +528,7 @@ end subroutine potit ! PRES = 10000.000 dbar ! RFPRES = 0.000 dbar real(kind=WP) function pttmpr(salz,temp,pres,rfpres) - use o_PARAM , only: WP + use o_PARAM , only: WP real(kind=WP) :: salz,temp,pres,rfpres real(kind=WP) :: p,t,dp,dt,q diff --git a/src/cpl_driver.F90 b/src/cpl_driver.F90 index c87a83522..ccefeb7f8 100755 --- a/src/cpl_driver.F90 +++ b/src/cpl_driver.F90 @@ -15,7 +15,6 @@ module cpl_driver use mod_oasis ! oasis module use g_config, only : dt use o_param, only : rad - use g_PARSUP implicit none save ! @@ -93,7 +92,7 @@ module cpl_driver contains subroutine cpl_oasis3mct_init( localCommunicator ) - implicit none + implicit none save !------------------------------------------------------------------- @@ -109,7 +108,7 @@ subroutine cpl_oasis3mct_init( localCommunicator ) ! !-------------------------------------------------------------------- ! - + #ifdef VERBOSE print *, '=================================================' print *, 'cpl_oasis3mct_init : coupler initialization for OASIS3-MCT' @@ -158,7 +157,7 @@ end subroutine cpl_oasis3mct_init ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - subroutine cpl_oasis3mct_define_unstr(mesh) + subroutine cpl_oasis3mct_define_unstr(partit, mesh) #ifdef __oifs use mod_oasis_auxiliary_routines, ONLY: oasis_get_debug, oasis_set_debug @@ -166,11 +165,13 @@ subroutine cpl_oasis3mct_define_unstr(mesh) use mod_oasis_method, ONLY: oasis_get_debug, oasis_set_debug #endif use mod_mesh + use mod_partit use g_rotate_grid use mod_oasis, only: oasis_write_area, oasis_write_mask implicit none save - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit !------------------------------------------------------------------- ! Definition of grid and field information for ocean ! exchange between FESOM, ECHAM6 and OASIS3-MCT. @@ -225,7 +226,10 @@ subroutine cpl_oasis3mct_define_unstr(mesh) real(kind=WP), allocatable :: all_y_coords(:, :) ! latitude coordinates real(kind=WP), allocatable :: all_area(:,:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" #ifdef VERBOSE print *, '==============================================================' @@ -479,10 +483,9 @@ subroutine cpl_oasis3mct_define_unstr(mesh) call oasis_enddef(ierror) if (commRank) print *, 'fesom oasis_enddef: COMPLETED' - #ifndef __oifs if (commRank) print *, 'FESOM: calling exchange_roots' - call exchange_roots(source_root, target_root, 1, MPI_COMM_FESOM, MPI_COMM_WORLD) + call exchange_roots(source_root, target_root, 1, partit%MPI_COMM_FESOM, MPI_COMM_WORLD) if (commRank) print *, 'FESOM source/target roots: ', source_root, target_root #endif @@ -504,8 +507,9 @@ end subroutine cpl_oasis3mct_define_unstr ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - subroutine cpl_oasis3mct_send(ind, data_array, action) + subroutine cpl_oasis3mct_send(ind, data_array, action, partit) use o_param + use MOD_PARTIT implicit none save !--------------------------------------------------------------------- @@ -523,7 +527,8 @@ subroutine cpl_oasis3mct_send(ind, data_array, action) ! integer, intent( IN ) :: ind ! variable Id logical, intent( OUT ) :: action ! - real(kind=WP), intent(IN) :: data_array(myDim_nod2D+eDim_nod2D) + type(t_partit), intent(in) :: partit + real(kind=WP), intent(IN) :: data_array(partit%myDim_nod2D+partit%eDim_nod2D) ! ! Local declarations ! @@ -540,11 +545,11 @@ subroutine cpl_oasis3mct_send(ind, data_array, action) cplsnd(ind, :)=cplsnd(ind, :)+data_array ! call do_oce_2_atm(cplsnd(ind, :)/real(o2a_call_count), atm_fld, 1) - exfld = cplsnd(ind, 1:myDim_nod2D)/real(o2a_call_count) + exfld = cplsnd(ind, 1:partit%myDim_nod2D)/real(o2a_call_count) t2=MPI_Wtime() #ifdef VERBOSE - if (mype==0) then + if (partit%mype==0) then print *, 'FESOM oasis_send: ', cpl_send(ind) endif #endif @@ -571,9 +576,10 @@ end subroutine cpl_oasis3mct_send ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - subroutine cpl_oasis3mct_recv(ind, data_array, action) + subroutine cpl_oasis3mct_recv(ind, data_array, action, partit) use o_param use g_comm_auto + use MOD_PARTIT implicit none save !--------------------------------------------------------------------- @@ -616,7 +622,7 @@ subroutine cpl_oasis3mct_recv(ind, data_array, action) action=(info==3 .OR. info==10 .OR. info==11 .OR. info==12 .OR. info==13) if (action) then data_array(1:myDim_nod2d) = exfld - call exchange_nod(data_array) + call exchange_nod(data_array, partit) end if t3=MPI_Wtime() if (ind==1) then @@ -643,14 +649,15 @@ SUBROUTINE exchange_roots(source_root, target_root, il_side, & !global_comm (i.e. comm_psmile here) IMPLICIT NONE - - INTEGER, INTENT(IN) :: il_side - INTEGER, INTENT(IN) :: local_comm, global_comm - INTEGER, INTENT(OUT) :: source_root, target_root + + INTEGER, INTENT(IN) :: il_side + INTEGER, INTENT(IN) :: local_comm, global_comm + INTEGER, INTENT(OUT) :: source_root, target_root INTEGER :: status(MPI_STATUS_SIZE) INTEGER :: local_rank, my_global_rank, ierror + source_root = 500000 target_root = 500000 diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index dbffdf80a..8d9ff12d4 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -9,9 +9,9 @@ program main USE MOD_MESH USE MOD_TRACER +USE MOD_PARTIT USE o_ARRAYS USE o_PARAM -USE g_PARSUP USE i_PARAM use i_ARRAYS use g_clock @@ -31,6 +31,8 @@ program main use update_atm_forcing_interface use before_oce_step_interface use oce_timestep_ale_interface +use par_support_interfaces +use read_mesh_interface use fesom_version_info_module use command_line_options_module ! Define icepack module @@ -44,7 +46,8 @@ program main IMPLICIT NONE -integer :: n, nsteps, offset, row, i, provided +integer :: n, nsteps, offset, row, i, provided +integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing @@ -53,11 +56,13 @@ program main real(kind=real32) :: runtime_alltimesteps -type(t_mesh), target, save :: mesh -type(t_tracer), target, save :: tracers +type(t_mesh), target, save :: mesh +type(t_tracer), target, save :: tracers +type(t_partit), target, save :: partit -character(LEN=256) :: dump_filename +character(LEN=256) :: dump_dir, dump_filename +logical :: L_EXISTS type(t_mesh), target, save :: mesh_copy type(t_tracer), target, save :: tracers_copy @@ -78,11 +83,16 @@ program main #if defined (__oasis) - call cpl_oasis3mct_init(MPI_COMM_FESOM) + call cpl_oasis3mct_init(partit%MPI_COMM_FESOM) #endif t1 = MPI_Wtime() - call par_init + call par_init(partit) + + mype =>partit%mype + MPIerr =>partit%MPIerr + MPI_COMM_FESOM=>partit%MPI_COMM_FESOM + npes =>partit%npes if(mype==0) then write(*,*) print *,"FESOM2 git SHA: "//fesom_git_sha() @@ -96,10 +106,10 @@ program main ! load the mesh and fill in ! auxiliary mesh arrays !===================== - call setup_model ! Read Namelists, always before clock_init - call clock_init ! read the clock file - call get_run_steps(nsteps) - call mesh_setup(mesh) + call setup_model(partit) ! Read Namelists, always before clock_init + call clock_init(partit) ! read the clock file + call get_run_steps(nsteps, partit) + call mesh_setup(partit, mesh) if (mype==0) write(*,*) 'FESOM mesh_setup... complete' @@ -111,9 +121,9 @@ program main call check_mesh_consistency(mesh) if (mype==0) t2=MPI_Wtime() - call tracer_init(tracers, mesh) ! allocate array of ocean tracers (derived type "t_tracer") - call arrays_init(tracers%num_tracers, mesh) ! allocate other arrays (to be refactured same as tracers in the future) - call ocean_setup(tracers, mesh) + call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") + call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) + call ocean_setup(tracers, partit, mesh) if (mype==0) then write(*,*) 'FESOM ocean_setup... complete' @@ -123,15 +133,15 @@ program main if (mype==0) t4=MPI_Wtime() if (use_ice) then - call ice_setup(tracers, mesh) + call ice_setup(tracers, partit, mesh) ice_steps_since_upd = ice_ave_steps-1 ice_update=.true. if (mype==0) write(*,*) 'EVP scheme option=', whichEVP endif if (mype==0) t5=MPI_Wtime() - call compute_diagnostics(0, tracers, mesh) ! allocate arrays for diagnostic + call compute_diagnostics(0, tracers, partit, mesh) ! allocate arrays for diagnostic #if defined (__oasis) - call cpl_oasis3mct_define_unstr(mesh) + call cpl_oasis3mct_define_unstr(partit, mesh) if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv #endif @@ -142,7 +152,7 @@ program main if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' call set_icepack call alloc_icepack - call init_icepack(tracers%data(1), mesh) + call init_icepack(tracers%data(1), partit, mesh) if (mype==0) write(*,*) 'Icepack: setup complete' #endif call clock_newyear ! check if it is a new year @@ -153,16 +163,16 @@ program main ! if istep is not zero it will be decided whether restart shall be written ! if l_write is TRUE the restart will be forced ! if l_read the restart will be read - ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, mesh) - call restart(0, .false., r_restart, tracers, mesh) ! istep, l_write, l_read + ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) + call restart(0, .false., r_restart, tracers, partit, mesh) ! istep, l_write, l_read if (mype==0) t7=MPI_Wtime() ! store grid information into netcdf file - if (.not. r_restart) call write_mesh_info(mesh) + if (.not. r_restart) call write_mesh_info(partit, mesh) !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ if (r_restart) then - call restart_thickness_ale(mesh) + call restart_thickness_ale(partit, mesh) end if if (mype==0) then t8=MPI_Wtime() @@ -186,27 +196,29 @@ program main write(*,*) '============================================' endif + DUMP_DIR='DUMP/' + if (.not. L_EXISTS) call system('mkdir '//trim(dump_dir)) -! write (dump_filename, "(A7,I7.7)") "t_mesh.", mype -! open (mype+300, file=trim(dump_filename), status='replace', form="unformatted") -! write (mype+300) mesh -! close (mype+300) + write (dump_filename, "(A7,I7.7)") "t_mesh.", mype + open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") + write (mype+300) mesh + close (mype+300) ! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") ! read (mype+300) mesh_copy ! close (mype+300) -! write (dump_filename, "(A9,I7.7)") "t_tracer.", mype -! open (mype+300, file=trim(dump_filename), status='replace', form="unformatted") -! write (mype+300) tracers -! close (mype+300) + write (dump_filename, "(A9,I7.7)") "t_tracer.", mype + open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") + write (mype+300) tracers + close (mype+300) ! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") ! read (mype+300) tracers_copy ! close (mype+300) -!call par_ex -!stop +call par_ex(partit) +stop ! ! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value @@ -235,12 +247,12 @@ program main end if !___MODEL TIME STEPPING LOOP________________________________________________ if (use_global_tides) then - call foreph_ini(yearnew, month) + call foreph_ini(yearnew, month, partit) end if do n=1, nsteps if (use_global_tides) then - call foreph(mesh) + call foreph(partit, mesh) end if mstep = n if (mod(n,logfile_outfreq)==0 .and. mype==0) then @@ -261,12 +273,12 @@ program main if(use_ice) then !___compute fluxes from ocean to ice________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(tracers, mesh) + call ocean2ice(tracers, partit, mesh) !___compute update of atmospheric forcing____________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' t0_frc = MPI_Wtime() - call update_atm_forcing(n, tracers, mesh) + call update_atm_forcing(n, tracers, partit, mesh) t1_frc = MPI_Wtime() !___compute ice step________________________________________________ if (ice_steps_since_upd>=ice_ave_steps-1) then @@ -277,27 +289,27 @@ program main ice_steps_since_upd=ice_steps_since_upd+1 endif if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ice_timestep(n)'//achar(27)//'[0m' - if (ice_update) call ice_timestep(n, mesh) + if (ice_update) call ice_timestep(n, partit, mesh) !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(mesh) ! momentum only - call oce_fluxes(tracers, mesh) + call oce_fluxes_mom(partit, mesh) ! momentum only + call oce_fluxes(tracers, partit, mesh) end if - call before_oce_step(tracers, mesh) ! prepare the things if required + call before_oce_step(tracers, partit, mesh) ! prepare the things if required t2 = MPI_Wtime() !___model ocean step____________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - call oce_timestep_ale(n, tracers, mesh) + call oce_timestep_ale(n, tracers, partit, mesh) t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, tracers, mesh) + call compute_diagnostics(1, tracers, partit, mesh) t4 = MPI_Wtime() !___prepare output______________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, tracers, mesh) + call output (n, tracers, partit, mesh) t5 = MPI_Wtime() - call restart(n, .false., .false., tracers, mesh) + call restart(n, .false., .false., tracers, partit, mesh) t6 = MPI_Wtime() rtime_fullice = rtime_fullice + t2 - t1 @@ -366,6 +378,6 @@ program main write(*,*) end if ! call clock_finish - call par_ex + call par_ex(partit) end program main diff --git a/src/gen_bulk_formulae.F90 b/src/gen_bulk_formulae.F90 index f87dc5748..61535feb3 100755 --- a/src/gen_bulk_formulae.F90 +++ b/src/gen_bulk_formulae.F90 @@ -1,11 +1,11 @@ MODULE gen_bulk ! Compute heat and momentum exchange coefficients use mod_mesh + use mod_partit use i_therm_param use i_arrays use g_forcing_arrays use g_forcing_param, only: ncar_bulk_z_wind, ncar_bulk_z_tair, ncar_bulk_z_shum - use g_parsup use o_param, only: WP use g_sbf, only: atmdata, i_totfl, i_xwind, i_ywind, i_humi, i_qsr, i_qlw, i_tair, i_prec, i_mslp, i_cloud @@ -18,7 +18,7 @@ MODULE gen_bulk ! ! !_______________________________________________________________________________ -subroutine ncar_ocean_fluxes_mode_fesom14(mesh) +subroutine ncar_ocean_fluxes_mode_fesom14(partit, mesh) ! Compute drag coefficient and the transfer coefficients for evaporation ! and sensible heat according to LY2004. ! In this routine we assume air temperature and humidity are at the same @@ -46,9 +46,10 @@ subroutine ncar_ocean_fluxes_mode_fesom14(mesh) real(kind=WP), parameter :: grav = 9.80_WP, vonkarm = 0.40_WP real(kind=WP), parameter :: q1=640380._WP, q2=-5107.4_WP ! for saturated surface specific humidity real(kind=WP), parameter :: zz = 10.0_WP - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit - do i=1,myDim_nod2d+eDim_nod2d + do i=1, partit%myDim_nod2d+partit%eDim_nod2d t=tair(i) + tmelt ! degree celcium to Kelvin ts=t_oc_array(i) + tmelt ! q=shum(i) @@ -112,7 +113,7 @@ end subroutine ncar_ocean_fluxes_mode_fesom14 ! ! !_______________________________________________________________________________ -subroutine ncar_ocean_fluxes_mode(mesh) +subroutine ncar_ocean_fluxes_mode(partit, mesh) ! Compute drag coefficient and the transfer coefficients for evaporation ! and sensible heat according to LY2004. ! with updates from Large et al. 2009 for the computation of the wind drag @@ -151,9 +152,10 @@ subroutine ncar_ocean_fluxes_mode(mesh) real(kind=WP) :: test, cd_prev, inc_ratio=1.0e-4 real(kind=WP) :: t_prev, q_prev - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit - do i=1,myDim_nod2d+eDim_nod2d + do i=1,partit%myDim_nod2d+partit%eDim_nod2d if (mesh%ulevels_nod2d(i)>1) cycle ! degree celcium to Kelvin t = tair(i) + tmelt @@ -310,15 +312,14 @@ subroutine ncar_ocean_fluxes_mode(mesh) ! final transfer coefficients for wind, sensible heat and evaporation cd_atm_oce_arr(i)=cd ch_atm_oce_arr(i)=ch - ce_atm_oce_arr(i)=ce - + ce_atm_oce_arr(i)=ce end do end subroutine ncar_ocean_fluxes_mode ! !--------------------------------------------------------------------------------------------------- ! -subroutine cal_wind_drag_coeff +subroutine cal_wind_drag_coeff(partit) ! Compute wind-ice drag coefficient following AOMIP ! ! Coded by Qiang Wang @@ -327,25 +328,26 @@ subroutine cal_wind_drag_coeff use i_arrays use g_forcing_arrays - use g_parsup implicit none - integer :: i - real(kind=WP) :: ws + integer :: i + real(kind=WP) :: ws + type(t_partit), intent(in) :: partit - do i=1,myDim_nod2d+eDim_nod2d + do i=1,partit%myDim_nod2d+partit%eDim_nod2d ws=sqrt(u_wind(i)**2+v_wind(i)**2) cd_atm_ice_arr(i)=(1.1_WP+0.04_WP*ws)*1.0e-3_WP end do end subroutine cal_wind_drag_coeff ! -SUBROUTINE nemo_ocean_fluxes_mode +SUBROUTINE nemo_ocean_fluxes_mode(partit) !!---------------------------------------------------------------------- !! ** Purpose : Change model variables according to atm fluxes !! source of original code: NEMO 3.1.1 + NCAR !!---------------------------------------------------------------------- IMPLICIT NONE + type(t_partit), intent(in) :: partit integer :: i real(wp) :: rtmp ! temporal real real(wp) :: wndm ! delta of wind module and ocean curent module @@ -366,7 +368,7 @@ SUBROUTINE nemo_ocean_fluxes_mode real(wp) :: zevap, zqsb, zqla, zqlw !!$OMP PARALLEL !!$OMP DO - do i = 1, myDim_nod2D+eDim_nod2d + do i = 1, partit%myDim_nod2D+partit%eDim_nod2d wdx = atmdata(i_xwind,i) - u_w(i) ! wind from data - ocean current ( x direction) wdy = atmdata(i_ywind,i) - v_w(i) ! wind from data - ocean current ( y direction) wndm = SQRT( wdx * wdx + wdy * wdy ) diff --git a/src/gen_comm.F90 b/src/gen_comm.F90 index 8d6c4f345..1baab3fcb 100755 --- a/src/gen_comm.F90 +++ b/src/gen_comm.F90 @@ -4,22 +4,22 @@ ! The communication rules are saved. ! set_par_support in the main phase just allocates memory for buffer ! arrays, the rest is read together with mesh from saved files. - -!KK: moved par_ex,set_par_support,set_par_support_ini to module g_PARSUP - -! =============================================================== !======================================================================= -subroutine communication_nodn(mesh) +subroutine communication_nodn(partit, mesh) use MOD_MESH - use g_PARSUP + use MOD_PARTIT implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n, np, prank, el, r_count, s_count, q, i, j, nod, k, l integer :: num_send(0:npes-1), num_recv(0:npes-1), nd_count integer, allocatable :: recv_from_pe(:), send_to_pes(:,:) logical :: max_laendereck_too_small=.false. integer :: IERR -#include "associate_mesh_ini.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Assume we have 2D partitioning vector in part. Find communication rules ! Reduce allocation: find all neighboring PE @@ -159,12 +159,13 @@ subroutine communication_nodn(mesh) r_count = 0 eDim_nod2D=com_nod2D%rptr(com_nod2D%rPEnum+1)-1 - allocate(com_nod2D%rlist(eDim_nod2D), & - com_nod2D%slist(com_nod2D%sptr(com_nod2D%sPEnum+1)-1), STAT=IERR) + allocate(partit%com_nod2D%rlist(eDim_nod2D), & + partit%com_nod2D%slist(com_nod2D%sptr(com_nod2D%sPEnum+1)-1), STAT=IERR) if (IERR /= 0) then write (*,*) 'Could not allocate arrays in communication_nodn' stop endif + com_nod2D=>partit%com_nod2D do np = 1,com_nod2D%rPEnum prank = com_nod2D%rPE(np) @@ -215,19 +216,23 @@ subroutine communication_nodn(mesh) end subroutine communication_nodn !========================================================================== -subroutine communication_elemn(mesh) +subroutine communication_elemn(partit, mesh) use MOD_MESH - use g_PARSUP + use MOD_PARTIT implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer, allocatable :: recv_from_pe(:), send_to_pes(:,:) logical :: max_laendereck_too_small=.false. integer :: n, k, ep, np, prank, el, nod integer :: p, q, j, elem, i, l, r_count, s_count, el_count integer :: num_send(0:npes-1), num_recv(0:npes-1) integer :: IERR -#include "associate_mesh_ini.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Assume we have 2D partitioning vector in part. Find communication ! rules. An elem is external to element n if neither of its nodes ! belongs to PE, but it is among the neighbors. Element n belongs to PE if @@ -258,11 +263,12 @@ subroutine communication_elemn(mesh) end do myDim_elem2D=el_count - allocate(myList_elem2D(el_count), send_to_pes(MAX_LAENDERECK,el_count), STAT=IERR) + allocate(partit%myList_elem2D(el_count), send_to_pes(MAX_LAENDERECK,el_count), STAT=IERR) if (IERR /= 0) then write (*,*) 'Could not allocate arrays in communication_elemn' stop endif + myList_elem2D=>partit%myList_elem2D myList_elem2D(1:el_count) = recv_from_pe(1:el_count) num_send(0:npes-1) = 0 @@ -362,7 +368,8 @@ subroutine communication_elemn(mesh) r_count = 0 eDim_elem2D=com_elem2D%rptr(com_elem2D%rPEnum+1)-1 - allocate(com_elem2D%rlist(eDim_elem2D)) + allocate(partit%com_elem2D%rlist(eDim_elem2D)) + com_elem2D=>partit%com_elem2D !not needed? do np = 1,com_elem2D%rPEnum prank = com_elem2D%rPE(np) do el = 1, elem2D @@ -374,7 +381,8 @@ subroutine communication_elemn(mesh) end do s_count = 0 - allocate(com_elem2D%slist(com_elem2D%sptr(com_elem2D%sPEnum+1)-1)) + allocate(partit%com_elem2D%slist(com_elem2D%sptr(com_elem2D%sPEnum+1)-1)) + com_elem2D=>partit%com_elem2D! not needed? do np = 1,com_elem2D%sPEnum prank = com_elem2D%sPE(np) do l = 1, el_count @@ -487,7 +495,8 @@ subroutine communication_elemn(mesh) ! Lists themselves r_count = 0 - allocate(com_elem2D_full%rlist(com_elem2D_full%rptr(com_elem2D_full%rPEnum+1)-1)) + allocate(partit%com_elem2D_full%rlist(com_elem2D_full%rptr(com_elem2D_full%rPEnum+1)-1)) + com_elem2D_full=>partit%com_elem2D_full !not needed? do np = 1,com_elem2D_full%rPEnum prank = com_elem2D_full%rPE(np) do el = 1, elem2D @@ -500,6 +509,7 @@ subroutine communication_elemn(mesh) s_count = 0 allocate(com_elem2D_full%slist(com_elem2D_full%sptr(com_elem2D_full%sPEnum+1)-1)) + com_elem2D_full=>partit%com_elem2D_full !not needed? do np = 1,com_elem2D_full%sPEnum prank = com_elem2D_full%sPE(np) do l = 1, el_count @@ -514,15 +524,19 @@ subroutine communication_elemn(mesh) deallocate(recv_from_pe, send_to_pes) end subroutine communication_elemn !========================================================================== -subroutine mymesh(mesh) +subroutine mymesh(partit, mesh) use MOD_MESH - use g_PARSUP + use MOD_PARTIT implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n, counter, q, k, elem, q2, eledges(4) integer, allocatable :: aux(:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !======= NODES ! Owned nodes + external nodes which I need: @@ -641,17 +655,18 @@ subroutine mymesh(mesh) end subroutine mymesh !================================================================= #ifndef FVOM_INIT -subroutine status_check +subroutine status_check(partit) use g_config -use g_parsup +use mod_partit implicit none +type(t_partit), intent(in), target :: partit integer :: res res=0 -call MPI_Allreduce (pe_status, res, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_FESOM, MPIerr) +call MPI_Allreduce (partit%pe_status, res, 1, MPI_INTEGER, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) if (res /= 0 ) then - if (mype==0) write(*,*) 'Something Broke. Flushing and stopping...' + if (partit%mype==0) write(*,*) 'Something Broke. Flushing and stopping...' !!! a restart file must be written here !!! - call par_ex(1) + call par_ex(partit, 1) endif end subroutine status_check #endif diff --git a/src/gen_events.F90 b/src/gen_events.F90 index e364e3f0d..977a386eb 100644 --- a/src/gen_events.F90 +++ b/src/gen_events.F90 @@ -90,16 +90,16 @@ end subroutine step_event ! !-------------------------------------------------------------------------------------------- ! -subroutine handle_err(errcode) - use g_parsup +subroutine handle_err(errcode, partit) + use mod_partit implicit none #include "netcdf.inc" - - integer errcode + type(t_partit), intent(inout) :: partit + integer :: errcode write(*,*) 'Error: ', nf_strerror(errcode) - call par_ex(1) + call par_ex(partit, 1) stop end subroutine handle_err ! diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 1817052fd..682685162 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -1,58 +1,74 @@ module force_flux_consv_interface interface - subroutine force_flux_consv(field2d, mask, n, h, do_stats, mesh) + subroutine force_flux_consv(field2d, mask, n, h, do_stats, partit, mesh) use mod_mesh - use g_parsup !myDim_nod2D, eDim_nod2D, MPI stuff - real(kind=WP), intent (inout) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), intent (in) :: mask(myDim_nod2D+eDim_nod2D) + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent (inout) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent (in) :: mask(partit%myDim_nod2D+partit%eDim_nod2D) integer, intent (in) :: n, h logical, intent (in) :: do_stats - type(t_mesh), intent(in) , target :: mesh end subroutine end interface end module module compute_residual_interface interface - subroutine compute_residual(field2d, mask, n, mesh) + subroutine compute_residual(field2d, mask, n, partit, mesh) use mod_mesh - use g_parsup !myDim_nod2D, eDim_nod2D, MPI stuff - real(kind=WP), intent (in) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), intent (in) :: mask(myDim_nod2D+eDim_nod2D) + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent (in) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent (in) :: mask(partit%myDim_nod2D+partit%eDim_nod2D) integer, intent (in) :: n - type(t_mesh), intent(in) , target :: mesh end subroutine end interface end module module integrate_2D_interface interface - subroutine integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, mesh) + subroutine integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, mesh) use mod_mesh - use g_parsup !myDim_nod2D, eDim_nod2D, MPI stuff + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit real(kind=WP), intent (out) :: flux_global(2), flux_local(2) real(kind=WP), intent (out) :: eff_vol(2) - real(kind=WP), intent (in) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), intent (in) :: mask(myDim_nod2D +eDim_nod2D) - type(t_mesh), intent(in) , target :: mesh + real(kind=WP), intent (in) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent (in) :: mask(partit%myDim_nod2D +partit%eDim_nod2D) end subroutine end interface end module module update_atm_forcing_interface interface - subroutine update_atm_forcing(istep, tracers, mesh) + subroutine update_atm_forcing(istep, tracers, partit,mesh) use mod_mesh + use mod_partit use mod_tracer - integer, intent(in) :: istep - type(t_tracer), intent(in), target :: tracers - type(t_mesh), intent(in), target :: mesh + integer, intent(in) :: istep + type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module + +module net_rec_from_atm_interface + interface + subroutine net_rec_from_atm(action, partit) + use mod_partit + logical, intent(in) :: action + type(t_partit), intent(inout), target :: partit end subroutine end interface end module ! Routines for updating ocean surface forcing fields !------------------------------------------------------------------------- -subroutine update_atm_forcing(istep, tracers, mesh) +subroutine update_atm_forcing(istep, tracers, partit, mesh) use o_PARAM - use mod_MESH + use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_arrays use i_arrays @@ -60,11 +76,11 @@ subroutine update_atm_forcing(istep, tracers, mesh) use i_therm_param use g_forcing_param use g_forcing_arrays - use g_parsup use g_clock use g_config use g_comm_auto use g_rotate_grid + use net_rec_from_atm_interface use g_sbf, only: sbc_do use g_sbf, only: atmdata, i_totfl, i_xwind, i_ywind, i_humi, i_qsr, i_qlw, i_tair, i_prec, i_mslp, i_cloud, i_snow, & l_xwind, l_ywind, l_humi, l_qsr, l_qlw, l_tair, l_prec, l_mslp, l_cloud, l_snow @@ -75,8 +91,9 @@ subroutine update_atm_forcing(istep, tracers, mesh) use force_flux_consv_interface implicit none - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers integer :: i, istep,itime,n2,n,nz,k,elem real(kind=WP) :: i_coef, aux real(kind=WP) :: dux, dvy,tx,ty,tvol @@ -97,7 +114,10 @@ subroutine update_atm_forcing(istep, tracers, mesh) !integer, parameter :: nci=192, ncj=94 ! T62 grid !real(kind=WP), dimension(nci,ncj) :: array_nc, array_nc2,array_nc3,x !character(500) :: file -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" t1=MPI_Wtime() #ifdef __oasis if (firstcall) then @@ -141,7 +161,7 @@ subroutine update_atm_forcing(istep, tracers, mesh) print *, 'not installed yet or error in cpl_oasis3mct_send', mype #endif endif - call cpl_oasis3mct_send(i, exchange, action) + call cpl_oasis3mct_send(i, exchange, action, partit) enddo #ifdef VERBOSE do i=1, nsend @@ -151,10 +171,10 @@ subroutine update_atm_forcing(istep, tracers, mesh) mask=1. do i=1,nrecv exchange =0.0 - call cpl_oasis3mct_recv (i,exchange,action) + call cpl_oasis3mct_recv (i, exchange, action, partit) !if (.not. action) cycle !Do not apply a correction at first time step! - if (i==1 .and. action .and. istep/=1) call net_rec_from_atm(action) + if (i==1 .and. action .and. istep/=1) call net_rec_from_atm(action, partit) if (i.eq.1) then if (.not. action) cycle stress_atmoce_x(:) = exchange(:) ! taux_oce @@ -175,14 +195,14 @@ subroutine update_atm_forcing(istep, tracers, mesh) if (action) then prec_rain(:) = exchange(:) ! tot_prec mask=1. - call force_flux_consv(prec_rain, mask, i, 0,action, mesh) + call force_flux_consv(prec_rain, mask, i, 0,action, partit, mesh) end if elseif (i.eq.6) then if (action) then prec_snow(:) = exchange(:) ! snowfall mask=1. - call force_flux_consv(prec_snow, mask,i,1,action, mesh) ! Northern hemisphere - call force_flux_consv(prec_snow, mask,i,2,action, mesh) ! Southern Hemisphere + call force_flux_consv(prec_snow, mask,i,1,action, partit, mesh) ! Northern hemisphere + call force_flux_consv(prec_snow, mask,i,2,action, partit, mesh) ! Southern Hemisphere end if elseif (i.eq.7) then if (action) then @@ -192,7 +212,7 @@ subroutine update_atm_forcing(istep, tracers, mesh) end if mask=1.-a_ice evap_no_ifrac(:) = tmp_evap_no_ifrac(:) - call force_flux_consv(evap_no_ifrac,mask,i,0,action, mesh) + call force_flux_consv(evap_no_ifrac,mask,i,0,action, partit, mesh) elseif (i.eq.8) then if (action) then sublimation(:) = exchange(:) ! tot_subl @@ -201,8 +221,8 @@ subroutine update_atm_forcing(istep, tracers, mesh) end if mask=a_ice sublimation(:) = tmp_sublimation(:) - call force_flux_consv(sublimation,mask,i,1,action, mesh) ! Northern hemisphere - call force_flux_consv(sublimation,mask,i,2,action, mesh) ! Southern Hemisphere + call force_flux_consv(sublimation,mask,i,1,action, partit, mesh) ! Northern hemisphere + call force_flux_consv(sublimation,mask,i,2,action, partit, mesh) ! Southern Hemisphere elseif (i.eq.9) then if (action) then oce_heat_flux(:) = exchange(:) ! heat_oce @@ -211,7 +231,7 @@ subroutine update_atm_forcing(istep, tracers, mesh) end if mask=1.-a_ice oce_heat_flux(:) = tmp_oce_heat_flux(:) - call force_flux_consv(oce_heat_flux, mask, i, 0,action, mesh) + call force_flux_consv(oce_heat_flux, mask, i, 0,action, partit, mesh) elseif (i.eq.10) then if (action) then ice_heat_flux(:) = exchange(:) ! heat_ice @@ -220,8 +240,8 @@ subroutine update_atm_forcing(istep, tracers, mesh) end if mask=a_ice ice_heat_flux(:) = tmp_ice_heat_flux(:) - call force_flux_consv(ice_heat_flux, mask, i, 1,action, mesh) ! Northern hemisphere - call force_flux_consv(ice_heat_flux, mask, i, 2,action, mesh) ! Southern Hemisphere + call force_flux_consv(ice_heat_flux, mask, i, 1,action, partit, mesh) ! Northern hemisphere + call force_flux_consv(ice_heat_flux, mask, i, 2,action, partit, mesh) ! Southern Hemisphere elseif (i.eq.11) then if (action) then shortwave(:) = exchange(:) ! heat_swr @@ -230,12 +250,12 @@ subroutine update_atm_forcing(istep, tracers, mesh) end if mask=1.-a_ice shortwave(:) = tmp_shortwave(:) - call force_flux_consv(shortwave, mask, i, 0,action, mesh) + call force_flux_consv(shortwave, mask, i, 0,action, partit, mesh) elseif (i.eq.12) then if (action) then runoff(:) = exchange(:) ! AWI-CM2: runoff, AWI-CM3: runoff + excess snow on glaciers mask=1. - call force_flux_consv(runoff, mask, i, 0,action, mesh) + call force_flux_consv(runoff, mask, i, 0,action, partit, mesh) end if #if defined (__oifs) @@ -243,7 +263,7 @@ subroutine update_atm_forcing(istep, tracers, mesh) if (action) then enthalpyoffuse(:) = exchange(:) ! enthalpy of fusion via solid water discharge from glaciers mask=1. - call force_flux_consv(enthalpyoffuse, mask, i, 0,action, mesh) + call force_flux_consv(enthalpyoffuse, mask, i, 0, action, partit, mesh) end if #endif end if @@ -264,7 +284,7 @@ subroutine update_atm_forcing(istep, tracers, mesh) do_rotate_ice_wind=.false. end if #else - call sbc_do(mesh) + call sbc_do(partit, mesh) u_wind = atmdata(i_xwind,:) v_wind = atmdata(i_ywind,:) shum = atmdata(i_humi ,:) @@ -295,14 +315,14 @@ subroutine update_atm_forcing(istep, tracers, mesh) ! second, compute exchange coefficients ! 1) drag coefficient if(AOMIP_drag_coeff) then - call cal_wind_drag_coeff + call cal_wind_drag_coeff(partit) end if ! 2) drag coeff. and heat exchange coeff. over ocean in case using ncar formulae if(ncar_bulk_formulae) then cd_atm_oce_arr=0.0_WP ch_atm_oce_arr=0.0_WP ce_atm_oce_arr=0.0_WP - call ncar_ocean_fluxes_mode(mesh) + call ncar_ocean_fluxes_mode(partit, mesh) elseif(AOMIP_drag_coeff) then cd_atm_oce_arr=cd_atm_ice_arr end if @@ -366,33 +386,35 @@ end subroutine update_atm_forcing ! 10-12 (T.Rackow, AWI Germany) code reordering and cleanup !----------------------------------------------------------------- ! -SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, mesh) +SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, partit, mesh) use g_forcing_arrays, only : atm_net_fluxes_north, atm_net_fluxes_south, & oce_net_fluxes_north, oce_net_fluxes_south, & flux_correction_north, flux_correction_south, & flux_correction_total - use g_parsup use mod_mesh + use mod_partit use cpl_driver, only : nrecv, cpl_recv, a2o_fcorr_stat use o_PARAM, only : mstep, WP use compute_residual_interface use integrate_2D_interface IMPLICIT NONE - - real(kind=WP), INTENT (INOUT) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), INTENT (IN) :: mask(myDim_nod2D+eDim_nod2D) - INTEGER, INTENT (IN) :: n - INTEGER, INTENT (IN) :: h !hemisphere: 0=GL, 1=NH, 2=SH - logical, INTENT (IN) :: do_stats - - real(kind=WP) :: rmask(myDim_nod2D+eDim_nod2D) - real(kind=WP) :: weight(myDim_nod2D+eDim_nod2D) - real(kind=WP) :: flux_global(2), flux_local(2) - real(kind=WP) :: eff_vol(2) - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), INTENT (INOUT) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), INTENT (IN) :: mask(partit%myDim_nod2D+partit%eDim_nod2D) + INTEGER, INTENT (IN) :: n + INTEGER, INTENT (IN) :: h !hemisphere: 0=GL, 1=NH, 2=SH + logical, INTENT (IN) :: do_stats + real(kind=WP) :: rmask(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: weight(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: flux_global(2), flux_local(2) + real(kind=WP) :: eff_vol(2) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" #if defined (__oifs) return !OIFS-FESOM2 coupling uses OASIS3MCT conservative remapping instead @@ -418,7 +440,7 @@ SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, mesh) END SELECT !residual (net) fluxes; computes also oce_net_fluxes_* - call compute_residual(field2d, rmask, n, mesh) + call compute_residual(field2d, rmask, n, partit, mesh) #ifdef VERBOSE if (mype == 0) then @@ -444,7 +466,7 @@ SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, mesh) end if !integrate (masked) abs(field2d) to get positive weights - call integrate_2D(flux_global, flux_local, eff_vol, abs(field2d), rmask, mesh) + call integrate_2D(flux_global, flux_local, eff_vol, abs(field2d), rmask, partit, mesh) !get weight pattern with integral 1 if (abs(sum(flux_global))>1.e-10) then @@ -468,7 +490,7 @@ SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, mesh) END SELECT !check conservation - call integrate_2D(flux_global, flux_local, eff_vol, field2d, rmask, mesh) + call integrate_2D(flux_global, flux_local, eff_vol, field2d, rmask, partit, mesh) #ifdef VERBOSE if (mype == 0) then write(*,'(3A,3e15.7)') 'oce NH SH GL / ', trim(cpl_recv(n)), ': ', & @@ -484,30 +506,33 @@ END SUBROUTINE force_flux_consv ! Compute the difference between the net fluxes seen by the atmosphere ! and ocean component (residual flux) for flux n. ! -SUBROUTINE compute_residual(field2d, mask, n, mesh) +SUBROUTINE compute_residual(field2d, mask, n, partit, mesh) use g_forcing_arrays, only : atm_net_fluxes_north, atm_net_fluxes_south, & oce_net_fluxes_north, oce_net_fluxes_south, & flux_correction_north, flux_correction_south, & flux_correction_total - use g_parsup use o_PARAM, only : WP use MOD_MESH + use MOD_PARTIT use integrate_2D_interface IMPLICIT NONE - - real(kind=WP), INTENT(IN) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), INTENT(IN) :: mask(myDim_nod2D+eDim_nod2D) - INTEGER, INTENT(IN) :: n + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), INTENT(IN) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), INTENT(IN) :: mask(partit%myDim_nod2D+partit%eDim_nod2D) + INTEGER, INTENT(IN) :: n real(kind=WP) :: flux_global(2), flux_local(2) real(kind=WP) :: eff_vol(2) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !compute net flux (for flux n) on ocean side - call integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, mesh) + call integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, mesh) oce_net_fluxes_north(n)=flux_global(1) oce_net_fluxes_south(n)=flux_global(2) @@ -522,24 +547,24 @@ END SUBROUTINE compute_residual ! -flux_local (returned) is the net local flux (for current pc) ! -flux_global (returned) is the communicated and summarized flux_local ! -SUBROUTINE integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, mesh) - - - use g_parsup !myDim_nod2D, eDim_nod2D, MPI stuff +SUBROUTINE integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, mesh) use MOD_MESH - use o_PARAM, only: WP - + use MOD_PARTIT + use o_PARAM, only: WP IMPLICIT NONE - + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(in), target :: partit real(kind=WP), INTENT(OUT) :: flux_global(2), flux_local(2) real(kind=WP), INTENT(OUT) :: eff_vol(2) - real(kind=WP), INTENT(IN) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), INTENT(IN) :: mask(myDim_nod2D +eDim_nod2D) + real(kind=WP), INTENT(IN) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), INTENT(IN) :: mask(partit%myDim_nod2D +partit%eDim_nod2D) real(kind=WP) :: eff_vol_local(2) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" flux_local(1)=sum(lump2d_north*field2d(1:myDim_nod2D)*mask(1:myDim_nod2D)) flux_local(2)=sum(lump2d_south*field2d(1:myDim_nod2D)*mask(1:myDim_nod2D)) @@ -584,21 +609,21 @@ END SUBROUTINE integrate_2D !--------------------------------------------------------------------------------------------------- ! Receieve atmospheric net fluxes (atm_net_fluxes_north and atm_net_fluxes_south) ! -SUBROUTINE net_rec_from_atm(action) +SUBROUTINE net_rec_from_atm(action, partit) ! use g_forcing_arrays - use g_parsup use cpl_driver use o_PARAM, only: WP - + use mod_partit IMPLICIT NONE - LOGICAL, INTENT (IN) :: action + LOGICAL, INTENT (IN) :: action + type(t_partit), intent(inout), target :: partit INTEGER :: my_global_rank, ierror INTEGER :: n INTEGER :: status(MPI_STATUS_SIZE,npes) INTEGER :: request(2) - real(kind=WP) :: aux(nrecv) + real(kind=WP) :: aux(nrecv) #if defined (__oifs) return !OIFS-FESOM2 coupling uses OASIS3MCT conservative remapping and recieves no net fluxes here. #endif @@ -608,14 +633,14 @@ SUBROUTINE net_rec_from_atm(action) atm_net_fluxes_north=0. atm_net_fluxes_south=0. if (my_global_rank==target_root) then - CALL MPI_IRecv(atm_net_fluxes_north(1), nrecv, MPI_DOUBLE_PRECISION, source_root, 111, MPI_COMM_WORLD, request(1), MPIerr) - CALL MPI_IRecv(atm_net_fluxes_south(1), nrecv, MPI_DOUBLE_PRECISION, source_root, 112, MPI_COMM_WORLD, request(2), MPIerr) - CALL MPI_Waitall(2, request, status, MPIerr) + CALL MPI_IRecv(atm_net_fluxes_north(1), nrecv, MPI_DOUBLE_PRECISION, source_root, 111, MPI_COMM_WORLD, request(1), partit%MPIerr) + CALL MPI_IRecv(atm_net_fluxes_south(1), nrecv, MPI_DOUBLE_PRECISION, source_root, 112, MPI_COMM_WORLD, request(2), partit%MPIerr) + CALL MPI_Waitall(2, request, status, partit%MPIerr) end if - call MPI_Barrier(MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(atm_net_fluxes_north(1), aux, nrecv, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_Barrier(partit%MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(atm_net_fluxes_north(1), aux, nrecv, MPI_DOUBLE_PRECISION, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) atm_net_fluxes_north=aux - call MPI_AllREDUCE(atm_net_fluxes_south(1), aux, nrecv, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(atm_net_fluxes_south(1), aux, nrecv, MPI_DOUBLE_PRECISION, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) atm_net_fluxes_south=aux end if END SUBROUTINE net_rec_from_atm diff --git a/src/gen_forcing_init.F90 b/src/gen_forcing_init.F90 index cb69dba6b..0e2f6ecaa 100755 --- a/src/gen_forcing_init.F90 +++ b/src/gen_forcing_init.F90 @@ -1,8 +1,10 @@ module forcing_array_setup_interfaces interface - subroutine forcing_array_setup(mesh) + subroutine forcing_array_setup(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module @@ -11,40 +13,46 @@ subroutine forcing_array_setup(mesh) ! Added the driving routine forcing_setup. ! S.D 05.04.12 ! ========================================================== -subroutine forcing_setup(mesh) -use g_parsup +subroutine forcing_setup(partit, mesh) use g_CONFIG use g_sbf, only: sbc_ini use mod_mesh +use mod_partit use forcing_array_setup_interfaces implicit none - type(t_mesh), intent(in) , target :: mesh - if (mype==0) write(*,*) '****************************************************' +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit + + if (partit%mype==0) write(*,*) '****************************************************' if (use_ice) then - call forcing_array_setup(mesh) + call forcing_array_setup(partit, mesh) #ifndef __oasis - call sbc_ini(mesh) ! initialize forcing fields + call sbc_ini(partit, mesh) ! initialize forcing fields #endif endif end subroutine forcing_setup ! ========================================================== -subroutine forcing_array_setup(mesh) +subroutine forcing_array_setup(partit, mesh) !inializing forcing fields use o_param use mod_mesh + use mod_partit use i_arrays use g_forcing_arrays use g_forcing_param - use g_parsup use g_config use g_sbf, only: l_mslp, l_cloud #if defined (__oasis) use cpl_driver, only : nrecv #endif implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n2 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" n2=myDim_nod2D+eDim_nod2D ! Allocate memory for atmospheric forcing allocate(shortwave(n2), longwave(n2)) diff --git a/src/gen_halo_exchange.F90 b/src/gen_halo_exchange.F90 index e15344cae..7b9f66e6b 100755 --- a/src/gen_halo_exchange.F90 +++ b/src/gen_halo_exchange.F90 @@ -23,61 +23,62 @@ module g_comm contains #ifdef DEBUG - ! Only needed in debug mode - subroutine check_mpi_comm(rn, sn, r_mpitype, s_mpitype, rPE, sPE) - USE g_PARSUP - IMPLICIT NONE - - ! General version of the communication routine for 2D nodal fields - - integer, intent(in) :: sn, rn, r_mpitype(:), s_mpitype(:), rPE(:), sPE(:) - integer :: n, sdebug, rdebug, status(MPI_STATUS_SIZE), request - - DO n=1,rn - call MPI_TYPE_SIZE(r_mpitype(n), rdebug, MPIerr) - CALL MPI_ISEND(rdebug, 1, MPI_INTEGER, rPE(n), 10, MPI_COMM_FESOM, request, MPIerr) - END DO - - DO n=1, sn - call MPI_RECV(sdebug, 1, MPI_INTEGER, sPE(n), 10, MPI_COMM_FESOM, & - status, MPIerr) - call MPI_TYPE_SIZE(s_mpitype(n), rdebug, MPIerr) - if (sdebug /= rdebug) then - print *, "Mismatching MPI send/recieve message lengths." - print *,"Send/receive process numbers: ", mype, '/', sPE(n) - print *,"Number of send/receive bytes: ", sdebug, '/', rdebug - call MPI_ABORT( MPI_COMM_FESOM, 1 ) - end if - END DO - CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) - - END SUBROUTINE check_mpi_comm +! General version of the communication routine for 2D nodal fields +! Only needed in debug mode +subroutine check_mpi_comm(rn, sn, r_mpitype, s_mpitype, rPE, sPE, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(in) :: sn, rn, r_mpitype(:), s_mpitype(:), rPE(:), sPE(:) +integer :: n, sdebug, rdebug, status(MPI_STATUS_SIZE), request +#include "associate_part_def.h" +#include "associate_part_ass.h" +DO n=1,rn + CALL MPI_TYPE_SIZE(r_mpitype(n), rdebug, MPIerr) + CALL MPI_ISEND(rdebug, 1, MPI_INTEGER, rPE(n), 10, MPI_COMM_FESOM, request, MPIerr) +END DO +DO n=1, sn + call MPI_RECV(sdebug, 1, MPI_INTEGER, sPE(n), 10, MPI_COMM_FESOM, & + status, MPIerr) + call MPI_TYPE_SIZE(s_mpitype(n), rdebug, MPIerr) + if (sdebug /= rdebug) then + print *, "Mismatching MPI send/recieve message lengths." + print *,"Send/receive process numbers: ", mype, '/', sPE(n) + print *,"Number of send/receive bytes: ", sdebug, '/', rdebug + call MPI_ABORT( MPI_COMM_FESOM, 1 ) + end if +END DO +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +END SUBROUTINE check_mpi_comm #endif -subroutine exchange_nod2D_i(nod_array2D) - -USE g_PARSUP +subroutine exchange_nod2D_i(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - - integer, intent(inout) :: nod_array2D(:) - - if (npes > 1) then - call exchange_nod2D_i_begin(nod_array2D) - call exchange_nod_end +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: nod_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" +if (npes > 1) then + call exchange_nod2D_i_begin(nod_array2D, partit) + call exchange_nod_end(partit) endif END SUBROUTINE exchange_nod2D_i !============================================================================= - -subroutine exchange_nod2D_i_begin(nod_array2D) - USE g_PARSUP - IMPLICIT NONE - - ! General version of the communication routine for 2D nodal fields - - integer, intent(inout) :: nod_array2D(:) - integer :: n, sn, rn +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_i_begin(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: nod_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then @@ -108,32 +109,34 @@ subroutine exchange_nod2D_i_begin(nod_array2D) END SUBROUTINE exchange_nod2D_i_begin ! ======================================================================== -subroutine exchange_nod2D(nod_array2D) - -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod_array2D(:) +subroutine exchange_nod2D(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then - call exchange_nod2D_begin(nod_array2D) - call exchange_nod_end + call exchange_nod2D_begin(nod_array2D, partit) + call exchange_nod_end(partit) end if END SUBROUTINE exchange_nod2D ! ======================================================================== -subroutine exchange_nod2D_begin(nod_array2D) - USE g_PARSUP - IMPLICIT NONE - - ! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod_array2D(:) - - integer :: n, sn, rn +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_begin(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then @@ -161,36 +164,39 @@ subroutine exchange_nod2D_begin(nod_array2D) END SUBROUTINE exchange_nod2D_begin !=============================================== -subroutine exchange_nod2D_2fields(nod1_array2D, nod2_array2D) - -USE g_PARSUP +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_2fields(nod1_array2D, nod2_array2D, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" -! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod1_array2D(:) - real(real64), intent(inout) :: nod2_array2D(:) if (npes > 1) then - call exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D) - call exchange_nod_end + call exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D, partit) + call exchange_nod_end(partit) end if END SUBROUTINE exchange_nod2D_2fields ! ======================================================================== -subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D) -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod1_array2D(:) - real(real64), intent(inout) :: nod2_array2D(:) - - integer :: n, sn, rn +subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" - if (npes > 1) then +if (npes > 1) then sn=com_nod2D%sPEnum rn=com_nod2D%rPEnum @@ -223,37 +229,39 @@ subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D) END SUBROUTINE exchange_nod2D_2fields_begin !=============================================== -subroutine exchange_nod2D_3fields(nod1_array2D, nod2_array2D, nod3_array2D) - -USE g_PARSUP +subroutine exchange_nod2D_3fields(nod1_array2D, nod2_array2D, nod3_array2D, partit) +! General version of the communication routine for 2D nodal fields +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +real(real64), intent(inout) :: nod3_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" -! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod1_array2D(:) - real(real64), intent(inout) :: nod2_array2D(:) - real(real64), intent(inout) :: nod3_array2D(:) if (npes > 1) then - call exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D) - call exchange_nod_end + call exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D, partit) + call exchange_nod_end(partit) end if END SUBROUTINE exchange_nod2D_3fields ! ======================================================================== -subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D) -USE g_PARSUP -IMPLICIT NONE - +subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D, partit) ! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod1_array2D(:) - real(real64), intent(inout) :: nod2_array2D(:) - real(real64), intent(inout) :: nod3_array2D(:) - - - integer :: n, sn, rn +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +real(real64), intent(inout) :: nod3_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then @@ -294,33 +302,35 @@ subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D END SUBROUTINE exchange_nod2D_3fields_begin ! ======================================================================== -subroutine exchange_nod3D(nod_array3D) - -USE g_PARSUP -IMPLICIT NONE - -real(real64), intent(inout) :: nod_array3D(:,:) ! General version of the communication routine for 3D nodal fields ! stored in (vertical, horizontal) format - -if (npes > 1) then - call exchange_nod3D_begin(nod_array3D) - call exchange_nod_end +subroutine exchange_nod3D(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:) + +if (partit%npes > 1) then + call exchange_nod3D_begin(nod_array3D, partit) + call exchange_nod_end(partit) endif + END SUBROUTINE exchange_nod3D ! ======================================================================== -subroutine exchange_nod3D_begin(nod_array3D) -USE g_PARSUP -IMPLICIT NONE - - -real(real64), intent(inout) :: nod_array3D(:,:) ! General version of the communication routine for 3D nodal fields ! stored in (vertical, horizontal) format - - integer :: n, sn, rn - integer :: nz, nl1 +subroutine exchange_nod3D_begin(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:) +integer :: n, sn, rn +integer :: nz, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then sn=com_nod2D%sPEnum @@ -333,7 +343,7 @@ subroutine exchange_nod3D_begin(nod_array3D) print *,'Subroutine exchange_nod3D not implemented for',nl1,'layers.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(1) + call par_ex(partit, 1) endif ! Check MPI point-to-point communication for consistency @@ -341,52 +351,52 @@ subroutine exchange_nod3D_begin(nod_array3D) call check_mpi_comm(rn, sn, r_mpitype_nod3D(:,nl1,1), s_mpitype_nod3D(:,nl1,1), & com_nod2D%rPE, com_nod2D%sPE) #endif - DO n=1,rn call MPI_IRECV(nod_array3D, 1, r_mpitype_nod3D(n,nl1,1), com_nod2D%rPE(n), & com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(n), MPIerr) END DO - DO n=1, sn call MPI_ISEND(nod_array3D, 1, s_mpitype_nod3D(n,nl1,1), com_nod2D%sPE(n), & mype, MPI_COMM_FESOM, com_nod2D%req(rn+n), MPIerr) END DO - com_nod2D%nreq = rn+sn endif END SUBROUTINE exchange_nod3D_begin ! ======================================================================== -subroutine exchange_nod3D_2fields(nod1_array3D,nod2_array3D) - -USE g_PARSUP -IMPLICIT NONE - -real(real64), intent(inout) :: nod1_array3D(:,:) -real(real64), intent(inout) :: nod2_array3D(:,:) ! General version of the communication routine for 3D nodal fields ! stored in (vertical, horizontal) format - +subroutine exchange_nod3D_2fields(nod1_array3D,nod2_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array3D(:,:) +real(real64), intent(inout) :: nod2_array3D(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + if (npes > 1) then - call exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D) - call exchange_nod_end + call exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) + call exchange_nod_end(partit) endif END SUBROUTINE exchange_nod3D_2fields ! ======================================================================== -subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D) -USE g_PARSUP -IMPLICIT NONE - - -real(real64), intent(inout) :: nod1_array3D(:,:) -real(real64), intent(inout) :: nod2_array3D(:,:) +subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) ! General version of the communication routine for 3D nodal fields ! stored in (vertical, horizontal) format - - integer :: n, sn, rn - integer :: nz, nl1, nl2 +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array3D(:,:) +real(real64), intent(inout) :: nod2_array3D(:,:) +integer :: n, sn, rn +integer :: nz, nl1, nl2 +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then sn=com_nod2D%sPEnum @@ -437,39 +447,37 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D) endif END SUBROUTINE exchange_nod3D_2fields_begin ! ======================================================================== -subroutine exchange_nod3D_n(nod_array3D) -USE g_PARSUP +subroutine exchange_nod3D_n(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - -real(real64), intent(inout) :: nod_array3D(:,:,:) - -if (npes>1) then - call exchange_nod3D_n_begin(nod_array3D) - call exchange_nod_end +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:,:) +if (partit%npes>1) then + call exchange_nod3D_n_begin(nod_array3D, partit) + call exchange_nod_end(partit) endif END SUBROUTINE exchange_nod3D_n !================================================= - -subroutine exchange_nod3D_n_begin(nod_array3D) -USE g_PARSUP -IMPLICIT NONE - -real(real64), intent(inout) :: nod_array3D(:,:,:) ! General version of the communication routine for 3D nodal fields ! stored in (vertical, horizontal) format - - integer :: n, sn, rn - integer :: nz, nl1, n_val - +subroutine exchange_nod3D_n_begin(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:,:) +integer :: n, sn, rn +integer :: nz, nl1, n_val +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes>1) then ! nod_array3D(n_val,nl1,nod2D_size) - nl1= ubound(nod_array3D,2) + nl1 = ubound(nod_array3D,2) n_val = ubound(nod_array3D,1) - if ((nl1ubound(r_mpitype_nod3D, 2)) .or. (n_val > 3)) then - ! This routine also works for swapped dimensions nod_array3D(nl1,n_val, nod2D_size) nl1 = ubound(nod_array3D,1) n_val = ubound(nod_array3D,2) @@ -513,17 +521,24 @@ END SUBROUTINE exchange_nod3D_n_begin ! AND WAITING !======================================= -SUBROUTINE exchange_nod_end - USE g_PARSUP +SUBROUTINE exchange_nod_end(partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit -if (npes > 1) & - call MPI_WAITALL(com_nod2D%nreq, com_nod2D%req, MPI_STATUSES_IGNORE, MPIerr) +if (partit%npes > 1) & + call MPI_WAITALL(partit%com_nod2D%nreq, partit%com_nod2D%req, MPI_STATUSES_IGNORE, partit%MPIerr) END SUBROUTINE exchange_nod_end -SUBROUTINE exchange_elem_end - - USE g_PARSUP +SUBROUTINE exchange_elem_end(partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then if (elem_full_flag) then @@ -535,129 +550,32 @@ SUBROUTINE exchange_elem_end endif end if END SUBROUTINE exchange_elem_end -! ======================================================================== - -!nr Not used, no MPI datatype built (yet) -! -!!$subroutine exchange_edge3D(edge_array3D) -!!$ use g_PARSUP -!!$ implicit none -!!$ -!!$ ! Communication of edge based data stored in (vertical, horizontal) format -!!$ -!!$ INTEGER :: sreq(maxPEnum) -!!$ INTEGER :: rreq(maxPEnum) -!!$ INTEGER :: sstat(MPI_STATUS_SIZE,maxPEnum) -!!$ INTEGER :: rstat(MPI_STATUS_SIZE,maxPEnum) -!!$ integer :: n, sn, rn, dest, nini, nend, offset, source,tag -!!$ integer :: nz, nh, nc -!!$ real(real64) :: edge_array3D(nl-1,edge2D) -!!$ -!!$ sn=com_edge2D%sPEnum -!!$ rn=com_edge2D%rPEnum -!!$ ! Put data to be communicated into send buffer -!!$ -!!$ -!!$ do n=1, sn -!!$ nini=com_edge2D%sptr(n) -!!$ nend=com_edge2D%sptr(n+1) - 1 -!!$ nc=0 -!!$ DO nh=nini, nend -!!$ DO nz=1, nl-1 -!!$ nc=nc+1 -!!$ s_buff_edge3D(n)%array(nc)=edge_array3D(nz,com_edge2D%slist(nh)) -!!$ END DO -!!$ END DO -!!$ end do -!!$ -!!$ -!!$ do n=1, sn -!!$ dest=com_edge2D%sPE(n) -!!$ nini=com_edge2D%sptr(n) -!!$ offset=(com_edge2D%sptr(n+1) - nini)*(nl-1) -!!$ -!!$ call MPI_ISEND(s_buff_edge3D(n)%array, offset, MPI_DOUBLE_PRECISION, dest, mype, & -!!$ MPI_COMM_FESOM, sreq(n), MPIerr) -!!$ end do -!!$ do n=1, rn -!!$ source=com_edge2D%rPE(n) -!!$ nini=com_edge2D%rptr(n) -!!$ offset=(com_edge2D%rptr(n+1) - nini)*(nl-1) -!!$ -!!$ call MPI_IRECV(r_buff_edge3D(n)%array, offset, MPI_DOUBLE_PRECISION, source, & -!!$ source, MPI_COMM_FESOM, rreq(n), MPIerr) -!!$ end do -!!$ -!!$ call MPI_WAITALL(sn,sreq,sstat, MPIerr) -!!$ call MPI_WAITALL(rn,rreq,rstat, MPIerr) -!!$ -!!$ ! Put received data to their destination -!!$ -!!$ do n=1, rn -!!$ nini=com_edge2D%rptr(n) -!!$ nend=com_edge2D%rptr(n+1) - 1 -!!$ nc=0 -!!$ DO nh=nini, nend -!!$ DO nz=1, nl-1 -!!$ nc=nc+1 -!!$ edge_array3D(nz,com_edge2D%rlist(nh))=r_buff_edge3D(n)%array(nc) -!!$ END DO -!!$ END DO -!!$ end do -!!$ -!!$end subroutine exchange_edge3D -!========================================================================== - -!!$subroutine exchange_edge2D(edge_array2D) -!!$ use g_PARSUP -!!$ implicit none -!!$ -!!$! General version of the communication routine for 2D edge fields -!!$! This routine is not split, it is used only once during setup. -!!$ real(real64), intent(inout) :: edge_array2D(:) -!!$ -!!$ integer :: n, sn, rn -!!$ -!!$ if (npes> 1) then -!!$ sn=com_edge2D%sPEnum -!!$ rn=com_edge2D%rPEnum -!!$ -!!$ DO n=1,rn -!!$ call MPI_IRECV(edge_array2D, 1, r_mpitype_edge2D(n), com_edge2D%rPE(n), & -!!$ com_edge2D%rPE(n), MPI_COMM_FESOM, com_edge2D%req(n), MPIerr) -!!$ END DO -!!$ DO n=1, sn -!!$ call MPI_ISEND(edge_array2D, 1, s_mpitype_edge2D(n), com_edge2D%sPE(n), & -!!$ mype, MPI_COMM_FESOM, com_edge2D%req(rn+n), MPIerr) -!!$ END DO -!!$ -!!$ call MPI_WAITALL(rn+sn,com_edge2D%req,MPI_STATUSES_IGNORE, MPIerr) -!!$ -!!$ endif -!!$ -!!$end subroutine exchange_edge2D !============================================================================= -subroutine exchange_elem3D(elem_array3D) - -USE g_PARSUP +subroutine exchange_elem3D(elem_array3D, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" - real(real64), intent(inout) :: elem_array3D(:,:) - - call exchange_elem3D_begin(elem_array3D) - call exchange_elem_end +call exchange_elem3D_begin(elem_array3D, partit) +call exchange_elem_end(partit) END SUBROUTINE exchange_elem3D !=========================================== -subroutine exchange_elem3D_begin(elem_array3D) -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 3D elemental fields ! stored in (vertical, horizontal) format - -real(real64), intent(inout) :: elem_array3D(:,:) -integer :: n, sn, rn, nl1 +subroutine exchange_elem3D_begin(elem_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:) +integer :: n, sn, rn, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then @@ -775,30 +693,34 @@ subroutine exchange_elem3D_begin(elem_array3D) END SUBROUTINE exchange_elem3D_begin !============================================================================= -subroutine exchange_elem3D_n(elem_array3D) -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 3D elemental fields ! stored in (vertical, horizontal) format - - real(real64), intent(inout) :: elem_array3D(:,:,:) +subroutine exchange_elem3D_n(elem_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then - call exchange_elem3D_n_begin(elem_array3D) - call exchange_elem_end + call exchange_elem3D_n_begin(elem_array3D, partit) + call exchange_elem_end(partit) endif END SUBROUTINE exchange_elem3D_n !============================================================================= -subroutine exchange_elem3D_n_begin(elem_array3D) -USE g_PARSUP -IMPLICIT NONE - +subroutine exchange_elem3D_n_begin(elem_array3D, partit) ! General version of the communication routine for 3D elemental fields ! stored in (vertical, horizontal) format - - real(real64), intent(inout) :: elem_array3D(:,:,:) - integer :: n, sn, rn, n_val, nl1 +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:,:) +integer :: n, sn, rn, n_val, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then nl1 = ubound(elem_array3D,2) @@ -874,33 +796,37 @@ subroutine exchange_elem3D_n_begin(elem_array3D) endif END SUBROUTINE exchange_elem3D_n_begin !======================================================================== -subroutine exchange_elem2D(elem_array2D) -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 3D elemental fields ! stored in (vertical, horizontal) format - - real(real64), intent(inout) :: elem_array2D(:) +subroutine exchange_elem2D(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then - call exchange_elem2D_begin(elem_array2D) - call exchange_elem_end + call exchange_elem2D_begin(elem_array2D, partit) + call exchange_elem_end(partit) end if END SUBROUTINE exchange_elem2D !======================================================================== -subroutine exchange_elem2D_begin(elem_array2D) -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 3D elemental fields ! stored in (vertical, horizontal) format - - real(real64), intent(inout) :: elem_array2D(:) - integer :: n, sn, rn +subroutine exchange_elem2D_begin(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" - if (npes> 1) then +if (npes> 1) then if (ubound(elem_array2D,1)<=myDim_elem2D+eDim_elem2D) then @@ -955,30 +881,34 @@ subroutine exchange_elem2D_begin(elem_array2D) END SUBROUTINE exchange_elem2D_begin ! ======================================================================== -subroutine exchange_elem2D_i(elem_array2D) !Exchange with ALL(!) the neighbours -USE g_PARSUP +subroutine exchange_elem2D_i(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - - integer, intent(inout) :: elem_array2D(:) - - integer :: n, sn, rn +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then - call exchange_elem2D_i_begin(elem_array2D) - call exchange_elem_end + call exchange_elem2D_i_begin(elem_array2D, partit) + call exchange_elem_end(partit) end if END SUBROUTINE exchange_elem2D_i !============================================================================= -subroutine exchange_elem2D_i_begin(elem_array2D) !Exchange with ALL(!) the neighbours -USE g_PARSUP +subroutine exchange_elem2D_i_begin(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - - integer, intent(inout) :: elem_array2D(:) - - integer :: n, sn, rn +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then @@ -1009,27 +939,25 @@ subroutine exchange_elem2D_i_begin(elem_array2D) end if END SUBROUTINE exchange_elem2D_i_begin -!============================================================================= - - - ! ======================================================================== ! Broadcast routines ! Many because of different sizes. ! ======================================================================== -subroutine broadcast_nod3D(arr3D, arr3Dglobal) +subroutine broadcast_nod3D(arr3D, arr3Dglobal, partit) ! Distribute the nodal information available on 0 PE to other PEs -use g_PARSUP - +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - -INTEGER :: nz, counter,nl1 -integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf -real(real64) :: arr3D(:,:) -real(real64) :: arr3Dglobal(:,:) +type(t_partit), intent(inout), target :: partit +INTEGER :: nz, counter,nl1 +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr3D(:,:) +real(real64) :: arr3Dglobal(:,:) +integer :: node_size +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf, recvbuf -integer :: node_size +#include "associate_part_def.h" +#include "associate_part_ass.h" node_size=myDim_nod2D+eDim_nod2D nl1=ubound(arr3D,1) @@ -1081,18 +1009,20 @@ end subroutine broadcast_nod3D ! !============================================================================ ! -subroutine broadcast_nod2D(arr2D, arr2Dglobal) +subroutine broadcast_nod2D(arr2D, arr2Dglobal, partit) ! A 2D version of the previous routine -use g_PARSUP +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - -real(real64) :: arr2D(:) -real(real64) :: arr2Dglobal(:) - -integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf -real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf -integer :: node_size +type(t_partit), intent(in), target :: partit +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf +integer :: node_size +#include "associate_part_def.h" +#include "associate_part_ass.h" node_size=myDim_nod2D+eDim_nod2D @@ -1129,19 +1059,22 @@ end subroutine broadcast_nod2D ! !============================================================================ ! -subroutine broadcast_elem3D(arr3D, arr3Dglobal) +subroutine broadcast_elem3D(arr3D, arr3Dglobal, partit) ! Distribute the elemental information available on 0 PE to other PEs -use g_PARSUP - +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - -INTEGER :: nz, counter,nl1 -integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf -real(real64) :: arr3D(:,:) -real(real64) :: arr3Dglobal(:,:) +type(t_partit), intent(in), target :: partit +INTEGER :: nz, counter,nl1 +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr3D(:,:) +real(real64) :: arr3Dglobal(:,:) +integer :: elem_size + +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf, recvbuf -integer :: elem_size +#include "associate_part_def.h" +#include "associate_part_ass.h" elem_size=myDim_elem2D+eDim_elem2D @@ -1194,23 +1127,23 @@ end subroutine broadcast_elem3D ! !============================================================================ ! -subroutine broadcast_elem2D(arr2D, arr2Dglobal) +subroutine broadcast_elem2D(arr2D, arr2Dglobal, partit) ! A 2D version of the previous routine -use g_PARSUP +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - -integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf - -real(real64) :: arr2D(:) -real(real64) :: arr2Dglobal(:) -real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf -integer :: elem_size +type(t_partit), intent(in), target :: partit +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: elem_size +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" elem_size=myDim_elem2D+eDim_elem2D - - IF ( mype == 0 ) THEN if (npes>1) then arr2D(1:elem_size)=arr2Dglobal(myList_elem2D(1:elem_size)) @@ -1243,28 +1176,24 @@ subroutine broadcast_elem2D(arr2D, arr2Dglobal) end subroutine broadcast_elem2D ! !============================================================================ -! -subroutine gather_nod3D(arr3D, arr3D_global) - ! Make nodal information available to master PE -! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP - - +subroutine gather_nod3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real64) :: arr3D_global(:,:) +real(real64), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -real(real64) :: arr3D(:,:) -real(real64) :: arr3D_global(:,:) -real(real64), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, n3D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1306,27 +1235,26 @@ end subroutine gather_nod3D ! !============================================================================ ! -subroutine gather_real4_nod3D(arr3D, arr3D_global) +subroutine gather_real4_nod3D(arr3D, arr3D_global, partit) ! Make nodal information available to master PE ! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP - - +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real32) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +real(real32), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -real(real32) :: arr3D(:,:) -real(real32) :: arr3D_global(:,:) -real(real32), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, n3D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1367,26 +1295,27 @@ subroutine gather_real4_nod3D(arr3D, arr3D_global) end subroutine gather_real4_nod3D !======================================================= -subroutine gather_int2_nod3D(arr3D, arr3D_global) +subroutine gather_int2_nod3D(arr3D, arr3D_global, partit) ! Make nodal information available to master PE ! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP - +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +integer(int16) :: arr3D(:,:) +integer(int16) :: arr3D_global(:,:) +integer(int16), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -integer(int16) :: arr3D(:,:) -integer(int16) :: arr3D_global(:,:) -integer(int16), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, n3D - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1426,23 +1355,22 @@ subroutine gather_int2_nod3D(arr3D, arr3D_global) end if end subroutine gather_int2_nod3D !============================================== -subroutine gather_nod2D(arr2D, arr2D_global) - +subroutine gather_nod2D(arr2D, arr2D_global, partit) ! Make nodal information available to master PE - -use g_PARSUP - +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real64) :: arr2D_global(:) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -real(real64) :: arr2D(:) -real(real64) :: arr2D_global(:) -real(real64), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, n2D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) @@ -1480,23 +1408,22 @@ subroutine gather_nod2D(arr2D, arr2D_global) endif end subroutine gather_nod2D !============================================== -subroutine gather_real4_nod2D(arr2D, arr2D_global) - +subroutine gather_real4_nod2D(arr2D, arr2D_global, partit) ! Make nodal information available to master PE - -use g_PARSUP - +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real32) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -real(real32) :: arr2D(:) -real(real32) :: arr2D_global(:) -real(real32), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, n2D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) @@ -1535,23 +1462,22 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global) end subroutine gather_real4_nod2D !============================================== -subroutine gather_int2_nod2D(arr2D, arr2D_global) - -! Make nodal information available to master PE - -use g_PARSUP - +! Make nodal information available to master PE +subroutine gather_int2_nod2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer(int16) :: arr2D(:) +integer(int16) :: arr2D_global(:) +integer(int16), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -integer(int16) :: arr2D(:) -integer(int16) :: arr2D_global(:) -integer(int16), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, n2D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) @@ -1590,28 +1516,26 @@ subroutine gather_int2_nod2D(arr2D, arr2D_global) end subroutine gather_int2_nod2D !============================================================================ -subroutine gather_elem3D(arr3D, arr3D_global) - +subroutine gather_elem3D(arr3D, arr3D_global, partit) ! Make element information available to master PE ! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP - - +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real64) :: arr3D_global(:,:) +real(real64), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -real(real64) :: arr3D(:,:) -real(real64) :: arr3D_global(:,:) -real(real64), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, e3D, ende, err_alloc -integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1656,29 +1580,25 @@ subroutine gather_elem3D(arr3D, arr3D_global) end subroutine gather_elem3D !=================================================================== - -subroutine gather_real4_elem3D(arr3D, arr3D_global) - ! Make element information available to master PE -! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP - - +subroutine gather_real4_elem3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real32) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +real(real32), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -real(real32) :: arr3D(:,:) -real(real32) :: arr3D_global(:,:) -real(real32), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, e3D, ende, err_alloc -integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1724,29 +1644,25 @@ end subroutine gather_real4_elem3D !=================================================================== - -subroutine gather_int2_elem3D(arr3D, arr3D_global) - ! Make element information available to master PE -! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP - - +subroutine gather_int2_elem3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +integer(int16) :: arr3D(:,:) +integer(int16) :: arr3D_global(:,:) +integer(int16), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -integer(int16) :: arr3D(:,:) -integer(int16) :: arr3D_global(:,:) -integer(int16), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, e3D, ende, err_alloc -integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1792,24 +1708,22 @@ end subroutine gather_int2_elem3D !============================================== -subroutine gather_elem2D(arr2D, arr2D_global) - ! Make element information available to master PE - -use g_PARSUP - +subroutine gather_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real64) :: arr2D_global(:) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -real(real64) :: arr2D(:) -real(real64) :: arr2D_global(:) -real(real64), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, e2D - - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) ! Consider MPI-datatypes to recv directly into arr2D_global! @@ -1850,25 +1764,24 @@ subroutine gather_elem2D(arr2D, arr2D_global) end subroutine gather_elem2D -!============================================== -subroutine gather_real4_elem2D(arr2D, arr2D_global) - +!================================================ ! Make element information available to master PE - -use g_PARSUP - +subroutine gather_real4_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real32) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -real(real32) :: arr2D(:) -real(real32) :: arr2D_global(:) -real(real32), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, e2D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) ! Consider MPI-datatypes to recv directly into arr2D_global! @@ -1909,25 +1822,23 @@ subroutine gather_real4_elem2D(arr2D, arr2D_global) end subroutine gather_real4_elem2D -!============================================== -subroutine gather_int2_elem2D(arr2D, arr2D_global) - +!================================================ ! Make element information available to master PE - -use g_PARSUP - +subroutine gather_int2_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer(int16) :: arr2D(:) +integer(int16) :: arr2D_global(:) +integer(int16), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -integer(int16) :: arr2D(:) -integer(int16) :: arr2D_global(:) -integer(int16), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, e2D - - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) ! Consider MPI-datatypes to recv directly into arr2D_global! @@ -1970,28 +1881,25 @@ end subroutine gather_int2_elem2D !============================================================================ -subroutine gather_real8to4_nod3D(arr3D, arr3D_global) - ! Make nodal information available to master PE -! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP - - +subroutine gather_real8to4_nod3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D, ierr +real(real32), allocatable :: recvbuf(:,:) +real(real32), allocatable :: sendbuf(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -real(real64) :: arr3D(:,:) -real(real32) :: arr3D_global(:,:) -real(real32), allocatable :: recvbuf(:,:) -real(real32), allocatable :: sendbuf(:,:) -integer :: req(npes-1) -integer :: start, n3D, ierr - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -2036,22 +1944,21 @@ subroutine gather_real8to4_nod3D(arr3D, arr3D_global) end subroutine gather_real8to4_nod3D !============================================== -subroutine gather_real8to4_nod2D(arr2D, arr2D_global) - ! Make nodal information available to master PE - -use g_PARSUP - +subroutine gather_real8to4_nod2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - -integer :: n - -real(real64) :: arr2D(:) -real(real32) :: arr2D_global(:) -real(real32) :: sendbuf(myDim_nod2D) -real(real64), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, n2D +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32) :: sendbuf(partit%myDim_nod2D) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" ! Consider MPI-datatypes to recv directly into arr2D_global! @@ -2089,31 +1996,27 @@ subroutine gather_real8to4_nod2D(arr2D, arr2D_global) end if end subroutine gather_real8to4_nod2D -!============================================== !============================================================================ -subroutine gather_real8to4_elem3D(arr3D, arr3D_global) - +subroutine gather_real8to4_elem3D(arr3D, arr3D_global, partit) ! Make element information available to master PE -! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP - - +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D +real(real32), allocatable :: recvbuf(:,:) +real(real32), allocatable :: sendbuf(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n -real(real64) :: arr3D(:,:) -real(real32) :: arr3D_global(:,:) -real(real32), allocatable :: recvbuf(:,:) -real(real32), allocatable :: sendbuf(:,:) -integer :: req(npes-1) -integer :: start, e3D - - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -2153,26 +2056,24 @@ subroutine gather_real8to4_elem3D(arr3D, arr3D_global) end if end subroutine gather_real8to4_elem3D -!============================================== -subroutine gather_real8to4_elem2D(arr2D, arr2D_global) - +!================================================ ! Make element information available to master PE - -use g_PARSUP - +subroutine gather_real8to4_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +real(real32) :: sendbuf(partit%myDim_elem2D) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -real(real64) :: arr2D(:) -real(real32) :: arr2D_global(:) -real(real32), allocatable :: recvbuf(:) -real(real32) :: sendbuf(myDim_elem2D) -integer :: req(npes-1) -integer :: start, e2D - - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) ! Consider MPI-datatypes to recv directly into arr2D_global! @@ -2213,18 +2114,23 @@ subroutine gather_real8to4_elem2D(arr2D, arr2D_global) end if end subroutine gather_real8to4_elem2D !============================================== -subroutine gather_elem2D_i(arr2D, arr2D_global) +subroutine gather_elem2D_i(arr2D, arr2D_global, partit) ! Make element information available to master PE - use g_PARSUP - IMPLICIT NONE - - integer :: n - integer :: arr2D(:) - integer :: arr2D_global(:) - integer, allocatable :: recvbuf(:) - integer :: req(npes-1) - integer :: start, e2D - CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer :: arr2D(:) +integer :: arr2D_global(:) +integer, allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) ! Consider MPI-datatypes to recv directly into arr2D_global! IF ( mype == 0 ) THEN if (npes > 1) then @@ -2246,21 +2152,21 @@ subroutine gather_elem2D_i(arr2D, arr2D_global) call MPI_SEND(arr2D, myDim_elem2D, MPI_INTEGER, 0, 2, MPI_COMM_FESOM, MPIerr ) ENDIF end subroutine gather_elem2D_i -!============================================================================ -subroutine gather_nod2D_i(arr2D, arr2D_global) - -! Make nodal information available to master PE - -use g_PARSUP - +!============================================== +! Make nodal information available to master PE +subroutine gather_nod2D_i(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - +type(t_partit), intent(inout), target :: partit integer :: n integer :: arr2D(:) integer :: arr2D_global(:) integer, allocatable :: recvbuf(:) -integer :: req(npes-1) +integer :: req(partit%npes-1) integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then @@ -2300,18 +2206,19 @@ subroutine gather_nod2D_i(arr2D, arr2D_global) endif end subroutine gather_nod2D_i !============================================================================ -! -subroutine gather_edg2D(arr2D, arr2Dglobal) ! A 2D version of the previous routine -use g_PARSUP +subroutine gather_edg2D(arr2D, arr2Dglobal, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - -real(real64) :: arr2D(:) -real(real64) :: arr2Dglobal(:) - -integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf +type(t_partit), intent(in), target :: partit +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf REAL(real64), ALLOCATABLE, DIMENSION(:) :: rbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" IF ( mype == 0 ) THEN arr2Dglobal(myList_edge2D(1:myDim_edge2D))=arr2D(1:myDim_edge2D) @@ -2340,17 +2247,18 @@ subroutine gather_edg2D(arr2D, arr2Dglobal) end subroutine gather_edg2D ! !============================================================================ -! -subroutine gather_edg2D_i(arr2D, arr2Dglobal) ! A 2D version of the previous routine -use g_PARSUP +subroutine gather_edg2D_i(arr2D, arr2Dglobal, partit) +use MOD_MESH +use MOD_PARTIT IMPLICIT NONE - -integer :: arr2D(:) -integer :: arr2Dglobal(:) - -integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf, vbuf +type(t_partit), intent(inout), target :: partit +integer :: arr2D(:) +integer :: arr2Dglobal(:) +integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf, vbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" IF ( mype == 0 ) THEN arr2Dglobal(myList_edge2D(1:myDim_edge2D))=arr2D(1:myDim_edge2D) diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index dc1fd43c3..3dc343132 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -13,9 +13,9 @@ MODULE g_ic3d !! USE o_ARRAYS USE MOD_MESH + USE MOD_PARTIT USE MOD_TRACER USE o_PARAM - USE g_PARSUP USE g_comm_auto USE g_support USE g_config, only: dummy, ClimateDataPath, use_cavity @@ -64,33 +64,33 @@ MODULE g_ic3d !============== NETCDF ========================================== CONTAINS - SUBROUTINE nc_readGrid + SUBROUTINE nc_readGrid(partit) ! Read time array and grid from nc file IMPLICIT NONE - - integer :: iost !I/O status - integer :: ncid ! netcdf file id - integer :: i + type(t_partit), intent(in) :: partit + integer :: iost !I/O status + integer :: ncid ! netcdf file id + integer :: i ! ID dimensions and variables: - integer :: id_lon - integer :: id_lat - integer :: id_lond - integer :: id_latd - integer :: id_depth - integer :: id_depthd - integer :: nf_start(4) - integer :: nf_edges(4) - integer :: ierror ! return error code + integer :: id_lon + integer :: id_lat + integer :: id_lond + integer :: id_latd + integer :: id_depth + integer :: id_depthd + integer :: nf_start(4) + integer :: nf_edges(4) + integer :: ierror ! return error code !open file - if (mype==0) then + if (partit%mype==0) then iost = nf_open(trim(filename),NF_NOWRITE,ncid) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) ! get dimensions - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "LAT", id_latd) if (iost .ne. NF_NOERR) then iost = nf_inq_dimid(ncid, "lat", id_latd) @@ -99,9 +99,9 @@ SUBROUTINE nc_readGrid iost = nf_inq_dimid(ncid, "latitude", id_latd) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "LON", id_lond) if (iost .ne. NF_NOERR) then iost = nf_inq_dimid(ncid, "longitude", id_lond) @@ -110,18 +110,18 @@ SUBROUTINE nc_readGrid iost = nf_inq_dimid(ncid, "lon", id_lond) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "depth", id_depthd) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) ! get variable id - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_varid(ncid, "LAT", id_lat) if (iost .ne. NF_NOERR) then iost = nf_inq_varid(ncid, "lat", id_lat) @@ -130,7 +130,7 @@ SUBROUTINE nc_readGrid iost = nf_inq_varid(ncid, "latitude", id_lat) end if end if - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_varid(ncid, "LON", id_lon) if (iost .ne. NF_NOERR) then iost = nf_inq_varid(ncid, "longitude", id_lon) @@ -139,75 +139,75 @@ SUBROUTINE nc_readGrid iost = nf_inq_varid(ncid, "lon", id_lon) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_varid(ncid, "depth", id_depth) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) ! get dimensions size - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_latd, nc_Nlat) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_lond, nc_Nlon) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_depthd, nc_Ndepth) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) nc_Nlon=nc_Nlon+2 !for the halo in case of periodic boundary - call MPI_BCast(nc_Nlon, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(nc_Nlat, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(nc_Ndepth, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_Nlon, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_Nlat, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_Ndepth, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) ALLOCATE( nc_lon(nc_Nlon), nc_lat(nc_Nlat),& & nc_depth(nc_Ndepth)) !read variables from file ! coordinates - if (mype==0) then + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=nc_Nlat iost = nf_get_vara_double(ncid, id_lat, nf_start, nf_edges, nc_lat) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=nc_Nlon-2 iost = nf_get_vara_double(ncid, id_lon, nf_start, nf_edges, nc_lon(2:nc_Nlon-1)) nc_lon(1) =nc_lon(nc_Nlon-1) nc_lon(nc_Nlon) =nc_lon(2) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) ! depth - if (mype==0) then + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=nc_Ndepth iost = nf_get_vara_double(ncid, id_depth, nf_start, nf_edges,nc_depth) if (nc_depth(2) < 0.) nc_depth=-nc_depth end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) - call MPI_BCast(nc_lon, nc_Nlon, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(nc_lat, nc_Nlat, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(nc_depth, nc_Ndepth, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_lon, nc_Nlon, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_lat, nc_Nlat, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_depth, nc_Ndepth, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) - if (mype==0) then + if (partit%mype==0) then iost = nf_close(ncid) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) if (ic_cyclic) then nc_lon(1) =nc_lon(1)-360. @@ -216,18 +216,21 @@ SUBROUTINE nc_readGrid END SUBROUTINE nc_readGrid - SUBROUTINE nc_ic3d_ini(mesh) + SUBROUTINE nc_ic3d_ini(partit, mesh) !!--------------------------------------------------------------------- !! ** Purpose : inizialization of ocean forcing from NETCDF file !!---------------------------------------------------------------------- IMPLICIT NONE - - integer :: i - integer :: elnodes(3) - real(wp) :: x, y ! coordinates of elements + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: i + integer :: elnodes(3) + real(wp) :: x, y ! coordinates of elements real(kind=WP), allocatable,dimension(:,:) :: cav_nrst_xyz - type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" warn = 0 @@ -236,7 +239,7 @@ SUBROUTINE nc_ic3d_ini(mesh) write(*,*) 'variable : ', trim(varname) end if - call nc_readGrid + call nc_readGrid(partit) ! prepare nearest coordinates in INfile , save to bilin_indx_i/j !_________________________________________________________________________ @@ -296,7 +299,7 @@ SUBROUTINE nc_ic3d_ini(mesh) end if END SUBROUTINE nc_ic3d_ini - SUBROUTINE getcoeffld(tracers, mesh) + SUBROUTINE getcoeffld(tracers, partit, mesh) !!--------------------------------------------------------------------- !! *** ROUTINE getcoeffld *** !! @@ -305,27 +308,28 @@ SUBROUTINE getcoeffld(tracers, mesh) !! ** Action : !!---------------------------------------------------------------------- IMPLICIT NONE - - integer :: iost !I/O status - integer :: ncid ! netcdf file id + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer :: iost !I/O status + integer :: ncid ! netcdf file id ! ID dimensions and variables: - integer :: id_data - integer :: nf_start(4) - integer :: nf_edges(4) - integer :: fld_idx, i,j,ii, ip1, jp1, k - integer :: d_indx, d_indx_p1 ! index of neares - real(wp) :: cf_a, cf_b, delta_d - integer :: nl1, ul1 - real(wp) :: denom, x1, x2, y1, y2, x, y, d1,d2, aux_z - - real(wp), allocatable, dimension(:,:,:) :: ncdata - real(wp), allocatable, dimension(:) :: data1d - integer :: elnodes(3) - integer :: ierror ! return error code - - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers -#include "associate_mesh.h" + integer :: id_data + integer :: nf_start(4) + integer :: nf_edges(4) + integer :: fld_idx, i,j,ii, ip1, jp1, k + integer :: d_indx, d_indx_p1 ! index of neares + real(wp) :: cf_a, cf_b, delta_d + integer :: nl1, ul1 + real(wp) :: denom, x1, x2, y1, y2, x, y, d1,d2, aux_z + real(wp), allocatable, dimension(:,:,:) :: ncdata + real(wp), allocatable, dimension(:) :: data1d + integer :: elnodes(3) + integer :: ierror ! return error code +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ALLOCATE(ncdata(nc_Nlon,nc_Nlat,nc_Ndepth), data1d(nc_Ndepth)) ncdata=0.0_WP @@ -336,13 +340,13 @@ SUBROUTINE getcoeffld(tracers, mesh) iost = nf_open(filename,NF_NOWRITE,ncid) end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call check_nferr(iost,filename,partit) ! get variable id if (mype==0) then iost = nf_inq_varid(ncid, varname, id_data) end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call check_nferr(iost,filename,partit) !read data from file if (mype==0) then nf_start(1)=1 @@ -359,7 +363,7 @@ SUBROUTINE getcoeffld(tracers, mesh) end where end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call check_nferr(iost,filename,partit) call MPI_BCast(ncdata, nc_Nlon*nc_Nlat*nc_Ndepth, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) ! bilinear space interpolation, ! data is assumed to be sampled on a regular grid @@ -464,7 +468,7 @@ SUBROUTINE getcoeffld(tracers, mesh) DEALLOCATE( ncdata, data1d ) END SUBROUTINE getcoeffld - SUBROUTINE do_ic3d(tracers, mesh) + SUBROUTINE do_ic3d(tracers, partit, mesh) !!--------------------------------------------------------------------- !! *** ROUTINE do_ic3d *** !! @@ -472,31 +476,31 @@ SUBROUTINE do_ic3d(tracers, mesh) !!---------------------------------------------------------------------- USE insitu2pot_interface IMPLICIT NONE - integer :: n, i - real(kind=WP) :: locTmax, locTmin, locSmax, locSmin, glo - - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer :: n, i + real(kind=WP) :: locTmax, locTmin, locSmax, locSmin, glo - if (mype==0) write(*,*) "Start: Initial conditions for tracers" + if (partit%mype==0) write(*,*) "Start: Initial conditions for tracers" - ALLOCATE(bilin_indx_i(myDim_nod2d+eDim_nod2D), bilin_indx_j(myDim_nod2d+eDim_nod2D)) + ALLOCATE(bilin_indx_i(partit%myDim_nod2d+partit%eDim_nod2D), bilin_indx_j(partit%myDim_nod2d+partit%eDim_nod2D)) DO n=1, n_ic3d filename=trim(ClimateDataPath)//trim(filelist(n)) varname =trim(varlist(n)) DO current_tracer=1, tracers%num_tracers if (tracers%data(current_tracer)%ID==idlist(n)) then ! read initial conditions for current tracer - call nc_ic3d_ini(mesh) + call nc_ic3d_ini(partit, mesh) ! get first coeficients for time inerpolation on model grid for all datas - call getcoeffld(tracers, mesh) + call getcoeffld(tracers, partit, mesh) call nc_end ! deallocate arrqays associated with netcdf file - call extrap_nod(tracers%data(current_tracer)%values(:,:), mesh) + call extrap_nod(tracers%data(current_tracer)%values(:,:), partit, mesh) exit elseif (current_tracer==tracers%num_tracers) then - if (mype==0) write(*,*) "idlist contains tracer which is not listed in tracer_id!" - if (mype==0) write(*,*) "check your namelists!" - call par_ex + if (partit%mype==0) write(*,*) "idlist contains tracer which is not listed in tracer_id!" + if (partit%mype==0) write(*,*) "check your namelists!" + call par_ex(partit) stop end if END DO @@ -513,7 +517,7 @@ SUBROUTINE do_ic3d(tracers, mesh) !_________________________________________________________________________ ! eliminate values within cavity that result from the extrapolation of ! initialisation - do n=1,myDim_nod2d + eDim_nod2D + do n=1,partit%myDim_nod2d + partit%eDim_nod2D ! ensure cavity is zero if (use_cavity) tracers%data(current_tracer)%values(1:mesh%ulevels_nod2D(n)-1,n)=0.0_WP ! ensure bottom is zero @@ -528,46 +532,33 @@ SUBROUTINE do_ic3d(tracers, mesh) !_________________________________________________________________________ if (t_insitu) then - if (mype==0) write(*,*) "converting insitu temperature to potential..." - call insitu2pot(tracers, mesh) + if (partit%mype==0) write(*,*) "converting insitu temperature to potential..." + call insitu2pot(tracers, partit, mesh) end if - if (mype==0) write(*,*) "DONE: Initial conditions for tracers" + if (partit%mype==0) write(*,*) "DONE: Initial conditions for tracers" !_________________________________________________________________________ ! check initial fields locTmax = -6666 locTmin = 6666 locSmax = locTmax locSmin = locTmin - do n=1,myDim_nod2d + do n=1, partit%myDim_nod2d locTmax = max(locTmax,maxval(tracers%data(1)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) locTmin = min(locTmin,minval(tracers%data(1)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) locSmax = max(locSmax,maxval(tracers%data(2)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) locSmin = min(locSmin,minval(tracers%data(2)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) end do - call MPI_AllREDUCE(locTmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - if (mype==0) write(*,*) ' |-> gobal max init. temp. =', glo - call MPI_AllREDUCE(locTmin , glo , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - if (mype==0) write(*,*) ' |-> gobal min init. temp. =', glo - call MPI_AllREDUCE(locSmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - if (mype==0) write(*,*) ' |-> gobal max init. salt. =', glo - call MPI_AllREDUCE(locSmin , glo , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - if (mype==0) write(*,*) ' `-> gobal min init. salt. =', glo + call MPI_AllREDUCE(locTmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, partit%MPI_COMM_FESOM, partit%MPIerr) + if (partit%mype==0) write(*,*) ' |-> gobal max init. temp. =', glo + call MPI_AllREDUCE(locTmin , glo , 1, MPI_DOUBLE_PRECISION, MPI_MIN, partit%MPI_COMM_FESOM, partit%MPIerr) + if (partit%mype==0) write(*,*) ' |-> gobal min init. temp. =', glo + call MPI_AllREDUCE(locSmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, partit%MPI_COMM_FESOM, partit%MPIerr) + if (partit%mype==0) write(*,*) ' |-> gobal max init. salt. =', glo + call MPI_AllREDUCE(locSmin , glo , 1, MPI_DOUBLE_PRECISION, MPI_MIN, partit%MPI_COMM_FESOM, partit%MPIerr) + if (partit%mype==0) write(*,*) ' `-> gobal min init. salt. =', glo END SUBROUTINE do_ic3d - - SUBROUTINE err_call(iost,fname) - !!--------------------------------------------------------------------- - !! *** ROUTINE err_call *** - !!---------------------------------------------------------------------- - IMPLICIT NONE - integer, intent(in) :: iost - character(len=MAX_PATH), intent(in) :: fname - write(*,*) 'ERROR: I/O status=',iost,' file= ',fname - call par_ex - stop - END SUBROUTINE err_call - - + SUBROUTINE nc_end IMPLICIT NONE @@ -576,13 +567,14 @@ SUBROUTINE nc_end END SUBROUTINE nc_end - SUBROUTINE check_nferr(iost,fname) + SUBROUTINE check_nferr(iost,fname, partit ) IMPLICIT NONE + type(t_partit), intent(in) :: partit character(len=MAX_PATH), intent(in) :: fname - integer, intent(in) :: iost + integer, intent(in) :: iost if (iost .ne. NF_NOERR) then write(*,*) 'ERROR: I/O status= "',trim(nf_strerror(iost)),'";',iost,' file= ', trim(fname) - call par_ex + call par_ex (partit) stop endif END SUBROUTINE diff --git a/src/gen_interpolation.F90 b/src/gen_interpolation.F90 index 4d877a0c3..3faf7eb9d 100755 --- a/src/gen_interpolation.F90 +++ b/src/gen_interpolation.F90 @@ -1,7 +1,7 @@ ! routines doing 3D, 2D and 1D interpolation subroutine interp_2d_field_v2(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, missvalue, & - num_mod, lon_mod, lat_mod, data_mod) + num_mod, lon_mod, lat_mod, data_mod, partit) !------------------------------------------------------------------------------------- ! A second version of 2D interpolation. ! This routine does 2d interpolation from a regular grid to specified nodes @@ -29,12 +29,13 @@ subroutine interp_2d_field_v2(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_r ! Coded by Qiang Wang ! Reviewed by ?? !------------------------------------------------------------------------------------- - use g_PARSUP, only: par_ex + use mod_partit use o_PARAM, only: WP implicit none integer :: n, i, ii, jj, k, nod_find integer :: ind_lat_h, ind_lat_l, ind_lon_h, ind_lon_l - integer, intent(in) :: num_lon_reg, num_lat_reg, num_mod + integer, intent(in) :: num_lon_reg, num_lat_reg, num_mod + type(t_partit), intent(in) :: partit real(kind=WP) :: x, y, diff, d, dmin real(kind=WP) :: rt_lat1, rt_lat2, rt_lon1, rt_lon2 real(kind=WP) :: data(2,2) @@ -47,7 +48,7 @@ subroutine interp_2d_field_v2(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_r if(lon_reg(1)<0.0 .or. lon_reg(num_lon_reg)>360.) then write(*,*) 'Error in 2D interpolation!' write(*,*) 'The regular grid is not in the proper longitude range.' - call par_ex(1) + call par_ex(partit, 1) stop end if @@ -135,7 +136,7 @@ end subroutine interp_2d_field_v2 !--------------------------------------------------------------------------- ! subroutine interp_2d_field(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, & - num_mod, lon_mod, lat_mod, data_mod, phase_flag) + num_mod, lon_mod, lat_mod, data_mod, phase_flag, partit) !------------------------------------------------------------------------------------- ! This routine does 2d interpolation from a regular grid to specified nodes ! on the surface grid. The regular grid is assumed to be global. @@ -162,17 +163,18 @@ subroutine interp_2d_field(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, ! Coded by Qiang Wang ! Reviewed by ?? !------------------------------------------------------------------------------------- - use g_PARSUP, only: par_ex + use mod_partit use o_PARAM, only: WP implicit none integer :: n, i integer :: ind_lat_h, ind_lat_l, ind_lon_h, ind_lon_l integer, intent(in) :: num_lon_reg, num_lat_reg, num_mod integer, intent(in) :: phase_flag + type(t_partit), intent(in) :: partit real(kind=WP) :: x, y, diff real(kind=WP) :: rt_lat1, rt_lat2, rt_lon1, rt_lon2 - real(kind=WP) :: data_ll, data_lh, data_hl, data_hh - real(kind=WP) :: data_lo, data_up + real(kind=WP) :: data_ll, data_lh, data_hl, data_hh + real(kind=WP) :: data_lo, data_up real(kind=WP), intent(in) :: lon_reg(num_lon_reg), lat_reg(num_lat_reg) real(kind=WP), intent(in) :: data_reg(num_lon_reg, num_lat_reg) real(kind=WP), intent(in) :: lon_mod(num_mod), lat_mod(num_mod) @@ -281,7 +283,7 @@ end subroutine interp_2d_field ! subroutine interp_3d_field(num_lon_reg, num_lat_reg, num_lay_reg, & lon_reg, lat_reg, lay_reg, data_reg, & - num_mod_z, num_mod, lon_mod, lat_mod, lay_mod, data_mod, mesh) + num_mod_z, num_mod, lon_mod, lat_mod, lay_mod, data_mod, partit, mesh) !------------------------------------------------------------------------------------- ! This routine does 3d interpolation from a regular grid to specified nodes. ! The regular grid is assumed to be global. @@ -311,8 +313,8 @@ subroutine interp_3d_field(num_lon_reg, num_lat_reg, num_lay_reg, & ! Reviewed by ?? !------------------------------------------------------------------------------------- use MOD_MESH + use MOD_PARTIT use o_param, only: WP - use g_parsup implicit none integer :: n, i, flag,nz integer :: ind_lat_h, ind_lat_l, ind_lon_h, ind_lon_l @@ -330,9 +332,12 @@ subroutine interp_3d_field(num_lon_reg, num_lat_reg, num_lay_reg, & real(kind=WP), intent(in) :: data_reg(num_lon_reg, num_lat_reg, num_lay_reg) real(kind=WP), intent(in) :: lon_mod(num_mod), lat_mod(num_mod), lay_mod(num_mod) real(kind=WP), intent(out) :: data_mod(num_mod_z,num_mod) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(in), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" do n=1,num_mod !!PS do nz=1,nlevels_nod2D(n)-1 diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index 8226de04d..5a5aa0813 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -1,28 +1,19 @@ ! ============================================================== -subroutine setup_model - implicit none - call read_namelist ! should be before clock_init -end subroutine setup_model -! ============================================================== -subroutine read_namelist - ! Reads namelist files and overwrites default parameters. - ! - ! Coded by Lars Nerger - ! Modified by Qiang Wang, SD - !-------------------------------------------------------------- +subroutine setup_model(partit) + use mod_partit use o_param use i_param use i_therm_param use g_forcing_param - use g_parsup use g_config use diagnostics, only: ldiag_solver,lcurt_stress_surf,lcurt_stress_surf, ldiag_energy, & ldiag_dMOC, ldiag_DVD, diag_list - use g_clock, only: timenew, daynew, yearnew + use g_clock, only: timenew, daynew, yearnew use g_ic3d implicit none + type(t_partit), intent(inout), target :: partit + character(len=MAX_PATH) :: nmlfile - character(len=MAX_PATH) :: nmlfile namelist /clockinit/ timenew, daynew, yearnew nmlfile ='namelist.config' ! name of general configuration namelist file @@ -83,11 +74,11 @@ subroutine read_namelist read (20,NML=diag_list) close (20) - if(mype==0) write(*,*) 'Namelist files are read in' + if(partit%mype==0) write(*,*) 'Namelist files are read in' !_____________________________________________________________________________ ! Check for namelist parameter consistency - if(mype==0) then + if(partit%mype==0) then ! check for valid step per day number if (mod(86400,step_per_day)==0) then @@ -111,25 +102,25 @@ subroutine read_namelist write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(0) + call par_ex(partit, 0) endif endif - ! if ((output_length_unit=='s').or.(int(real(step_per_day)/24.0)<=1)) use_means=.false. -end subroutine read_namelist +end subroutine setup_model ! ================================================================= -subroutine get_run_steps(nsteps) +subroutine get_run_steps(nsteps, partit) ! Coded by Qiang Wang ! Reviewed by ?? - !-------------------------------------------------------------- - + !-------------------------------------------------------------- use g_clock - use g_parsup + use mod_partit implicit none - integer :: i, temp_year, temp_mon, temp_fleapyear, nsteps + type(t_partit), intent(in) :: partit + integer, intent(inout) :: nsteps + integer :: i, temp_year, temp_mon, temp_fleapyear ! clock should have been inialized before calling this routine @@ -161,11 +152,11 @@ subroutine get_run_steps(nsteps) else write(*,*) 'Run length unit ', run_length_unit, ' is not defined.' write(*,*) 'Please check and update the code.' - call par_ex(1) + call par_ex(partit, 1) stop end if - if(mype==0) write(*,*) nsteps, ' steps to run for ', runid, ' job submission' + if(partit%mype==0) write(*,*) nsteps, ' steps to run for ', runid, ' job submission' end subroutine get_run_steps diff --git a/src/gen_modules_clock.F90 b/src/gen_modules_clock.F90 index b90aa0a0d..28cbbfb67 100755 --- a/src/gen_modules_clock.F90 +++ b/src/gen_modules_clock.F90 @@ -65,12 +65,13 @@ end subroutine clock ! !-------------------------------------------------------------------------------- ! - subroutine clock_init - use g_parsup + subroutine clock_init(partit) + use mod_partit use g_config implicit none - integer :: i, daystart, yearstart - real(kind=WP) :: aux1, aux2, timestart + type(t_partit), intent(in), target :: partit + integer :: i, daystart, yearstart + real(kind=WP) :: aux1, aux2, timestart ! the model initialized at timestart=timenew @@ -123,7 +124,7 @@ subroutine clock_init aux1=aux2 end do - if(mype==0) then + if(partit%mype==0) then if(r_restart) then write(*,*) print *, achar(27)//'[31m' //'____________________________________________________________'//achar(27)//'[0m' diff --git a/src/gen_modules_cvmix_idemix.F90 b/src/gen_modules_cvmix_idemix.F90 index 01f0af4a9..f8d9e941d 100644 --- a/src/gen_modules_cvmix_idemix.F90 +++ b/src/gen_modules_cvmix_idemix.F90 @@ -27,7 +27,7 @@ module g_cvmix_idemix use g_config , only: dt use o_param use mod_mesh - use g_parsup + use mod_partit use o_arrays use g_comm_auto use g_read_other_NetCDF @@ -114,15 +114,18 @@ module g_cvmix_idemix !=========================================================================== ! allocate and initialize IDEMIX variables --> call initialisation ! routine from cvmix library - subroutine init_cvmix_idemix(mesh) + subroutine init_cvmix_idemix(partit, mesh) implicit none character(len=cvmix_strlen) :: nmlfile logical :: file_exist=.False. integer :: node_size - type(t_mesh), intent(in), target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !_______________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -220,7 +223,7 @@ subroutine init_cvmix_idemix(mesh) inquire(file=trim(idemix_surforc_file),exist=file_exist) if (file_exist) then if (mype==0) write(*,*) ' --> read IDEMIX near inertial wave surface forcing' - call read_other_NetCDF(idemix_surforc_file, 'var706', 1, forc_iw_surface_2D, .true., mesh) + call read_other_NetCDF(idemix_surforc_file, 'var706', 1, forc_iw_surface_2D, .true., partit, mesh) ! only 20% of the niw-input are available to penetrate into the deeper ocean forc_iw_surface_2D = forc_iw_surface_2D/density_0 * idemix_sforcusage @@ -243,7 +246,7 @@ subroutine init_cvmix_idemix(mesh) inquire(file=trim(idemix_surforc_file),exist=file_exist) if (file_exist) then if (mype==0) write(*,*) ' --> read IDEMIX near tidal bottom forcing' - call read_other_NetCDF(idemix_botforc_file, 'wave_dissipation', 1, forc_iw_bottom_2D, .true., mesh) + call read_other_NetCDF(idemix_botforc_file, 'wave_dissipation', 1, forc_iw_bottom_2D, .true., partit, mesh) ! convert from W/m^2 to m^3/s^3 forc_iw_bottom_2D = forc_iw_bottom_2D/density_0 @@ -268,9 +271,10 @@ end subroutine init_cvmix_idemix ! !=========================================================================== ! calculate IDEMIX internal wave energy and its dissipation - subroutine calc_cvmix_idemix(mesh) + subroutine calc_cvmix_idemix(partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: node, elem, edge, node_size integer :: nz, nln, nl1, nl2, nl12, nu1, nu2, nu12, uln integer :: elnodes1(3), elnodes2(3), el(2), ednodes(2) @@ -278,7 +282,10 @@ subroutine calc_cvmix_idemix(mesh) real(kind=WP) :: grad_v0Eiw(2), deltaX1, deltaY1, deltaX2, deltaY2 logical :: debug=.false. -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! nils tstep_count = tstep_count + 1 @@ -364,7 +371,7 @@ subroutine calc_cvmix_idemix(mesh) ! make boundary exchange for iwe, and iwe_v0 --> for propagation need ! to calculate edge contribution that crosses the halo - call exchange_nod(iwe) + call exchange_nod(iwe, partit) !___________________________________________________________________ ! calculate inverse volume and restrict iwe_v0 to fullfill stability @@ -423,8 +430,8 @@ subroutine calc_cvmix_idemix(mesh) iwe_v0(nln+1,node) = min(iwe_v0(nln+1,node),aux) end do !-->do node = 1,node_size - call exchange_nod(vol_wcelli) - call exchange_nod(iwe_v0) + call exchange_nod(vol_wcelli, partit) + call exchange_nod(iwe_v0, partit) !___________________________________________________________________ ! calculate horizontal diffusion term for internal wave energy @@ -667,12 +674,12 @@ subroutine calc_cvmix_idemix(mesh) if(mix_scheme_nmb==6) then !___________________________________________________________________ ! write out diffusivity - call exchange_nod(iwe_Kv) + call exchange_nod(iwe_Kv, partit) Kv = iwe_Kv !___________________________________________________________________ ! write out viscosity -->interpolate therefor from nodes to elements - call exchange_nod(iwe_Av) !Warning: don't forget to communicate before averaging on elements!!! + call exchange_nod(iwe_Av, partit) !Warning: don't forget to communicate before averaging on elements!!! do elem=1, myDim_elem2D elnodes1=elem2D_nodes(:,elem) !!PS do nz=1,nlevels(elem)-1 diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 69dba70b0..77019be15 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -23,8 +23,8 @@ module g_cvmix_kpp use g_config use o_param use mod_mesh + use mod_partit use mod_tracer - use g_parsup use o_arrays use g_comm_auto use i_arrays @@ -219,13 +219,17 @@ module g_cvmix_kpp !=========================================================================== ! allocate and initialize CVMIX KPP variables --> call initialisation ! routine from cvmix library - subroutine init_cvmix_kpp(mesh) + subroutine init_cvmix_kpp(partit, mesh) implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit character(len=MAX_PATH) :: nmlfile logical :: nmlfile_exist=.False. integer :: node_size - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !_______________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -342,9 +346,10 @@ end subroutine init_cvmix_kpp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_kpp(tracers, mesh) - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + subroutine calc_cvmix_kpp(tracers, partit, mesh) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers integer :: node, elem, nz, nln, nun, elnodes(3), aux_nz real(kind=WP) :: vshear2, dz2, aux, aux_wm(mesh%nl), aux_ws(mesh%nl) real(kind=WP) :: aux_coeff, sigma, stable @@ -355,7 +360,10 @@ subroutine calc_cvmix_kpp(tracers, mesh) real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 real(kind=WP) :: sfc_rhopot, sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2 real(kind=WP), dimension(:,:), pointer :: temp, salt -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) !_______________________________________________________________________ @@ -894,14 +902,14 @@ subroutine calc_cvmix_kpp(tracers, mesh) ! original kpp parameterisation of FESOM1.4 & FESOM2.0 !!PS if (flag_debug .and. mype==0) print *, achar(27)//'[35m'//' --> calc smooth kpp_oblmixc'//achar(27)//'[0m' if (kpp_use_smoothblmc .and. kpp_use_fesomkpp) then - call exchange_nod(kpp_oblmixc(:,:,1)) - call exchange_nod(kpp_oblmixc(:,:,2)) - call exchange_nod(kpp_oblmixc(:,:,3)) + call exchange_nod(kpp_oblmixc(:,:,1), partit) + call exchange_nod(kpp_oblmixc(:,:,2), partit) + call exchange_nod(kpp_oblmixc(:,:,3), partit) do nz=1, 3 !_______________________________________________________________ ! all loops go over myDim_nod2D so no halo information --> for ! smoothing haloinfo is required --> therefor exchange_nod - call smooth_nod(kpp_oblmixc(:,:,nz), kpp_smoothblmc_nmb, mesh) + call smooth_nod(kpp_oblmixc(:,:,nz), kpp_smoothblmc_nmb, partit, mesh) end do end if @@ -934,13 +942,13 @@ subroutine calc_cvmix_kpp(tracers, mesh) !_______________________________________________________________________ ! write out diffusivities to FESOM2.0 --> diffusivities remain on nodes - call exchange_nod(kpp_Kv) + call exchange_nod(kpp_Kv, partit) Kv = kpp_Kv !_______________________________________________________________________ ! write out viscosities to FESOM2.0 --> viscosities for FESOM2.0 are ! defined on elements --> interpolate therefor from nodes to elements - call exchange_nod(kpp_Av) + call exchange_nod(kpp_Av, partit) Av = 0.0_WP do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) diff --git a/src/gen_modules_cvmix_pp.F90 b/src/gen_modules_cvmix_pp.F90 index 3722fcd8a..6654644bd 100644 --- a/src/gen_modules_cvmix_pp.F90 +++ b/src/gen_modules_cvmix_pp.F90 @@ -25,7 +25,7 @@ module g_cvmix_pp use g_config use o_param use MOD_MESH - use g_parsup + use MOD_PARTIT use o_arrays use g_comm_auto use i_arrays @@ -64,14 +64,18 @@ module g_cvmix_pp !=========================================================================== ! allocate and initialize CVMIX PP variables --> call initialisation ! routine from cvmix library - subroutine init_cvmix_pp(mesh) + subroutine init_cvmix_pp(partit, mesh) use MOD_MESH implicit none - type(t_mesh), intent(in), target :: mesh - character(len=MAX_PATH) :: nmlfile - logical :: nmlfile_exist=.False. - integer :: node_size -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + character(len=MAX_PATH) :: nmlfile + logical :: nmlfile_exist=.False. + integer :: node_size +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !_______________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -157,13 +161,17 @@ end subroutine init_cvmix_pp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_pp(mesh) + subroutine calc_cvmix_pp(partit, mesh) use MOD_MESH implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: node, elem, nz, nln, nun, elnodes(3), windnl=2, node_size real(kind=WP) :: vshear2, dz2, Kvb -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" node_size = myDim_nod2D !_______________________________________________________________________ do node = 1,node_size @@ -247,13 +255,13 @@ subroutine calc_cvmix_pp(mesh) !_______________________________________________________________________ ! write out diffusivities to FESOM2.0 --> diffusivities remain on nodes - call exchange_nod(pp_Kv) + call exchange_nod(pp_Kv, partit) Kv = pp_Kv !_______________________________________________________________________ ! write out viscosities to FESOM2.0 --> viscosities for FESOM2.0 are ! defined on elements --> interpolate therefor from nodes to elements - call exchange_nod(pp_Av) + call exchange_nod(pp_Av, partit) Av = 0.0_WP do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) diff --git a/src/gen_modules_cvmix_tidal.F90 b/src/gen_modules_cvmix_tidal.F90 index 8a1e5fdc9..8faa154c9 100644 --- a/src/gen_modules_cvmix_tidal.F90 +++ b/src/gen_modules_cvmix_tidal.F90 @@ -15,7 +15,7 @@ module g_cvmix_tidal use g_config , only: dt use o_param use mod_mesh - use g_parsup + use mod_partit use o_arrays use g_comm_auto use g_read_other_NetCDF @@ -76,13 +76,17 @@ module g_cvmix_tidal !=========================================================================== ! allocate and initialize IDEMIX variables --> call initialisation ! routine from cvmix library - subroutine init_cvmix_tidal(mesh) + subroutine init_cvmix_tidal(partit, mesh) character(len=MAX_PATH) :: nmlfile logical :: file_exist=.False. integer :: node_size - type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !_______________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -134,7 +138,7 @@ subroutine init_cvmix_tidal(mesh) inquire(file=trim(tidal_botforc_file),exist=file_exist) if (file_exist) then if (mype==0) write(*,*) ' --> read TIDAL near tidal bottom forcing' - call read_other_NetCDF(tidal_botforc_file, 'wave_dissipation', 1, tidal_forc_bottom_2D, .true., mesh) + call read_other_NetCDF(tidal_botforc_file, 'wave_dissipation', 1, tidal_forc_bottom_2D, .true., partit, mesh) !!PS ! convert from W/m^2 to m^3/s^3 !!PS tidal_forc_bottom_2D = tidal_forc_bottom_2D/density_0 ! --> the tidal energy for dissipation is divided by rho0 in @@ -148,7 +152,7 @@ subroutine init_cvmix_tidal(mesh) write(*,*) ' --> check your namelist.cvmix, tidal_botforc_file & ' write(*,*) '____________________________________________________________________' end if - call par_ex(0) + call par_ex(partit, 0) end if !_______________________________________________________________________ @@ -165,14 +169,17 @@ end subroutine init_cvmix_tidal ! !=========================================================================== ! calculate TIDAL mixing parameterisation - subroutine calc_cvmix_tidal(mesh) - type(t_mesh), intent(in), target :: mesh + subroutine calc_cvmix_tidal(partit, mesh) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: node, elem, node_size integer :: nz, nln, nun integer :: elnodes(3) real(kind=WP) :: simmonscoeff, vertdep(mesh%nl) - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !_______________________________________________________________________ node_size = myDim_nod2D do node = 1,node_size @@ -226,13 +233,13 @@ subroutine calc_cvmix_tidal(mesh) ! ! MPIOM note 2: background diffusivities were already added in the mixed layer ! scheme (KPP) - call exchange_nod(tidal_Kv) + call exchange_nod(tidal_Kv, partit) Kv = Kv + tidal_Kv !_______________________________________________________________________ ! add tidal viscosity to main model diffusivity Av -->interpolate ! therefor from nodes to elements - call exchange_nod(tidal_Av) + call exchange_nod(tidal_Av, partit) do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) !!PS do nz=1,nlevels(elem)-1 diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index c809811b8..cd9555a7c 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -26,7 +26,7 @@ module g_cvmix_tke use g_config , only: dt use o_param use mod_mesh - use g_parsup + use mod_partit use o_arrays use g_comm_auto implicit none @@ -116,13 +116,19 @@ module g_cvmix_tke !=========================================================================== ! allocate and initialize TKE 2D and 3D variables --> call initialisation ! routine from cvmix library - subroutine init_cvmix_tke(mesh) + subroutine init_cvmix_tke(partit, mesh) implicit none - character(len=cvmix_strlen) :: nmlfile - logical :: nmlfile_exist=.False. - integer :: node_size - type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + character(len=cvmix_strlen) :: nmlfile + logical :: nmlfile_exist=.False. + integer :: node_size + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + !_______________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -242,15 +248,19 @@ end subroutine init_cvmix_tke ! !=========================================================================== ! calculate TKE vertical mixing coefficients from CVMIX library - subroutine calc_cvmix_tke(mesh) + subroutine calc_cvmix_tke(partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: node, elem, nelem, nz, nln, nun, elnodes(3), node_size real(kind=WP) :: tvol real(kind=WP) :: dz_trr(mesh%nl), bvfreq2(mesh%nl), vshear2(mesh%nl) real(kind=WP) :: tke_Av_old(mesh%nl), tke_Kv_old(mesh%nl), tke_old(mesh%nl) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" node_size = myDim_nod2D !_______________________________________________________________________ @@ -374,12 +384,12 @@ subroutine calc_cvmix_tke(mesh) !_______________________________________________________________________ ! write out diffusivity - call exchange_nod(tke_Kv) + call exchange_nod(tke_Kv, partit) Kv = tke_Kv !_______________________________________________________________________ ! write out viscosity -->interpolate therefor from nodes to elements - call exchange_nod(tke_Av) !Warning: don't forget to communicate before averaging on elements!!! + call exchange_nod(tke_Av, partit) !Warning: don't forget to communicate before averaging on elements!!! Av = 0.0_WP do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 560dc3e16..ea324e6e3 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -2,8 +2,8 @@ module diagnostics use g_config use mod_mesh + use mod_partit use mod_tracer - use g_parsup use g_clock use g_comm_auto use o_ARRAYS @@ -15,7 +15,6 @@ module diagnostics implicit none private -!!PS public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, & compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, v_surf, u_bott, v_bott, & @@ -75,13 +74,17 @@ module diagnostics ! ============================================================== !rhs_diag=ssh_rhs? -subroutine diag_solver(mode, mesh) +subroutine diag_solver(mode, partit, mesh) implicit none - integer, intent(in) :: mode - integer :: n, is, ie - logical, save :: firstcall=.true. - type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer, intent(in) :: mode + integer :: n, is, ie + logical, save :: firstcall=.true. +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !===================== if (firstcall) then !allocate the stuff at the first call @@ -98,15 +101,19 @@ subroutine diag_solver(mode, mesh) end subroutine diag_solver ! ============================================================== !curt(stress_surf) -subroutine diag_curl_stress_surf(mode, mesh) +subroutine diag_curl_stress_surf(mode, partit, mesh) implicit none - integer, intent(in) :: mode - logical, save :: firstcall=.true. - integer :: enodes(2), el(2), ed, n - real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer, intent(in) :: mode + logical, save :: firstcall=.true. + integer :: enodes(2), el(2), ed, n + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 !===================== -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (firstcall) then !allocate the stuff at the first call allocate(curl_stress_surf(myDim_nod2D+eDim_nod2D)) @@ -141,15 +148,19 @@ subroutine diag_curl_stress_surf(mode, mesh) end subroutine diag_curl_stress_surf ! ============================================================== !3D curl(velocity) -subroutine diag_curl_vel3(mode, mesh) +subroutine diag_curl_vel3(mode, partit, mesh) implicit none - integer, intent(in) :: mode - logical, save :: firstcall=.true. - integer :: enodes(2), el(2), ed, n, nz, nl1, nl2, nl12, nu1, nu2, nu12 - real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer, intent(in) :: mode + logical, save :: firstcall=.true. + integer :: enodes(2), el(2), ed, n, nz, nl1, nl2, nl12, nu1, nu2, nu12 + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !===================== if (firstcall) then !allocate the stuff at the first call @@ -217,17 +228,21 @@ subroutine diag_curl_vel3(mode, mesh) end subroutine diag_curl_vel3 ! ============================================================== !energy budget -subroutine diag_energy(mode, mesh) +subroutine diag_energy(mode, partit, mesh) implicit none - integer, intent(in) :: mode - type(t_mesh), intent(in) , target :: mesh - logical, save :: firstcall=.true. + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer, intent(in) :: mode + logical, save :: firstcall=.true. integer :: n, nz, k, i, elem, nzmax, nzmin, elnodes(3) integer :: iup, ilo real(kind=WP) :: ux, vx, uy, vy, tvol, rval(2) real(kind=WP) :: geo_grad_x(3), geo_grad_y(3), geo_u(3), geo_v(3) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !===================== if (firstcall) then !allocate the stuff at the first call allocate(wrhof(nl, myDim_nod2D), rhof(nl, myDim_nod2D)) @@ -385,10 +400,11 @@ subroutine diag_energy(mode, mesh) END DO end subroutine diag_energy ! ============================================================== -subroutine diag_densMOC(mode, tracers, mesh) +subroutine diag_densMOC(mode, tracers, partit, mesh) implicit none integer, intent(in) :: mode type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers integer :: nz, snz, elem, nzmax, nzmin, elnodes(3), is, ie, pos integer :: e, edge, enodes(2), eelems(2) @@ -400,7 +416,10 @@ subroutine diag_densMOC(mode, tracers, mesh) real(kind=WP), save, allocatable :: std_dens_w(:,:), std_dens_VOL1(:,:), std_dens_VOL2(:,:) logical, save :: firstcall_s=.true., firstcall_e=.true. real(kind=WP), dimension(:,:), pointer :: temp, salt -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) @@ -637,31 +656,31 @@ subroutine diag_densMOC(mode, tracers, mesh) end subroutine diag_densMOC ! ============================================================== -subroutine compute_diagnostics(mode, tracers, mesh) +subroutine compute_diagnostics(mode, tracers, partit, mesh) implicit none - integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) - real(kind=WP) :: val - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers - !1. solver diagnostic - if (ldiag_solver) call diag_solver(mode, mesh) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) + real(kind=WP) :: val !1. solver diagnostic + if (ldiag_solver) call diag_solver(mode, partit, mesh) !2. compute curl(stress_surf) - if (lcurt_stress_surf) call diag_curl_stress_surf(mode, mesh) + if (lcurt_stress_surf) call diag_curl_stress_surf(mode, partit, mesh) !3. compute curl(velocity) - if (ldiag_curl_vel3) call diag_curl_vel3(mode, mesh) + if (ldiag_curl_vel3) call diag_curl_vel3(mode, partit, mesh) !4. compute energy budget - if (ldiag_energy) call diag_energy(mode, mesh) + if (ldiag_energy) call diag_energy(mode, partit, mesh) !5. print integrated temperature if (ldiag_salt3d) then if (mod(mstep,logfile_outfreq)==0) then - call integrate_nod(tracers%data(2)%values(:,:), val, mesh) - if (mype==0) then + call integrate_nod(tracers%data(2)%values(:,:), val, partit, mesh) + if (partit%mype==0) then write(*,*) 'total integral of salinity at timestep :', mstep, val end if end if end if !6. MOC in density coordinate - if (ldiag_dMOC) call diag_densMOC(mode, tracers, mesh) + if (ldiag_dMOC) call diag_densMOC(mode, tracers, partit, mesh) end subroutine compute_diagnostics @@ -676,18 +695,21 @@ end subroutine compute_diagnostics ! in a coastal model application ... ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... -subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, tracers, mesh) +subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, tracers, partit, mesh) use o_arrays - use g_PARSUP use oce_adv_tra_driver_interfaces implicit none - integer, intent(in) :: tr_num - type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer, intent(in) :: tr_num integer :: node, nz, nzmin, nzmax - real(kind=WP) :: tr_sqr(mesh%nl-1,myDim_nod2D+eDim_nod2D), trAB_sqr(mesh%nl-1,myDim_nod2D+eDim_nod2D) + real(kind=WP) :: tr_sqr(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D), trAB_sqr(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! square up fields for actual tracers and Adams Bashfort tracer @@ -711,7 +733,7 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, tracers, mesh) tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP ! maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? -! call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_sqr, trAB_sqr, 1, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) +! call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_sqr, trAB_sqr, 1, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, partit, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D @@ -744,17 +766,20 @@ end subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008 ! see: ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... -subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers, mesh) +subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers, partit, mesh) use o_arrays - use g_PARSUP use oce_adv_tra_driver_interfaces implicit none - integer :: node, nz, nzmin, nzmax - integer, intent(in) :: tr_num - type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer :: node, nz, nzmin, nzmax + integer, intent(in) :: tr_num + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! calculate horizintal and vertical advection for squared tracer (2nd moments) ! see Burchard and Rennau, 2008, Comparative quantification of physically and @@ -762,7 +787,7 @@ subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers, mesh) tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP ! maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? -! call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tracers%data(tr_num)%values, tracers%data(tr_num)%valuesAB(:,:), 2, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) +! call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tracers%data(tr_num)%values, tracers%data(tr_num)%valuesAB(:,:), 2, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, partit, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D @@ -803,18 +828,21 @@ end subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014 ! in a coastal model application ... ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... -subroutine compute_diag_dvd(tr_num, tracers, mesh) +subroutine compute_diag_dvd(tr_num, tracers, partit, mesh) use g_config, only: dt - use o_arrays - use g_PARSUP - + use o_arrays implicit none - integer :: node, nz, nzmin, nzmax - integer, intent(in) :: tr_num - type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer :: node, nz, nzmin, nzmax + integer, intent(in) :: tr_num + -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! add discret second moment to DVD do node = 1,mydim_nod2D diff --git a/src/gen_modules_gpot.F90 b/src/gen_modules_gpot.F90 index 439197a7f..72fb2ffd2 100644 --- a/src/gen_modules_gpot.F90 +++ b/src/gen_modules_gpot.F90 @@ -23,9 +23,9 @@ MODULE mo_tidal USE o_PARAM USE o_ARRAYS, only : ssh_gp - USE mod_mesh + USE MOD_MESH + USE MOD_PARTIT USE g_config, only : dt - USE g_PARSUP USE g_clock IMPLICIT NONE !Earth Tides ( maik thomas, emr pers. comm ) @@ -36,14 +36,14 @@ MODULE mo_tidal CONTAINS - SUBROUTINE foreph_ini(lyear,lmonth) + SUBROUTINE foreph_ini(lyear,lmonth, partit) ! Initialization of tidal module ! Determination of Julian Day of first time step ! Projection of mpiom grid on tidal module internal coordinates IMPLICIT NONE - - INTEGER,INTENT(IN)::lyear,lmonth + type(t_partit), intent(in) :: partit + INTEGER,INTENT(IN) :: lyear,lmonth INTEGER :: i, j, jcc, moph mmccdt = 0; jcc = 0; moph = 0 @@ -57,7 +57,7 @@ SUBROUTINE foreph_ini(lyear,lmonth) ! FIXME : replace eph by a some to code that directly calculates julian days and ! centuries as needed by siderial time and ephemerides - if (mype==0) WRITE(*,*)'tidal: phase relative to 2000 :' & + if (partit%mype==0) WRITE(*,*)'tidal: phase relative to 2000 :' & ,'year= ',lyear, 'month= ',lmonth, 'yearoff= ',jcc,' monoff= ',moph ,'mmccdt= ',mmccdt END SUBROUTINE foreph_ini @@ -107,18 +107,22 @@ SUBROUTINE eph(jul,mon,jahrph,moph) END SUBROUTINE eph - SUBROUTINE foreph(mesh) + SUBROUTINE foreph(partit, mesh) ! calculates the realtime gravitational potential of sun & moon ! output: ssh_gp (with Body Earth Tide effect) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit REAL(WP) :: dres(3,2),crim3,rkomp,erdrad,rekts,dekls REAL(WP) :: cris3,rektm,deklm,deklm2,dekls2,sidm,sidmq REAL(WP) :: rkosp,codm,codmq,sids,sidsq,cods,codsq,sidm2 REAL(WP) :: sids2,hamp,hasp INTEGER :: i,j -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" mmccdt = mmccdt + 1 diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index a348faaaa..cc7d3c080 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -1,97 +1,56 @@ -!========================================================== -module g_PARSUP -USE o_PARAM -! Variables to organize parallel work -implicit none -save - -#ifdef PETSC -#include "finclude/petsc.h" -#else - include 'mpif.h' -#endif - - integer :: MPI_COMM_FESOM - integer, parameter :: MAX_LAENDERECK=16 - integer, parameter :: MAX_NEIGHBOR_PARTITIONS=32 - type com_struct - integer :: rPEnum ! the number of PE I receive info from - integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: rPE ! their list - integer, dimension(MAX_NEIGHBOR_PARTITIONS+1) :: rptr ! allocatables to the list of nodes - integer, dimension(:), allocatable :: rlist ! the list of nodes - integer :: sPEnum ! send part - integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sPE - integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sptr - integer, dimension(:), allocatable :: slist - integer, dimension(:), allocatable :: req ! request for MPI_Wait - integer :: nreq ! number of requests for MPI_Wait - ! (to combine halo exchange of several fields) - end type com_struct - - type(com_struct) :: com_nod2D -!!$ type(com_struct) :: com_edge2D - type(com_struct), target :: com_elem2D - type(com_struct), target :: com_elem2D_full - - ! MPI Datatypes for interface exchange - - ! Edge fields (2D) - integer, allocatable :: s_mpitype_edge2D(:), r_mpitype_edge2D(:) - - ! Element fields (2D; 2D integer; 3D with nl-1 or nl levels, 1 - 4 values) - ! small halo and / or full halo - integer, allocatable, target :: s_mpitype_elem2D(:,:), r_mpitype_elem2D(:,:) - integer, allocatable :: s_mpitype_elem2D_full_i(:), r_mpitype_elem2D_full_i(:) - integer, allocatable, target :: s_mpitype_elem2D_full(:,:), r_mpitype_elem2D_full(:,:) - integer, allocatable, target :: s_mpitype_elem3D(:,:,:), r_mpitype_elem3D(:,:,:) - integer, allocatable, target :: s_mpitype_elem3D_full(:,:,:),r_mpitype_elem3D_full(:,:,:) - - ! Nodal fields (2D; 2D integer; 3D with nl-1 or nl levels, one, two, or three values) - integer, allocatable :: s_mpitype_nod2D(:), r_mpitype_nod2D(:) - integer, allocatable :: s_mpitype_nod2D_i(:), r_mpitype_nod2D_i(:) - integer, allocatable :: s_mpitype_nod3D(:,:,:), r_mpitype_nod3D(:,:,:) - - ! general MPI part - integer :: MPIERR - integer :: npes - integer :: mype - integer :: maxPEnum=100 - integer, allocatable, dimension(:) :: part - - ! Mesh partition - integer :: myDim_nod2D, eDim_nod2D - integer, allocatable, dimension(:) :: myList_nod2D - integer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D - integer, allocatable, dimension(:) :: myList_elem2D - integer :: myDim_edge2D, eDim_edge2D - integer, allocatable, dimension(:) :: myList_edge2D - - integer :: pe_status = 0 ! if /=0 then something is wrong - - integer, allocatable :: remPtr_nod2D(:), remList_nod2D(:) - integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) - - logical :: elem_full_flag - -contains -subroutine par_init ! initializes MPI +module par_support_interfaces + interface + subroutine par_init(partit) + USE o_PARAM + USE MOD_PARTIT + implicit none + type(t_partit), intent(inout), target :: partit + end subroutine + + subroutine par_ex(partit, abort) + USE MOD_PARTIT + implicit none + type(t_partit), intent(inout), target :: partit + integer,optional :: abort + end subroutine + + subroutine set_par_support(partit, mesh) + use MOD_MESH + use MOD_PARTIT + implicit none + type(t_partit), intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine init_gatherLists(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module + +subroutine par_init(partit) ! initializes MPI + USE o_PARAM + USE MOD_PARTIT implicit none - - - integer :: i - integer provided_mpi_thread_support_level - character(:), allocatable :: provided_mpi_thread_support_level_name + type(t_partit), intent(inout), target :: partit + integer :: i + integer :: provided_mpi_thread_support_level + character(:), allocatable :: provided_mpi_thread_support_level_name #ifndef __oasis - call MPI_Comm_Size(MPI_COMM_WORLD,npes,i) - call MPI_Comm_Rank(MPI_COMM_WORLD,mype,i) - MPI_COMM_FESOM=MPI_COMM_WORLD + call MPI_Comm_Size(MPI_COMM_WORLD,partit%npes,i) + call MPI_Comm_Rank(MPI_COMM_WORLD,partit%mype,i) + partit%MPI_COMM_FESOM=MPI_COMM_WORLD #else - call MPI_Comm_Size(MPI_COMM_FESOM,npes,i) - call MPI_Comm_Rank(MPI_COMM_FESOM,mype,i) + call MPI_Comm_Size(MPI_COMM_FESOM,partit%npes,i) + call MPI_Comm_Rank(MPI_COMM_FESOM,partit%mype,i) #endif - if(mype==0) then + if(partit%mype==0) then call MPI_Query_thread(provided_mpi_thread_support_level, i) if(provided_mpi_thread_support_level == MPI_THREAD_SINGLE) then provided_mpi_thread_support_level_name = "MPI_THREAD_SINGLE" @@ -106,67 +65,74 @@ subroutine par_init ! initializes MPI end if write(*,*) 'MPI has been initialized, provided MPI thread support level: ', & provided_mpi_thread_support_level_name,provided_mpi_thread_support_level - write(*, *) 'Running on ', npes, ' PEs' + write(*, *) 'Running on ', partit%npes, ' PEs' end if end subroutine par_init !================================================================= -subroutine par_ex(abort) ! finalizes MPI +subroutine par_ex(partit, abort) ! finalizes MPI +USE MOD_PARTIT #ifndef __oifs !For standalone and coupled ECHAM runs #if defined (__oasis) use mod_prism #endif implicit none - integer,optional :: abort + type(t_partit), intent(inout), target :: partit + integer,optional :: abort #ifndef __oasis if (present(abort)) then - if (mype==0) write(*,*) 'Run finished unexpectedly!' - call MPI_ABORT( MPI_COMM_FESOM, 1 ) + if (partit%mype==0) write(*,*) 'Run finished unexpectedly!' + call MPI_ABORT(partit%MPI_COMM_FESOM, 1 ) else - call MPI_Barrier(MPI_COMM_FESOM,MPIerr) - call MPI_Finalize(MPIerr) + call MPI_Barrier(partit%MPI_COMM_FESOM,partit%MPIerr) + call MPI_Finalize(partit%MPIerr) endif #else if (.not. present(abort)) then - if (mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' - call MPI_Barrier(MPI_COMM_WORLD, MPIerr) + if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' + call MPI_Barrier(MPI_COMM_WORLD, partit%MPIerr) end if call prism_terminate_proto(MPIerr) - if (mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' - call MPI_Barrier(MPI_COMM_WORLD, MPIerr) + if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' + call MPI_Barrier(MPI_COMM_WORLD, partit%MPIerr) - if (mype==0) print *, 'FESOM calls MPI_Finalize' + if (partit%mype==0) print *, 'FESOM calls MPI_Finalize' call MPI_Finalize(MPIerr) #endif - if (mype==0) print *, 'fesom should stop with exit status = 0' + if (partit%mype==0) print *, 'fesom should stop with exit status = 0' #endif #if defined (__oifs) !OIFS coupling doesnt call prism_terminate_proto and uses MPI_COMM_FESOM implicit none integer,optional :: abort if (present(abort)) then - if (mype==0) write(*,*) 'Run finished unexpectedly!' - call MPI_ABORT( MPI_COMM_FESOM, 1 ) + if (partit%mype==0) write(*,*) 'Run finished unexpectedly!' + call MPI_ABORT( partit%MPI_COMM_FESOM, 1 ) else - call MPI_Barrier(MPI_COMM_FESOM,MPIerr) - call MPI_Finalize(MPIerr) + call MPI_Barrier(partit%MPI_COMM_FESOM,partit%MPIerr) + call MPI_Finalize(partit%MPIerr) endif #endif end subroutine par_ex !======================================================================= -subroutine set_par_support(mesh) +subroutine set_par_support(partit, mesh) use MOD_MESH + use MOD_PARTIT implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh integer :: n, offset integer :: i, max_nb, nb, nini, nend, nl1, n_val integer, allocatable :: blocklen(:), displace(:) integer, allocatable :: blocklen_tmp(:), displace_tmp(:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! ! In the distributed memory version, most of the job is already done ! at the initialization phase and is taken into account in read_mesh @@ -178,30 +144,25 @@ subroutine set_par_support(mesh) !================================================ ! MPI REQUEST BUFFERS !================================================ - allocate(com_nod2D%req( 3*com_nod2D%rPEnum + 3*com_nod2D%sPEnum)) - allocate(com_elem2D%req( 3*com_elem2D%rPEnum + 3*com_elem2D%sPEnum)) - allocate(com_elem2D_full%req(3*com_elem2D_full%rPEnum + 3*com_elem2D_full%sPEnum)) - + if (.not. allocated(com_nod2D%req)) allocate(com_nod2D%req( 3*com_nod2D%rPEnum + 3*com_nod2D%sPEnum)) + if (.not. allocated(com_elem2D%req)) allocate(com_elem2D%req( 3*com_elem2D%rPEnum + 3*com_elem2D%sPEnum)) + if (.not. allocated(com_elem2D_full%req)) allocate(com_elem2D_full%req(3*com_elem2D_full%rPEnum + 3*com_elem2D_full%sPEnum)) !================================================ ! MPI DATATYPES !================================================ - ! Build MPI Data types for halo exchange: Elements - allocate(r_mpitype_elem2D(com_elem2D%rPEnum,4)) ! 2D, small halo - allocate(s_mpitype_elem2D(com_elem2D%sPEnum,4)) - allocate(r_mpitype_elem2D_full_i(com_elem2D_full%rPEnum)) ! 2D, wide halo, integer - allocate(s_mpitype_elem2D_full_i(com_elem2D_full%sPEnum)) - - allocate(r_mpitype_elem2D_full(com_elem2D_full%rPEnum,4)) ! 2D, wide halo - allocate(s_mpitype_elem2D_full(com_elem2D_full%sPEnum,4)) - - allocate(r_mpitype_elem3D(com_elem2D%rPEnum, nl-1:nl,4)) ! 3D, small halo - allocate(s_mpitype_elem3D(com_elem2D%sPEnum, nl-1:nl,4)) - - allocate(r_mpitype_elem3D_full(com_elem2D_full%rPEnum, nl-1:nl,4)) ! 3D, wide halo - allocate(s_mpitype_elem3D_full(com_elem2D_full%sPEnum, nl-1:nl,4)) - - + allocate(partit%r_mpitype_elem2D(com_elem2D%rPEnum,4)) ! 2D, small halo + allocate(partit%s_mpitype_elem2D(com_elem2D%sPEnum,4)) + allocate(partit%r_mpitype_elem2D_full_i(com_elem2D_full%rPEnum)) ! 2D, wide halo, integer + allocate(partit%s_mpitype_elem2D_full_i(com_elem2D_full%sPEnum)) + allocate(partit%r_mpitype_elem2D_full(com_elem2D_full%rPEnum,4)) ! 2D, wide halo + allocate(partit%s_mpitype_elem2D_full(com_elem2D_full%sPEnum,4)) + allocate(partit%r_mpitype_elem3D(com_elem2D%rPEnum, nl-1:nl,4)) ! 3D, small halo + allocate(partit%s_mpitype_elem3D(com_elem2D%sPEnum, nl-1:nl,4)) + allocate(partit%r_mpitype_elem3D_full(com_elem2D_full%rPEnum, nl-1:nl,4)) ! 3D, wide halo + allocate(partit%s_mpitype_elem3D_full(com_elem2D_full%sPEnum, nl-1:nl,4)) +!after the allocation we just reassotiate ALL pointers again here +#include "associate_part_ass.h" ! Upper limit for the length of the local interface between the neighbor PEs max_nb = max( & maxval(com_elem2D%rptr(2:com_elem2D%rPEnum+1) - com_elem2D%rptr(1:com_elem2D%rPEnum)), & @@ -378,13 +339,15 @@ subroutine set_par_support(mesh) ! Build MPI Data types for halo exchange: Nodes - allocate(r_mpitype_nod2D(com_nod2D%rPEnum)) ! 2D - allocate(s_mpitype_nod2D(com_nod2D%sPEnum)) - allocate(r_mpitype_nod2D_i(com_nod2D%rPEnum)) ! 2D integer - allocate(s_mpitype_nod2D_i(com_nod2D%sPEnum)) + allocate(partit%r_mpitype_nod2D(com_nod2D%rPEnum)) ! 2D + allocate(partit%s_mpitype_nod2D(com_nod2D%sPEnum)) + allocate(partit%r_mpitype_nod2D_i(com_nod2D%rPEnum)) ! 2D integer + allocate(partit%s_mpitype_nod2D_i(com_nod2D%sPEnum)) - allocate(r_mpitype_nod3D(com_nod2D%rPEnum,nl-1:nl,3)) ! 3D with nl-1 or nl layers, 1-3 values - allocate(s_mpitype_nod3D(com_nod2D%sPEnum,nl-1:nl,3)) + allocate(partit%r_mpitype_nod3D(com_nod2D%rPEnum,nl-1:nl,3)) ! 3D with nl-1 or nl layers, 1-3 values + allocate(partit%s_mpitype_nod3D(com_nod2D%sPEnum,nl-1:nl,3)) +!after the allocation we just reassotiate ALL pointers again here +#include "associate_part_ass.h" ! Upper limit for the length of the local interface between the neighbor PEs max_nb = max(maxval(com_nod2D%rptr(2:com_nod2D%rPEnum+1) - com_nod2D%rptr(1:com_nod2D%rPEnum)), & @@ -478,26 +441,32 @@ subroutine set_par_support(mesh) endif - call init_gatherLists + call init_gatherLists(partit, mesh) if(mype==0) write(*,*) 'Communication arrays are set' end subroutine set_par_support !=================================================================== -subroutine init_gatherLists - +subroutine init_gatherLists(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT implicit none - - integer :: n2D, e2D, sum_loc_elem2D - integer :: n, estart, nstart - + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: n2D, e2D, sum_loc_elem2D + integer :: n, estart, nstart +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (mype==0) then if (npes > 1) then - allocate(remPtr_nod2D(npes)) - allocate(remPtr_elem2D(npes)) - + allocate(partit%remPtr_nod2D(npes)) + allocate(partit%remPtr_elem2D(npes)) +!reassociate the pointers to the just allocated arrays +#include "associate_part_ass.h" remPtr_nod2D(1) = 1 remPtr_elem2D(1) = 1 @@ -509,12 +478,12 @@ subroutine init_gatherLists remPtr_elem2D(n+1) = remPtr_elem2D(n) + e2D enddo - - - allocate(remList_nod2D(remPtr_nod2D(npes))) ! this should be nod2D - myDim_nod2D - allocate(remList_elem2D(remPtr_elem2D(npes))) ! this is > elem2D, because the elements overlap. + allocate(partit%remList_nod2D(remPtr_nod2D(npes))) ! this should be nod2D - myDim_nod2D + allocate(partit%remList_elem2D(remPtr_elem2D(npes))) ! this is > elem2D, because the elements overlap. ! Consider optimization: avoid multiple communication ! of the same elem from different PEs. +!reassociate the pointers to the just allocated arrays +#include "associate_part_ass.h" do n=1, npes-1 nstart = remPtr_nod2D(n) @@ -536,8 +505,4 @@ subroutine init_gatherLists call MPI_SEND(myList_elem2D, myDim_elem2D, MPI_INTEGER, 0, 3, MPI_COMM_FESOM, MPIerr ) endif - end subroutine init_gatherLists - - -end module g_PARSUP diff --git a/src/gen_modules_read_NetCDF.F90 b/src/gen_modules_read_NetCDF.F90 index 919830b14..7faf8ec59 100755 --- a/src/gen_modules_read_NetCDF.F90 +++ b/src/gen_modules_read_NetCDF.F90 @@ -3,7 +3,7 @@ ! module g_read_other_NetCDF contains -subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, mesh) +subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, partit, mesh) ! Read 2D data and interpolate to the model grid. ! Currently used to read runoff and SSS. ! First, missing values are filled in on the raw regular grid; @@ -16,11 +16,12 @@ subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, mesh use g_config use o_param USE MOD_MESH - use g_parsup + USE MOD_PARTIT implicit none #include "netcdf.inc" - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: i, j, ii, jj, k, n, num, flag, cnt integer :: itime, latlen, lonlen integer :: status, ncid, varid @@ -30,13 +31,17 @@ subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, mesh real(real64), allocatable :: lon(:), lat(:) real(real64), allocatable :: ncdata(:,:), ncdata_temp(:,:) real(real64), allocatable :: temp_x(:), temp_y(:) - real(real64) :: model_2Darray(myDim_nod2d+eDim_nod2D) + real(real64) :: model_2Darray(partit%myDim_nod2d+partit%eDim_nod2D) character(*) :: vari character(*) :: file logical :: check_dummy integer :: ierror ! return error code -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + if (mype==0) then ! open file @@ -144,13 +149,13 @@ subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, mesh ! interpolation flag=0 call interp_2d_field(lonlen, latlen, lon, lat, ncdata, num, temp_x, temp_y, & - model_2Darray, flag) + model_2Darray, flag, partit) deallocate(temp_y, temp_x, ncdata_temp, ncdata, lon, lat) end subroutine read_other_NetCDF ! !------------------------------------------------------------------------------------ ! - subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, mesh) +subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, partit, mesh) ! Read WOA (NetCDF) surface T/S and interpolate to the model grid. ! Currently used for surface restoring in case of ocean-alone models ! Calling interp_2d_field_v2 to do interpolation, which also treats the dummy value. @@ -160,12 +165,12 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, mesh) use g_config use o_param USE MOD_MESH + USE MOD_PARTIT use g_rotate_grid - use g_parsup implicit none - #include "netcdf.inc" - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: i, j, n, num integer :: itime, latlen, lonlen integer :: status, ncid, varid @@ -175,13 +180,16 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, mesh) real(real64), allocatable :: lon(:), lat(:) real(real64), allocatable :: ncdata(:,:) real(real64), allocatable :: temp_x(:), temp_y(:) - real(real64) :: model_2Darray(myDim_nod2d+eDim_nod2D) + real(real64) :: model_2Darray(partit%myDim_nod2d+partit%eDim_nod2D) character(15) :: vari character(300) :: file logical :: check_dummy integer :: ierror ! return error code -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (mype==0) then ! open file @@ -265,36 +273,40 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, mesh) end do ! interpolation call interp_2d_field_v2(lonlen, latlen, lon, lat, ncdata, miss, & - num, temp_x, temp_y, model_2Darray) + num, temp_x, temp_y, model_2Darray, partit) deallocate(temp_y, temp_x, ncdata, lon, lat) end subroutine read_surf_hydrography_NetCDF ! !------------------------------------------------------------------------------------ ! -subroutine read_2ddata_on_grid_NetCDF(file, vari, itime, model_2Darray, mesh) +subroutine read_2ddata_on_grid_NetCDF(file, vari, itime, model_2Darray, partit, mesh) use, intrinsic :: ISO_FORTRAN_ENV use g_config use o_param USE MOD_MESH + USE MOD_PARTIT use g_rotate_grid - use g_parsup implicit none #include "netcdf.inc" - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n, i integer :: itime integer :: status, ncid, varid integer :: istart(2), icount(2) real(real64) :: ncdata(mesh%nod2D) - real(real64), intent(out) :: model_2Darray(myDim_nod2D+eDim_nod2D) - character(*), intent(in) :: file + real(real64), intent(out) :: model_2Darray(partit%myDim_nod2D+partit%eDim_nod2D) + character(*), intent(in) :: file character(*), intent(in) :: vari integer :: ierror ! return error code -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (mype==0) then ! open file diff --git a/src/gen_modules_rotate_grid.F90 b/src/gen_modules_rotate_grid.F90 index 797a8a74d..42f70ddc0 100755 --- a/src/gen_modules_rotate_grid.F90 +++ b/src/gen_modules_rotate_grid.F90 @@ -33,7 +33,6 @@ subroutine set_mesh_transform_matrix ! angle A around z-axis, the second is by an angle B about the new ! x-axis, and the third is by an angle G about the new z-axis. use o_PARAM - use g_PARSUP, only : mype implicit none real(kind=WP) :: al, be, ga @@ -51,7 +50,6 @@ subroutine set_mesh_transform_matrix r2g_matrix(3,1)=sin(be)*sin(al) r2g_matrix(3,2)=-sin(be)*cos(al) r2g_matrix(3,3)=cos(be) - if(mype==0) write(*,*) 'rotation matrix for rotated model grids prepared' end subroutine set_mesh_transform_matrix ! !---------------------------------------------------------------- diff --git a/src/gen_support.F90 b/src/gen_support.F90 index ffddaa677..c8c619c22 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -3,7 +3,7 @@ !2. computing surface integrals of the FESOM fields module g_support USE MOD_MESH - use g_parsup + use MOD_PARTIT use g_comm_auto use o_ARRAYS use g_config, only: dummy @@ -43,15 +43,20 @@ module g_support ! !-------------------------------------------------------------------------------------------- ! -subroutine smooth_nod2D(arr, N, mesh) +subroutine smooth_nod2D(arr, N, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer, intent(in) :: N real(KIND=WP), dimension(:), intent(inout) :: arr integer :: node, elem, j, q, elnodes(3) real(kind=WP) :: vol -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + allocate(work_array(myDim_nod2D)) DO q=1, N !apply mass matrix N times to smooth the field DO node=1, myDim_nod2D @@ -68,25 +73,31 @@ subroutine smooth_nod2D(arr, N, mesh) DO node=1,myDim_nod2D arr(node)=work_array(node) ENDDO - call exchange_nod(arr) + call exchange_nod(arr, partit) END DO deallocate(work_array) end subroutine smooth_nod2D ! !-------------------------------------------------------------------------------------------- ! -subroutine smooth_nod3D(arr, N_smooth, mesh) +subroutine smooth_nod3D(arr, N_smooth, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer, intent(in) :: N_smooth real(KIND=WP), intent(inout) :: arr(:,:) integer :: n, el, nz, j, q, num_el, nlev, nl_loc, nu_loc integer :: uln, nln, ule, nle - real(kind=WP) :: vol(mesh%nl,myDim_nod2D) + real(kind=WP) :: vol(mesh%nl, partit%myDim_nod2D) real(kind=WP), allocatable :: work_array(:,:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + nlev=ubound(arr,1) allocate(work_array(nlev,myDim_nod2D)) @@ -138,7 +149,7 @@ subroutine smooth_nod3D(arr, N_smooth, mesh) END DO end DO - call exchange_nod(arr) + call exchange_nod(arr, partit) ! And the remaining smoothing sweeps @@ -171,7 +182,7 @@ subroutine smooth_nod3D(arr, N_smooth, mesh) arr(nz, n) = work_array(nz, n) *vol(nz,n) END DO end DO - call exchange_nod(arr) + call exchange_nod(arr, partit) enddo deallocate(work_array) @@ -180,14 +191,18 @@ end subroutine smooth_nod3D ! !-------------------------------------------------------------------------------------------- ! -subroutine smooth_elem2D(arr, N, mesh) +subroutine smooth_elem2D(arr, N, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer, intent(in) :: N real(KIND=WP), dimension(:), intent(inout) :: arr integer :: node, elem, j, q, elnodes(3) real(kind=WP) :: vol -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" allocate(work_array(myDim_nod2D+eDim_nod2D)) DO q=1, N !apply mass matrix N times to smooth the field DO node=1, myDim_nod2D @@ -201,26 +216,30 @@ subroutine smooth_elem2D(arr, N, mesh) END DO work_array(node)=work_array(node)/vol END DO - call exchange_nod(work_array) + call exchange_nod(work_array, partit) DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:, elem) arr(elem)=sum(work_array(elnodes))/3.0_WP ! Here, we need the inverse and scale by 1/3 ENDDO - call exchange_elem(arr) + call exchange_elem(arr, partit) END DO deallocate(work_array) end subroutine smooth_elem2D ! !-------------------------------------------------------------------------------------------- ! -subroutine smooth_elem3D(arr, N, mesh) +subroutine smooth_elem3D(arr, N, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer, intent(in) :: N real(KIND=WP), dimension(:,:), intent(inout) :: arr integer :: node, elem, my_nl, nz, j, q, elnodes(3) real(kind=WP) :: vol -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" allocate(work_array(myDim_nod2D+eDim_nod2D)) @@ -243,7 +262,7 @@ subroutine smooth_elem3D(arr, N, mesh) END DO work_array(node)=work_array(node)/vol END DO - call exchange_nod(work_array) + call exchange_nod(work_array, partit) DO elem=1, myDim_elem2D if (nz>nlevels(elem) ) CYCLE if (nz do nz=1, nl-1 !_______________________________________________________________________ - call exchange_nod(arr) + call exchange_nod(arr, partit) !_______________________________________________________________________ loc_max=maxval(arr(1,:)) @@ -409,7 +440,7 @@ subroutine extrap_nod3D(arr, mesh) if (arr(nz,n)>0.99_WP*dummy) arr(nz,n)=arr(nz-1,n) end do end do - call exchange_nod(arr) + call exchange_nod(arr, partit) !___________________________________________________________________________ deallocate(work_array) diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index bfd057638..c53908e70 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -33,10 +33,10 @@ MODULE g_sbf !! sbc_ini -- inizialization atmpospheric forcing !! sbc_do -- provide a sbc (surface boundary conditions) each time step !! - USE o_ARRAYS USE MOD_MESH + USE MOD_PARTIT + USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_comm_auto USE g_support USE g_rotate_grid @@ -178,37 +178,37 @@ MODULE g_sbf CONTAINS - SUBROUTINE nc_readTimeGrid(flf) + SUBROUTINE nc_readTimeGrid(flf, partit) ! Read time array and grid from nc file - IMPLICIT NONE - - type(flfi_type),intent(inout) :: flf - integer :: iost !I/O status - integer :: ncid ! netcdf file id - integer :: i + IMPLICIT NONE + type(flfi_type),intent(inout) :: flf + type(t_partit), intent(inout), target :: partit + integer :: iost !I/O status + integer :: ncid ! netcdf file id + integer :: i ! ID dimensions and variables: - integer :: id_lon - integer :: id_lat - integer :: id_lond - integer :: id_latd - integer :: id_time - integer :: id_timed - integer :: nf_start(4) - integer :: nf_edges(4) - integer :: ierror ! return error code - character(len=20) :: aux_calendar - integer :: aux_len + integer :: id_lon + integer :: id_lat + integer :: id_lond + integer :: id_latd + integer :: id_time + integer :: id_timed + integer :: nf_start(4) + integer :: nf_edges(4) + integer :: ierror ! return error code + character(len=20) :: aux_calendar + integer :: aux_len !open file - if (mype==0) then + if (partit%mype==0) then iost = nf_open(trim(flf%file_name),NF_NOWRITE,ncid) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) ! get dimensions - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "LAT", id_latd) if (iost .ne. NF_NOERR) then iost = nf_inq_dimid(ncid, "lat", id_latd) @@ -220,10 +220,10 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_dimid(ncid, "LAT1", id_latd) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "LON", id_lond) if (iost .ne. NF_NOERR) then iost = nf_inq_dimid(ncid, "lon", id_lond) @@ -235,10 +235,10 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_dimid(ncid, "LON1", id_lond) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "TIME", id_timed) if (iost .ne. NF_NOERR) then iost = nf_inq_dimid(ncid, "time", id_timed) @@ -247,11 +247,11 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_dimid(ncid, "TIME1", id_timed) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) ! get variable id - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_varid(ncid, "LAT", id_lat) if (iost .ne. NF_NOERR) then iost = nf_inq_varid(ncid, "lat", id_lat) @@ -263,9 +263,9 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_varid(ncid, "LAT1", id_lat) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_varid(ncid, "LON", id_lon) if (iost .ne. NF_NOERR) then iost = nf_inq_varid(ncid, "longitude", id_lon) @@ -277,10 +277,10 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_varid(ncid, "LON1", id_lon) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_varid(ncid, "TIME", id_time) if (iost .ne. NF_NOERR) then iost = nf_inq_varid(ncid, "time", id_time) @@ -289,28 +289,28 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_varid(ncid, "TIME1",id_time) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) ! get dimensions size - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_latd, flf%nc_Nlat) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_lond, flf%nc_Nlon) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_timed,flf%nc_Ntime) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) flf%nc_Nlon=flf%nc_Nlon+2 !for the halo in case of periodic boundary - call MPI_BCast(flf%nc_Nlon, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(flf%nc_Nlat, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(flf%nc_Ntime, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_Nlon, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_Nlat, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_Ntime, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) if (.not. allocated(flf%nc_time)) then allocate( flf%nc_lon(flf%nc_Nlon), flf%nc_lat(flf%nc_Nlat),& @@ -323,38 +323,38 @@ SUBROUTINE nc_readTimeGrid(flf) !____________________________________________________________________________ !read variables from file ! read lat - if (mype==0) then + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=flf%nc_Nlat iost = nf_get_vara_double(ncid, id_lat, nf_start, nf_edges, flf%nc_lat) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) ! read lon - if (mype==0) then + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=flf%nc_Nlon-2 iost = nf_get_vara_double(ncid, id_lon, nf_start, nf_edges, flf%nc_lon(2:flf%nc_Nlon-1)) flf%nc_lon(1) =flf%nc_lon(flf%nc_Nlon-1) flf%nc_lon(flf%nc_Nlon) =flf%nc_lon(2) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) !____________________________________________________________________________ ! read time axis from file - if (mype==0) then + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=flf%nc_Ntime iost = nf_get_vara_double(ncid, id_time, nf_start, nf_edges, flf%nc_time) ! digg for calendar attribute in time axis variable end if - call MPI_BCast(flf%nc_time, flf%nc_Ntime, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_time, flf%nc_Ntime, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) ! digg for calendar attribute in time axis variable - if (mype==0 .and. use_flpyrcheck) then + if (partit%mype==0 .and. use_flpyrcheck) then iost = nf_inq_attlen(ncid, id_time,'calendar',aux_len) iost = nf_get_att(ncid, id_time,'calendar',aux_calendar) aux_calendar = aux_calendar(1:aux_len) @@ -439,8 +439,8 @@ SUBROUTINE nc_readTimeGrid(flf) flf%nc_time(flf%nc_Ntime) = flf%nc_time(flf%nc_Ntime) + (flf%nc_time(flf%nc_Ntime) - flf%nc_time(flf%nc_Ntime-1))/2.0 end if end if - call MPI_BCast(flf%nc_lon, flf%nc_Nlon, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(flf%nc_lat, flf%nc_Nlat, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_lon, flf%nc_Nlon, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_lat, flf%nc_Nlat, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) !___________________________________________________________________________ !flip lat and data in case of lat from -90 to 90 @@ -450,14 +450,14 @@ SUBROUTINE nc_readTimeGrid(flf) if ( flf%nc_lat(1) > flf%nc_lat(flf%nc_Nlat) ) then flip_lat = 1 flf%nc_lat=flf%nc_lat(flf%nc_Nlat:1:-1) - if (mype==0) write(*,*) "fv_sbc: nc_readTimeGrid: FLIP lat and data while lat from -90 to 90" + if (partit%mype==0) write(*,*) "fv_sbc: nc_readTimeGrid: FLIP lat and data while lat from -90 to 90" endif endif - if (mype==0) then + if (partit%mype==0) then iost = nf_close(ncid) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call check_nferr(iost,flf%file_name) if (ic_cyclic) then @@ -510,7 +510,7 @@ SUBROUTINE nc_sbc_ini_fillnames(yyyy) if (l_cloud) sbc_flfi(i_cloud)%var_name=ADJUSTL(trim(nm_cloud_var)) END SUBROUTINE nc_sbc_ini_fillnames - SUBROUTINE nc_sbc_ini(rdate, mesh) + SUBROUTINE nc_sbc_ini(rdate, partit, mesh) !!--------------------------------------------------------------------- !! ** Purpose : initialization of ocean forcing from NETCDF file !!---------------------------------------------------------------------- @@ -528,9 +528,12 @@ SUBROUTINE nc_sbc_ini(rdate, mesh) real(wp) :: x, y ! coordinates of elements integer :: fld_idx type(flfi_type), pointer :: flf - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! used for interpolate on elements ! ALLOCATE( bilin_indx_i(elem2D),bilin_indx_j(elem2D), & @@ -546,7 +549,7 @@ SUBROUTINE nc_sbc_ini(rdate, mesh) call nc_sbc_ini_fillnames(yyyy) ! we assume that all NetCDF files have identical grid and time variable do fld_idx = 1, i_totfl - call nc_readTimeGrid(sbc_flfi(fld_idx)) + call nc_readTimeGrid(sbc_flfi(fld_idx), partit) end do if (lfirst) then do fld_idx = 1, i_totfl @@ -589,13 +592,13 @@ SUBROUTINE nc_sbc_ini(rdate, mesh) end if do fld_idx = 1, i_totfl ! get first coefficients for time interpolation on model grid for all data - call getcoeffld(fld_idx, rdate, mesh) + call getcoeffld(fld_idx, rdate, partit, mesh) end do ! interpolate in time - call data_timeinterp(rdate) + call data_timeinterp(rdate, partit) END SUBROUTINE nc_sbc_ini - SUBROUTINE getcoeffld(fld_idx, rdate, mesh) + SUBROUTINE getcoeffld(fld_idx, rdate, partit, mesh) use forcing_provider_async_module use io_netcdf_workaround_module !!--------------------------------------------------------------------- @@ -606,6 +609,8 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) !! ** Action : !!---------------------------------------------------------------------- IMPLICIT NONE + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer, intent(in) :: fld_idx real(wp),intent(in) :: rdate ! initialization date integer :: iost !I/O status @@ -634,12 +639,14 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) character(len=MAX_PATH), pointer :: file_name character(len=34) , pointer :: var_name real(wp), pointer :: nc_time(:), nc_lon(:), nc_lat(:) - type(t_mesh), intent(in) , target :: mesh real(4), dimension(:,:), pointer :: sbcdata1, sbcdata2 logical sbcdata1_from_cache, sbcdata2_from_cache integer rootrank -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! fld_idx determines which ouf our forcing fields we use here nc_Ntime =>sbc_flfi(fld_idx)%nc_Ntime @@ -658,7 +665,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) sbc_flfi(fld_idx)%sbcdata_a_t_index = -1 allocate(sbc_flfi(fld_idx)%sbcdata_b(nc_Nlon,nc_Nlat)) sbc_flfi(fld_idx)%sbcdata_b_t_index = -1 - sbc_flfi(fld_idx)%read_forcing_rootrank = next_io_rank(MPI_COMM_FESOM, sbc_flfi(fld_idx)%async_netcdf_allowed) + sbc_flfi(fld_idx)%read_forcing_rootrank = next_io_rank(MPI_COMM_FESOM, sbc_flfi(fld_idx)%async_netcdf_allowed, partit) end if rootrank = sbc_flfi(fld_idx)%read_forcing_rootrank @@ -848,7 +855,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) !!$OMP END PARALLEL END SUBROUTINE getcoeffld - SUBROUTINE data_timeinterp(rdate) + SUBROUTINE data_timeinterp(rdate, partit) !!--------------------------------------------------------------------- !! *** ROUTINE data_timeinterp *** !! @@ -857,7 +864,8 @@ SUBROUTINE data_timeinterp(rdate) !! ** Action : !!---------------------------------------------------------------------- IMPLICIT NONE - real(wp),intent(in) :: rdate ! seconds + type(t_partit), intent(inout), target :: partit + real(wp), intent(in) :: rdate ! seconds ! assign data from interpolation to taux and tauy integer :: fld_idx, i,j,ii @@ -865,7 +873,7 @@ SUBROUTINE data_timeinterp(rdate) !!$OMP PARALLEL !!$OMP DO do fld_idx = 1, i_totfl - do i = 1, myDim_nod2D+eDim_nod2D + do i = 1, partit%myDim_nod2D+partit%eDim_nod2D ! store processed forcing data for fesom computation atmdata(fld_idx,i) = rdate * coef_a(fld_idx,i) + coef_b(fld_idx,i) end do !nod2D @@ -874,7 +882,7 @@ SUBROUTINE data_timeinterp(rdate) !!$OMP END PARALLEL END SUBROUTINE data_timeinterp - SUBROUTINE sbc_ini(mesh) + SUBROUTINE sbc_ini(partit, mesh) !!--------------------------------------------------------------------- !! *** ROUTINE sbc_ini *** !! @@ -890,7 +898,8 @@ SUBROUTINE sbc_ini(mesh) integer :: sbc_alloc !: allocation status real(wp) :: tx, ty - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit namelist /nam_sbc/ nm_xwind_file, nm_ywind_file, nm_humi_file, nm_qsr_file, & nm_qlw_file, nm_tair_file, nm_prec_file, nm_snow_file, & @@ -899,6 +908,12 @@ SUBROUTINE sbc_ini(mesh) nm_mslp_var, nm_cloud_var, nm_cloud_file, nm_nc_iyear, nm_nc_imm, nm_nc_idd, nm_nc_freq, nm_nc_tmid, y_perpetual, & l_xwind, l_ywind, l_humi, l_qsr, l_qlw, l_tair, l_prec, l_mslp, l_cloud, l_snow, & nm_runoff_file, runoff_data_source, runoff_climatology, nm_sss_data_file, sss_data_source + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ! OPEN and read namelist for SBC open( unit=nm_sbc_unit, file='namelist.forcing', form='formatted', access='sequential', status='old', iostat=iost ) if (iost == 0) then @@ -1023,13 +1038,13 @@ SUBROUTINE sbc_ini(mesh) emp = 0.0_WP qsr = 0.0_WP ALLOCATE(sbc_flfi(i_totfl)) - call nc_sbc_ini(rdate, mesh) + call nc_sbc_ini(rdate, partit, mesh) !========================================================================== ! runoff if (runoff_data_source=='CORE1' .or. runoff_data_source=='CORE2' ) then ! runoff in CORE is constant in time ! Warning: For a global mesh, conservative scheme is to be updated!! - call read_other_NetCDF(nm_runoff_file, 'Foxx_o_roff', 1, runoff, .false., mesh) + call read_other_NetCDF(nm_runoff_file, 'Foxx_o_roff', 1, runoff, .false., partit, mesh) runoff=runoff/1000.0_WP ! Kg/s/m2 --> m/s end if @@ -1037,7 +1052,7 @@ SUBROUTINE sbc_ini(mesh) if (mype==0) write(*,*) 'Parts of forcing data (only constant in time fields) are read' END SUBROUTINE sbc_ini - SUBROUTINE sbc_do(mesh) + SUBROUTINE sbc_do(partit, mesh) !!--------------------------------------------------------------------- !! *** ROUTINE sbc_do *** !! @@ -1054,10 +1069,14 @@ SUBROUTINE sbc_do(mesh) integer :: yyyy, dd, mm integer, pointer :: nc_Ntime, t_indx, t_indx_p1 real(wp), pointer :: nc_time(:) - character(len=MAX_PATH) :: filename - type(t_mesh), intent(in) , target :: mesh + character(len=MAX_PATH) :: filename + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" force_newcoeff=.false. if (yearnew/=yearold) then @@ -1067,7 +1086,7 @@ SUBROUTINE sbc_do(mesh) call nc_sbc_ini_fillnames(yyyy) ! we assume that all NetCDF files have identical grid and time variable do fld_idx = 1, i_totfl - call nc_readTimeGrid(sbc_flfi(fld_idx)) + call nc_readTimeGrid(sbc_flfi(fld_idx), partit) end do force_newcoeff=.true. end if @@ -1084,7 +1103,7 @@ SUBROUTINE sbc_do(mesh) nc_Ntime =>sbc_flfi(fld_idx)%nc_Ntime if ( ((rdate > nc_time(t_indx_p1)) .and. (nc_time(t_indx) < nc_time(nc_Ntime))) .or. force_newcoeff) then ! get new coefficients for time interpolation on model grid for all data - call getcoeffld(fld_idx, rdate, mesh) + call getcoeffld(fld_idx, rdate, partit, mesh) if (fld_idx==i_xwind) do_rotation=.true. endif end do @@ -1108,7 +1127,7 @@ SUBROUTINE sbc_do(mesh) i=month+1 if (i > 12) i=1 if (mype==0) write(*,*) 'Updating SSS restoring data for month ', i - call read_other_NetCDF(nm_sss_data_file, 'SALT', i, Ssurf, .true., mesh) + call read_other_NetCDF(nm_sss_data_file, 'SALT', i, Ssurf, .true., partit, mesh) end if end if end if @@ -1123,7 +1142,7 @@ SUBROUTINE sbc_do(mesh) if (i > 12) i=1 if (mype==0) write(*,*) 'Updating monthly climatology runoff for month ', i filename=trim(nm_runoff_file) - call read_2ddata_on_grid_NetCDF(filename,'runoff', i, runoff, mesh) + call read_2ddata_on_grid_NetCDF(filename,'runoff', i, runoff, partit, mesh) !kg/m2/s -> m/s runoff=runoff/1000.0_WP @@ -1135,7 +1154,7 @@ SUBROUTINE sbc_do(mesh) if (i > 12) i=1 if (mype==0) write(*,*) 'Updating monthly runoff for month ', i filename=trim(nm_runoff_file)//cyearnew//'.nc' - call read_2ddata_on_grid_NetCDF(filename,'runoff', i, runoff, mesh) + call read_2ddata_on_grid_NetCDF(filename,'runoff', i, runoff, partit, mesh) !kg/m2/s -> m/s runoff=runoff/1000.0_WP @@ -1147,27 +1166,10 @@ SUBROUTINE sbc_do(mesh) ! interpolate in time - call data_timeinterp(rdate) + call data_timeinterp(rdate, partit) END SUBROUTINE sbc_do - SUBROUTINE err_call(iost,fname) - !!--------------------------------------------------------------------- - !! *** ROUTINE err_call *** - !! - !! ** Purpose : call Error - !! ** Method : - !! ** Action : - !!---------------------------------------------------------------------- - IMPLICIT NONE - integer, intent(in) :: iost - character(len=MAX_PATH), intent(in) :: fname - write(*,*) 'ERROR: I/O status=',iost,' file= ',fname - STOP 'ERROR: stop' - - - END SUBROUTINE err_call - FUNCTION julday(yyyy,mm,dd) IMPLICIT NONE diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index fcfd8d224..a0c04c597 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -1,17 +1,19 @@ module ice_EVP_interfaces interface - subroutine stress_tensor(ice_strength, mesh) - use g_parsup - use mod_mesh - real(kind=WP), intent(in) :: ice_strength(mydim_elem2D) - type(t_mesh), intent(in), target :: mesh + subroutine stress_tensor(ice_strength, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) end subroutine - subroutine stress2rhs(inv_areamass, ice_strength, mesh) - USE MOD_MESH - USE g_PARSUP - REAL(kind=WP), intent(in) :: inv_areamass(myDim_nod2D), ice_strength(mydim_elem2D) - type(t_mesh), intent(in) , target :: mesh + subroutine stress2rhs(inv_areamass, ice_strength, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + REAL(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) end subroutine end interface end module @@ -20,33 +22,35 @@ subroutine stress2rhs(inv_areamass, ice_strength, mesh) ! Contains routines of EVP dynamics ! !=================================================================== -subroutine stress_tensor(ice_strength, mesh) +subroutine stress_tensor(ice_strength, partit, mesh) ! EVP rheology. The routine computes stress tensor components based on ice ! velocity field. They are stored as elemental arrays (sigma11, sigma22 and ! sigma12). The ocean velocity is at nodal locations. use o_param use i_param -use mod_mesh use i_arrays -use g_parsup USE g_CONFIG +USE MOD_MESH +USE MOD_PARTIT #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif implicit none - -real(kind=WP), intent(in) :: ice_strength(mydim_elem2D) +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) real(kind=WP) :: eta, xi, delta, aa integer :: el, elnodes(3) real(kind=WP) :: asum, msum, vale, dx(3), dy(3) real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte real(kind=WP) :: zeta, delta_inv, d1, d2 -type(t_mesh), intent(in), target :: mesh - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" vale = 1.0_WP/(ellipse**2) @@ -132,28 +136,30 @@ subroutine stress_tensor(ice_strength, mesh) end subroutine stress_tensor !=================================================================== -subroutine stress_tensor_no1(ice_strength, mesh) +subroutine stress_tensor_no1(ice_strength, partit, mesh) ! EVP rheology. The routine computes stress tensor components based on ice ! velocity field. They are stored as elemental arrays (sigma11, sigma22 and ! sigma12). The ocean velocity is at nodal locations. use o_param use i_param -use mod_mesh use i_arrays -use g_parsup USE g_CONFIG +USE MOD_MESH +USE MOD_PARTIT implicit none - -real(kind=WP), intent(in) :: ice_strength(mydim_elem2D) +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) real(kind=WP) :: eta, xi, delta, aa integer :: el, elnodes(3) real(kind=WP) :: asum, msum, vale, dx(3), dy(3) real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte real(kind=WP) :: zeta, delta_inv, d1, d2 -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" vale = 1.0_WP/(ellipse**2) @@ -234,30 +240,29 @@ subroutine stress_tensor_no1(ice_strength, mesh) end do end subroutine stress_tensor_no1 !=================================================================== -subroutine stress2rhs_e(mesh) +subroutine stress2rhs_e(partit, mesh) ! EVP implementation: ! Computes the divergence of stress tensor and puts the result into the ! rhs vectors. Velocity is at nodes. ! The divergence is computed in a cysly over edges. It is slower that the ! approach in stress2rhs_e inherited from FESOM - - -USE MOD_MESH USE o_PARAM USE i_PARAM USE i_therm_param USE i_arrays -USE g_PARSUP use g_config, only: use_cavity - +USE MOD_MESH +USE MOD_PARTIT IMPLICIT NONE -INTEGER :: n, elem, ed, elnodes(3), el(2), ednodes(2) +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +INTEGER :: n, elem, ed, elnodes(3), el(2), ednodes(2) REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 - -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO n=1, myDim_nod2D U_rhs_ice(n)=0.0_WP @@ -320,26 +325,27 @@ subroutine stress2rhs_e(mesh) END DO end subroutine stress2rhs_e !=================================================================== -subroutine stress2rhs(inv_areamass, ice_strength, mesh) +subroutine stress2rhs(inv_areamass, ice_strength, partit, mesh) ! EVP implementation: ! Computes the divergence of stress tensor and puts the result into the ! rhs vectors - -USE MOD_MESH USE o_PARAM USE i_PARAM USE i_THERM_PARAM -USE g_PARSUP USE i_arrays +USE MOD_MESH +USE MOD_PARTIT IMPLICIT NONE -REAL(kind=WP), intent(in) :: inv_areamass(myDim_nod2D), ice_strength(mydim_elem2D) -INTEGER :: n, el, k -REAL(kind=WP):: val3 -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +REAL(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) +INTEGER :: n, el, k +REAL(kind=WP) :: val3 +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" val3=1/3.0_WP DO n=1, myDim_nod2D @@ -394,19 +400,19 @@ end subroutine stress2rhs ! ! !=================================================================== -subroutine EVPdynamics(mesh) +subroutine EVPdynamics(partit, mesh) ! EVP implementation. Does subcycling and boundary conditions. ! Velocities at nodes -USE MOD_MESH USE o_PARAM USE i_ARRAYS USE i_PARAM USE i_therm_param -USE g_PARSUP USE o_ARRAYS USE g_CONFIG USE g_comm_auto use ice_EVP_interfaces +USE MOD_MESH +USE MOD_PARTIT #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength @@ -414,14 +420,16 @@ subroutine EVPdynamics(mesh) #endif IMPLICIT NONE +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit integer :: steps, shortstep real(kind=WP) :: rdt, asum, msum, r_a, r_b real(kind=WP) :: drag, det, umod, rhsu, rhsv integer :: n, ed, ednodes(2), el, elnodes(3) real(kind=WP) :: ax, ay, aa, elevation_dx, elevation_dy -real(kind=WP) :: inv_areamass(myDim_nod2D), inv_mass(myDim_nod2D) -real(kind=WP) :: ice_strength(myDim_elem2D), elevation_elem(3), p_ice(3) +real(kind=WP) :: inv_areamass(partit%myDim_nod2D), inv_mass(partit%myDim_nod2D) +real(kind=WP) :: ice_strength(partit%myDim_elem2D), elevation_elem(3), p_ice(3) integer :: use_pice real(kind=WP) :: eta, xi, delta @@ -433,9 +441,10 @@ subroutine EVPdynamics(mesh) INTEGER :: elem REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! If Icepack is used, always update the tracers @@ -598,8 +607,8 @@ subroutine EVPdynamics(mesh) do shortstep=1, evp_rheol_steps - call stress_tensor(ice_strength, mesh) - call stress2rhs(inv_areamass,ice_strength, mesh) + call stress_tensor(ice_strength, partit, mesh) + call stress2rhs(inv_areamass,ice_strength, partit, mesh) U_ice_old = U_ice !PS V_ice_old = V_ice !PS @@ -660,7 +669,7 @@ subroutine EVPdynamics(mesh) end do !___________________________________________________________________________ - call exchange_nod(U_ice,V_ice) + call exchange_nod(U_ice,V_ice,partit) END DO diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 3de0e1881..f6b2864cc 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -1,24 +1,32 @@ module ice_fct_interfaces interface - subroutine ice_mass_matrix_fill(mesh) + subroutine ice_mass_matrix_fill(partit, mesh) use MOD_MESH - type(t_mesh), intent(in) , target :: mesh + use MOD_PARTIT + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh end subroutine - subroutine ice_solve_high_order(mesh) + subroutine ice_solve_high_order(partit, mesh) use MOD_MESH - type(t_mesh), intent(in) , target :: mesh + use MOD_PARTIT + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh end subroutine - subroutine ice_solve_low_order(mesh) + subroutine ice_solve_low_order(partit, mesh) use MOD_MESH - type(t_mesh), intent(in) , target :: mesh + use MOD_PARTIT + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh end subroutine - subroutine ice_fem_fct(tr_array_id, mesh) + subroutine ice_fem_fct(tr_array_id, partit, mesh) use MOD_MESH + use MOD_PARTIT integer :: tr_array_id - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module @@ -36,19 +44,23 @@ subroutine ice_fem_fct(tr_array_id, mesh) ! The code is adapted from FESOM ! ! ===================================================================== -subroutine ice_TG_rhs(mesh) +subroutine ice_TG_rhs(partit, mesh) use MOD_MESH + use MOD_PARTIT use i_Arrays use i_PARAM - use g_PARSUP use o_PARAM USE g_CONFIG implicit none real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) integer :: n, q, row, elem, elnodes(3) - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Taylor-Galerkin (Lax-Wendroff) rhs DO row=1, myDim_nod2D @@ -101,17 +113,21 @@ end subroutine ice_TG_rhs ! !---------------------------------------------------------------------------- ! -subroutine ice_fct_init(mesh) +subroutine ice_fct_init(partit, mesh) use o_PARAM use MOD_MESH + use MOD_PARTIT use i_ARRAYS - use g_PARSUP use ice_fct_interfaces implicit none integer :: n_size - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" n_size=myDim_nod2D+eDim_nod2D @@ -142,35 +158,37 @@ subroutine ice_fct_init(mesh) dm_snow = 0.0_WP ! Fill in the mass matrix - call ice_mass_matrix_fill(mesh) + call ice_mass_matrix_fill(partit, mesh) if (mype==0) write(*,*) 'Ice FCT is initialized' end subroutine ice_fct_init ! !---------------------------------------------------------------------------- ! -subroutine ice_fct_solve(mesh) +subroutine ice_fct_solve(partit, mesh) use MOD_MESH + use MOD_PARTIT use ice_fct_interfaces implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh ! Driving routine - call ice_solve_high_order(mesh) ! uses arrays of low-order solutions as temp + call ice_solve_high_order(partit, mesh) ! uses arrays of low-order solutions as temp ! storage. It should preceed the call of low ! order solution. - call ice_solve_low_order(mesh) + call ice_solve_low_order(partit, mesh) - call ice_fem_fct(1, mesh) ! m_ice - call ice_fem_fct(2, mesh) ! a_ice - call ice_fem_fct(3, mesh) ! m_snow + call ice_fem_fct(1, partit, mesh) ! m_ice + call ice_fem_fct(2, partit, mesh) ! a_ice + call ice_fem_fct(3, partit, mesh) ! m_snow #if defined (__oifs) - call ice_fem_fct(4, mesh) ! ice_temp + call ice_fem_fct(4, partit, mesh) ! ice_temp #endif /* (__oifs) */ end subroutine ice_fct_solve ! ! !_______________________________________________________________________________ -subroutine ice_solve_low_order(mesh) +subroutine ice_solve_low_order(partit, mesh) !============================ ! Low-order solution @@ -183,17 +201,21 @@ subroutine ice_solve_low_order(mesh) ! matrices acting on the field from the previous time step. The consistent ! mass matrix on the lhs is replaced with the lumped one. use MOD_MESH + use MOD_PARTIT use MOD_TRACER use i_ARRAYS use i_PARAM - use g_PARSUP use g_comm_auto implicit none integer :: row, clo, clo2, cn, location(100) real(kind=WP) :: gamma - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" gamma=ice_gamma_fct ! Added diffusivity parameter ! Adjust it to ensure posivity of solution @@ -224,10 +246,10 @@ subroutine ice_solve_low_order(mesh) end do ! Low-order solution must be known to neighbours - call exchange_nod(m_icel,a_icel,m_snowl) + call exchange_nod(m_icel,a_icel,m_snowl, partit) #if defined (__oifs) - call exchange_nod(m_templ) + call exchange_nod(m_templ, partit) #endif /* (__oifs) */ @@ -235,21 +257,25 @@ end subroutine ice_solve_low_order ! ! !_______________________________________________________________________________ -subroutine ice_solve_high_order(mesh) +subroutine ice_solve_high_order(partit, mesh) use MOD_MESH + use MOD_PARTIT use MOD_TRACER use i_ARRAYS - use g_PARSUP use o_PARAM use g_comm_auto implicit none ! - integer :: n,i,clo,clo2,cn,location(100),row - real(kind=WP) :: rhs_new - integer :: num_iter_solve=3 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + integer :: n,i,clo,clo2,cn,location(100),row + real(kind=WP) :: rhs_new + integer :: num_iter_solve=3 + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Does Taylor-Galerkin solution ! !the first approximation @@ -266,10 +292,10 @@ subroutine ice_solve_high_order(mesh) #endif /* (__oifs) */ end do - call exchange_nod(dm_ice, da_ice, dm_snow) + call exchange_nod(dm_ice, da_ice, dm_snow, partit) #if defined (__oifs) - call exchange_nod(dm_temp) + call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ !iterate do n=1,num_iter_solve-1 @@ -305,10 +331,10 @@ subroutine ice_solve_high_order(mesh) dm_temp(row)=m_templ(row) #endif /* (__oifs) */ end do - call exchange_nod(dm_ice, da_ice, dm_snow) + call exchange_nod(dm_ice, da_ice, dm_snow, partit) #if defined (__oifs) - call exchange_nod(dm_temp) + call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ end do @@ -316,7 +342,7 @@ end subroutine ice_solve_high_order ! ! !_______________________________________________________________________________ -subroutine ice_fem_fct(tr_array_id, mesh) +subroutine ice_fem_fct(tr_array_id, partit, mesh) ! Flux corrected transport algorithm for tracer advection ! ! It is based on Loehner et al. (Finite-element flux-corrected @@ -325,11 +351,11 @@ subroutine ice_fem_fct(tr_array_id, mesh) ! Turek. (kuzmin@math.uni-dortmund.de) ! use MOD_MESH + use MOD_PARTIT use MOD_TRACER use i_arrays use i_param use o_PARAM - use g_PARSUP use g_comm_auto implicit none @@ -337,9 +363,13 @@ subroutine ice_fem_fct(tr_array_id, mesh) integer :: icoef(3,3),n,q, elem,elnodes(3),row real(kind=WP), allocatable, dimension(:) :: tmax, tmin real(kind=WP) :: vol, flux, ae, gamma - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" gamma=ice_gamma_fct ! It should coinside with gamma in ! ts_solve_low_order @@ -513,7 +543,7 @@ subroutine ice_fem_fct(tr_array_id, mesh) end if end do ! pminus and pplus are to be known to neighbouting PE - call exchange_nod(icepminus, icepplus) + call exchange_nod(icepminus, icepplus, partit) !======================== ! Limiting @@ -617,10 +647,10 @@ subroutine ice_fem_fct(tr_array_id, mesh) end if #endif /* (__oifs) */ - call exchange_nod(m_ice, a_ice, m_snow) + call exchange_nod(m_ice, a_ice, m_snow, partit) #if defined (__oifs) - call exchange_nod(ice_temp) + call exchange_nod(ice_temp, partit) #endif /* (__oifs) */ deallocate(tmin, tmax) @@ -628,13 +658,13 @@ end subroutine ice_fem_fct ! ! !_______________________________________________________________________________ -SUBROUTINE ice_mass_matrix_fill(mesh) +SUBROUTINE ice_mass_matrix_fill(partit, mesh) ! Used in ice_fct inherited from FESOM use MOD_MESH + use MOD_PARTIT use MOD_TRACER use i_PARAM use i_ARRAYS - use g_PARSUP ! implicit none integer :: n, n1, n2, row @@ -643,9 +673,13 @@ SUBROUTINE ice_mass_matrix_fill(mesh) integer, allocatable :: col_pos(:) real(kind=WP) :: aa integer :: flag=0,iflag=0 - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! ! a) allocate(mass_matrix(sum(nn_num(1:myDim_nod2D)))) @@ -707,20 +741,24 @@ END SUBROUTINE ice_mass_matrix_fill ! !========================================================= ! -subroutine ice_TG_rhs_div(mesh) +subroutine ice_TG_rhs_div(partit, mesh) use MOD_MESH + use MOD_PARTIT use i_Arrays use i_PARAM - use g_PARSUP use o_PARAM USE g_CONFIG implicit none real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) integer :: n, q, row, elem, elnodes(3) real(kind=WP) :: c1, c2, c3, c4, cx1, cx2, cx3, cx4, entries2(3) - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Computes the rhs in a Taylor-Galerkin way (with upwind type of ! correction for the advection operator) @@ -798,12 +836,12 @@ end subroutine ice_TG_rhs_div ! ! !_______________________________________________________________________________ -subroutine ice_update_for_div(mesh) +subroutine ice_update_for_div(partit, mesh) use MOD_MESH + use MOD_PARTIT use MOD_TRACER use i_Arrays use i_PARAM - use g_PARSUP use o_PARAM USE g_CONFIG use g_comm_auto @@ -812,9 +850,13 @@ subroutine ice_update_for_div(mesh) integer :: n,i,clo,clo2,cn,location(100),row real(kind=WP) :: rhs_new integer :: num_iter_solve=3 - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Does Taylor-Galerkin solution ! @@ -831,11 +873,11 @@ subroutine ice_update_for_div(mesh) dm_temp(row)=rhs_tempdiv(row)/area(1,row) #endif /* (__oifs) */ end do - call exchange_nod(dm_ice) - call exchange_nod(da_ice) - call exchange_nod(dm_snow) + call exchange_nod(dm_ice, partit) + call exchange_nod(da_ice, partit) + call exchange_nod(dm_snow, partit) #if defined (__oifs) - call exchange_nod(dm_temp) + call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ !iterate @@ -872,11 +914,11 @@ subroutine ice_update_for_div(mesh) dm_temp(row)=m_templ(row) #endif /* (__oifs) */ end do - call exchange_nod(dm_ice) - call exchange_nod(da_ice) - call exchange_nod(dm_snow) + call exchange_nod(dm_ice, partit) + call exchange_nod(da_ice, partit) + call exchange_nod(dm_snow, partit) #if defined (__oifs) - call exchange_nod(dm_temp) + call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ end do m_ice=m_ice+dm_ice diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index f5aedca3d..a6b0856e7 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -1,28 +1,38 @@ module ice_maEVP_interfaces interface - subroutine ssh2rhs(mesh) + subroutine ssh2rhs(partit, mesh) use mod_mesh - type(t_mesh), intent(in), target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine stress_tensor_a(mesh) + subroutine stress_tensor_a(partit, mesh) use mod_mesh - type(t_mesh), intent(in), target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine stress2rhs_m(mesh) + subroutine stress2rhs_m(partit, mesh) use mod_mesh - type(t_mesh), intent(in), target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine find_alpha_field_a(mesh) + subroutine find_alpha_field_a(partit, mesh) use mod_mesh - type(t_mesh), intent(in), target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine find_beta_field_a(mesh) + subroutine find_beta_field_a(partit, mesh) use mod_mesh - type(t_mesh), intent(in), target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module @@ -31,7 +41,7 @@ subroutine find_beta_field_a(mesh) ! New evp implementation following Bouillion et al. 2013 ! and Kimmritz et al. 2015 (mEVP) and Kimmritz et al. 2016 (aEVP) ! ==================================================================== -subroutine stress_tensor_m(mesh) +subroutine stress_tensor_m(partit, mesh) ! Internal stress tensor ! New implementation following Boullion et al, Ocean Modelling 2013. ! SD, 30.07.2014 @@ -39,24 +49,28 @@ subroutine stress_tensor_m(mesh) use o_param use i_param use mod_mesh + use mod_partit use g_config use i_arrays - use g_parsup #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: elem, elnodes(3) real(kind=WP) :: dx(3), dy(3), msum, asum real(kind=WP) :: eps1, eps2, pressure, delta real(kind=WP) :: val3, meancos, usum, vsum, vale real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) @@ -127,24 +141,28 @@ end subroutine stress_tensor_m ! ! ================================================================== ! -subroutine ssh2rhs(mesh) +subroutine ssh2rhs(partit, mesh) ! Compute the contribution from the elevation to the rhs ! S.D. 30.07.2014 use o_param use i_param use mod_mesh + use mod_partit use g_config use i_arrays - use g_parsup use i_therm_param implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: row, elem, elnodes(3), n real(kind=WP) :: dx(3), dy(3), vol real(kind=WP) :: val3, meancos, aa, bb, p_ice(3) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" val3=1.0_WP/3.0_WP @@ -203,7 +221,7 @@ end subroutine ssh2rhs ! !=================================================================== ! -subroutine stress2rhs_m(mesh) +subroutine stress2rhs_m(partit, mesh) ! add internal stress to the rhs ! SD, 30.07.2014 @@ -212,18 +230,23 @@ subroutine stress2rhs_m(mesh) use i_param use i_therm_param use mod_mesh + use mod_partit use g_config use i_arrays - use g_parsup implicit none + + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: k, row, elem, elnodes(3) real(kind=WP) :: dx(3), dy(3), vol real(kind=WP) :: val3, mf, aa, bb real(kind=WP) :: mass, cluster_area, elevation_elem(3) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" val3=1.0_WP/3.0_WP @@ -270,7 +293,7 @@ end subroutine stress2rhs_m ! !=================================================================== ! -subroutine EVPdynamics_m(mesh) +subroutine EVPdynamics_m(partit, mesh) ! assemble rhs and solve for ice velocity ! New implementation based on Bouillion et al. Ocean Modelling 2013 ! SD 30.07.14 @@ -280,10 +303,10 @@ subroutine EVPdynamics_m(mesh) use i_param use i_therm_param use mod_mesh + use mod_partit use g_config use i_arrays use o_arrays - use g_parsup use g_comm_auto #if defined (__icepack) @@ -292,15 +315,17 @@ subroutine EVPdynamics_m(mesh) #endif implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: steps, shortstep, i, ed,n real(kind=WP) :: rdt, drag, det - real(kind=WP) :: inv_thickness(myDim_nod2D), umod, rhsu, rhsv - logical :: ice_el(myDim_elem2D), ice_nod(myDim_nod2D) + real(kind=WP) :: inv_thickness(partit%myDim_nod2D), umod, rhsu, rhsv + logical :: ice_el(partit%myDim_elem2D), ice_nod(partit%myDim_nod2D) !NR for stress_tensor_m integer :: el, elnodes(3) real(kind=WP) :: dx(3), dy(3), msum, asum - real(kind=WP) :: eps1, eps2, pressure, pressure_fac(myDim_elem2D), delta + real(kind=WP) :: eps1, eps2, pressure, pressure_fac(partit%myDim_elem2D), delta real(kind=WP) :: val3, meancos, vale real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 @@ -308,10 +333,12 @@ subroutine EVPdynamics_m(mesh) integer :: k, row real(kind=WP) :: vol real(kind=WP) :: mf,aa, bb,p_ice(3) - real(kind=WP) :: mass(myDim_nod2D) - type(t_mesh), intent(in) , target :: mesh + real(kind=WP) :: mass(partit%myDim_nod2D) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) @@ -585,14 +612,14 @@ subroutine EVPdynamics_m(mesh) end do ! --> do ed=1,myDim_edge2D !___________________________________________________________________________ - call exchange_nod_begin(u_ice_aux, v_ice_aux) + call exchange_nod_begin(u_ice_aux, v_ice_aux, partit) do row=1, myDim_nod2d u_rhs_ice(row)=0.0_WP v_rhs_ice(row)=0.0_WP end do - call exchange_nod_end + call exchange_nod_end(partit) end do ! --> do shortstep=1, steps @@ -608,7 +635,7 @@ end subroutine EVPdynamics_m ! The subroutines involved are with _a. ! ==================================================================== ! -subroutine find_alpha_field_a(mesh) +subroutine find_alpha_field_a(partit, mesh) ! EVP stability parameter alpha is computed at each element ! aEVP implementation ! SD, 13.02.2017 @@ -617,23 +644,26 @@ subroutine find_alpha_field_a(mesh) use i_param use i_therm_param use mod_mesh + use mod_partit use g_config use i_arrays - use g_parsup #if defined (__icepack) use icedrv_main, only: strength #endif implicit none - + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: elem, elnodes(3) real(kind=WP) :: dx(3), dy(3), msum, asum real(kind=WP) :: eps1, eps2, pressure, delta real(kind=WP) :: val3, meancos, usum, vsum, vale - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) @@ -683,7 +713,7 @@ subroutine find_alpha_field_a(mesh) end subroutine find_alpha_field_a ! ==================================================================== -subroutine stress_tensor_a(mesh) +subroutine stress_tensor_a(partit, mesh) ! Internal stress tensor ! New implementation following Boullion et al, Ocean Modelling 2013. ! and Kimmritz et al., Ocean Modelling 2016 @@ -692,24 +722,26 @@ subroutine stress_tensor_a(mesh) use o_param use i_param use mod_mesh + use mod_partit use g_config use i_arrays - use g_parsup #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif implicit none - + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: elem, elnodes(3) real(kind=WP) :: dx(3), dy(3), msum, asum real(kind=WP) :: eps1, eps2, pressure, delta real(kind=WP) :: val3, meancos, usum, vsum, vale real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) @@ -782,7 +814,7 @@ end subroutine stress_tensor_a ! !=================================================================== ! -subroutine EVPdynamics_a(mesh) +subroutine EVPdynamics_a(partit, mesh) ! assemble rhs and solve for ice velocity ! New implementation based on Bouillion et al. Ocean Modelling 2013 ! and Kimmritz et al., Ocean Modelling 2016 @@ -791,12 +823,12 @@ subroutine EVPdynamics_a(mesh) use o_param use mod_mesh +use mod_partit use i_arrays USE o_arrays use i_param use o_PARAM use i_therm_param -use g_parsup use g_config, only: use_cavity use g_comm_auto use ice_maEVP_interfaces @@ -806,19 +838,22 @@ subroutine EVPdynamics_a(mesh) #endif implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: steps, shortstep, i, ed real(kind=WP) :: rdt, drag, det, fc real(kind=WP) :: thickness, inv_thickness, umod, rhsu, rhsv REAL(kind=WP) :: t0,t1, t2, t3, t4, t5, t00, txx - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" steps=evp_rheol_steps rdt=ice_dt u_ice_aux=u_ice ! Initialize solver variables v_ice_aux=v_ice - call ssh2rhs(mesh) + call ssh2rhs(partit, mesh) #if defined (__icepack) rdg_conv_elem(:) = 0.0_WP @@ -826,8 +861,8 @@ subroutine EVPdynamics_a(mesh) #endif do shortstep=1, steps - call stress_tensor_a(mesh) - call stress2rhs_m(mesh) ! _m=_a, so no _m version is the only one! + call stress_tensor_a(partit, mesh) + call stress2rhs_m(partit, mesh) ! _m=_a, so no _m version is the only one! do i=1,myDim_nod2D !_______________________________________________________________________ @@ -876,36 +911,40 @@ subroutine EVPdynamics_a(mesh) end if end do ! --> do ed=1,myDim_edge2D - call exchange_nod(u_ice_aux, v_ice_aux) + call exchange_nod(u_ice_aux, v_ice_aux, partit) end do u_ice=u_ice_aux v_ice=v_ice_aux - call find_alpha_field_a(mesh) ! alpha_evp_array is initialized with alpha_evp; + call find_alpha_field_a(partit, mesh) ! alpha_evp_array is initialized with alpha_evp; ! At this stage we already have non-trivial velocities. - call find_beta_field_a(mesh) + call find_beta_field_a(partit, mesh) end subroutine EVPdynamics_a ! ! ================================================================= ! -subroutine find_beta_field_a(mesh) +subroutine find_beta_field_a(partit, mesh) ! beta_evp_array is defined at nodes, and this is the only ! reason we need it in addition to alpha_evp_array (we work with ! alpha=beta, and keep different names for generality; mEVP can work with ! alpha \ne beta, but not aEVP). use mod_mesh +use mod_partit use o_param USE i_param use i_arrays -use g_parsup Implicit none integer :: n -type(t_mesh), intent(in) , target :: mesh +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO n=1, myDim_nod2D !_______________________________________________________________________ diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 0b641a7fa..b0c629737 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -1,9 +1,11 @@ module ocean2ice_interface interface - subroutine ocean2ice(tracers, mesh) + subroutine ocean2ice(tracers, partit, mesh) use mod_mesh + use mod_partit use mod_tracer - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(inout), target :: tracers end subroutine end interface @@ -11,10 +13,12 @@ subroutine ocean2ice(tracers, mesh) module oce_fluxes_interface interface - subroutine oce_fluxes(tracers, mesh) + subroutine oce_fluxes(tracers, partit, mesh) use mod_mesh + use mod_partit use mod_tracer - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(inout), target :: tracers end subroutine end interface @@ -23,14 +27,14 @@ subroutine oce_fluxes(tracers, mesh) ! ! !_______________________________________________________________________________ -subroutine oce_fluxes_mom(mesh) +subroutine oce_fluxes_mom(partit, mesh) ! transmits the relevant fields from the ice to the ocean model ! use o_PARAM use o_ARRAYS use MOD_MESH + use MOD_PARTIT use i_ARRAYS - use g_PARSUP use i_PARAM USE g_CONFIG use g_comm_auto @@ -43,9 +47,13 @@ subroutine oce_fluxes_mom(mesh) integer :: n, elem, elnodes(3),n1 real(kind=WP) :: aux, aux1 - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! ================== ! momentum flux: @@ -94,13 +102,13 @@ subroutine oce_fluxes_mom(mesh) END DO !___________________________________________________________________________ - if (use_cavity) call cavity_momentum_fluxes(mesh) + if (use_cavity) call cavity_momentum_fluxes(partit, mesh) end subroutine oce_fluxes_mom ! ! !_______________________________________________________________________________ -subroutine ocean2ice(tracers, mesh) +subroutine ocean2ice(tracers, partit, mesh) ! transmits the relevant fields from the ocean to the ice model @@ -109,17 +117,21 @@ subroutine ocean2ice(tracers, mesh) use i_ARRAYS use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT USE g_CONFIG use g_comm_auto implicit none - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers integer :: n, elem, k real(kind=WP) :: uw, vw, vol real(kind=WP), dimension(:,:), pointer :: temp, salt -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) @@ -173,22 +185,22 @@ subroutine ocean2ice(tracers, mesh) v_w(n)=(v_w(n)*real(ice_steps_since_upd,WP)+vw)/real(ice_steps_since_upd+1,WP) endif end do - call exchange_nod(u_w, v_w) + call exchange_nod(u_w, v_w, partit) end subroutine ocean2ice ! ! !_______________________________________________________________________________ -subroutine oce_fluxes(tracers, mesh) +subroutine oce_fluxes(tracers, partit, mesh) use MOD_MESH use MOD_TRACER + use MOD_PARTIT USE g_CONFIG use o_ARRAYS use i_ARRAYS use g_comm_auto use g_forcing_param, only: use_virt_salt use g_forcing_arrays - use g_PARSUP use g_support use i_therm_param @@ -198,13 +210,17 @@ subroutine oce_fluxes(tracers, mesh) #endif use cavity_heat_water_fluxes_3eq_interface implicit none - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers integer :: n, elem, elnodes(3),n1 real(kind=WP) :: rsss, net real(kind=WP), allocatable :: flux(:) real(kind=WP), dimension(:,:), pointer :: temp, salt -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) @@ -248,14 +264,14 @@ subroutine oce_fluxes(tracers, mesh) water_flux = -fresh_wa_flux #endif heat_flux_in=heat_flux ! sw_pene will change the heat_flux - if (use_cavity) call cavity_heat_water_fluxes_3eq(tracers, mesh) + if (use_cavity) call cavity_heat_water_fluxes_3eq(tracers, partit, mesh) !!PS if (use_cavity) call cavity_heat_water_fluxes_2eq(mesh) !!PS where(ulevels_nod2D>1) heat_flux=0.0_WP !!PS where(ulevels_nod2D>1) water_flux=0.0_WP !___________________________________________________________________________ - call exchange_nod(heat_flux, water_flux) + call exchange_nod(heat_flux, water_flux, partit) !___________________________________________________________________________ ! on freshwater inflow/outflow or virtual salinity: @@ -282,9 +298,9 @@ subroutine oce_fluxes(tracers, mesh) if (use_cavity) then flux = virtual_salt where (ulevels_nod2d > 1) flux = 0.0_WP - call integrate_nod(flux, net, mesh) + call integrate_nod(flux, net, partit, mesh) else - call integrate_nod(virtual_salt, net, mesh) + call integrate_nod(virtual_salt, net, partit, mesh) end if virtual_salt=virtual_salt-net/ocean_area end if @@ -311,7 +327,7 @@ subroutine oce_fluxes(tracers, mesh) end if ! --> if use_cavity=.true. relax_salt anyway zero where is cavity see above - call integrate_nod(relax_salt, net, mesh) + call integrate_nod(relax_salt, net, partit, mesh) relax_salt=relax_salt-net/ocean_area !___________________________________________________________________________ @@ -346,7 +362,7 @@ subroutine oce_fluxes(tracers, mesh) end if ! compute total global net freshwater flux into the ocean - call integrate_nod(flux, net, mesh) + call integrate_nod(flux, net, partit, mesh) !___________________________________________________________________________ ! here the + sign must be used because we switched up the sign of the @@ -364,7 +380,7 @@ subroutine oce_fluxes(tracers, mesh) end if !___________________________________________________________________________ - if (use_sw_pene) call cal_shortwave_rad(mesh) + if (use_sw_pene) call cal_shortwave_rad(partit, mesh) !___________________________________________________________________________ deallocate(flux) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index c6e069af6..f1304b814 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -1,49 +1,56 @@ module ice_array_setup_interface interface - subroutine ice_array_setup(mesh) + subroutine ice_array_setup(partit, mesh) use mod_mesh + use mod_partit use mod_tracer - type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module ice_initial_state_interface interface - subroutine ice_initial_state(tracers, mesh) + subroutine ice_initial_state(tracers, partit, mesh) use mod_mesh + use mod_partit use mod_tracer - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module module ice_setup_interface interface - subroutine ice_setup(tracers, mesh) + subroutine ice_setup(tracers, partit, mesh) use mod_mesh + use mod_partit use mod_tracer - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module ! !_______________________________________________________________________________ ! ice initialization + array allocation + time stepping -subroutine ice_setup(tracers, mesh) +subroutine ice_setup(tracers, partit, mesh) use o_param - use g_parsup use i_param use i_arrays use g_CONFIG use mod_mesh + use mod_partit use mod_tracer use ice_array_setup_interface use ice_initial_state_interface implicit none - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers ! ================ DO not change ice_dt=real(ice_ave_steps,WP)*dt @@ -52,19 +59,19 @@ subroutine ice_setup(tracers, mesh) Clim_evp=Clim_evp*(evp_rheol_steps/ice_dt)**2/Tevp_inv ! This is combination ! it always enters ! ================ - call ice_array_setup(mesh) - call ice_fct_init(mesh) + call ice_array_setup(partit, mesh) + call ice_fct_init(partit, mesh) ! ================ ! Initialization routine, user input is required ! ================ !call ice_init_fields_test - call ice_initial_state(tracers, mesh) ! Use it unless running test example - if(mype==0) write(*,*) 'Ice is initialized' + call ice_initial_state(tracers, partit, mesh) ! Use it unless running test example + if(partit%mype==0) write(*,*) 'Ice is initialized' end subroutine ice_setup ! ! !_______________________________________________________________________________ -subroutine ice_array_setup(mesh) +subroutine ice_array_setup(partit, mesh) ! ! inializing sea ice model ! @@ -74,15 +81,19 @@ subroutine ice_array_setup(mesh) use o_param use i_param use MOD_MESH +use MOD_PARTIT use i_arrays -use g_parsup USE g_CONFIG implicit none -type(t_mesh), intent(in) , target :: mesh -integer :: n_size, e_size, mn, k, n, n1, n2 +type(t_partit), intent(inout), target :: partit +type(t_mesh), intent(in), target :: mesh +integer :: n_size, e_size, mn, k, n, n1, n2 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" n_size=myDim_nod2D+eDim_nod2D e_size=myDim_elem2D+eDim_elem2D @@ -179,27 +190,34 @@ end subroutine ice_array_setup ! !_______________________________________________________________________________ ! Sea ice model step -subroutine ice_timestep(step, mesh) +subroutine ice_timestep(step, partit, mesh) +use mod_mesh +use mod_partit use i_arrays use o_param -use g_parsup use g_CONFIG use i_PARAM, only: whichEVP -use mod_mesh #if defined (__icepack) use icedrv_main, only: step_icepack #endif implicit none -type(t_mesh), intent(in) , target :: mesh -integer :: step,i -REAL(kind=WP) :: t0,t1, t2, t3 +type(t_partit), intent(inout), target :: partit +type(t_mesh), intent(in), target :: mesh +integer :: step,i +REAL(kind=WP) :: t0,t1, t2, t3 #if defined (__icepack) -real(kind=WP) :: time_evp, time_advec, time_therm +real(kind=WP) :: time_evp, time_advec, time_therm #endif +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + t0=MPI_Wtime() #if defined (__icepack) @@ -211,18 +229,18 @@ subroutine ice_timestep(step, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics...'//achar(27)//'[0m' SELECT CASE (whichEVP) CASE (0) - call EVPdynamics(mesh) + call EVPdynamics (partit, mesh) CASE (1) - call EVPdynamics_m(mesh) + call EVPdynamics_m(partit, mesh) CASE (2) - call EVPdynamics_a(mesh) + call EVPdynamics_a(partit, mesh) CASE DEFAULT if (mype==0) write(*,*) 'a non existing EVP scheme specified!' - call par_ex + call par_ex(partit) stop END SELECT - if (use_cavity) call cavity_ice_clean_vel(mesh) + if (use_cavity) call cavity_ice_clean_vel(partit, mesh) t1=MPI_Wtime() !___________________________________________________________________________ @@ -238,26 +256,26 @@ subroutine ice_timestep(step, mesh) end do #endif /* (__oifs) */ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_TG_rhs_div...'//achar(27)//'[0m' - call ice_TG_rhs_div(mesh) + call ice_TG_rhs_div (partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_fct_solve...'//achar(27)//'[0m' - call ice_fct_solve(mesh) + call ice_fct_solve (partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_update_for_div...'//achar(27)//'[0m' - call ice_update_for_div(mesh) + call ice_update_for_div(partit, mesh) #if defined (__oifs) do i=1,myDim_nod2D+eDim_nod2D if (a_ice(i)>0.0_WP) ice_temp(i) = ice_temp(i)/a_ice(i) end do #endif /* (__oifs) */ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call cut_off...'//achar(27)//'[0m' - call cut_off(mesh) + call cut_off(partit, mesh) - if (use_cavity) call cavity_ice_clean_ma(mesh) + if (use_cavity) call cavity_ice_clean_ma(partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ ! ===== Thermodynamic part if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call thermodynamics...'//achar(27)//'[0m' - call thermodynamics(mesh) + call thermodynamics(partit, mesh) #endif /* (__icepack) */ @@ -270,10 +288,6 @@ subroutine ice_timestep(step, mesh) write(*,*) end if end do - - - - t3=MPI_Wtime() rtime_ice = rtime_ice + (t3-t0) rtime_tot = rtime_tot + (t3-t0) @@ -298,23 +312,28 @@ end subroutine ice_timestep ! !_______________________________________________________________________________ ! sets inital values or reads restart file for ice model -subroutine ice_initial_state(tracers, mesh) +subroutine ice_initial_state(tracers, partit, mesh) use i_ARRAYs use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_PARAM use o_arrays - use g_parsup use g_CONFIG implicit none ! - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers - integer :: i - character(MAX_PATH) :: filename - real(kind=WP), external :: TFrez ! Sea water freeze temperature. + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + integer :: i + character(MAX_PATH) :: filename + real(kind=WP), external :: TFrez ! Sea water freeze temperature. + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" -#include "associate_mesh.h" m_ice =0._WP a_ice =0._WP u_ice =0._WP diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index d4b6896b3..9b6f17981 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -1,14 +1,18 @@ !=================================================================== -subroutine cut_off(mesh) +subroutine cut_off(partit, mesh) use o_param use i_arrays use MOD_MESH + use MOD_PARTIT use g_config, only: use_cavity - use g_parsup implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! lower cutoff: a_ice @@ -73,7 +77,7 @@ end subroutine cut_off ! by Qiang Wang, 13.01.2009 !---------------------------------------------------------------------------- -subroutine thermodynamics(mesh) +subroutine thermodynamics(partit, mesh) ! ! For every surface node, this subroutine extracts the information ! needed for computation of thermodydnamics, calls the relevant @@ -83,16 +87,19 @@ subroutine thermodynamics(mesh) use o_param use mod_mesh + use mod_partit use i_therm_param use i_param use i_arrays use g_config use g_forcing_param use g_forcing_arrays - use g_parsup use g_comm_auto use g_sbf, only: l_snow implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP) :: h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,rsf,evap_in real(kind=WP) :: ug,ustar,T_oc,S_oc,h_ml,t,ch,ce,ch_i,ce_i,fw,ehf,evap real(kind=WP) :: ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout, subli @@ -101,9 +108,10 @@ subroutine thermodynamics(mesh) real(kind=WP), allocatable :: ustar_aux(:) real(kind=WP) lid_clo - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" rsss=ref_sss @@ -122,7 +130,7 @@ subroutine thermodynamics(mesh) (v_ice(i)-v_w(i))**2) ustar_aux(i)=sqrt(ustar*Cd_oce_ice) END DO - call exchange_nod(ustar_aux) !TODO Why do we need it? + call exchange_nod(ustar_aux, partit) !TODO Why do we need it? ! ================ ! end: friction velocity ! ================ @@ -258,10 +266,8 @@ subroutine therm_ice(h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & ! ehf - net heat flux at the ocean surface [W/m2] !RTnew use i_therm_param - use g_forcing_param, only: use_virt_salt - + use g_forcing_param, only: use_virt_salt use o_param - use g_parsup implicit none integer k diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 4d033719a..0d64878c1 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -1,9 +1,9 @@ MODULE io_BLOWUP use g_config use g_clock - use g_parsup use g_comm_auto USE MOD_MESH + USE MOD_PARTIT USE MOD_TRACER use o_arrays use i_arrays @@ -63,11 +63,12 @@ MODULE io_BLOWUP !_______________________________________________________________________________ ! ini_ocean_io initializes bid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! - subroutine ini_blowup_io(year, tracers, mesh) + subroutine ini_blowup_io(year, tracers, partit, mesh) implicit none integer, intent(in) :: year - type(t_tracer), intent(in), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers integer :: ncid, j integer :: varid character(500) :: longname @@ -75,7 +76,10 @@ subroutine ini_blowup_io(year, tracers, mesh) character(500) :: trname, units character(4) :: cyear -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if(mype==0) write(*,*)' --> Init. blowpup file ' write(cyear,'(i4)') year @@ -168,37 +172,38 @@ end subroutine ini_blowup_io ! ! !_______________________________________________________________________________ - subroutine blowup(istep, tracers, mesh) + subroutine blowup(istep, tracers, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers - integer :: istep + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + integer :: istep ctime=timeold+(dayold-1.)*86400 - call ini_blowup_io(yearnew, tracers, mesh) - if(mype==0) write(*,*)'Do output (netCDF, blowup) ...' - if(mype==0) write(*,*)' --> call assoc_ids(bid)' - call assoc_ids(bid) ; call was_error(bid) - if(mype==0) write(*,*)' --> call write_blowup(bid, istep)' - call write_blowup(bid, istep, mesh) ; call was_error(bid) + call ini_blowup_io(yearnew, tracers, partit, mesh) + if(partit%mype==0) write(*,*)'Do output (netCDF, blowup) ...' + if(partit%mype==0) write(*,*)' --> call assoc_ids(bid)' + call assoc_ids(bid, partit) ; call was_error(bid, partit) + if(partit%mype==0) write(*,*)' --> call write_blowup(bid, istep)' + call write_blowup(bid, istep, partit, mesh) ; call was_error(bid, partit) end subroutine blowup ! ! !_______________________________________________________________________________ - subroutine create_new_file(id) + subroutine create_new_file(id, partit) implicit none - + type(t_partit), intent(inout), target :: partit type(nc_file), intent(inout) :: id integer :: c, j integer :: n, k, l, kdim, dimid(4) character(2000) :: att_text ! Serial output implemented so far - if (mype/=0) return + if (partit%mype/=0) return c=1 id%error_status=0 ! create an ocean output file - if(mype==0) write(*,*) 'initializing blowup file ', trim(id%filename) + if(partit%mype==0) write(*,*) 'initializing blowup file ', trim(id%filename) id%error_status(c) = nf_create(id%filename, IOR(NF_NOCLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), id%ncid); c=c+1 do j=1, id%ndim @@ -345,16 +350,20 @@ end subroutine def_variable_2d ! ! !_______________________________________________________________________________ - subroutine write_blowup(id, istep, mesh) + subroutine write_blowup(id, istep, partit, mesh) implicit none type(nc_file), intent(inout) :: id integer, intent(in) :: istep real(kind=WP), allocatable :: aux1(:), aux2(:,:) integer :: i, size1, size2, shape integer :: c - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Serial output implemented so far if (mype==0) then @@ -372,8 +381,8 @@ subroutine write_blowup(id, istep, mesh) if (shape==1) then size1=id%var(i)%dims(1) if (mype==0) allocate(aux1(size1)) - if (size1==nod2D) call gather_nod (id%var(i)%pt1, aux1) - if (size1==elem2D) call gather_elem(id%var(i)%pt1, aux1) + if (size1==nod2D) call gather_nod (id%var(i)%pt1, aux1, partit) + if (size1==elem2D) call gather_elem(id%var(i)%pt1, aux1, partit) if (mype==0) then id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/1, id%rec_count/), (/size1, 1/), aux1, 1); c=c+1 end if @@ -383,37 +392,37 @@ subroutine write_blowup(id, istep, mesh) size1=id%var(i)%dims(1) size2=id%var(i)%dims(2) if (mype==0) allocate(aux2(size1, size2)) - if (size1==nod2D .or. size2==nod2D) call gather_nod (id%var(i)%pt2, aux2) - if (size1==elem2D .or. size2==elem2D) call gather_elem(id%var(i)%pt2, aux2) + if (size1==nod2D .or. size2==nod2D) call gather_nod (id%var(i)%pt2, aux2, partit) + if (size1==elem2D .or. size2==elem2D) call gather_elem(id%var(i)%pt2, aux2, partit) if (mype==0) then id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/1, 1, id%rec_count/), (/size1, size2, 1/), aux2, 2); c=c+1 end if if (mype==0) deallocate(aux2) else if (mype==0) write(*,*) 'not supported shape of array in restart file' - call par_ex + call par_ex(partit) stop end if end do if (mype==0) id%error_count=c-1 - call was_error(id) + call was_error(id, partit) if (mype==0) id%error_status(1)=nf_close(id%ncid); id%error_count=1 - call was_error(id) + call was_error(id, partit) end subroutine write_blowup ! ! !_______________________________________________________________________________ - subroutine assoc_ids(id) + subroutine assoc_ids(id, partit) implicit none - + type(t_partit), intent(inout) :: partit type(nc_file), intent(inout) :: id character(500) :: longname integer :: c, j, k real(kind=WP) :: rtime !timestamp of the record ! Serial output implemented so far - if (mype/=0) return + if (partit%mype/=0) return c=1 id%error_status=0 ! open existing netcdf file @@ -422,7 +431,7 @@ subroutine assoc_ids(id) id%error_status(c) = nf_open(id%filename, nf_nowrite, id%ncid) !if the file does not exist it will be created! if (id%error_status(c) .ne. nf_noerr) then - call create_new_file(id) ! error status counter will be reset + call create_new_file(id, partit) ! error status counter will be reset c=id%error_count+1 id%error_status(c) = nf_open(id%filename, nf_nowrite, id%ncid); c=c+1 end if @@ -449,9 +458,9 @@ subroutine assoc_ids(id) exit ! a proper rec_count detected, ready for reading restart, exit the loop end if if (k==1) then - if (mype==0) write(*,*) 'WARNING: all dates in restart file are after the current date' - if (mype==0) write(*,*) 'reading restart will not be possible !' - if (mype==0) write(*,*) 'the model attempted to start with the time stamp = ', int(ctime) + if (partit%mype==0) write(*,*) 'WARNING: all dates in restart file are after the current date' + if (partit%mype==0) write(*,*) 'reading restart will not be possible !' + if (partit%mype==0) write(*,*) 'the model attempted to start with the time stamp = ', int(ctime) id%error_status(c)=-310; end if end do @@ -468,20 +477,21 @@ end subroutine assoc_ids ! ! !_______________________________________________________________________________ - subroutine was_error(id) + subroutine was_error(id, partit) implicit none - type(nc_file), intent(inout) :: id - integer :: k, status, ierror - - call MPI_BCast(id%error_count, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(id%error_status(1), id%error_count, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + type(nc_file), intent(inout) :: id + type(t_partit), intent(in) :: partit + integer :: k, status, ierror + + call MPI_BCast(id%error_count, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(id%error_status(1), id%error_count, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) do k=1, id%error_count status=id%error_status(k) if (status .ne. nf_noerr) then - if (mype==0) write(*,*) 'error counter=', k - if (mype==0) call handle_err(status) - call par_ex + if (partit%mype==0) write(*,*) 'error counter=', k + if (partit%mype==0) call handle_err(status, partit) + call par_ex(partit) stop end if end do diff --git a/src/io_gather.F90 b/src/io_gather.F90 index 5aa8f68fb..e4a95454a 100644 --- a/src/io_gather.F90 +++ b/src/io_gather.F90 @@ -1,4 +1,5 @@ module io_gather_module + use MOD_PARTIT implicit none public init_io_gather, gather_nod2D, gather_real4_nod2D, gather_elem2D, gather_real4_elem2D private @@ -14,26 +15,32 @@ module io_gather_module contains - subroutine init_io_gather() + subroutine init_io_gather(partit) + use MOD_PARTIT + implicit none + type(t_partit), intent(inout), target :: partit integer err - if(.not. nod2D_lists_initialized) call init_nod2D_lists() - if(.not. elem2D_lists_initialized) call init_elem2D_lists() + if(.not. nod2D_lists_initialized) call init_nod2D_lists (partit) + if(.not. elem2D_lists_initialized) call init_elem2D_lists(partit) end subroutine - subroutine init_nod2D_lists() - use g_PARSUP + subroutine init_nod2D_lists(partit) implicit none + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_part_ass.h" ! EO args - ! todo: initialize with the other comm arrays, probably in "init_gatherLists" subroutine if(mype /= 0) then - if(.not. allocated(remPtr_nod2D)) allocate(remPtr_nod2D(npes)) + if(.not. allocated(partit%remPtr_nod2D)) allocate(partit%remPtr_nod2D(npes)) +#include "associate_part_ass.h" end if call MPI_Bcast(remPtr_nod2D, size(remPtr_nod2D), MPI_INTEGER, 0, MPI_COMM_FESOM, MPIerr) if(mype /= 0) then - if(.not. allocated(remList_nod2D)) allocate(remList_nod2D(remPtr_nod2D(npes))) + if(.not. allocated(partit%remList_nod2D)) allocate(partit%remList_nod2D(remPtr_nod2D(npes))) +#include "associate_part_ass.h" end if call MPI_Bcast(remList_nod2D, size(remList_nod2D), MPI_INTEGER, 0, MPI_COMM_FESOM, MPIerr) @@ -52,18 +59,23 @@ subroutine init_nod2D_lists() end subroutine - subroutine init_elem2D_lists() - use g_PARSUP + subroutine init_elem2D_lists(partit) + use MOD_PARTIT implicit none + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_part_ass.h" ! EO args ! todo: initialize with the other comm arrays, probably in "init_gatherLists" subroutine if(mype /= 0) then - if(.not. allocated(remPtr_elem2D)) allocate(remPtr_elem2D(npes)) + if(.not. allocated(partit%remPtr_elem2D)) allocate(partit%remPtr_elem2D(npes)) +#include "associate_part_ass.h" end if call MPI_Bcast(remPtr_elem2D, size(remPtr_elem2D), MPI_INTEGER, 0, MPI_COMM_FESOM, MPIerr) if(mype /= 0) then - if(.not. allocated(remList_elem2D)) allocate(remList_elem2D(remPtr_elem2D(npes))) + if(.not. allocated(partit%remList_elem2D)) allocate(partit%remList_elem2D(remPtr_elem2D(npes))) +#include "associate_part_ass.h" end if call MPI_Bcast(remList_elem2D, size(remList_elem2D), MPI_INTEGER, 0, MPI_COMM_FESOM, MPIerr) @@ -83,10 +95,11 @@ subroutine init_elem2D_lists() ! thread-safe procedure - subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) - use g_PARSUP + subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) + use MOD_PARTIT use, intrinsic :: iso_fortran_env, only: real64 implicit none + type(t_partit), intent(inout), target :: partit real(real64), intent(in) :: arr2D(:) real(real64), intent(out) :: arr2D_global(:) integer, intent(in) :: root_rank ! rank of receiving process @@ -97,10 +110,11 @@ subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: remote_node_count = -1 real(real64), allocatable :: sendbuf(:) real(real64), allocatable :: recvbuf(:) ! todo: alloc only for root_rank - integer :: req(npes-1) + integer :: req(partit%npes-1) integer :: request_index integer :: mpi_precision = MPI_DOUBLE_PRECISION - +#include "associate_part_def.h" +#include "associate_part_ass.h" if(.not. nod2D_lists_initialized) stop "io_gather_module has not been initialized" include "io_gather_nod.inc" @@ -108,12 +122,13 @@ subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) ! thread-safe procedure - subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) - use g_PARSUP + subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) + use MOD_PARTIT use, intrinsic :: iso_fortran_env, only: real32 implicit none - real(real32), intent(in) :: arr2D(:) - real(real32), intent(out) :: arr2D_global(:) + type(t_partit), intent(inout), target :: partit + real(real32), intent(in) :: arr2D(:) + real(real32), intent(out) :: arr2D_global(:) integer, intent(in) :: root_rank ! rank of receiving process integer, intent(in) :: tag integer io_comm @@ -122,10 +137,11 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: remote_node_count = -1 real(real32), allocatable :: sendbuf(:) real(real32), allocatable :: recvbuf(:) ! todo: alloc only for root_rank - integer :: req(npes-1) + integer :: req(partit%npes-1) integer :: request_index integer :: mpi_precision = MPI_REAL - +#include "associate_part_def.h" +#include "associate_part_ass.h" if(.not. nod2D_lists_initialized) stop "io_gather_module has not been initialized" include "io_gather_nod.inc" @@ -133,12 +149,13 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) ! thread-safe procedure - subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) - use g_PARSUP + subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) + use MOD_PARTIT use, intrinsic :: iso_fortran_env, only: real64 implicit none - real(real64), intent(in) :: arr2D(:) - real(real64), intent(out) :: arr2D_global(:) + type(t_partit), intent(inout), target :: partit + real(real64), intent(in) :: arr2D(:) + real(real64), intent(out) :: arr2D_global(:) integer, intent(in) :: root_rank ! rank of receiving process integer, intent(in) :: tag integer io_comm @@ -147,10 +164,11 @@ subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: remote_elem_count = -1 real(real64), allocatable :: sendbuf(:) real(real64), allocatable :: recvbuf(:) - integer :: req(npes-1) + integer :: req(partit%npes-1) integer :: request_index integer :: mpi_precision = MPI_DOUBLE_PRECISION - +#include "associate_part_def.h" +#include "associate_part_ass.h" if(.not. elem2D_lists_initialized) stop "io_gather_module has not been initialized" include "io_gather_elem.inc" @@ -158,12 +176,13 @@ subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) ! thread-safe procedure - subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) - use g_PARSUP + subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) + use MOD_PARTIT use, intrinsic :: iso_fortran_env, only: real32 implicit none - real(real32), intent(in) :: arr2D(:) - real(real32), intent(out) :: arr2D_global(:) + type(t_partit), intent(inout), target :: partit + real(real32), intent(in) :: arr2D(:) + real(real32), intent(out) :: arr2D_global(:) integer, intent(in) :: root_rank ! rank of receiving process integer, intent(in) :: tag integer io_comm @@ -172,10 +191,11 @@ subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: remote_elem_count = -1 real(real32), allocatable :: sendbuf(:) real(real32), allocatable :: recvbuf(:) - integer :: req(npes-1) + integer :: req(partit%npes-1) integer :: request_index integer :: mpi_precision = MPI_REAL - +#include "associate_part_def.h" +#include "associate_part_ass.h" if(.not. elem2D_lists_initialized) stop "io_gather_module has not been initialized" include "io_gather_elem.inc" diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index bf85bf83e..98ac5b228 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -1,5 +1,5 @@ module io_MEANDATA - + use MOD_PARTIT use o_PARAM, only : WP use, intrinsic :: iso_fortran_env, only: real64, real32 use io_data_strategy_module @@ -16,6 +16,7 @@ module io_MEANDATA type Meandata private + type(t_partit), pointer :: mypartit integer :: ndim integer :: glsize(2) integer :: accuracy @@ -91,14 +92,14 @@ subroutine destructor(this) end subroutine -subroutine ini_mean_io(tracers, mesh) +subroutine ini_mean_io(tracers, partit, mesh) use MOD_MESH use MOD_TRACER + use MOD_PARTIT use g_cvmix_tke use g_cvmix_idemix use g_cvmix_kpp use g_cvmix_tidal - use g_PARSUP use diagnostics use i_PARAM, only: whichEVP implicit none @@ -108,12 +109,16 @@ subroutine ini_mean_io(tracers, mesh) integer,dimension(15) :: sel_forcvar=0 character(len=10) :: id_string - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers namelist /nml_listsize/ io_listsize namelist /nml_list / io_list -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! OPEN and read namelist for I/O open( unit=nm_io_unit, file='namelist.io', form='formatted', access='sequential', status='old', iostat=iost ) @@ -142,254 +147,254 @@ subroutine ini_mean_io(tracers, mesh) SELECT CASE (trim(io_list(i)%id)) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!2D streams!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! CASE ('sst ') - call def_stream(nod2D, myDim_nod2D, 'sst', 'sea surface temperature', 'C', tracers%data(1)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'sst', 'sea surface temperature', 'C', tracers%data(1)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('sss ') - call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tracers%data(2)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tracers%data(2)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh ') - call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('vve_5 ') - call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', Wvel(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', Wvel(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh_rhs ') - call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh_rhs_old ') - call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) !___________________________________________________________________________________________________________________________________ ! output sea ice CASE ('uice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'uice', 'ice velocity x', 'm/s', u_ice, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'uice', 'ice velocity x', 'm/s', u_ice, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('vice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'vice', 'ice velocity y', 'm/s', v_ice, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'vice', 'ice velocity y', 'm/s', v_ice, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('a_ice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'a_ice', 'ice concentration', '%', a_ice(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'a_ice', 'ice concentration', '%', a_ice(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('m_ice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'm_ice', 'ice height', 'm', m_ice(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'm_ice', 'ice height', 'm', m_ice(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('thdgr ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'thdgr', 'thermodynamic growth rate ice', 'm/s', thdgr(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'thdgr', 'thermodynamic growth rate ice', 'm/s', thdgr(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('thdgrsn ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'thdgrsn', 'thermodynamic growth rate snow', 'm/s', thdgrsn(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'thdgrsn', 'thermodynamic growth rate snow', 'm/s', thdgrsn(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('flice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'flice', 'flooding growth rate ice', 'm/s', flice(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'flice', 'flooding growth rate ice', 'm/s', flice(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('m_snow ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'm_snow', 'snow height', 'm', m_snow(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'm_snow', 'snow height', 'm', m_snow(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ ! output mixed layer depth CASE ('MLD1 ') - call def_stream(nod2D, myDim_nod2D, 'MLD1', 'Mixed Layer Depth', 'm', MLD1(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'MLD1', 'Mixed Layer Depth', 'm', MLD1(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('MLD2 ') - call def_stream(nod2D, myDim_nod2D, 'MLD2', 'Mixed Layer Depth', 'm', MLD2(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'MLD2', 'Mixed Layer Depth', 'm', MLD2(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) !___________________________________________________________________________________________________________________________________ ! output surface forcing CASE ('fh ') - call def_stream(nod2D, myDim_nod2D, 'fh', 'heat flux', 'W', heat_flux_in(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'fh', 'heat flux', 'W', heat_flux_in(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('fw ') - call def_stream(nod2D, myDim_nod2D, 'fw', 'fresh water flux', 'm/s', water_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'fw', 'fresh water flux', 'm/s', water_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmice_x ') - call def_stream(nod2D, myDim_nod2D, 'atmice_x', 'stress atmice x', 'N/m2', stress_atmice_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'atmice_x', 'stress atmice x', 'N/m2', stress_atmice_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmice_y ') - call def_stream(nod2D, myDim_nod2D, 'atmice_y', 'stress atmice y', 'N/m2', stress_atmice_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'atmice_y', 'stress atmice y', 'N/m2', stress_atmice_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmoce_x ') - call def_stream(nod2D, myDim_nod2D, 'atmoce_x', 'stress atmoce x', 'N/m2', stress_atmoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'atmoce_x', 'stress atmoce x', 'N/m2', stress_atmoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmoce_y ') - call def_stream(nod2D, myDim_nod2D, 'atmoce_y', 'stress atmoce y', 'N/m2', stress_atmoce_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'atmoce_y', 'stress atmoce y', 'N/m2', stress_atmoce_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('iceoce_x ') - call def_stream(nod2D, myDim_nod2D, 'iceoce_x', 'stress iceoce x', 'N/m2', stress_iceoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'iceoce_x', 'stress iceoce x', 'N/m2', stress_iceoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('iceoce_y ') - call def_stream(nod2D, myDim_nod2D, 'iceoce_y', 'stress iceoce y', 'N/m2', stress_iceoce_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'iceoce_y', 'stress iceoce y', 'N/m2', stress_iceoce_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('alpha ') - call def_stream(nod2D, myDim_nod2D, 'alpha', 'thermal expansion', 'none', sw_alpha(1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'alpha', 'thermal expansion', 'none', sw_alpha(1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('beta ') - call def_stream(nod2D, myDim_nod2D, 'beta', 'saline contraction', 'none', sw_beta (1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'beta', 'saline contraction', 'none', sw_beta (1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('dens_flux ') - call def_stream(nod2D, myDim_nod2D , 'dflux', 'density flux', 'kg/(m3*s)', dens_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D , 'dflux', 'density flux', 'kg/(m3*s)', dens_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('runoff ') sel_forcvar(10)= 1 - call def_stream(nod2D, myDim_nod2D, 'runoff', 'river runoff', 'none', runoff(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'runoff', 'river runoff', 'none', runoff(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('evap ') sel_forcvar(7) = 1 - call def_stream(nod2D, myDim_nod2D, 'evap', 'evaporation', 'm/s', evaporation(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'evap', 'evaporation', 'm/s', evaporation(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('prec ') sel_forcvar(5) = 1 - call def_stream(nod2D, myDim_nod2D, 'prec', 'precicipation rain', 'm/s', prec_rain(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'prec', 'precicipation rain', 'm/s', prec_rain(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('snow ') sel_forcvar(6) = 1 - call def_stream(nod2D, myDim_nod2D, 'snow', 'precicipation snow', 'm/s', prec_snow(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'snow', 'precicipation snow', 'm/s', prec_snow(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('tair ') sel_forcvar(3) = 1 - call def_stream(nod2D, myDim_nod2D, 'tair', 'surface air temperature', '°C', Tair(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'tair', 'surface air temperature', '°C', Tair(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('shum ') sel_forcvar(4) = 1 - call def_stream(nod2D, myDim_nod2D, 'shum', 'specific humidity', '', shum(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'shum', 'specific humidity', '', shum(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('swr ') sel_forcvar(8) = 1 - call def_stream(nod2D, myDim_nod2D, 'swr', 'short wave radiation', 'W/m^2', shortwave(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'swr', 'short wave radiation', 'W/m^2', shortwave(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('lwr ') sel_forcvar(9) = 1 - call def_stream(nod2D, myDim_nod2D, 'lwr', 'long wave radiation', 'W/m^2', longwave(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'lwr', 'long wave radiation', 'W/m^2', longwave(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('uwind ') sel_forcvar(1) = 1 - call def_stream(nod2D, myDim_nod2D, 'uwind', '10m zonal surface wind velocity', 'm/s', u_wind(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'uwind', '10m zonal surface wind velocity', 'm/s', u_wind(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('vwind ') sel_forcvar(2) = 1 - call def_stream(nod2D, myDim_nod2D, 'vwind', '10m merid. surface wind velocity','m/s', v_wind(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'vwind', '10m merid. surface wind velocity','m/s', v_wind(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) !___________________________________________________________________________________________________________________________________ ! output KPP vertical mixing schemes CASE ('kpp_obldepth ') if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then! fesom KPP - call def_stream(nod2D, myDim_nod2D, 'kpp_obldepth', 'KPP ocean boundary layer depth', 'm', hbl(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'kpp_obldepth', 'KPP ocean boundary layer depth', 'm', hbl(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then ! cvmix KPP - call def_stream(nod2D, myDim_nod2D, 'kpp_obldepth', 'KPP ocean boundary layer depth', 'm', kpp_obldepth(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'kpp_obldepth', 'KPP ocean boundary layer depth', 'm', kpp_obldepth(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('kpp_sbuoyflx') if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then ! fesom KPP - call def_stream(nod2D, myDim_nod2D, 'kpp_sbuoyflx', 'surface buoyancy flux', 'm2/s3', Bo(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'kpp_sbuoyflx', 'surface buoyancy flux', 'm2/s3', Bo(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then ! cvmix KPP - call def_stream(nod2D, myDim_nod2D, 'kpp_sbuoyflx', 'surface buoyancy flux', 'm2/s3', kpp_sbuoyflx(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'kpp_sbuoyflx', 'surface buoyancy flux', 'm2/s3', kpp_sbuoyflx(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('tx_sur ') sel_forcvar(11) = 1 - call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind str. to ocean', 'm/s2', stress_surf(1, :), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind str. to ocean', 'm/s2', stress_surf(1, :), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ty_sur ') sel_forcvar(12) = 1 - call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind str. to ocean', 'm/s2', stress_surf(2, :), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind str. to ocean', 'm/s2', stress_surf(2, :), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('curl_surf ') if (lcurt_stress_surf) then - call def_stream(nod2D, myDim_nod2D, 'curl_surf', 'vorticity of the surface stress','none', curl_stress_surf(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'curl_surf', 'vorticity of the surface stress','none', curl_stress_surf(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ ! output Ferrari/GM parameterisation 2D CASE ('fer_C ') if (Fer_GM) then - call def_stream(nod2D, myDim_nod2D, 'fer_C', 'GM, depth independent speed', 'm/s' , fer_c(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'fer_C', 'GM, depth independent speed', 'm/s' , fer_c(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ !>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 3D streams <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< !___________________________________________________________________________________________________________________________________ CASE ('temp ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'temp', 'temperature', 'C', tracers%data(1)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'temp', 'temperature', 'C', tracers%data(1)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('salt ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'salt', 'salinity', 'psu', tracers%data(2)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'salt', 'salinity', 'psu', tracers%data(2)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('otracers ') do j=3, tracers%num_tracers write (id_string, "(I3.3)") tracers%data(j)%ID - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'tra_'//id_string, 'pasive tracer ID='//id_string, 'n/a', tracers%data(j)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'tra_'//id_string, 'pasive tracer ID='//id_string, 'n/a', tracers%data(j)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end do CASE ('slope_x ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_x', 'neutral slope X', 'none', slope_tapered(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_x', 'neutral slope X', 'none', slope_tapered(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('slope_y ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_y', 'neutral slope Y', 'none', slope_tapered(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_y', 'neutral slope Y', 'none', slope_tapered(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('slope_z ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_z', 'neutral slope Z', 'none', slope_tapered(3,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_z', 'neutral slope Z', 'none', slope_tapered(3,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('N2 ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'N2', 'brunt väisälä', '1/s2', bvfreq(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'N2', 'brunt väisälä', '1/s2', bvfreq(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Kv ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('v ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('w ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Av ') - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u_dis_tend') if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_dis_tend', 'horizontal velocity viscosity tendency', 'm/s', UV_dis_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_dis_tend', 'horizontal velocity viscosity tendency', 'm/s', UV_dis_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_dis_tend') if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_dis_tend', 'meridional velocity viscosity tendency', 'm/s', UV_dis_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_dis_tend', 'meridional velocity viscosity tendency', 'm/s', UV_dis_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('u_back_tend') if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_back_tend', 'horizontal velocity backscatter tendency', 'm2/s2', UV_back_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_back_tend', 'horizontal velocity backscatter tendency', 'm2/s2', UV_back_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_back_tend') if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_back_tend', 'meridional velocity backscatter tendency', 'm2/s2', UV_back_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_back_tend', 'meridional velocity backscatter tendency', 'm2/s2', UV_back_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('u_total_tend') if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_total_tend', 'horizontal velocity total viscosity tendency', 'm/s', UV_total_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_total_tend', 'horizontal velocity total viscosity tendency', 'm/s', UV_total_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_total_tend') if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_total_tend', 'meridional velocity total viscosity tendency', 'm/s', UV_total_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_total_tend', 'meridional velocity total viscosity tendency', 'm/s', UV_total_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ ! output Ferrari/GM parameterisation CASE ('bolus_u ') if (Fer_GM) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_u', 'GM bolus velocity U','m/s', fer_uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_u', 'GM bolus velocity U','m/s', fer_uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('bolus_v ') if (Fer_GM) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_v', 'GM bolus velocity V','m/s', fer_uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_v', 'GM bolus velocity V','m/s', fer_uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('bolus_w ') if (Fer_GM) then - call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'bolus_w', 'GM bolus velocity W','m/s', fer_Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'bolus_w', 'GM bolus velocity W','m/s', fer_Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('fer_K ') if (Fer_GM) then - call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'fer_K', 'GM, stirring diff.','m2/s', fer_k(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'fer_K', 'GM, stirring diff.','m2/s', fer_k(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('fer_scal ') if (Fer_GM) then - call def_stream( nod2D , myDim_nod2D , 'fer_scal', 'GM surface scaling','', fer_scal(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream( nod2D , myDim_nod2D , 'fer_scal', 'GM surface scaling','', fer_scal(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('dMOC ') if (ldiag_dMOC) then - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'U_rho_x_DZ', 'fluxes for density MOC', 'fluxes', std_dens_UVDZ(1,:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'V_rho_x_DZ', 'fluxes for density MOC', 'fluxes', std_dens_UVDZ(2,:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_heat_flux', 'HF bouyancy flux ', 'kg*m/s' ,std_dens_flux(1,:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_rest_flux', 'RESTOR. bouyancy flux ', 'kg*m/s' ,std_dens_flux(2,:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_frwt_flux', 'FW bouyancy flux ', 'kg*m/s' ,std_dens_flux(3,:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_dens_dVdT', 'dV/dT', 'm3/s' ,std_dens_dVdT(:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, nod2D /), (/std_dens_N, myDim_nod2D/), 'std_dens_DIV', 'm3/s', 'm3/s' ,std_dens_DIV(:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_dens_Z', 'm', 'm' ,std_dens_Z(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl-1, nod2D /), (/nl-1, myDim_nod2D /), 'density_dMOC', 'density' , 'm', density_dmoc(:,:), 1, 'y', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D , 'density_flux_e', 'density flux at elems ', 'm', dens_flux_e(:), 1, 'y', i_real4, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'U_rho_x_DZ', 'fluxes for density MOC', 'fluxes', std_dens_UVDZ(1,:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'V_rho_x_DZ', 'fluxes for density MOC', 'fluxes', std_dens_UVDZ(2,:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_heat_flux', 'HF bouyancy flux ', 'kg*m/s' ,std_dens_flux(1,:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_rest_flux', 'RESTOR. bouyancy flux ', 'kg*m/s' ,std_dens_flux(2,:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_frwt_flux', 'FW bouyancy flux ', 'kg*m/s' ,std_dens_flux(3,:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_dens_dVdT', 'dV/dT', 'm3/s' ,std_dens_dVdT(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, nod2D /), (/std_dens_N, myDim_nod2D/), 'std_dens_DIV', 'm3/s', 'm3/s' ,std_dens_DIV(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_dens_Z', 'm', 'm' ,std_dens_Z(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl-1, nod2D /), (/nl-1, myDim_nod2D /), 'density_dMOC', 'density' , 'm', density_dmoc(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D , 'density_flux_e', 'density flux at elems ', 'm', dens_flux_e(:), 1, 'y', i_real4, partit, mesh) end if !___________________________________________________________________________________________________________________________________ CASE ('pgf_x ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'pgf_x', 'zonal pressure gradient force' , 'm/s^2', pgf_x(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'pgf_x', 'zonal pressure gradient force' , 'm/s^2', pgf_x(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('pgf_y ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'pgf_y', 'meridional pressure gradient force', 'm/s^2', pgf_y(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'pgf_y', 'meridional pressure gradient force', 'm/s^2', pgf_y(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) !___________________________________________________________________________________________________________________________________ #if defined (__oifs) CASE ('alb ') - call def_stream(nod2D, myDim_nod2D, 'alb', 'ice albedo', 'none', ice_alb(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'alb', 'ice albedo', 'none', ice_alb(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ist ') - call def_stream(nod2D, myDim_nod2D, 'ist', 'ice surface temperature', 'K', ice_temp(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'ist', 'ice surface temperature', 'K', ice_temp(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('qsi ') - call def_stream(nod2D, myDim_nod2D, 'qsi', 'ice heat flux', 'W/m^2', ice_heat_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'qsi', 'ice heat flux', 'W/m^2', ice_heat_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('qso ') - call def_stream(nod2D, myDim_nod2D, 'qso', 'oce heat flux', 'W/m^2', oce_heat_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'qso', 'oce heat flux', 'W/m^2', oce_heat_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) #endif !___________________________________________________________________________________________________________________________________ @@ -402,95 +407,95 @@ subroutine ini_mean_io(tracers, mesh) !3D if (ldiag_energy) then - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'rhof', 'in-situ density at faces', 'kg/m3', rhof(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wrhof', 'vertical velocity x density', 'kg/(s*m2)', wrhof(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'uu', 'u times u', 'm2/s2', u_x_u(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'uv', 'u times v', 'm2/s2', u_x_v(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vv', 'v times v', 'm2/s2', v_x_v(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, elem2D/), (/nl-1, myDim_elem2D/),'uw', 'u times w', 'm2/s2', u_x_w(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, elem2D/), (/nl-1, myDim_elem2D/),'vw', 'v times w', 'm2/s2', v_x_w(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dudx', 'du/dx', '1/s', dudx(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dudy', 'du/dy', '1/s', dudy(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvdx', 'dv/dx', '1/s', dvdx(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvdy', 'dv/dy', '1/s', dvdy(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'dudz', 'du/dz', '1/s', dudz(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'dvdz', 'dv/dz', '1/s', dvdz(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz', 'int(Av * du/dz)', 'm3/s2', av_dudz(:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dvdz', 'int(Av * dv/dz)', 'm3/s2', av_dvdz(:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz_sq', 'Av * (du/dz)^2', 'm^2/s^3', av_dudz_sq(:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'Vertical mixing A', 'm2/s', Av(:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', Unode(1,:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', Unode(2,:,:), 1, 'm', i_real8, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'rhof', 'in-situ density at faces', 'kg/m3', rhof(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wrhof', 'vertical velocity x density', 'kg/(s*m2)', wrhof(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'uu', 'u times u', 'm2/s2', u_x_u(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'uv', 'u times v', 'm2/s2', u_x_v(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vv', 'v times v', 'm2/s2', v_x_v(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, elem2D/), (/nl-1, myDim_elem2D/),'uw', 'u times w', 'm2/s2', u_x_w(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, elem2D/), (/nl-1, myDim_elem2D/),'vw', 'v times w', 'm2/s2', v_x_w(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dudx', 'du/dx', '1/s', dudx(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dudy', 'du/dy', '1/s', dudy(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvdx', 'dv/dx', '1/s', dvdx(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvdy', 'dv/dy', '1/s', dvdy(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'dudz', 'du/dz', '1/s', dudz(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'dvdz', 'dv/dz', '1/s', dvdz(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz', 'int(Av * du/dz)', 'm3/s2', av_dudz(:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dvdz', 'int(Av * dv/dz)', 'm3/s2', av_dvdz(:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz_sq', 'Av * (du/dz)^2', 'm^2/s^3', av_dudz_sq(:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'Vertical mixing A', 'm2/s', Av(:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', Unode(1,:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', Unode(2,:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', uv(1,:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', uv(2,:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', Wvel(:,:), 1, 'm', i_real8, mesh) - - call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'utau_bott', '(u, tau) at the bottom', 'N/(m s)', utau_bott(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'u_bott', 'bottom velocity', 'm/s', u_bott(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'v_bott', 'bottom velocity', 'm/s', v_bott(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'u_surf', 'surface velocity', 'm/s', u_surf(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'v_surf', 'surface velocity', 'm/s', u_surf(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'tx_bot', 'bottom stress x', 'N/m2', stress_bott(1, 1:myDim_elem2D),1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'ty_bot', 'bottom stress y', 'N/m2', stress_bott(2, 1:myDim_elem2D),1, 'm', i_real4, mesh) - if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind stress to ocean', 'm/s2', stress_surf(1, 1:myDim_elem2D),1, 'm', i_real4, mesh) ; sel_forcvar(11)=1 - if (sel_forcvar(12)==0) call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind stress to ocean','m/s2', stress_surf(2, 1:myDim_elem2D),1, 'm', i_real4, mesh) ; sel_forcvar(12)=1 + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', uv(1,:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', uv(2,:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', Wvel(:,:), 1, 'm', i_real8, partit, mesh) + + call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'utau_bott', '(u, tau) at the bottom', 'N/(m s)', utau_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'u_bott', 'bottom velocity', 'm/s', u_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'v_bott', 'bottom velocity', 'm/s', v_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'u_surf', 'surface velocity', 'm/s', u_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'v_surf', 'surface velocity', 'm/s', u_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'tx_bot', 'bottom stress x', 'N/m2', stress_bott(1, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'ty_bot', 'bottom stress y', 'N/m2', stress_bott(2, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) + if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind stress to ocean', 'm/s2', stress_surf(1, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) ; sel_forcvar(11)=1 + if (sel_forcvar(12)==0) call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind stress to ocean','m/s2', stress_surf(2, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) ; sel_forcvar(12)=1 end if if (mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then ! TKE diagnostic - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke' , 'turbulent kinetic energy' , 'm^2/s^2', tke(:,:) , 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Ttot', 'total production of turbulent kinetic energy', 'm^2/s^3', tke_Ttot(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tbpr', 'TKE production by buoyancy' , 'm^2/s^3', tke_Tbpr(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tspr', 'TKE production by shear' , 'm^2/s^3', tke_Tspr(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tdif', 'TKE production by vertical diffusion' , 'm^2/s^3', tke_Tdif(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tdis', 'TKE production by dissipation' , 'm^2/s^3', tke_Tdis(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Twin', 'TKE production by wind' , 'm^2/s^3', tke_Twin(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tbck', 'background forcing for TKE' , 'm^2/s^3', tke_Tbck(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Lmix', 'mixing length scale of TKE' , 'm' , tke_Lmix(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Pr' , 'Prantl number' , '' , tke_Pr(:,:) , 1, 'y', i_real4, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke' , 'turbulent kinetic energy' , 'm^2/s^2', tke(:,:) , 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Ttot', 'total production of turbulent kinetic energy', 'm^2/s^3', tke_Ttot(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tbpr', 'TKE production by buoyancy' , 'm^2/s^3', tke_Tbpr(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tspr', 'TKE production by shear' , 'm^2/s^3', tke_Tspr(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tdif', 'TKE production by vertical diffusion' , 'm^2/s^3', tke_Tdif(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tdis', 'TKE production by dissipation' , 'm^2/s^3', tke_Tdis(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Twin', 'TKE production by wind' , 'm^2/s^3', tke_Twin(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tbck', 'background forcing for TKE' , 'm^2/s^3', tke_Tbck(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Lmix', 'mixing length scale of TKE' , 'm' , tke_Lmix(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Pr' , 'Prantl number' , '' , tke_Pr(:,:) , 1, 'y', i_real4, partit, mesh) if (mix_scheme_nmb==56) then ! TKE-IDEMIX diagnostic - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tiwf', 'TKE production by internal waves (IDEMIX)', 'm^2/s^3', tke_Tiwf(:,:), 1, 'y', i_real4, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tiwf', 'TKE production by internal waves (IDEMIX)', 'm^2/s^3', tke_Tiwf(:,:), 1, 'y', i_real4, partit, mesh) end if end if if (mod(mix_scheme_nmb,10)==6) then ! IDEMIX Internal-Wave-Energy diagnostics - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe' , 'internal wave energy' , 'm^2/s^2', iwe(:,:) , 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Ttot', 'total production of internal wave energy', 'm^2/s^2', iwe_Ttot(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tdif', 'IWE production by vertical diffusion' , 'm^2/s^3', iwe_Tdif(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tdis', 'IWE production by dissipation' , 'm^2/s^3', iwe_Tdis(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tsur', 'IWE production from surface forcing' , 'm^2/s^2', iwe_Tsur(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tbot', 'IWE production from bottom forcing' , 'm^2/s^2', iwe_Tbot(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_c0' , 'IWE vertical group velocity' , 'm/s' , iwe_c0(:,:) , 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_v0' , 'IWE horizontal group velocity' , 'm/s' , iwe_c0(:,:) , 1, 'y', i_real4, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe' , 'internal wave energy' , 'm^2/s^2', iwe(:,:) , 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Ttot', 'total production of internal wave energy', 'm^2/s^2', iwe_Ttot(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tdif', 'IWE production by vertical diffusion' , 'm^2/s^3', iwe_Tdif(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tdis', 'IWE production by dissipation' , 'm^2/s^3', iwe_Tdis(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tsur', 'IWE production from surface forcing' , 'm^2/s^2', iwe_Tsur(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tbot', 'IWE production from bottom forcing' , 'm^2/s^2', iwe_Tbot(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_c0' , 'IWE vertical group velocity' , 'm/s' , iwe_c0(:,:) , 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_v0' , 'IWE horizontal group velocity' , 'm/s' , iwe_c0(:,:) , 1, 'y', i_real4, partit, mesh) end if if (mod(mix_scheme_nmb,10)==7) then ! cvmix_TIDAL diagnostics - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tidal_Kv' , 'tidal diffusivity' , 'm^2/s' , tidal_Kv(:,:) , 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tidal_Av' , 'tidal viscosity' , 'm^2/s' , tidal_Av(:,:) , 1, 'y', i_real4, mesh) - call def_stream( nod2D , myDim_nod2D , 'tidal_forcbot', 'near tidal bottom forcing', 'W/m^2' , tidal_forc_bottom_2D , 100, 'y', i_real4, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tidal_Kv' , 'tidal diffusivity' , 'm^2/s' , tidal_Kv(:,:) , 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tidal_Av' , 'tidal viscosity' , 'm^2/s' , tidal_Av(:,:) , 1, 'y', i_real4, partit, mesh) + call def_stream( nod2D , myDim_nod2D , 'tidal_forcbot', 'near tidal bottom forcing', 'W/m^2' , tidal_forc_bottom_2D , 100, 'y', i_real4, partit, mesh) end if !___________________________________________________________________________________________________________________________________ ! output Redi parameterisation if (Redi) then - call def_stream((/nl-1 , nod2D /), (/nl-1, myDim_nod2D /), 'Redi_K', 'Redi diffusion coefficient', 'm2/s', Ki(:,:), 1, 'y', i_real4, mesh) + call def_stream((/nl-1 , nod2D /), (/nl-1, myDim_nod2D /), 'Redi_K', 'Redi diffusion coefficient', 'm2/s', Ki(:,:), 1, 'y', i_real4, partit, mesh) end if !___________________________________________________________________________________________________________________________________ ! output Monin-Obukov (TB04) mixing length if (use_momix) then - call def_stream(nod2D, myDim_nod2D, 'momix_length', 'Monin-Obukov mixing length', 'm', mixlength(:), 1, 'm', i_real4, mesh) + call def_stream(nod2D, myDim_nod2D, 'momix_length', 'Monin-Obukov mixing length', 'm', mixlength(:), 1, 'm', i_real4, partit, mesh) end if !___________________________________________________________________________________________________________________________________ if (ldiag_curl_vel3) then - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'curl_u', 'relative vorticity', '1/s', vorticity, 1, 'm', i_real4, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'curl_u', 'relative vorticity', '1/s', vorticity, 1, 'm', i_real4, partit, mesh) end if !___________________________________________________________________________________________________________________________________ @@ -498,35 +503,35 @@ subroutine ini_mean_io(tracers, mesh) end if if (whichEVP==2) then - call def_stream(elem2D, myDim_elem2D, 'alpha_EVP', 'alpha in EVP', 'n/a', alpha_evp_array, 1, 'd', i_real4, mesh) - call def_stream(nod2D, myDim_nod2D, 'beta_EVP', 'beta in EVP', 'n/a', beta_evp_array, 1, 'd', i_real4, mesh) + call def_stream(elem2D, myDim_elem2D, 'alpha_EVP', 'alpha in EVP', 'n/a', alpha_evp_array, 1, 'd', i_real4, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'beta_EVP', 'beta in EVP', 'n/a', beta_evp_array, 1, 'd', i_real4, partit, mesh) end if !___________________________________________________________________________ if (ldiag_dvd) then - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_h', 'horiz. dvd of temperature', '°C/s' , tracers%work%tr_dvd_horiz(:,:,1), 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_v', 'vert. dvd of temperature' , '°C/s' , tracers%work%tr_dvd_vert(:,:,1) , 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_h', 'horiz. dvd of salinity' , 'psu/s', tracers%work%tr_dvd_horiz(:,:,2), 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_v', 'vert. dvd of salinity' , 'psu/s', tracers%work%tr_dvd_vert(:,:,2) , 1, 'm', i_real4, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_h', 'horiz. dvd of temperature', '°C/s' , tracers%work%tr_dvd_horiz(:,:,1), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_v', 'vert. dvd of temperature' , '°C/s' , tracers%work%tr_dvd_vert(:,:,1) , 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_h', 'horiz. dvd of salinity' , 'psu/s', tracers%work%tr_dvd_horiz(:,:,2), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_v', 'vert. dvd of salinity' , 'psu/s', tracers%work%tr_dvd_vert(:,:,2) , 1, 'm', i_real4, partit, mesh) end if !___________________________________________________________________________ if (ldiag_forc) then - if (sel_forcvar( 1)==0) call def_stream(nod2D , myDim_nod2D , 'uwind' , '10m zonal surface wind velocity', 'm/s' , u_wind(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 2)==0) call def_stream(nod2D , myDim_nod2D , 'vwind' , '10m merid surface wind velocity', 'm/s' , v_wind(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 3)==0) call def_stream(nod2D , myDim_nod2D , 'tair' , 'surface air temperature' , '°C' , Tair(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 4)==0) call def_stream(nod2D , myDim_nod2D , 'shum' , 'specific humidity' , '' , shum(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 5)==0) call def_stream(nod2D , myDim_nod2D , 'prec' , 'precicipation rain' , 'm/s' , prec_rain(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 6)==0) call def_stream(nod2D , myDim_nod2D , 'snow' , 'precicipation snow' , 'm/s' , prec_snow(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 7)==0) call def_stream(nod2D , myDim_nod2D , 'evap' , 'evaporation' , 'm/s' , evaporation(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 8)==0) call def_stream(nod2D , myDim_nod2D , 'swr' , 'short wave radiation' , 'W/m^2', shortwave(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 9)==0) call def_stream(nod2D , myDim_nod2D , 'lwr' , 'long wave radiation' , 'W/m^2', longwave(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar(10)==0) call def_stream(nod2D , myDim_nod2D , 'runoff', 'river runoff' , 'none' , runoff(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind str. to ocean' , 'm/s^2', stress_surf(1, :), 1, 'm', i_real4, mesh) - if (sel_forcvar(12)==0) call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind str. to ocean' , 'm/s^2', stress_surf(2, :), 1, 'm', i_real4, mesh) - call def_stream(nod2D , myDim_nod2D , 'cd', 'wind drag coef. ' , '', cd_atm_oce_arr(:), 1, 'm', i_real4, mesh) - call def_stream(nod2D , myDim_nod2D , 'ch', 'transfer coeff. sensible heat', '', ch_atm_oce_arr(:), 1, 'm', i_real4, mesh) - call def_stream(nod2D , myDim_nod2D , 'ce', 'transfer coeff. evaporation ' , '', ce_atm_oce_arr(:), 1, 'm', i_real4, mesh) + if (sel_forcvar( 1)==0) call def_stream(nod2D , myDim_nod2D , 'uwind' , '10m zonal surface wind velocity', 'm/s' , u_wind(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 2)==0) call def_stream(nod2D , myDim_nod2D , 'vwind' , '10m merid surface wind velocity', 'm/s' , v_wind(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 3)==0) call def_stream(nod2D , myDim_nod2D , 'tair' , 'surface air temperature' , '°C' , Tair(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 4)==0) call def_stream(nod2D , myDim_nod2D , 'shum' , 'specific humidity' , '' , shum(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 5)==0) call def_stream(nod2D , myDim_nod2D , 'prec' , 'precicipation rain' , 'm/s' , prec_rain(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 6)==0) call def_stream(nod2D , myDim_nod2D , 'snow' , 'precicipation snow' , 'm/s' , prec_snow(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 7)==0) call def_stream(nod2D , myDim_nod2D , 'evap' , 'evaporation' , 'm/s' , evaporation(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 8)==0) call def_stream(nod2D , myDim_nod2D , 'swr' , 'short wave radiation' , 'W/m^2', shortwave(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 9)==0) call def_stream(nod2D , myDim_nod2D , 'lwr' , 'long wave radiation' , 'W/m^2', longwave(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar(10)==0) call def_stream(nod2D , myDim_nod2D , 'runoff', 'river runoff' , 'none' , runoff(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind str. to ocean' , 'm/s^2', stress_surf(1, :), 1, 'm', i_real4, partit, mesh) + if (sel_forcvar(12)==0) call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind str. to ocean' , 'm/s^2', stress_surf(2, :), 1, 'm', i_real4, partit, mesh) + call def_stream(nod2D , myDim_nod2D , 'cd', 'wind drag coef. ' , '', cd_atm_oce_arr(:), 1, 'm', i_real4, partit, mesh) + call def_stream(nod2D , myDim_nod2D , 'ch', 'transfer coeff. sensible heat', '', ch_atm_oce_arr(:), 1, 'm', i_real4, partit, mesh) + call def_stream(nod2D , myDim_nod2D , 'ce', 'transfer coeff. evaporation ' , '', ce_atm_oce_arr(:), 1, 'm', i_real4, partit, mesh) #if defined (__oasis) call def_stream(nod2D, myDim_nod2D, 'subli', 'sublimation', 'm/s', sublimation(:), 1, 'm', i_real4, mesh) #endif @@ -537,16 +542,17 @@ subroutine ini_mean_io(tracers, mesh) ! !-------------------------------------------------------------------------------------------- ! -function mesh_dimname_from_dimsize(size, mesh) result(name) +function mesh_dimname_from_dimsize(size, partit, mesh) result(name) use mod_mesh - use g_PARSUP + use mod_partit use diagnostics #if defined (__icepack) use icedrv_main, only: ncat ! number of ice thickness cathegories #endif implicit none integer :: size - type(t_mesh) mesh + type(t_mesh) , intent(in) :: mesh + type(t_partit), intent(in) :: partit character(50) :: name if (size==mesh%nod2D) then @@ -565,29 +571,30 @@ function mesh_dimname_from_dimsize(size, mesh) result(name) #endif else name='unknown' - if (mype==0) write(*,*) 'WARNING: unknown dimension in mean I/O with size of ', size + if (partit%mype==0) write(*,*) 'WARNING: unknown dimension in mean I/O with size of ', size end if end function ! !-------------------------------------------------------------------------------------------- ! -subroutine create_new_file(entry, mesh) +subroutine create_new_file(entry, partit, mesh) use g_clock - use g_PARSUP use mod_mesh + use mod_partit use fesom_version_info_module use g_config use i_PARAM use o_PARAM implicit none - character(2000) :: att_text - type(t_mesh) mesh + character(2000) :: att_text + type(t_mesh) , intent(in) :: mesh + type(t_partit), intent(in) :: partit type(Meandata), intent(inout) :: entry character(len=*), parameter :: global_attributes_prefix = "FESOM_" ! Serial output implemented so far - if (mype/=entry%root_rank) return + if (partit%mype/=entry%root_rank) return ! create an ocean output file write(*,*) 'initializing I/O file for ', trim(entry%name) @@ -606,7 +613,7 @@ subroutine create_new_file(entry, mesh) elseif (entry%dimname(1)=='ncat') then call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'long_name', len_trim('sea-ice thickness class'),'sea-ice thickness class'), __LINE__) else - if (mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data' + if (partit%mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data' end if call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'units', len_trim('m'),'m'), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'positive', len_trim('down'),'down'), __LINE__) @@ -672,7 +679,7 @@ subroutine create_new_file(entry, mesh) elseif (entry%dimname(1)=='nz1') then call assert_nf( nf_put_var_double(entry%ncid, entry%dimvarID(1), abs(mesh%Z)), __LINE__) else - if (mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data' + if (partit%mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data' end if call assert_nf( nf_close(entry%ncid), __LINE__) @@ -681,7 +688,6 @@ subroutine create_new_file(entry, mesh) !-------------------------------------------------------------------------------------------- ! subroutine assoc_ids(entry) - use g_PARSUP implicit none type(Meandata), intent(inout) :: entry @@ -705,18 +711,18 @@ subroutine assoc_ids(entry) ! subroutine write_mean(entry, entry_index) use mod_mesh - use g_PARSUP + use mod_partit use io_gather_module implicit none type(Meandata), intent(inout) :: entry - integer, intent(in) :: entry_index - integer tag + integer, intent(in) :: entry_index + integer :: tag integer :: i, size1, size2, size_gen, size_lev, order integer :: c, lev ! Serial output implemented so far - if (mype==entry%root_rank) then + if (entry%mypartit%mype==entry%root_rank) then write(*,*) 'writing mean record for ', trim(entry%name), '; rec. count = ', entry%rec_count call assert_nf( nf_put_vara_double(entry%ncid, entry%Tid, entry%rec_count, 1, entry%ctime_copy, 1), __LINE__) end if @@ -726,16 +732,16 @@ subroutine write_mean(entry, entry_index) tag = 2 ! we can use a fixed tag here as we have an individual communicator for each output field !___________writing 8 byte real_________________________________________ if (entry%accuracy == i_real8) then - if(mype==entry%root_rank) then + if(entry%mypartit%mype==entry%root_rank) then if(.not. allocated(entry%aux_r8)) allocate(entry%aux_r8(size2)) end if do lev=1, size1 if(.not. entry%is_elem_based) then - call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm) + call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%mypartit) else - call gather_elem2D(entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm) + call gather_elem2D(entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%mypartit) end if - if (mype==entry%root_rank) then + if (entry%mypartit%mype==entry%root_rank) then if (entry%ndim==1) then call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__) elseif (entry%ndim==2) then @@ -746,16 +752,16 @@ subroutine write_mean(entry, entry_index) !___________writing 4 byte real _________________________________________ else if (entry%accuracy == i_real4) then - if(mype==entry%root_rank) then + if(entry%mypartit%mype==entry%root_rank) then if(.not. allocated(entry%aux_r4)) allocate(entry%aux_r4(size2)) end if do lev=1, size1 if(.not. entry%is_elem_based) then - call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm) + call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%mypartit) else - call gather_real4_elem2D(entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm) + call gather_real4_elem2D(entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%mypartit) end if - if (mype==entry%root_rank) then + if (entry%mypartit%mype==entry%root_rank) then if (entry%ndim==1) then call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__) elseif (entry%ndim==2) then @@ -769,7 +775,6 @@ subroutine write_mean(entry, entry_index) subroutine update_means - use g_PARSUP implicit none type(Meandata), pointer :: entry integer :: n @@ -798,36 +803,35 @@ subroutine update_means ! !-------------------------------------------------------------------------------------------- ! -subroutine output(istep, tracers, mesh) +subroutine output(istep, tracers, partit, mesh) use g_clock use mod_mesh + use mod_partit use mod_tracer - use g_PARSUP use io_gather_module #if defined (__icepack) use icedrv_main, only: init_io_icepack #endif - implicit none - integer :: istep logical, save :: lfirst=.true. integer :: n, k logical :: do_output type(Meandata), pointer :: entry - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers - character(:), allocatable :: filepath - real(real64) :: rtime !timestamp of the record + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + character(:), allocatable :: filepath + real(real64) :: rtime !timestamp of the record ctime=timeold+(dayold-1.)*86400 if (lfirst) then - call ini_mean_io(tracers, mesh) - call init_io_gather() + call ini_mean_io(tracers, partit, mesh) + call init_io_gather(partit) #if defined (__icepack) - call init_io_icepack(mesh) + call init_io_icepack(partit, mesh) #endif - call init_io_gather() + call init_io_gather(partit) end if call update_means @@ -855,7 +859,7 @@ subroutine output(istep, tracers, mesh) else write(*,*) 'You did not specify a supported outputflag.' write(*,*) 'The program will stop to give you opportunity to do it.' - call par_ex(1) + call par_ex(partit, 1) stop endif @@ -865,13 +869,13 @@ subroutine output(istep, tracers, mesh) entry%thread_running = .false. filepath = trim(ResultPath)//trim(entry%name)//'.'//trim(runid)//'.'//cyearnew//'.nc' - if(mype == entry%root_rank) then + if(partit%mype == entry%root_rank) then if(filepath /= trim(entry%filename)) then if("" /= trim(entry%filename)) call assert_nf(nf_close(entry%ncid), __LINE__) entry%filename = filepath ! use any existing file with this name or create a new one if( nf_open(entry%filename, nf_write, entry%ncid) /= nf_noerr ) then - call create_new_file(entry, mesh) + call create_new_file(entry, partit, mesh) call assert_nf( nf_open(entry%filename, nf_write, entry%ncid), __LINE__) end if call assoc_ids(entry) @@ -912,18 +916,18 @@ subroutine output(istep, tracers, mesh) subroutine do_output_callback(entry_index) -use g_PARSUP use mod_mesh +use mod_partit integer, intent(in) :: entry_index ! EO args - type(Meandata), pointer :: entry + type(Meandata), pointer :: entry entry=>io_stream(entry_index) - mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) + entry%mypartit%mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) call write_mean(entry, entry_index) - if(mype == entry%root_rank) call assert_nf( nf_sync(entry%ncid), __LINE__ ) ! flush the file to disk after each write + if(entry%mypartit%mype == entry%root_rank) call assert_nf( nf_sync(entry%ncid), __LINE__ ) ! flush the file to disk after each write end subroutine @@ -940,10 +944,11 @@ subroutine finalize_output() ! !-------------------------------------------------------------------------------------------- ! -subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, mesh, flip_array) +subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, partit, mesh, flip_array) use mod_mesh - use g_PARSUP + use mod_partit implicit none + type(t_partit), intent(inout), target :: partit integer, intent(in) :: glsize(2), lcsize(2) character(len=*), intent(in) :: name, description, units real(kind=WP), target, intent(in) :: data(:,:) @@ -955,10 +960,10 @@ subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, fr type(t_mesh), intent(in), target :: mesh logical, optional, intent(in) :: flip_array integer i - + do i = 1, rank(data) if ((ubound(data, dim = i)<=0)) then - if (mype==0) then + if (partit%mype==0) then write(*,*) 'WARNING: adding I/O stream for ', trim(name), ' failed (contains 0 dimension)' write(*,*) 'upper bound is: ', ubound(data, dim = i) end if @@ -966,7 +971,7 @@ subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, fr end if end do - if (mype==0) then + if (partit%mype==0) then write(*,*) 'adding I/O stream 3D for ', trim(name) end if @@ -996,18 +1001,17 @@ subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, fr entry%local_values_r4 = 0._real32 end if - entry%dimname(1)=mesh_dimname_from_dimsize(glsize(1), mesh) !2D! mesh_dimname_from_dimsize(glsize, mesh) - entry%dimname(2)=mesh_dimname_from_dimsize(glsize(2), mesh) !2D! entry%dimname(2)='unknown' - + entry%dimname(1)=mesh_dimname_from_dimsize(glsize(1), partit, mesh) !2D! mesh_dimname_from_dimsize(glsize, mesh) + entry%dimname(2)=mesh_dimname_from_dimsize(glsize(2), partit, mesh) !2D! entry%dimname(2)='unknown' ! non dimension specific - call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, mesh) + call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh) end subroutine ! !-------------------------------------------------------------------------------------------- ! -subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, mesh) +subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, partit, mesh) use mod_mesh - use g_PARSUP + use mod_partit implicit none integer, intent(in) :: glsize, lcsize character(len=*), intent(in) :: name, description, units @@ -1017,12 +1021,13 @@ subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, fr integer, intent(in) :: accuracy type(Meandata), allocatable :: tmparr(:) type(Meandata), pointer :: entry - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in) :: mesh + type(t_partit), intent(inout) :: partit integer i do i = 1, rank(data) if ((ubound(data, dim = i)<=0)) then - if (mype==0) then + if (partit%mype==0) then write(*,*) 'WARNING: adding I/O stream for ', trim(name), ' failed (contains 0 dimension)' write(*,*) 'upper bound is: ', ubound(data, dim = i) end if @@ -1030,7 +1035,7 @@ subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, fr end if end do - if (mype==0) then + if (partit%mype==0) then write(*,*) 'adding I/O stream 2D for ', trim(name) end if @@ -1050,16 +1055,15 @@ subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, fr entry%ndim=1 entry%glsize=(/1, glsize/) - entry%dimname(1)=mesh_dimname_from_dimsize(glsize, mesh) + entry%dimname(1)=mesh_dimname_from_dimsize(glsize, partit, mesh) entry%dimname(2)='unknown' - ! non dimension specific - call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, mesh) + call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh) end subroutine subroutine associate_new_stream(name, entry) - type(Meandata), pointer :: entry + type(Meandata), pointer :: entry character(len=*), intent(in) :: name integer i @@ -1082,16 +1086,17 @@ subroutine associate_new_stream(name, entry) end subroutine - subroutine def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, mesh) + subroutine def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh) use mod_mesh - use g_PARSUP + use mod_partit use io_netcdf_workaround_module - type(Meandata), intent(inout) :: entry - character(len=*), intent(in) :: name, description, units - integer, intent(in) :: freq - character, intent(in) :: freq_unit - integer, intent(in) :: accuracy - type(t_mesh), intent(in), target :: mesh + type(Meandata), intent(inout) :: entry + character(len=*), intent(in) :: name, description, units + integer, intent(in) :: freq + character, intent(in) :: freq_unit + integer, intent(in) :: accuracy + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit ! EO args logical async_netcdf_allowed integer provided_mpi_thread_support_level @@ -1107,8 +1112,8 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, elseif (accuracy == i_real4) then allocate(data_strategy_nf_float_type :: entry%data_strategy) else - if (mype==0) write(*,*) 'not supported output accuracy:',accuracy,'for',trim(name) - call par_ex + if (partit%mype==0) write(*,*) 'not supported output accuracy:',accuracy,'for',trim(name) + call par_ex(partit) stop endif ! accuracy @@ -1127,7 +1132,7 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, else if(entry%glsize(1)==mesh%elem2D .or. entry%glsize(2)==mesh%elem2D) then entry%is_elem_based = .true. else - if(mype == 0) print *,"can not determine if ",trim(name)," is node or elem based" + if(partit%mype == 0) print *,"can not determine if ",trim(name)," is node or elem based" stop end if @@ -1139,9 +1144,9 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, ! set up async output - entry%root_rank = next_io_rank(MPI_COMM_FESOM, async_netcdf_allowed) + entry%root_rank = next_io_rank(partit%MPI_COMM_FESOM, async_netcdf_allowed, partit) - call MPI_Comm_dup(MPI_COMM_FESOM, entry%comm, err) + call MPI_Comm_dup(partit%MPI_COMM_FESOM, entry%comm, err) call entry%thread%initialize(do_output_callback, entry_index) if(.not. async_netcdf_allowed) call entry%thread%disable_async() @@ -1152,7 +1157,8 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, call MPI_Query_thread(provided_mpi_thread_support_level, err) if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call entry%thread%disable_async() - entry%mype_workaround = mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads + entry%mype_workaround = partit%mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads + entry%mypartit=>partit end subroutine diff --git a/src/io_mesh_info.F90 b/src/io_mesh_info.F90 index 5deb51530..a682e753e 100644 --- a/src/io_mesh_info.F90 +++ b/src/io_mesh_info.F90 @@ -1,6 +1,6 @@ module io_mesh_info -use g_PARSUP -use MOD_MESH +USE MOD_MESH +USE MOD_PARTIT use g_config use g_comm_auto use o_PARAM @@ -33,9 +33,10 @@ module io_mesh_info !------------------------------------------------------------------------- ! this routine stores most of metadata used in FESOM. Shall be called at the cold start once during the simulation. ! info: fesom.mesh.diag.nc is 77MB for the CORE II mesh with 47 vertical levels -subroutine write_mesh_info(mesh) +subroutine write_mesh_info(partit, mesh) implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit integer :: status, ncid, j integer :: nod_n_id, elem_n_id, edge_n_id, nod_part_id, elem_part_id integer :: nl_id, nl1_id @@ -59,68 +60,71 @@ subroutine write_mesh_info(mesh) integer :: vtype integer, pointer :: pid -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" call MPI_AllREDUCE(maxval(nod_in_elem2D_num), N_max, 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) filename=trim(ResultPath)//runid//'.mesh.diag.nc' - call my_create(filename, IOR(NF_CLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), ncid) + call my_create(filename, IOR(NF_CLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), ncid, partit) !Define the dimensions - call my_def_dim(ncid, 'nod2', nod2D, nod_n_id) - call my_def_dim(ncid, 'edg_n', edge2d, edge_n_id) - call my_def_dim(ncid, 'elem', elem2d, elem_n_id) - call my_def_dim(ncid, 'nz', nl, nl_id) - call my_def_dim(ncid, 'nz1', nl-1, nl1_id) - call my_def_dim(ncid, 'n2', 2, id_2) - call my_def_dim(ncid, 'n3', 3, id_3) - call my_def_dim(ncid, 'n4', 4, id_4) - call my_def_dim(ncid, 'N', N_max, id_N) + call my_def_dim(ncid, 'nod2', nod2D, nod_n_id, partit) + call my_def_dim(ncid, 'edg_n', edge2d, edge_n_id, partit) + call my_def_dim(ncid, 'elem', elem2d, elem_n_id, partit) + call my_def_dim(ncid, 'nz', nl, nl_id, partit) + call my_def_dim(ncid, 'nz1', nl-1, nl1_id, partit) + call my_def_dim(ncid, 'n2', 2, id_2, partit) + call my_def_dim(ncid, 'n3', 3, id_3, partit) + call my_def_dim(ncid, 'n4', 4, id_4, partit) + call my_def_dim(ncid, 'N', N_max, id_N, partit) !Define the variables ! 1D - call my_def_var(ncid, 'nz', NF_DOUBLE, 1, (/nl_id /), zbar_id, 'depth of levels' ) - call my_def_var(ncid, 'nz1', NF_DOUBLE, 1, (/nl1_id/), z_id, 'depth of layers' ) - call my_def_var(ncid, 'elem_area', NF_DOUBLE, 1, (/elem_n_id/), elem_area_id, 'element areas' ) - call my_def_var(ncid, 'nlevels_nod2D', NF_INT, 1, (/nod_n_id/), nlevels_nod2D_id, 'number of levels below nodes' ) - call my_def_var(ncid, 'nlevels', NF_INT, 1, (/elem_n_id/), nlevels_id, 'number of levels below elements' ) - call my_def_var(ncid, 'nod_in_elem2D_num', NF_INT, 1, (/nod_n_id/), nod_in_elem2D_num_id, 'number of elements containing the node') - call my_def_var(ncid, 'nod_part', NF_INT, 1, (/nod_n_id/), nod_part_id, 'nodal partitioning at the cold start' ) - call my_def_var(ncid, 'elem_part', NF_INT, 1, (/elem_n_id/), elem_part_id, 'element partitioning at the cold start') - call my_def_var(ncid, 'zbar_e_bottom', NF_DOUBLE, 1, (/elem_n_id/), zbar_e_bot_id, 'element bottom depth') - call my_def_var(ncid, 'zbar_n_bottom', NF_DOUBLE, 1, (/nod_n_id/), zbar_n_bot_id, 'nodal bottom depth') - call my_def_var(ncid, 'lon', NF_DOUBLE, 1, (/nod_n_id/), lon_id, 'longitude') - call my_def_var(ncid, 'lat', NF_DOUBLE, 1, (/nod_n_id/), lat_id, 'latitude') + call my_def_var(ncid, 'nz', NF_DOUBLE, 1, (/nl_id /), zbar_id, 'depth of levels', partit) + call my_def_var(ncid, 'nz1', NF_DOUBLE, 1, (/nl1_id/), z_id, 'depth of layers', partit) + call my_def_var(ncid, 'elem_area', NF_DOUBLE, 1, (/elem_n_id/), elem_area_id, 'element areas', partit) + call my_def_var(ncid, 'nlevels_nod2D', NF_INT, 1, (/nod_n_id/), nlevels_nod2D_id, 'number of levels below nodes', partit) + call my_def_var(ncid, 'nlevels', NF_INT, 1, (/elem_n_id/), nlevels_id, 'number of levels below elements', partit) + call my_def_var(ncid, 'nod_in_elem2D_num', NF_INT, 1, (/nod_n_id/), nod_in_elem2D_num_id, 'number of elements containing the node', partit) + call my_def_var(ncid, 'nod_part', NF_INT, 1, (/nod_n_id/), nod_part_id, 'nodal partitioning at the cold start', partit) + call my_def_var(ncid, 'elem_part', NF_INT, 1, (/elem_n_id/), elem_part_id, 'element partitioning at the cold start', partit) + call my_def_var(ncid, 'zbar_e_bottom', NF_DOUBLE, 1, (/elem_n_id/), zbar_e_bot_id, 'element bottom depth', partit) + call my_def_var(ncid, 'zbar_n_bottom', NF_DOUBLE, 1, (/nod_n_id/), zbar_n_bot_id, 'nodal bottom depth', partit) + call my_def_var(ncid, 'lon', NF_DOUBLE, 1, (/nod_n_id/), lon_id, 'longitude', partit) + call my_def_var(ncid, 'lat', NF_DOUBLE, 1, (/nod_n_id/), lat_id, 'latitude', partit) ! 2D - call my_def_var(ncid, 'nod_area', NF_DOUBLE, 2, (/nod_n_id, nl_id/), nod_area_id, 'nodal areas' ) - call my_def_var(ncid, 'elements', NF_INT, 2, (/elem_n_id, id_3/), elem_id, 'elements' ) - call my_def_var(ncid, 'nodes', NF_DOUBLE, 2, (/nod_n_id, id_2/), nod_id, 'nodal geo. coordinates' ) - call my_def_var(ncid, 'nod_in_elem2D', NF_INT, 2, (/nod_n_id, id_N/), nod_in_elem2D_id, 'elements containing the node') - call my_def_var(ncid, 'edges', NF_INT, 2, (/edge_n_id, id_2/), edges_id, 'edges' ) - call my_def_var(ncid, 'edge_tri', NF_INT, 2, (/edge_n_id, id_2/), edge_tri_id, 'edge triangles' ) - call my_def_var(ncid, 'edge_cross_dxdy', NF_DOUBLE, 2, (/edge_n_id, id_4/), edge_cross_dxdy_id, 'edge cross distancess' ) - call my_def_var(ncid, 'gradient_sca_x', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_x_id, 'x component of a gradient at nodes of an element') - call my_def_var(ncid, 'gradient_sca_y', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_y_id, 'y component of a gradient at nodes of an element') - call my_nf_enddef(ncid) + call my_def_var(ncid, 'nod_area', NF_DOUBLE, 2, (/nod_n_id, nl_id/), nod_area_id, 'nodal areas', partit) + call my_def_var(ncid, 'elements', NF_INT, 2, (/elem_n_id, id_3/), elem_id, 'elements', partit) + call my_def_var(ncid, 'nodes', NF_DOUBLE, 2, (/nod_n_id, id_2/), nod_id, 'nodal geo. coordinates', partit) + call my_def_var(ncid, 'nod_in_elem2D', NF_INT, 2, (/nod_n_id, id_N/), nod_in_elem2D_id, 'elements containing the node', partit) + call my_def_var(ncid, 'edges', NF_INT, 2, (/edge_n_id, id_2/), edges_id, 'edges', partit) + call my_def_var(ncid, 'edge_tri', NF_INT, 2, (/edge_n_id, id_2/), edge_tri_id, 'edge triangles', partit) + call my_def_var(ncid, 'edge_cross_dxdy', NF_DOUBLE, 2, (/edge_n_id, id_4/), edge_cross_dxdy_id, 'edge cross distancess', partit) + call my_def_var(ncid, 'gradient_sca_x', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_x_id, 'x component of a gradient at nodes of an element', partit) + call my_def_var(ncid, 'gradient_sca_y', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_y_id, 'y component of a gradient at nodes of an element', partit) + call my_nf_enddef(ncid, partit) ! vercical levels/layers - call my_put_vara(ncid, zbar_id, 1, nl, zbar) - call my_put_vara(ncid, z_id, 1, nl-1, Z) + call my_put_vara(ncid, zbar_id, 1, nl, zbar, partit) + call my_put_vara(ncid, z_id, 1, nl-1, Z, partit) ! nodal areas allocate(rbuffer(nod2D)) do k=1, nl - call gather_nod(area(k, :), rbuffer) - call my_put_vara(ncid, nod_area_id, (/1, k/), (/nod2D, 1/), rbuffer) + call gather_nod(area(k, :), rbuffer, partit) + call my_put_vara(ncid, nod_area_id, (/1, k/), (/nod2D, 1/), rbuffer, partit) end do deallocate(rbuffer) ! element areas allocate(rbuffer(elem2D)) - call gather_elem(elem_area(1:myDim_elem2D), rbuffer) - call my_put_vara(ncid, elem_area_id, 1, elem2D, rbuffer) + call gather_elem(elem_area(1:myDim_elem2D), rbuffer, partit) + call my_put_vara(ncid, elem_area_id, 1, elem2D, rbuffer, partit) deallocate(rbuffer) ! elements @@ -130,27 +134,27 @@ subroutine write_mesh_info(mesh) do k=1, myDim_elem2D lbuffer(k)=myList_nod2D(elem2d_nodes(i, k)) end do - call gather_elem(lbuffer, ibuffer) - call my_put_vara(ncid, elem_id, (/1, i/), (/elem2D, 1/), ibuffer) + call gather_elem(lbuffer, ibuffer, partit) + call my_put_vara(ncid, elem_id, (/1, i/), (/elem2D, 1/), ibuffer, partit) end do deallocate(lbuffer, ibuffer) ! number of levels below elements allocate(ibuffer(elem2D)) - call gather_elem(nlevels(1:myDim_elem2D), ibuffer) - call my_put_vara(ncid, nlevels_id, 1, elem2D, ibuffer) + call gather_elem(nlevels(1:myDim_elem2D), ibuffer, partit) + call my_put_vara(ncid, nlevels_id, 1, elem2D, ibuffer, partit) deallocate(ibuffer) ! number of levels below nodes allocate(ibuffer(nod2D)) - call gather_nod(nlevels_nod2D(1:myDim_nod2D), ibuffer) - call my_put_vara(ncid, nlevels_nod2D_id, 1, nod2D, ibuffer) + call gather_nod(nlevels_nod2D(1:myDim_nod2D), ibuffer, partit) + call my_put_vara(ncid, nlevels_nod2D_id, 1, nod2D, ibuffer, partit) deallocate(ibuffer) ! number of elements containing the node allocate(ibuffer(nod2D)) - call gather_nod(nod_in_elem2D_num(1:myDim_nod2D), ibuffer) - call my_put_vara(ncid, nod_in_elem2D_num_id, 1, nod2D, ibuffer) + call gather_nod(nod_in_elem2D_num(1:myDim_nod2D), ibuffer, partit) + call my_put_vara(ncid, nod_in_elem2D_num_id, 1, nod2D, ibuffer, partit) deallocate(ibuffer) ! elements containing the node @@ -163,8 +167,8 @@ subroutine write_mesh_info(mesh) lbuffer(k)=myList_elem2D(nod_in_elem2D(i, k)) end if end do - call gather_nod(lbuffer, ibuffer) - call my_put_vara(ncid, nod_in_elem2D_id, (/1, i/), (/nod2D, 1/), ibuffer) + call gather_nod(lbuffer, ibuffer, partit) + call my_put_vara(ncid, nod_in_elem2D_id, (/1, i/), (/nod2D, 1/), ibuffer, partit) END DO deallocate(lbuffer, ibuffer) @@ -172,29 +176,29 @@ subroutine write_mesh_info(mesh) allocate(ibuffer(nod2D)) allocate(lbuffer(myDim_nod2D)) lbuffer=mype - call gather_nod(lbuffer, ibuffer) - call my_put_vara(ncid, nod_part_id, 1, nod2D, ibuffer) + call gather_nod(lbuffer, ibuffer, partit) + call my_put_vara(ncid, nod_part_id, 1, nod2D, ibuffer, partit) deallocate(lbuffer, ibuffer) ! element partitioning allocate(ibuffer(elem2D)) allocate(lbuffer(myDim_elem2D)) lbuffer=mype - call gather_elem(lbuffer, ibuffer) - call my_put_vara(ncid, elem_part_id, 1, elem2D, ibuffer) + call gather_elem(lbuffer, ibuffer, partit) + call my_put_vara(ncid, elem_part_id, 1, elem2D, ibuffer, partit) deallocate(lbuffer, ibuffer) ! nodes (GEO coordinates) allocate(rbuffer(nod2D)) do i=1, 2 - call gather_nod(geo_coord_nod2D(i, 1:myDim_nod2D), rbuffer) + call gather_nod(geo_coord_nod2D(i, 1:myDim_nod2D), rbuffer, partit) rbuffer = rbuffer/rad - call my_put_vara(ncid, nod_id, (/1, i/), (/nod2D, 1/), rbuffer) + call my_put_vara(ncid, nod_id, (/1, i/), (/nod2D, 1/), rbuffer, partit) if (i == 1) then - call my_put_vara(ncid, lon_id, 1, nod2D, rbuffer) + call my_put_vara(ncid, lon_id, 1, nod2D, rbuffer, partit) else - call my_put_vara(ncid, lat_id, 1, nod2D, rbuffer) + call my_put_vara(ncid, lat_id, 1, nod2D, rbuffer, partit) endif end do deallocate(rbuffer) @@ -206,8 +210,8 @@ subroutine write_mesh_info(mesh) do k=1, myDim_edge2D lbuffer(k)=myList_nod2D(edges(i, k)) end do - call gather_edge(lbuffer, ibuffer) - call my_put_vara(ncid, edges_id, (/1, i/), (/edge2D, 1/), ibuffer) + call gather_edge(lbuffer, ibuffer, partit) + call my_put_vara(ncid, edges_id, (/1, i/), (/edge2D, 1/), ibuffer, partit) end do deallocate(lbuffer, ibuffer) @@ -222,8 +226,8 @@ subroutine write_mesh_info(mesh) lbuffer(k) = 0 endif end do - call gather_edge(lbuffer, ibuffer) - call my_put_vara(ncid, edge_tri_id, (/1, i/), (/edge2D, 1/), ibuffer) + call gather_edge(lbuffer, ibuffer, partit) + call my_put_vara(ncid, edge_tri_id, (/1, i/), (/edge2D, 1/), ibuffer, partit) end do deallocate(lbuffer, ibuffer) @@ -232,8 +236,8 @@ subroutine write_mesh_info(mesh) allocate(lrbuffer(myDim_edge2D)) do i=1, 4 lrbuffer=edge_cross_dxdy(i, 1:myDim_edge2D) - call gather_edge(lrbuffer, rbuffer) - call my_put_vara(ncid, edge_cross_dxdy_id, (/1, i/), (/edge2D, 1/), rbuffer) + call gather_edge(lrbuffer, rbuffer, partit) + call my_put_vara(ncid, edge_cross_dxdy_id, (/1, i/), (/edge2D, 1/), rbuffer, partit) end do deallocate(rbuffer, lrbuffer) @@ -241,183 +245,187 @@ subroutine write_mesh_info(mesh) ! X component of gadient at elements allocate(rbuffer(elem2D)) do i=1, 3 - call gather_elem(gradient_sca(i, 1:myDim_elem2D), rbuffer) - call my_put_vara(ncid, gradient_sca_x_id, (/4-i, 1/), (/1, elem2D/), rbuffer) ! (4-i), NETCDF will permute otherwise + call gather_elem(gradient_sca(i, 1:myDim_elem2D), rbuffer, partit) + call my_put_vara(ncid, gradient_sca_x_id, (/4-i, 1/), (/1, elem2D/), rbuffer, partit) ! (4-i), NETCDF will permute otherwise end do deallocate(rbuffer) ! Y component of gadient at elements allocate(rbuffer(elem2D)) do i=1, 3 - call gather_elem(gradient_sca(i+3, 1:myDim_elem2D), rbuffer) - call my_put_vara(ncid, gradient_sca_y_id, (/4-i, 1/), (/1, elem2D/), rbuffer) ! (4-i), NETCDF will permute otherwise + call gather_elem(gradient_sca(i+3, 1:myDim_elem2D), rbuffer, partit) + call my_put_vara(ncid, gradient_sca_y_id, (/4-i, 1/), (/1, elem2D/), rbuffer, partit)! (4-i), NETCDF will permute otherwise end do deallocate(rbuffer) ! nodal bottom depth (take into account partial cells if used) allocate(rbuffer(nod2D)) - call gather_nod(zbar_n_bot(1:myDim_nod2D), rbuffer) - call my_put_vara(ncid, zbar_n_bot_id, 1, nod2D, rbuffer) + call gather_nod(zbar_n_bot(1:myDim_nod2D), rbuffer, partit) + call my_put_vara(ncid, zbar_n_bot_id, 1, nod2D, rbuffer, partit) deallocate(rbuffer) ! element bottom depth (take into account partial cells if used) allocate(rbuffer(elem2D)) - call gather_elem(zbar_e_bot(1:myDim_elem2D), rbuffer) - call my_put_vara(ncid, zbar_e_bot_id, 1, elem2D, rbuffer) + call gather_elem(zbar_e_bot(1:myDim_elem2D), rbuffer, partit) + call my_put_vara(ncid, zbar_e_bot_id, 1, elem2D, rbuffer, partit) deallocate(rbuffer) - call my_close(ncid) + call my_close(ncid, partit) end subroutine write_mesh_info ! !============================================================================ ! -subroutine my_def_dim(ncid, short_name, value, id) +subroutine my_def_dim(ncid, short_name, value, id, partit) IMPLICIT NONE -integer, intent(in) :: ncid, value -character(*), intent(in) :: short_name -integer, intent(inout):: id -integer :: ierror, status +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid, value +character(*), intent(in) :: short_name +integer, intent(inout) :: id +integer :: ierror, status -if (mype==0) then +if (partit%mype==0) then status = nf_def_dim(ncid, trim(short_name), value, id) end if -call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) -if (status .ne. nf_noerr) call handle_err(status) +call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) +if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_def_dim ! !============================================================================ ! -subroutine my_def_var(ncid, short_name, vtype, dsize, dids, id, att_text) +subroutine my_def_var(ncid, short_name, vtype, dsize, dids, id, att_text, partit) IMPLICIT NONE -integer, intent(in) :: ncid, dsize, dids(dsize), vtype -character(*), intent(in) :: short_name, att_text -integer, intent(inout):: id -integer :: ierror, status +type(t_partit), intent(inout):: partit +integer, intent(in) :: ncid, dsize, dids(dsize), vtype +character(*), intent(in) :: short_name, att_text +integer, intent(inout):: id +integer :: ierror, status -if (mype==0) then +if (partit%mype==0) then status = nf_def_var(ncid, trim(short_name), vtype, dsize, dids, id) end if -call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) -if (status .ne. nf_noerr) call handle_err(status) +call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) +if (status .ne. nf_noerr) call handle_err(status, partit) -if (mype==0) then +if (partit%mype==0) then status = nf_put_att_text(ncid, id, 'long_name', len_trim(att_text), trim(att_text)); end if -call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) -if (status .ne. nf_noerr) call handle_err(status) +call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) +if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_def_var ! !============================================================================ ! -subroutine my_nf_enddef(ncid) +subroutine my_nf_enddef(ncid, partit) IMPLICIT NONE -integer, intent(in) :: ncid -integer :: ierror, status +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid +integer :: ierror, status -if (mype==0) then +if (partit%mype==0) then status = nf_enddef(ncid) end if -call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) -if (status .ne. nf_noerr) call handle_err(status) +call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) +if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_nf_enddef ! !============================================================================ ! -subroutine my_put_vara_double_1D(ncid, varid, start, N, var) +subroutine my_put_vara_double_1D(ncid, varid, start, N, var, partit) IMPLICIT NONE +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid, varid, start, N +real(kind=WP) :: var(:) +integer :: ierror, status -integer, intent(in) :: ncid, varid, start, N -real(kind=WP) :: var(:) -integer :: ierror, status - - - if (mype==0) status=nf_put_vara_double(ncid, varid, start, N, var) - call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (status .ne. nf_noerr) call handle_err(status) + if (partit%mype==0) status=nf_put_vara_double(ncid, varid, start, N, var) + call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_put_vara_double_1D ! !============================================================================ ! -subroutine my_put_vara_double_2D(ncid, varid, start, N, var) +subroutine my_put_vara_double_2D(ncid, varid, start, N, var, partit) IMPLICIT NONE +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid, varid, start(:), N(:) +real(kind=WP) :: var(:) +integer :: ierror, status -integer, intent(in) :: ncid, varid, start(:), N(:) -real(kind=WP) :: var(:) -integer :: ierror, status - - if (mype==0) status=nf_put_vara_double(ncid, varid, start, N, var) - call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (status .ne. nf_noerr) call handle_err(status) + if (partit%mype==0) status=nf_put_vara_double(ncid, varid, start, N, var) + call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_put_vara_double_2D ! !============================================================================ ! -subroutine my_put_vara_int_1D(ncid, varid, start, N, var) +subroutine my_put_vara_int_1D(ncid, varid, start, N, var, partit) IMPLICIT NONE +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid, varid, start, N +integer :: var(:) +integer :: ierror, status -integer, intent(in) :: ncid, varid, start, N -integer :: var(:) -integer :: ierror, status - - - if (mype==0) status=nf_put_vara_int(ncid, varid, start, N, var) - call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (status .ne. nf_noerr) call handle_err(status) + if (partit%mype==0) status=nf_put_vara_int(ncid, varid, start, N, var) + call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_put_vara_int_1D ! !============================================================================ ! -subroutine my_put_vara_int_2D(ncid, varid, start, N, var) +subroutine my_put_vara_int_2D(ncid, varid, start, N, var, partit) IMPLICIT NONE -integer, intent(in) :: ncid, varid, start(:), N(:) -integer :: var(:) -integer :: ierror, status +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid, varid, start(:), N(:) +integer :: var(:) +integer :: ierror, status - if (mype==0) status=nf_put_vara_int(ncid, varid, start, N, var) - call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (status .ne. nf_noerr) call handle_err(status) + if (partit%mype==0) status=nf_put_vara_int(ncid, varid, start, N, var) + call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_put_vara_int_2D ! !============================================================================ ! -subroutine my_create(filename, opt, ncid) +subroutine my_create(filename, opt, ncid, partit) IMPLICIT NONE -integer, intent(in) :: opt, ncid -character(*), intent(in) :: filename -integer :: ierror, status - if (mype==0) then ! create a file +type(t_partit), intent(inout):: partit +integer, intent(in) :: opt, ncid +character(*), intent(in) :: filename +integer :: ierror, status + if (partit%mype==0) then ! create a file ! create a file status = nf_create(filename, opt, ncid) - if (status.ne.nf_noerr) call handle_err(status) + if (status.ne.nf_noerr) call handle_err(status, partit) end if - call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (status .ne. nf_noerr) call handle_err(status) + call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_create ! !============================================================================ ! -subroutine my_close(ncid) +subroutine my_close(ncid, partit) IMPLICIT NONE -integer, intent(in) :: ncid -integer :: ierror, status +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid +integer :: ierror, status -if (mype==0) status = nf_close(ncid) +if (partit%mype==0) status = nf_close(ncid) -call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) -if (status .ne. nf_noerr) call handle_err(status) +call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) +if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_close end module io_mesh_info diff --git a/src/io_netcdf_workaround_module.F90 b/src/io_netcdf_workaround_module.F90 index 09efb7b3e..d572c97aa 100644 --- a/src/io_netcdf_workaround_module.F90 +++ b/src/io_netcdf_workaround_module.F90 @@ -6,18 +6,19 @@ module io_netcdf_workaround_module contains - integer function next_io_rank(communicator, async_netcdf_allowed) result(result) - use g_PARSUP + integer function next_io_rank(communicator, async_netcdf_allowed, partit) result(result) + use MOD_PARTIT use mpi_topology_module - integer, intent(in) :: communicator - logical, intent(out) :: async_netcdf_allowed + integer, intent(in) :: communicator + logical, intent(out) :: async_netcdf_allowed + type(t_partit), intent(in), target :: partit ! EO args integer rank_use_count integer rank result = next_io_rank_helper(communicator, rank_use_count) if(rank_use_count > 1) then - if(mype == SEQUENTIAL_IO_RANK) print *,"rejecting additional async NetCDF for process:",result, "use count:", rank_use_count, "falling back to sequential I/O on process ",SEQUENTIAL_IO_RANK + if(partit%mype == SEQUENTIAL_IO_RANK) print *,"rejecting additional async NetCDF for process:",result, "use count:", rank_use_count, "falling back to sequential I/O on process ",SEQUENTIAL_IO_RANK result = SEQUENTIAL_IO_RANK async_netcdf_allowed = .false. else diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 9d49f6648..2bd24a00a 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -1,9 +1,9 @@ MODULE io_RESTART use g_config use g_clock - use g_parsup use g_comm_auto use mod_mesh + use mod_partit use mod_tracer use o_arrays use i_arrays @@ -78,7 +78,7 @@ MODULE io_RESTART !-------------------------------------------------------------------------------------------- ! ini_ocean_io initializes oid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ocean_io(year, tracers, mesh) +subroutine ini_ocean_io(year, tracers, partit, mesh) implicit none integer, intent(in) :: year @@ -88,9 +88,13 @@ subroutine ini_ocean_io(year, tracers, mesh) character(500) :: filename character(500) :: trname, units character(4) :: cyear - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" write(cyear,'(i4)') year ! create an ocean restart file; serial output implemented so far @@ -159,7 +163,7 @@ end subroutine ini_ocean_io !-------------------------------------------------------------------------------------------- ! ini_ice_io initializes iid datatype which contains information of all variables need to be written into ! the ice restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ice_io(year, mesh) +subroutine ini_ice_io(year, partit, mesh) implicit none integer, intent(in) :: year @@ -169,9 +173,13 @@ subroutine ini_ice_io(year, mesh) character(500) :: filename character(500) :: trname, units character(4) :: cyear - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" write(cyear,'(i4)') year ! create an ocean restart file; serial output implemented so far @@ -198,7 +206,7 @@ end subroutine ini_ice_io ! !-------------------------------------------------------------------------------------------- ! -subroutine restart(istep, l_write, l_read, tracers, mesh) +subroutine restart(istep, l_write, l_read, tracers, partit, mesh) #if defined(__icepack) use icedrv_main, only: init_restart_icepack @@ -213,32 +221,33 @@ subroutine restart(istep, l_write, l_read, tracers, mesh) logical :: l_write, l_read logical :: is_restart integer :: mpierr - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then - call ini_ocean_io(yearnew, tracers, mesh) - if (use_ice) call ini_ice_io (yearnew, mesh) + call ini_ocean_io(yearnew, tracers, partit, mesh) + if (use_ice) call ini_ice_io (yearnew, partit, mesh) #if defined(__icepack) - if (use_ice) call init_restart_icepack(yearnew, mesh) + if (use_ice) call init_restart_icepack(yearnew, partit, mesh) #endif else - call ini_ocean_io(yearold, tracers, mesh) - if (use_ice) call ini_ice_io (yearold, mesh) + call ini_ocean_io(yearold, tracers, partit, mesh) + if (use_ice) call ini_ice_io (yearold, partit, mesh) #if defined(__icepack) - if (use_ice) call init_restart_icepack(yearold, mesh) + if (use_ice) call init_restart_icepack(yearold, partit, mesh) #endif end if if (l_read) then - call assoc_ids(oid); call was_error(oid) - call read_restart(oid, mesh); call was_error(oid) + call assoc_ids(oid, partit); call was_error(oid, partit) + call read_restart(oid, partit, mesh); call was_error(oid, partit) if (use_ice) then - call assoc_ids(iid); call was_error(iid) - call read_restart(iid, mesh); call was_error(iid) + call assoc_ids(iid, partit); call was_error(iid, partit) + call read_restart(iid, partit, mesh); call was_error(iid, partit) #if defined(__icepack) - call assoc_ids(ip_id); call was_error(ip_id) - call read_restart(ip_id, mesh); call was_error(ip_id) + call assoc_ids(ip_id, partit); call was_error(ip_id, partit) + call read_restart(ip_id, partit, mesh); call was_error(ip_id, partit) #endif end if end if @@ -270,20 +279,20 @@ subroutine restart(istep, l_write, l_read, tracers, mesh) if (.not. is_restart) return ! write restart - if(mype==0) write(*,*)'Do output (netCDF, restart) ...' - call assoc_ids(oid); call was_error(oid) - call write_restart(oid, istep, mesh); call was_error(oid) + if(partit%mype==0) write(*,*)'Do output (netCDF, restart) ...' + call assoc_ids(oid, partit); call was_error(oid, partit) + call write_restart(oid, istep, partit, mesh); call was_error(oid, partit) if (use_ice) then - call assoc_ids(iid); call was_error(iid) - call write_restart(iid, istep, mesh); call was_error(iid) + call assoc_ids(iid, partit); call was_error(iid, partit) + call write_restart(iid, istep, partit, mesh); call was_error(iid, partit) #if defined(__icepack) - call assoc_ids(ip_id); call was_error(ip_id) - call write_restart(ip_id, istep, mesh); call was_error(ip_id) + call assoc_ids(ip_id, partit); call was_error(ip_id, partit) + call write_restart(ip_id, istep, partit, mesh); call was_error(ip_id, partit) #endif end if ! actualize clock file to latest restart point - if (mype==0) then + if (partit%mype==0) then write(*,*) ' --> actualize clock file to latest restart point' call clock_finish end if @@ -292,15 +301,15 @@ end subroutine restart ! !-------------------------------------------------------------------------------------------- ! -subroutine create_new_file(id) +subroutine create_new_file(id, partit) implicit none - + type(t_partit), intent(in) :: partit type(nc_file), intent(inout) :: id integer :: c, j integer :: n, k, l, kdim, dimid(4) character(2000) :: att_text ! Serial output implemented so far - if (mype/=0) return + if (partit%mype/=0) return c=1 id%error_status=0 ! create an ocean output file @@ -455,17 +464,21 @@ end subroutine def_variable_2d ! !-------------------------------------------------------------------------------------------- ! -subroutine write_restart(id, istep, mesh) +subroutine write_restart(id, istep, partit, mesh) implicit none type(nc_file), intent(inout) :: id integer, intent(in) :: istep - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit real(kind=WP), allocatable :: aux(:), laux(:) real(kind=WP) :: t0, t1, t2, t3 integer :: i, lev, size1, size2, size_gen, size_lev, shape integer :: c, order -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Serial output implemented so far if (mype==0) then @@ -477,7 +490,7 @@ subroutine write_restart(id, istep, mesh) id%error_status(c)=nf_put_vara_int(id%ncid, id%iID, id%rec_count, 1, globalstep+istep, 1); c=c+1 end if - call was_error(id); c=1 + call was_error(id, partit); c=1 do i=1, id%nvar shape=id%var(i)%ndim @@ -486,8 +499,8 @@ subroutine write_restart(id, istep, mesh) size1=id%var(i)%dims(1) if (mype==0) allocate(aux(size1)) t0=MPI_Wtime() - if (size1==nod2D) call gather_nod (id%var(i)%pt1, aux) - if (size1==elem2D) call gather_elem(id%var(i)%pt1, aux) + if (size1==nod2D) call gather_nod (id%var(i)%pt1, aux, partit) + if (size1==elem2D) call gather_elem(id%var(i)%pt1, aux, partit) t1=MPI_Wtime() if (mype==0) then id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/1, id%rec_count/), (/size1, 1/), aux, 1); c=c+1 @@ -524,8 +537,8 @@ subroutine write_restart(id, istep, mesh) do lev=1, size_lev if (order==1) laux=id%var(i)%pt2(:,lev) if (order==2) laux=id%var(i)%pt2(lev,:) - if (size_gen==nod2D) call gather_nod (laux, aux) - if (size_gen==elem2D) call gather_elem(laux, aux) + if (size_gen==nod2D) call gather_nod (laux, aux, partit) + if (size_gen==elem2D) call gather_elem(laux, aux, partit) if (mype==0) then if (order==1) id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/1, lev, id%rec_count/), (/size_gen, 1, 1/), aux, 1); c=c+1 if (order==2) id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/lev, 1, id%rec_count/), (/1, size_gen, 1/), aux, 1); c=c+1 @@ -544,19 +557,19 @@ subroutine write_restart(id, istep, mesh) call par_ex stop end if - call was_error(id); c=1 + call was_error(id, partit); c=1 end do if (mype==0) id%error_count=c-1 - call was_error(id) + call was_error(id, partit) if (mype==0) id%error_status(1)=nf_close(id%ncid); id%error_count=1 - call was_error(id) + call was_error(id, partit) end subroutine write_restart ! !-------------------------------------------------------------------------------------------- ! -subroutine read_restart(id, mesh, arg) +subroutine read_restart(id, partit, mesh, arg) implicit none type(nc_file), intent(inout) :: id integer, optional, intent(in) :: arg @@ -565,9 +578,14 @@ subroutine read_restart(id, mesh, arg) integer :: rec2read, c, order, ierror real(kind=WP) :: rtime !timestamp of the record logical :: file_exist=.False., var_exist - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! laux=0. ! Serial output implemented so far @@ -606,7 +624,7 @@ subroutine read_restart(id, mesh, arg) end if end if - call was_error(id); c=1 + call was_error(id, partit); c=1 do i=1, id%nvar shape=id%var(i)%ndim @@ -626,8 +644,8 @@ subroutine read_restart(id, mesh, arg) id%error_status(c)=nf_get_vara_double(id%ncid, id%var(i)%code, (/1, id%rec_count/), (/size1, 1/), aux, 1); c=c+1 ! write(*,*) 'min/max 2D =', minval(aux), maxval(aux) end if - if (size1==nod2D) call broadcast_nod (id%var(i)%pt1, aux) - if (size1==elem2D) call broadcast_elem(id%var(i)%pt1, aux) + if (size1==nod2D) call broadcast_nod (id%var(i)%pt1, aux, partit) + if (size1==elem2D) call broadcast_elem(id%var(i)%pt1, aux, partit) if (mype==0) deallocate(aux) !_______writing 3D fields________________________________________________ elseif (shape==2) then @@ -658,12 +676,12 @@ subroutine read_restart(id, mesh, arg) end if id%var(i)%pt2(lev,:)=0. if (size_gen==nod2D) then - call broadcast_nod (laux, aux) + call broadcast_nod (laux, aux, partit) if (order==1) id%var(i)%pt2(:,lev)=laux(1:myDim_nod2D+eDim_nod2D) if (order==2) id%var(i)%pt2(lev,:)=laux(1:myDim_nod2D+eDim_nod2D) end if if (size_gen==elem2D) then - call broadcast_elem(laux, aux) + call broadcast_elem(laux, aux, partit) if (order==1) id%var(i)%pt2(:,lev)=laux(1:myDim_elem2D+eDim_elem2D) if (order==2) id%var(i)%pt2(lev,:)=laux(1:myDim_elem2D+eDim_elem2D) end if @@ -675,25 +693,25 @@ subroutine read_restart(id, mesh, arg) call par_ex stop end if - call was_error(id); c=1 + call was_error(id, partit); c=1 end do if (mype==0) id%error_status(1)=nf_close(id%ncid); id%error_count=1 - call was_error(id) + call was_error(id, partit) end subroutine read_restart ! !-------------------------------------------------------------------------------------------- ! -subroutine assoc_ids(id) +subroutine assoc_ids(id, partit) implicit none - - type(nc_file), intent(inout) :: id - character(500) :: longname - integer :: c, j, k, status - real(kind=WP) :: rtime !timestamp of the record + type(t_partit), intent(in), target :: partit + type(nc_file), intent(inout) :: id + character(500) :: longname + integer :: c, j, k, status + real(kind=WP) :: rtime !timestamp of the record ! Serial output implemented so far - if (mype/=0) return + if (partit%mype/=0) return c=1 id%error_status=0 ! open existing netcdf file @@ -702,7 +720,7 @@ subroutine assoc_ids(id) id%error_status(c) = nf_open(id%filename, nf_nowrite, id%ncid) !if the file does not exist it will be created! if (id%error_status(c) .ne. nf_noerr) then - call create_new_file(id) ! error status counter will be reset + call create_new_file(id, partit) ! error status counter will be reset c=id%error_count+1 id%error_status(c) = nf_open(id%filename, nf_nowrite, id%ncid); c=c+1 end if @@ -729,9 +747,9 @@ subroutine assoc_ids(id) exit ! a proper rec_count detected, ready for reading restart, exit the loop end if if (k==1) then - if (mype==0) write(*,*) 'WARNING: all dates in restart file are after the current date' - if (mype==0) write(*,*) 'reading restart will not be possible !' - if (mype==0) write(*,*) 'the model attempted to start with the time stamp = ', int(ctime) + if (partit%mype==0) write(*,*) 'WARNING: all dates in restart file are after the current date' + if (partit%mype==0) write(*,*) 'reading restart will not be possible !' + if (partit%mype==0) write(*,*) 'the model attempted to start with the time stamp = ', int(ctime) id%error_status(c)=-310; end if end do @@ -752,20 +770,21 @@ end subroutine assoc_ids ! !-------------------------------------------------------------------------------------------- ! -subroutine was_error(id) +subroutine was_error(id, partit) implicit none - type(nc_file), intent(inout) :: id - integer :: k, status, ierror + type(t_partit), intent(inout), target :: partit + type(nc_file), intent(inout) :: id + integer :: k, status, ierror - call MPI_BCast(id%error_count, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(id%error_status(1), id%error_count, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(id%error_count, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(id%error_status(1), id%error_count, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) do k=1, id%error_count status=id%error_status(k) if (status .ne. nf_noerr) then - if (mype==0) write(*,*) 'error counter=', k - if (mype==0) call handle_err(status) - call par_ex + if (partit%mype==0) write(*,*) 'error counter=', k + if (partit%mype==0) call handle_err(status, partit) + call par_ex(partit) stop end if end do diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 7b58b63f2..305f128f4 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -1,46 +1,48 @@ module oce_adv_tra_driver_interfaces interface - subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, mesh) + subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num + type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(inout), target :: tracers - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WE(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) end subroutine end interface end module module oce_tra_adv_flux2dtracer_interface interface - subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo, ttf, lo) + subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, mesh, use_lo, ttf, lo) !update the solution for vertical and horizontal flux contributions use MOD_MESH - use g_PARSUP - real(kind=WP), intent(in), target :: dt - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(inout) :: flux_v(mesh%nl, myDim_nod2D) + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: flux_v(mesh%nl, partit%myDim_nod2D) logical, optional :: use_lo - real(kind=WP), optional :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), optional :: lo (mesh%nl-1, myDim_nod2D+eDim_nod2D) + real(kind=WP), optional :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), optional :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) end subroutine end interface end module ! ! !=============================================================================== -subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, mesh) +subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT use g_comm_auto use oce_adv_tra_hor_interfaces use oce_adv_tra_ver_interfaces @@ -50,11 +52,12 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, mesh) real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WE(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), pointer, dimension (:,:) :: pwvel real(kind=WP), pointer, dimension (:,:) :: ttf, ttfAB, fct_LO @@ -74,7 +77,10 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, mesh) real(kind=WP) :: opth, optv logical :: do_zero_flux -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ttf => tracers%data(tr_num)%values ttfAB => tracers%data(tr_num)%valuesAB opth = tracers%data(tr_num)%tra_adv_ph @@ -97,8 +103,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, mesh) ! compute the low order upwind horizontal flux ! init_zero=.true. : zero the horizontal flux before computation ! init_zero=.false. : input flux will be substracted - call adv_tra_hor_upw1(vel, ttf, mesh, adv_flux_hor, init_zero=.true.) - + call adv_tra_hor_upw1(vel, ttf, partit, mesh, adv_flux_hor, init_zero=.true.) ! update the LO solution for horizontal contribution fct_LO=0.0_WP do e=1, myDim_edge2D @@ -122,11 +127,10 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, mesh) fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e) fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e) end do - end do - + end do ! compute the low order upwind vertical flux (explicit part only) ! zero the input/output flux before computation - call adv_tra_ver_upw1(we, ttf, mesh, adv_flux_ver, init_zero=.true.) + call adv_tra_ver_upw1(we, ttf, partit, mesh, adv_flux_ver, init_zero=.true.) ! update the LO solution for vertical contribution do n=1, myDim_nod2D nu1 = ulevels_nod2D(n) @@ -138,35 +142,32 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, mesh) end do if (w_split) then !wvel/=wvel_e ! update for implicit contribution (w_split option) - call adv_tra_vert_impl(dt, wi, fct_LO, mesh) + call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation ! --> compute here low order part of vertical anti diffusive fluxes, ! has to be done on the full vertical velocity w - call adv_tra_ver_upw1(w, ttf, mesh, adv_flux_ver, init_zero=.true.) - end if - - call exchange_nod(fct_LO) + call adv_tra_ver_upw1(w, ttf, partit, mesh, adv_flux_ver, init_zero=.true.) + end if + call exchange_nod(fct_LO, partit) end if do_zero_flux=.true. if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') do_zero_flux=.false. - !___________________________________________________________________________ ! do horizontal tracer advection, in case of FCT high order solution SELECT CASE(trim(tracers%data(tr_num)%tra_adv_hor)) CASE('MUSCL') ! compute the untidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above) - call adv_tra_hor_muscl(vel, ttfAB, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, init_zero=do_zero_flux) + call adv_tra_hor_muscl(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, init_zero=do_zero_flux) CASE('MFCT') - call adv_tra_hor_mfct(vel, ttfAB, mesh, opth, adv_flux_hor, edge_up_dn_grad, init_zero=do_zero_flux) + call adv_tra_hor_mfct(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_hor_upw1(vel, ttfAB, mesh, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_upw1(vel, ttfAB, partit, mesh, adv_flux_hor, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracers%data(tr_num)%tra_adv_hor), '! Check your namelists!' - call par_ex(1) + call par_ex(partit, 1) END SELECT - if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then pwvel=>w else @@ -177,50 +178,54 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, mesh) SELECT CASE(trim(tracers%data(tr_num)%tra_adv_ver)) CASE('QR4C') ! compute the untidiffusive vertical flux (init_zero=.false.:input is the LO vertical flux computed above) - call adv_tra_ver_qr4c ( pwvel, ttfAB, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_qr4c ( pwvel, ttfAB, partit, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) CASE('CDIFF') - call adv_tra_ver_cdiff( pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_cdiff( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE('PPM') - call adv_tra_vert_ppm(dt, pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_vert_ppm(dt, pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_ver_upw1 ( pwvel, ttfAB, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_upw1 ( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!' call par_ex(1) ! --> be aware the vertical implicite part in case without FCT is done in - ! oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, mesh) + ! oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, partit, mesh) ! for do_wimpl=.true. END SELECT !___________________________________________________________________________ ! if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then !edge_up_dn_grad will be used as an auxuary array here - call oce_tra_adv_fct(dt, ttf, fct_LO, adv_flux_hor, adv_flux_ver, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, edge_up_dn_grad, mesh) - call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, mesh, use_lo=.TRUE., ttf=ttf, lo=fct_LO) + call oce_tra_adv_fct(dt, ttf, fct_LO, adv_flux_hor, adv_flux_ver, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, edge_up_dn_grad, partit, mesh) + call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, partit, mesh, use_lo=.TRUE., ttf=ttf, lo=fct_LO) else - call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, mesh) + call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, partit, mesh) end if end subroutine do_oce_adv_tra ! ! !=============================================================================== -subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo, ttf, lo) +subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, mesh, use_lo, ttf, lo) use MOD_MESH use o_ARRAYS - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none - real(kind=WP), intent(in), target :: dt - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(inout) :: flux_v(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: flux_v(mesh%nl, partit%myDim_nod2D) logical, optional :: use_lo - real(kind=WP), optional :: lo (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), optional :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) + real(kind=WP), optional :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), optional :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) integer :: n, nz, k, elem, enodes(3), num, el(2), nu12, nl12, nu1, nu2, nl1, nl2, edge -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! c. Update the solution ! Vertical diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index ec8314e09..5eb7993a9 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -1,26 +1,28 @@ module oce_adv_tra_fct_interfaces interface - subroutine oce_adv_tra_fct_init(twork, mesh) + subroutine oce_adv_tra_fct_init(twork, partit, mesh) use MOD_MESH use MOD_TRACER - use g_PARSUP - type(t_mesh), intent(in), target :: mesh - type(t_tracer_work), intent(inout), target :: twork + use MOD_PARTIT + type(t_mesh), intent(in), target :: mesh + type(t_partit),intent(inout), target :: partit + type(t_tracer_work), intent(inout), target :: twork end subroutine - subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, mesh) + subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, partit, mesh) use MOD_MESH - use g_PARSUP - real(kind=WP), intent(in), target :: dt - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: lo (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(inout) :: adf_v(mesh%nl, myDim_nod2D) - real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(inout) :: fct_minus(mesh%nl, myDim_nod2D) + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: adf_v(mesh%nl, partit%myDim_nod2D) + real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: fct_minus(mesh%nl, partit%myDim_nod2D) real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array end subroutine end interface @@ -28,20 +30,24 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, ! ! !=============================================================================== -subroutine oce_adv_tra_fct_init(twork, mesh) +subroutine oce_adv_tra_fct_init(twork, partit, mesh) use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT implicit none integer :: my_size type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer_work), intent(inout), target :: twork -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" my_size=myDim_nod2D+eDim_nod2D allocate(twork%fct_LO(nl-1, my_size)) ! Low-order solution - allocate(twork%adv_flux_hor(nl-1,myDim_edge2D)) ! antidiffusive hor. contributions / from edges - allocate(twork%adv_flux_ver(nl, myDim_nod2D)) ! antidiffusive ver. fluxes / from nodes + allocate(twork%adv_flux_hor(nl-1,partit%myDim_edge2D)) ! antidiffusive hor. contributions / from edges + allocate(twork%adv_flux_ver(nl, partit%myDim_nod2D)) ! antidiffusive ver. fluxes / from nodes allocate(twork%fct_ttf_max(nl-1, my_size),twork%fct_ttf_min(nl-1, my_size)) allocate(twork%fct_plus(nl-1, my_size), twork%fct_minus(nl-1, my_size)) @@ -60,7 +66,7 @@ end subroutine oce_adv_tra_fct_init ! ! !=============================================================================== -subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, mesh) +subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, partit, mesh) ! ! 3D Flux Corrected Transport scheme ! Limits antidiffusive fluxes==the difference in flux HO-LO @@ -69,27 +75,31 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, ! Adds limited fluxes to the LO solution use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none - real(kind=WP), intent(in), target :: dt - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: lo (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(inout) :: adf_v(mesh%nl, myDim_nod2D) - real(kind=WP), intent(inout) :: fct_plus (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: fct_minus(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array, let us use twork%edge_up_dn_grad(1:4, 1:NL-2, 1:myDim_edge2D) to save space + real(kind=WP), intent(in), target :: dt + type(t_mesh), intent(in), target :: mesh + type(t_partit),intent(inout), target :: partit + real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: adf_v(mesh%nl, partit%myDim_nod2D) + real(kind=WP), intent(inout) :: fct_plus (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_minus(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array, let us use twork%edge_up_dn_grad(1:4, 1:NL-2, 1:partit%myDim_edge2D) to save space integer :: n, nz, k, elem, enodes(3), num, el(2), nl1, nl2, nu1, nu2, nl12, nu12, edge real(kind=WP) :: flux, ae,tvert_max(mesh%nl-1),tvert_min(mesh%nl-1) real(kind=WP) :: flux_eps=1e-16 real(kind=WP) :: bignumber=1e3 integer :: vlimit=1 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! -------------------------------------------------------------------------- ! ttf is the tracer field on step n @@ -282,7 +292,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, end do ! fct_minus and fct_plus must be known to neighbouring PE - call exchange_nod(fct_plus, fct_minus) + call exchange_nod(fct_plus, fct_minus, partit) !___________________________________________________________________________ ! b3. Limiting @@ -318,7 +328,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, ! the bottom flux is always zero end do - call exchange_nod_end ! fct_plus, fct_minus + call exchange_nod_end(partit) ! fct_plus, fct_minus !Horizontal do edge=1, myDim_edge2D diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index df6e91dcf..714eccf68 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -8,14 +8,15 @@ module oce_adv_tra_hor_interfaces ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_upw1(vel, ttf, mesh, flux, init_zero) + subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER - use g_PARSUP - type(t_mesh), intent(in) , target :: mesh - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) logical, optional :: init_zero end subroutine !=============================================================================== @@ -25,29 +26,31 @@ subroutine adv_tra_hor_upw1(vel, ttf, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_muscl(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) + subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) use MOD_MESH - use g_PARSUP + use MOD_PARTIT + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) - integer, intent(in) :: nboundary_lay(myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, myDim_edge2D) + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) logical, optional :: init_zero end subroutine ! a not stable version of MUSCL (reconstruction in the vicinity of bottom topography is not upwind) ! it runs with FCT option only - subroutine adv_tra_hor_mfct(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, init_zero) + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) use MOD_MESH - use g_PARSUP + use MOD_PARTIT + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, myDim_edge2D) + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) logical, optional :: init_zero end subroutine end interface @@ -55,22 +58,26 @@ subroutine adv_tra_hor_mfct(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, ! ! !=============================================================================== -subroutine adv_tra_hor_upw1(vel, ttf, mesh, flux, init_zero) +subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) use MOD_MESH - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none - type(t_mesh), intent(in) , target :: mesh - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) logical, optional :: init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: a, vflux integer :: el(2), enodes(2), nz, edge integer :: nu12, nl12, nl1, nl2, nu1, nu2 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (present(init_zero))then if (init_zero) flux=0.0_WP @@ -207,19 +214,20 @@ end subroutine adv_tra_hor_upw1 ! ! !=============================================================================== -subroutine adv_tra_hor_muscl(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) +subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) - integer, intent(in) :: nboundary_lay(myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, myDim_edge2D) + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) logical, optional :: init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: Tmean1, Tmean2, cHO @@ -228,7 +236,10 @@ subroutine adv_tra_hor_muscl(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, nbo integer :: el(2), enodes(2), nz, edge integer :: nu12, nl12, nl1, nl2, nu1, nu2 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (present(init_zero))then if (init_zero) flux=0.0_WP @@ -475,18 +486,19 @@ end subroutine adv_tra_hor_muscl ! ! !=============================================================================== - subroutine adv_tra_hor_mfct(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, init_zero) + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, myDim_edge2D) + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) logical, optional :: init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: Tmean1, Tmean2, cHO @@ -494,7 +506,10 @@ subroutine adv_tra_hor_mfct(vel, ttf, mesh, num_ord, flux, edge_up_dn_grad, integer :: el(2), enodes(2), nz, edge integer :: nu12, nl12, nl1, nl2, nu1, nu2 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (present(init_zero))then if (init_zero) flux=0.0_WP diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 7f7270635..eab9847a8 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -2,13 +2,14 @@ module oce_adv_tra_ver_interfaces interface ! implicit 1st order upwind vertical advection with to solve for fct_LO ! updates the input tracer ttf - subroutine adv_tra_vert_impl(dt, w, ttf, mesh) + subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) use mod_mesh - use g_PARSUP + use MOD_PARTIT real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(inout) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) end subroutine !=============================================================================== ! 1st order upwind (explicit) @@ -16,13 +17,14 @@ subroutine adv_tra_vert_impl(dt, w, ttf, mesh) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_upw1(w, ttf, mesh, flux, init_zero) + subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH - use g_PARSUP + use MOD_PARTIT + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) logical, optional :: init_zero end subroutine !=============================================================================== @@ -31,14 +33,15 @@ subroutine adv_tra_ver_upw1(w, ttf, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_qr4c(w, ttf, mesh, num_ord, flux, init_zero) + subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) use MOD_MESH - use g_PARSUP + use MOD_PARTIT + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) logical, optional :: init_zero end subroutine !=============================================================================== @@ -47,16 +50,17 @@ subroutine adv_tra_ver_qr4c(w, ttf, mesh, num_ord, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_vert_ppm(dt, w, ttf, mesh, flux, init_zero) + subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) use MOD_MESH - use g_PARSUP + use MOD_PARTIT real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh integer :: n, nz, nl1 real(kind=WP) :: tvert(mesh%nl), tv - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) logical, optional :: init_zero end subroutine ! central difference reconstruction (2nd order, use only with FCT) @@ -64,38 +68,43 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_cdiff(w, ttf, mesh, flux, init_zero) + subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH - use g_PARSUP + use MOD_PARTIT + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh integer :: n, nz, nl1 real(kind=WP) :: tvert(mesh%nl), tv - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) logical, optional :: init_zero end subroutine end interface end module !=============================================================================== -subroutine adv_tra_vert_impl(dt, w, ttf, mesh) +subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none real(kind=WP), intent(in) , target :: dt + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in) , target :: mesh - real(kind=WP), intent(inout) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(inout) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) integer :: nz, n, nzmax, nzmin, tr_num real(kind=WP) :: m, zinv, dt_inv, dz real(kind=WP) :: c1, v_adv -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" dt_inv=1.0_WP/dt @@ -222,21 +231,25 @@ end subroutine adv_tra_vert_impl ! ! !=============================================================================== -subroutine adv_tra_ver_upw1(w, ttf, mesh, flux, init_zero) +subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP) :: tvert(mesh%nl) integer :: n, nz, nzmax, nzmin - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) logical, optional :: init_zero -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (present(init_zero))then if (init_zero) flux=0.0_WP @@ -275,26 +288,28 @@ end subroutine adv_tra_ver_upw1 ! ! !=============================================================================== -subroutine adv_tra_ver_qr4c(w, ttf, mesh, num_ord, flux, init_zero) - use g_config +subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) use MOD_MESH use o_ARRAYS use o_PARAM - use g_PARSUP - use g_forcing_arrays + use MOD_PARTIT implicit none + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) logical, optional :: init_zero real(kind=WP) :: tvert(mesh%nl) integer :: n, nz, nzmax, nzmin real(kind=WP) :: Tmean, Tmean1, Tmean2 real(kind=WP) :: qc, qu, qd -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (present(init_zero))then if (init_zero) flux=0.0_WP @@ -349,24 +364,28 @@ end subroutine adv_tra_ver_qr4c ! ! !=============================================================================== -subroutine adv_tra_vert_ppm(dt, w, ttf, mesh, flux, init_zero) +subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in) , target :: mesh - real(kind=WP), intent(in) :: ttf (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in) :: ttf (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) logical, optional :: init_zero real(kind=WP) :: tvert(mesh%nl), tv(mesh%nl), aL, aR, aj, x real(kind=WP) :: dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1 integer :: n, nz, nzmax, nzmin integer :: overshoot_counter, counter -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (present(init_zero))then if (init_zero) flux=0.0_WP @@ -528,20 +547,24 @@ end subroutine adv_tra_vert_ppm ! ! !=============================================================================== -subroutine adv_tra_ver_cdiff(w, ttf, mesh, flux, init_zero) +subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) logical, optional :: init_zero integer :: n, nz, nzmax, nzmin real(kind=WP) :: tvert(mesh%nl), tv -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (present(init_zero))then if (init_zero) flux=0.0_WP diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 51c4d72b2..95c70702c 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1,70 +1,94 @@ module oce_ale_interfaces interface - subroutine init_bottom_elem_thickness(mesh) + subroutine init_bottom_elem_thickness(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine init_bottom_node_thickness(mesh) + subroutine init_bottom_node_thickness(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine init_surface_elem_depth(mesh) + subroutine init_surface_elem_depth(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine init_surface_node_depth(mesh) + subroutine init_surface_node_depth(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine impl_vert_visc_ale(mesh) + subroutine impl_vert_visc_ale(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine update_stiff_mat_ale(mesh) + subroutine update_stiff_mat_ale(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine compute_ssh_rhs_ale(mesh) + subroutine compute_ssh_rhs_ale(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine solve_ssh_ale(mesh) + subroutine solve_ssh_ale(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine compute_hbar_ale(mesh) + subroutine compute_hbar_ale(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine vert_vel_ale(mesh) + subroutine vert_vel_ale(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine - subroutine update_thickness_ale(mesh) + subroutine update_thickness_ale(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module oce_timestep_ale_interface interface - subroutine oce_timestep_ale(n, tracers, mesh) + subroutine oce_timestep_ale(n, tracers, partit, mesh) use mod_mesh + use mod_partit use mod_tracer integer, intent(in) :: n - type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module @@ -90,19 +114,23 @@ subroutine oce_timestep_ale(n, tracers, mesh) ! !=============================================================================== ! allocate & initialise arrays for Arbitrary-Langrangian-Eularian (ALE) method -subroutine init_ale(mesh) +subroutine init_ale(partit, mesh) USE o_PARAM USE MOD_MESH - USE g_PARSUP + USE MOD_PARTIT USE o_ARRAYS USE g_config, only: which_ale, use_cavity, use_partial_cell USE g_forcing_param, only: use_virt_salt use oce_ale_interfaces Implicit NONE - integer :: n, nzmax, nzmin, elnodes(3), elem - type(t_mesh), intent(inout), target :: mesh -#include "associate_mesh.h" + integer :: n, nzmax, nzmin, elnodes(3), elem + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___allocate________________________________________________________________ ! hnode and hnode_new: layer thicknesses at nodes. allocate(mesh%hnode(1:nl-1, myDim_nod2D+eDim_nod2D)) @@ -172,14 +200,14 @@ subroutine init_ale(mesh) ! of partial cell bootom layer zbar_n_bot = 0.0 zbar_e_bot = 0.0 - call init_bottom_elem_thickness(mesh) - call init_bottom_node_thickness(mesh) + call init_bottom_elem_thickness(partit, mesh) + call init_bottom_node_thickness(partit, mesh) ! compute depth of partial cell ocean-cavity interface zbar_n_srf = zbar(1) zbar_e_srf = zbar(1) - call init_surface_elem_depth(mesh) - call init_surface_node_depth(mesh) + call init_surface_elem_depth(partit, mesh) + call init_surface_node_depth(partit, mesh) !___________________________________________________________________________ ! initialise 3d field of depth levels and mid-depth levels @@ -224,10 +252,10 @@ end subroutine init_ale ! ! !=============================================================================== -subroutine init_bottom_elem_thickness(mesh) +subroutine init_bottom_elem_thickness(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + use MOD_PARTIT use o_ARRAYS use g_config,only: use_partial_cell, partial_cell_thresh use g_comm_auto @@ -236,8 +264,12 @@ subroutine init_bottom_elem_thickness(mesh) integer :: elem, elnodes(3), nle real(kind=WP) :: dd - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! If we use partial cells, the thickness of bottom cell is adjusted. @@ -336,16 +368,16 @@ subroutine init_bottom_elem_thickness(mesh) end if !___________________________________________________________________________ - call exchange_elem(zbar_e_bot) + call exchange_elem(zbar_e_bot, partit) end subroutine init_bottom_elem_thickness ! ! !=============================================================================== -subroutine init_bottom_node_thickness(mesh) +subroutine init_bottom_node_thickness(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + use MOD_PARTIT use o_ARRAYS use g_config,only: use_partial_cell use g_comm_auto @@ -355,8 +387,12 @@ subroutine init_bottom_node_thickness(mesh) integer :: node, nln, elem, elemi, nelem real(kind=WP) :: dd real(kind=WP) :: hnbot, tvol - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! If we use partial cells, the thickness of bottom cell is adjusted. @@ -441,17 +477,17 @@ subroutine init_bottom_node_thickness(mesh) end if ! --> if(use_partial_cell) then !___________________________________________________________________________ - call exchange_nod(zbar_n_bot) - call exchange_nod(bottom_node_thickness) + call exchange_nod(zbar_n_bot, partit) + call exchange_nod(bottom_node_thickness, partit) end subroutine init_bottom_node_thickness ! ! !=============================================================================== -subroutine init_surface_elem_depth(mesh) +subroutine init_surface_elem_depth(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + use MOD_PARTIT use o_ARRAYS use g_config,only: use_cavity, use_cavity_partial_cell, cavity_partial_cell_thresh use g_comm_auto @@ -460,9 +496,13 @@ subroutine init_surface_elem_depth(mesh) integer :: elem, elnodes(3), ule real(kind=WP) :: dd - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (use_cavity) then @@ -515,16 +555,16 @@ subroutine init_surface_elem_depth(mesh) end do ! --> do elem=1, myDim_elem2D !_______________________________________________________________________ - call exchange_elem(zbar_e_srf) + call exchange_elem(zbar_e_srf, partit) end if end subroutine init_surface_elem_depth ! ! !=============================================================================== -subroutine init_surface_node_depth(mesh) +subroutine init_surface_node_depth(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + use MOD_PARTIT use o_ARRAYS use g_config,only: use_cavity, use_cavity_partial_cell use g_comm_auto @@ -533,8 +573,13 @@ subroutine init_surface_node_depth(mesh) integer :: node, uln, nelem, elemi real(kind=WP) :: dd - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (use_cavity) then !___________________________________________________________________________ @@ -568,14 +613,14 @@ subroutine init_surface_node_depth(mesh) end do ! --> do node=1, myDim_nod2D+eDim_nod2D !_______________________________________________________________________ - call exchange_nod(zbar_n_srf) + call exchange_nod(zbar_n_srf, partit) end if end subroutine init_surface_node_depth ! ! !=============================================================================== ! initialize thickness arrays based on the current hbar -subroutine init_thickness_ale(mesh) +subroutine init_thickness_ale(partit, mesh) ! For z-star case: we stretch scalar thicknesses (nodal) ! through nlevels_nod2D_min -2 layers. Layer nlevels_nod2D_min-1 ! should not be touched if partial cell is implemented (it is). @@ -584,14 +629,18 @@ subroutine init_thickness_ale(mesh) use g_config,only: dt, which_ale use o_PARAM use MOD_MESH - use g_PARSUP + use MOD_PARTIT use o_ARRAYS implicit none integer :: n, nz, elem, elnodes(3), nzmin, nzmax real(kind=WP) :: dd - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if(mype==0) then write(*,*) '____________________________________________________________' @@ -818,25 +867,29 @@ subroutine init_thickness_ale(mesh) !___________________________________________________________________________ hnode_new=hnode ! Should be initialized, because only variable part is updated. - !!PS call check_total_volume(mesh) + !!PS call check_total_volume(partit, mesh) end subroutine init_thickness_ale ! ! !=============================================================================== ! update thickness arrays based on the current hbar -subroutine update_thickness_ale(mesh) +subroutine update_thickness_ale(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + use MOD_PARTIT use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode implicit none integer :: n, nz, elem, elnodes(3),nzmax, nzmin integer , dimension(:), allocatable :: idx - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! >->->->->->->->->->->->->->-> z-level <-<-<-<-<-<-<-<-<-<-<-<-< @@ -1022,17 +1075,22 @@ end subroutine update_thickness_ale ! !=============================================================================== ! update thickness arrays based on the current hbar -subroutine restart_thickness_ale(mesh) +subroutine restart_thickness_ale(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + use MOD_PARTIT use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode implicit none integer :: n, nz, elem, elnodes(3), nzmax, nzmin, lcl_lzstar_lev integer , dimension(:), allocatable :: idx - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if(mype==0) then write(*,*) '____________________________________________________________' @@ -1122,10 +1180,10 @@ end subroutine restart_thickness_ale ! ! To achive it we should use global arrays n_num and n_pos. ! Reserved for future. -subroutine init_stiff_mat_ale(mesh) +subroutine init_stiff_mat_ale(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + use MOD_PARTIT use g_CONFIG implicit none @@ -1139,8 +1197,12 @@ subroutine init_stiff_mat_ale(mesh) character(MAX_PATH) :: dist_mesh_dir, file_name real(kind=WP) :: t0, t1 integer :: ierror ! MPI, return error code - type(t_mesh), intent(inout) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" t0=MPI_Wtime() if (mype==0) then @@ -1404,12 +1466,12 @@ end subroutine init_stiff_mat_ale ! due to changes in ssh is done here ! = ssh_rhs in the update of the stiff matrix ! -subroutine update_stiff_mat_ale(mesh) +subroutine update_stiff_mat_ale(partit, mesh) use g_config,only: dt use o_PARAM use MOD_MESH use MOD_TRACER - use g_PARSUP + use MOD_PARTIT use o_ARRAYS ! implicit none @@ -1419,9 +1481,13 @@ subroutine update_stiff_mat_ale(mesh) real(kind=WP) :: factor real(kind=WP) :: fx(3), fy(3) integer, allocatable :: n_num(:) - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! update secod term of lhs od equation (18) of "FESOM2 from finite element @@ -1511,12 +1577,12 @@ end subroutine update_stiff_mat_ale !"FESOM2: from finite elements to finite volumes" ! ! ssh_rhs = alpha * grad[ int_hbot^hbar(n+0.5)( u^n+deltau)dz + W(n+0.5) ] -subroutine compute_ssh_rhs_ale(mesh) +subroutine compute_ssh_rhs_ale(partit, mesh) use g_config,only: which_ALE,dt use MOD_MESH use o_ARRAYS use o_PARAM - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none @@ -1526,9 +1592,14 @@ subroutine compute_ssh_rhs_ale(mesh) integer :: ed, el(2), enodes(2), nz, n, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 real(kind=WP) :: dumc1_1, dumc1_2, dumc2_1, dumc2_2 !!PS - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ssh_rhs=0.0_WP !___________________________________________________________________________ @@ -1603,7 +1674,7 @@ subroutine compute_ssh_rhs_ale(mesh) ssh_rhs(n)=ssh_rhs(n)+(1.0_WP-alpha)*ssh_rhs_old(n) end do end if - call exchange_nod(ssh_rhs) + call exchange_nod(ssh_rhs, partit) end subroutine compute_ssh_rhs_ale ! @@ -1618,12 +1689,12 @@ end subroutine compute_ssh_rhs_ale ! hbar(n+0.5) = hbar(n-0.5) - tau*ssh_rhs_old ! ! in S. Danilov et al.: "FESOM2: from finite elements to finite volumes" -subroutine compute_hbar_ale(mesh) +subroutine compute_hbar_ale(partit, mesh) use g_config,only: dt, which_ALE, use_cavity use MOD_MESH use o_ARRAYS use o_PARAM - use g_PARSUP + use MOD_PARTIT use g_comm_auto implicit none @@ -1635,9 +1706,13 @@ subroutine compute_hbar_ale(mesh) integer :: ed, el(2), enodes(2), nz,n, elnodes(3), elem, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! compute the rhs @@ -1687,7 +1762,7 @@ subroutine compute_hbar_ale(mesh) do n=1,myDim_nod2D ssh_rhs_old(n)=ssh_rhs_old(n)-water_flux(n)*areasvol(ulevels_nod2D(n),n) end do - call exchange_nod(ssh_rhs_old) + call exchange_nod(ssh_rhs_old, partit) end if !___________________________________________________________________________ @@ -1696,7 +1771,7 @@ subroutine compute_hbar_ale(mesh) do n=1,myDim_nod2D hbar(n)=hbar_old(n)+ssh_rhs_old(n)*dt/areasvol(ulevels_nod2D(n),n) end do - call exchange_nod(hbar) + call exchange_nod(hbar, partit) !___________________________________________________________________________ ! fill the array for updating the stiffness matrix @@ -1725,12 +1800,12 @@ end subroutine compute_hbar_ale ! > for zlevel: dh_k/dt_k=1 != 0 ! > for zstar : dh_k/dt_k=1...kbot-1 != 0 ! -subroutine vert_vel_ale(mesh) +subroutine vert_vel_ale(partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH use o_ARRAYS use o_PARAM - use g_PARSUP + use MOD_PARTIT use g_comm_auto use io_RESTART !!PS use i_arrays !!PS @@ -1745,9 +1820,13 @@ subroutine vert_vel_ale(mesh) real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int !PS real(kind=WP), dimension(:), allocatable :: max_dhbar2distr,cumsum_maxdhbar,distrib_dhbar integer , dimension(:), allocatable :: idx - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! Contributions from levels in divergence @@ -2167,9 +2246,9 @@ subroutine vert_vel_ale(mesh) endif !___________________________________________________________________________ - call exchange_nod(Wvel) - call exchange_nod(hnode_new) ! Or extend cycles above - if (Fer_GM) call exchange_nod(fer_Wvel) + call exchange_nod(Wvel, partit) + call exchange_nod(hnode_new, partit) ! Or extend cycles above + if (Fer_GM) call exchange_nod(fer_Wvel, partit) !___________________________________________________________________________ ! calc vertical CFL criteria for debugging purpose and vertical Wvel splitting @@ -2242,11 +2321,11 @@ end subroutine vert_vel_ale !=============================================================================== ! solve eq.18 in S. Danilov et al. : FESOM2: from finite elements to finite volumes. ! for (eta^(n+1)-eta^n) = d_eta -subroutine solve_ssh_ale(mesh) +subroutine solve_ssh_ale(partit, mesh) use o_PARAM use MOD_MESH use o_ARRAYS -use g_PARSUP +use MOD_PARTIT use g_comm_auto use g_config, only: which_ale ! @@ -2270,9 +2349,14 @@ subroutine solve_ssh_ale(mesh) real(kind=WP), allocatable :: arr_nod2D(:),arr_nod2D2(:,:),arr_nod2D3(:) real(kind=WP) :: cssh1,cssh2,crhs integer :: i -type(t_mesh), intent(in) , target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" Pmode = PET_BLOCKP+PET_SOLVE + PET_BICGSTAB +PET_REPORT + PET_QUIET+ PET_RCM+PET_PCBJ if (lfirst) then @@ -2302,7 +2386,9 @@ subroutine solve_ssh_ale(mesh) integer(kind=C_INT) :: maxiter, restart, lutype, fillin real(kind=C_DOUBLE) :: droptol, soltol integer :: n -type(t_mesh), intent(in) , target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit + interface subroutine psolver_init(ident, SOL, PCGLOB, PCLOC, lutype, & @@ -2325,7 +2411,10 @@ subroutine psolve(ident, ssh_rhs, values, d_eta, newvalues) bind(C) end subroutine psolve end interface -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ident=1 maxiter=2000 @@ -2374,27 +2463,32 @@ end subroutine psolve ! ! !___________________________________________________________________________ -call exchange_nod(d_eta) !is this required after calling psolve ? +call exchange_nod(d_eta, partit) !is this required after calling psolve ? end subroutine solve_ssh_ale ! ! !=============================================================================== -subroutine impl_vert_visc_ale(mesh) +subroutine impl_vert_visc_ale(partit, mesh) USE MOD_MESH USE o_PARAM USE o_ARRAYS -USE g_PARSUP +USE MOD_PARTIT USE g_CONFIG,only: dt IMPLICIT NONE -type(t_mesh), intent(in) , target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit + real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) integer :: nz, elem, nzmax, nzmin, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -2553,13 +2647,13 @@ end subroutine impl_vert_visc_ale ! ! !=============================================================================== -subroutine oce_timestep_ale(n, tracers, mesh) +subroutine oce_timestep_ale(n, tracers, partit, mesh) use g_config use MOD_MESH use MOD_TRACER use o_ARRAYS use o_PARAM - use g_PARSUP + use MOD_PARTIT use g_comm_auto use io_RESTART !PS use i_ARRAYS !PS @@ -2578,11 +2672,17 @@ subroutine oce_timestep_ale(n, tracers, mesh) use write_step_info_interface use check_blowup_interface IMPLICIT NONE - real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo - integer :: n, node type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers -#include "associate_mesh.h" + + real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo + integer :: n, node + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" t0=MPI_Wtime() @@ -2594,30 +2694,30 @@ subroutine oce_timestep_ale(n, tracers, mesh) !___________________________________________________________________________ ! calculate equation of state, density, pressure and mixed layer depths if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call pressure_bv'//achar(27)//'[0m' - call pressure_bv(tracers, mesh) !!!!! HeRE change is made. It is linear EoS now. + call pressure_bv(tracers, partit, mesh) !!!!! HeRE change is made. It is linear EoS now. !___________________________________________________________________________ ! calculate calculate pressure gradient force if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call pressure_force_4_...'//achar(27)//'[0m' if (trim(which_ale)=='linfs') then - call pressure_force_4_linfs(tracers, mesh) + call pressure_force_4_linfs(tracers, partit, mesh) else - call pressure_force_4_zxxxx(tracers, mesh) + call pressure_force_4_zxxxx(tracers, partit, mesh) end if !___________________________________________________________________________ ! calculate alpha and beta ! it will be used for KPP, Redi, GM etc. Shall we keep it on in general case? - call sw_alpha_beta(tracers%data(1)%values, tracers%data(2)%values, mesh) + call sw_alpha_beta(tracers%data(1)%values, tracers%data(2)%values, partit, mesh) ! computes the xy gradient of a neutral surface; will be used by Redi, GM etc. - call compute_sigma_xy(tracers%data(1)%values,tracers%data(2)%values, mesh) + call compute_sigma_xy(tracers%data(1)%values,tracers%data(2)%values, partit, mesh) ! compute both: neutral slope and tapered neutral slope. Can be later combined with compute_sigma_xy ! will be primarily used for computing Redi diffusivities. etc? - call compute_neutral_slope(mesh) + call compute_neutral_slope(partit, partit, mesh) !___________________________________________________________________________ - call status_check + ! call status_check(partit) !___________________________________________________________________________ ! >>>>>> <<<<<< ! >>>>>> calculate vertical mixing coefficients for tracer (Kv) <<<<<< @@ -2638,37 +2738,37 @@ subroutine oce_timestep_ale(n, tracers, mesh) ! for debugging if (mod(mix_scheme_nmb,10)==6) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_idemix'//achar(27)//'[0m' - call calc_cvmix_idemix(mesh) + call calc_cvmix_idemix(partit, mesh) end if !___MAIN MIXING SCHEMES_____________________________________________________ ! use FESOM2.0 tuned k-profile parameterization for vertical mixing if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_KPP'//achar(27)//'[0m' - call oce_mixing_KPP(Av, Kv_double, tracers, mesh) + call oce_mixing_KPP(Av, Kv_double, tracers, partit, mesh) Kv=Kv_double(:,:,1) - call mo_convect(mesh) + call mo_convect(partit, mesh) ! use FESOM2.0 tuned pacanowski & philander parameterization for vertical ! mixing else if(mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_PP'//achar(27)//'[0m' - call oce_mixing_PP(mesh) - call mo_convect(mesh) + call oce_mixing_PP(partit, mesh) + call mo_convect(partit, mesh) ! use CVMIX KPP (Large at al. 1994) else if(mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_kpp'//achar(27)//'[0m' - call calc_cvmix_kpp(tracers, mesh) - call mo_convect(mesh) + call calc_cvmix_kpp(tracers, partit, mesh) + call mo_convect(partit, mesh) ! use CVMIX PP (Pacanowski and Philander 1981) parameterisation for mixing ! based on Richardson number Ri = N^2/(du/dz)^2, using Brunt Väisälä frequency ! N^2 and vertical horizontal velocity shear dui/dz else if(mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_pp'//achar(27)//'[0m' - call calc_cvmix_pp(mesh) - call mo_convect(mesh) + call calc_cvmix_pp(partit, mesh) + call mo_convect(partit, mesh) ! use CVMIX TKE (turbulent kinetic energy closure) parameterisation for ! vertical mixing with or without the IDEMIX (dissipation of energy by @@ -2676,8 +2776,8 @@ subroutine oce_timestep_ale(n, tracers, mesh) ! Model for the diapycnal diffusivity induced by internal gravity waves" else if(mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_tke'//achar(27)//'[0m' - call calc_cvmix_tke(mesh) - call mo_convect(mesh) + call calc_cvmix_tke(partit, mesh) + call mo_convect(partit, mesh) end if @@ -2689,7 +2789,7 @@ subroutine oce_timestep_ale(n, tracers, mesh) ! mixing schemes if ( mod(mix_scheme_nmb,10)==7) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_tidal'//achar(27)//'[0m' - call calc_cvmix_tidal(mesh) + call calc_cvmix_tidal(partit, mesh) end if t1=MPI_Wtime() @@ -2704,17 +2804,17 @@ subroutine oce_timestep_ale(n, tracers, mesh) !!PS if (any(abs(Wvel_e)>1.0e20)) write(*,*) n, mype,' --> found Inf Wvel_e before compute_vel_rhs' if(mom_adv/=3) then - call compute_vel_rhs(mesh) + call compute_vel_rhs(partit, mesh) else - call compute_vel_rhs_vinv(mesh) + call compute_vel_rhs_vinv(partit, mesh) end if !___________________________________________________________________________ - call viscosity_filter(visc_option, mesh) + call viscosity_filter(visc_option, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call impl_vert_visc_ale'//achar(27)//'[0m' - if(i_vert_visc) call impl_vert_visc_ale(mesh) + if(i_vert_visc) call impl_vert_visc_ale(partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ @@ -2722,30 +2822,30 @@ subroutine oce_timestep_ale(n, tracers, mesh) !___________________________________________________________________________ ! Update stiffness matrix by dhe=hbar(n+1/2)-hbar(n-1/2) on elements, only ! needed for zlevel and zstar - if (.not. trim(which_ale)=='linfs') call update_stiff_mat_ale(mesh) + if (.not. trim(which_ale)=='linfs') call update_stiff_mat_ale(partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_ssh_rhs_ale'//achar(27)//'[0m' ! ssh_rhs=-alpha*\nabla\int(U_n+U_rhs)dz-(1-alpha)*... ! see "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (18) rhs - call compute_ssh_rhs_ale(mesh) + call compute_ssh_rhs_ale(partit, mesh) ! Take updated ssh matrix and solve --> new ssh! t30=MPI_Wtime() - call solve_ssh_ale(mesh) + call solve_ssh_ale(partit, mesh) - if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(mesh) + if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(partit, mesh) t3=MPI_Wtime() ! estimate new horizontal velocity u^(n+1) ! u^(n+1) = u* + [-g * tau * theta * grad(eta^(n+1)-eta^(n)) ] if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call update_vel'//achar(27)//'[0m' - call update_vel(mesh) + call update_vel(partit, mesh) ! --> eta_(n) --> eta_(n+1) = eta_(n) + deta = eta_(n) + (eta_(n+1) + eta_(n)) t4=MPI_Wtime() ! Update to hbar(n+3/2) and compute dhe to be used on the next step if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_hbar_ale'//achar(27)//'[0m' - call compute_hbar_ale(mesh) + call compute_hbar_ale(partit, mesh) !___________________________________________________________________________ ! - Current dynamic elevation alpha*hbar(n+1/2)+(1-alpha)*hbar(n-1/2) @@ -2765,42 +2865,42 @@ subroutine oce_timestep_ale(n, tracers, mesh) !___________________________________________________________________________ ! Do horizontal and vertical scaling of GM/Redi diffusivity if (Fer_GM .or. Redi) then - call init_Redi_GM(mesh) + call init_Redi_GM(partit, mesh) end if ! Implementation of Gent & McWiliams parameterization after R. Ferrari et al., 2010 ! does not belong directly to ALE formalism if (Fer_GM) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call fer_solve_Gamma'//achar(27)//'[0m' - call fer_solve_Gamma(mesh) - call fer_gamma2vel(mesh) + call fer_solve_Gamma(partit, mesh) + call fer_gamma2vel(partit, mesh) end if t6=MPI_Wtime() !___________________________________________________________________________ ! The main step of ALE procedure --> this is were the magic happens --> here ! is decided how change in hbar is distributed over the vertical layers if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call vert_vel_ale'//achar(27)//'[0m' - call vert_vel_ale(mesh) + call vert_vel_ale(partit, mesh) t7=MPI_Wtime() !___________________________________________________________________________ ! solve tracer equation if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call solve_tracers_ale'//achar(27)//'[0m' - call solve_tracers_ale(tracers, mesh) + call solve_tracers_ale(tracers, partit, mesh) t8=MPI_Wtime() !___________________________________________________________________________ ! Update hnode=hnode_new, helem if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call update_thickness_ale'//achar(27)//'[0m' - call update_thickness_ale(mesh) + call update_thickness_ale(partit, mesh) t9=MPI_Wtime() !___________________________________________________________________________ ! write out global fields for debugging - call write_step_info(n,logfile_outfreq, tracers, mesh) + call write_step_info(n,logfile_outfreq, tracers, partit, mesh) ! check model for blowup --> ! write_step_info and check_blowup require ! togeather around 2.5% of model runtime - call check_blowup(n, tracers, mesh) + call check_blowup(n, tracers, partit, mesh) t10=MPI_Wtime() !___________________________________________________________________________ ! write out execution times for ocean step parts diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 731e8cd1d..93784e168 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -8,9 +8,9 @@ MODULE o_mixing_KPP_mod !--------------------------------------------------------------- USE o_PARAM USE MOD_MESH + USE MOD_PARTIT USE MOD_TRACER USE o_ARRAYS - USE g_PARSUP USE g_config USE i_arrays USE g_forcing_arrays @@ -95,7 +95,7 @@ MODULE o_mixing_KPP_mod ! PP: Kv(nl,node_size) and Av(nl,elem_size) ! ******************************************************************* - subroutine oce_mixing_kpp_init(mesh) + subroutine oce_mixing_kpp_init(partit, mesh) IMPLICIT NONE @@ -115,9 +115,12 @@ subroutine oce_mixing_kpp_init(mesh) integer :: i, j - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" allocate ( ghats ( nl-1, myDim_nod2D+eDim_nod2D )) ! nonlocal transport (s/m^2) allocate ( hbl ( myDim_nod2D+eDim_nod2D )) ! boundary layer depth @@ -238,7 +241,7 @@ end subroutine oce_mixing_kpp_init ! diffK = diffusion coefficient (m^2/s) ! !--------------------------------------------------------------- - subroutine oce_mixing_KPP(viscAE, diffK, tracers, mesh) + subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) IMPLICIT NONE @@ -246,23 +249,26 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, mesh) ! Define allocatble arrays under oce_modules.F90 ! Allocate arrays under oce_setup_step.F90 ! ******************************************************************* - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers integer :: node, kn, elem, elnodes(3) integer :: nz, ns, j, q, lay, lay_mi, nzmin, nzmax real(KIND=WP) :: smftu, smftv, aux, vol real(KIND=WP) :: dens_up, minmix real(KIND=WP) :: u_loc, v_loc -!!PS real(kind=WP) :: tsurf, ssurf, t, s real(kind=WP) :: usurf, vsurf real(kind=WP) :: rhopot, bulk, pz real(kind=WP) :: bulk_0, bulk_pz, bulk_pz2 real(kind=WP) :: rho_surf, rho_insitu - real(KIND=WP), dimension(mesh%nl, myDim_elem2D+eDim_elem2D), intent(inout) :: viscAE!for momentum (elements) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D) :: viscA !for momentum (nodes) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D, tracers%num_tracers), intent(inout) :: diffK !for T and S + real(KIND=WP), dimension(mesh%nl, partit%myDim_elem2D+partit%eDim_elem2D), intent(inout) :: viscAE!for momentum (elements) + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D +partit%eDim_nod2D) :: viscA !for momentum (nodes) + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D +partit%eDim_nod2D, tracers%num_tracers), intent(inout) :: diffK !for T and S -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ViscA=0.0_WP DO node=1, myDim_nod2D !+eDim_nod2D @@ -340,30 +346,30 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, mesh) ! compute interior mixing coefficients everywhere, due to constant ! internal wave activity, static instability, and local shear ! instability. - CALL ri_iwmix(viscA, diffK, tracers, mesh) + CALL ri_iwmix(viscA, diffK, tracers, partit, mesh) ! add double diffusion IF (double_diffusion) then - CALL ddmix(diffK, tracers, mesh) + CALL ddmix(diffK, tracers, partit, mesh) END IF ! boundary layer mixing coefficients: diagnose new b.l. depth - CALL bldepth(mesh) + CALL bldepth(partit, mesh) ! boundary layer diffusivities - CALL blmix_kpp(viscA, diffK, mesh) + CALL blmix_kpp(viscA, diffK, partit, mesh) ! enhance diffusivity at interface kbl - 1 - CALL enhance(viscA, diffK, mesh) + CALL enhance(viscA, diffK, partit, mesh) if (smooth_blmc) then - call exchange_nod(blmc(:,:,1)) - call exchange_nod(blmc(:,:,2)) - call exchange_nod(blmc(:,:,3)) + call exchange_nod(blmc(:,:,1), partit) + call exchange_nod(blmc(:,:,2), partit) + call exchange_nod(blmc(:,:,3), partit) do j=1, 3 !_____________________________________________________________________ ! all loops go over myDim_nod2D so no halo information --> for smoothing ! haloinfo is required --> therefor exchange_nod - call smooth_nod(blmc(:,:,j), 3, mesh) + call smooth_nod(blmc(:,:,j), 3, partit, mesh) end do end if @@ -387,12 +393,12 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, mesh) !_____________________________________________________________________________ ! do all node loops only over myDim_nod2D --> therefore do an halo exchange ! only at the end should save some time - call exchange_nod(diffK(:,:,1)) - call exchange_nod(diffK(:,:,2)) - call exchange_nod(ghats) + call exchange_nod(diffK(:,:,1), partit) + call exchange_nod(diffK(:,:,2), partit) + call exchange_nod(ghats, partit) ! OVER ELEMENTS - call exchange_nod(viscA) !Warning: don't forget to communicate before averaging on elements!!! + call exchange_nod(viscA, partit) !Warning: don't forget to communicate before averaging on elements!!! minmix=3.0e-3_WP DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -465,7 +471,7 @@ END SUBROUTINE oce_mixing_kpp ! real caseA(t2d) ! =1 in case A, =0 in case B ! integer kbl(t2d) ! index of first grid level below hbl ! - SUBROUTINE bldepth(mesh) + SUBROUTINE bldepth(partit, mesh) IMPLICIT NONE @@ -478,9 +484,13 @@ SUBROUTINE bldepth(mesh) real(KIND=WP), parameter :: cekman = 0.7_WP ! constant for Ekman depth real(KIND=WP), parameter :: cmonob = 1.0_WP ! constant for Monin-Obukhov depth - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Initialize hbl and kbl to bottomed out values DO node=1, myDim_nod2D !+eDim_nod2D @@ -590,8 +600,8 @@ SUBROUTINE bldepth(mesh) END DO if (smooth_hbl) then - call exchange_nod(hbl) - call smooth_nod(hbl, 3, mesh) + call exchange_nod(hbl, partit) + call smooth_nod(hbl, 3, partit, mesh) end if DO node=1, myDim_nod2D !+eDim_nod2D @@ -718,24 +728,28 @@ END SUBROUTINE wscale ! visc = viscosity coefficient (m**2/s) ! diff = diffusion coefficient (m**2/s) ! - subroutine ri_iwmix(viscA, diffK, tracers, mesh) + subroutine ri_iwmix(viscA, diffK, tracers, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers integer :: node, nz, mr, nzmin, nzmax real(KIND=WP) , parameter :: Riinfty = 0.8_WP ! local Richardson Number limit for shear instability (LMD 1994 uses 0.7) real(KIND=WP) :: ri_prev, tmp real(KIND=WP) :: Rigg, ratio, frit real(KIND=WP) :: dz_inv, shear, aux, dep, lat, Kv0_b - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D ), intent(inout) :: viscA !for momentum (nodes) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D ,tracers%num_tracers), intent(inout) :: diffK !for T and S + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D ), intent(inout) :: viscA !for momentum (nodes) + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D ,tracers%num_tracers), intent(inout) :: diffK !for T and S ! Put them under the namelist.oce logical :: smooth_richardson_number = .false. integer :: num_smoothings = 1 ! for vertical smoothing of Richardson number -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Compute Richardson number and store it as diffK to save memory DO node=1, myDim_nod2D! +eDim_nod2D @@ -776,7 +790,7 @@ subroutine ri_iwmix(viscA, diffK, tracers, mesh) END DO if (smooth_Ri_hor) then - call smooth_nod(diffK(:,:,1), 3, mesh) + call smooth_nod(diffK(:,:,1), 3, partit, mesh) end if !___________________________________________________________________________ @@ -844,11 +858,12 @@ end subroutine ri_iwmix ! ! output: update diffu ! - subroutine ddmix(diffK, tracers, mesh) + subroutine ddmix(diffK, tracers, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers real(KIND=WP), parameter :: Rrho0 = 1.9_WP ! limit for double diffusive density ratio real(KIND=WP), parameter :: dsfmax = 1.e-4_WP ! (m^2/s) max diffusivity in case of salt fingering real(KIND=WP), parameter :: viscosity_molecular = 1.5e-6_WP ! (m^2/s) @@ -857,9 +872,12 @@ subroutine ddmix(diffK, tracers, mesh) real(KIND=WP) :: alphaDT, betaDS real(KIND=WP) :: diffdd, Rrho, prandtl - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D, 2), intent(inout) :: diffK ! for T and S + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D, 2), intent(inout) :: diffK ! for T and S -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO node=1, myDim_nod2D!+eDim_nod2D nzmin = ulevels_nod2D(node) @@ -946,10 +964,11 @@ end subroutine ddmix ! real blmc(3d,3) = boundary layer mixing coeff.(m**2/s) ! real ghats(3d) = nonlocal scalar transport ! - subroutine blmix_kpp(viscA,diffK, mesh) + subroutine blmix_kpp(viscA,diffK, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: node, nz, kn, elem, elnodes(3), knm1, knp1, nl1, nu1 real(KIND=WP) :: delhat, R, dvdzup, dvdzdn real(KIND=WP) :: viscp, difsp, diftp, visch, difsh, difth, f1 @@ -959,10 +978,13 @@ subroutine blmix_kpp(viscA,diffK, mesh) real(KIND=WP) :: dthick(mesh%nl), diff_col(mesh%nl,3), diff_colE(mesh%nl) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D ), intent(inout) :: viscA ! for momentum (nodes) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D, 2 ), intent(inout) :: diffK ! for T and S + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D ), intent(inout) :: viscA ! for momentum (nodes) + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D, 2 ), intent(inout) :: diffK ! for T and S -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" blmc = 0.0_WP @@ -1140,16 +1162,20 @@ end subroutine blmix_kpp ! output ! real blmc(n3,3) = enhanced boundary layer mixing coefficient ! - subroutine enhance(viscA, diffK, mesh) + subroutine enhance(viscA, diffK, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D), intent(inout) :: viscA !for momentum (nodes) - real(kind=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D,2), intent(inout) :: diffK !for T and S + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D), intent(inout) :: viscA !for momentum (nodes) + real(kind=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D,2), intent(inout) :: diffK !for T and S integer :: nz, node, k real(kind=WP) :: delta, dkmp5, dstar -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO node=1, myDim_nod2D !+eDim_nod2D diff --git a/src/oce_ale_mixing_pp.F90 b/src/oce_ale_mixing_pp.F90 index 982882c08..baed158c7 100644 --- a/src/oce_ale_mixing_pp.F90 +++ b/src/oce_ale_mixing_pp.F90 @@ -1,5 +1,5 @@ !======================================================================= -subroutine oce_mixing_pp(mesh) +subroutine oce_mixing_pp(partit, mesh) ! Compute Richardson number dependent Av and Kv following ! Pacanowski and Philander, 1981 ! Av = Avmax * factor**2 + Av0, @@ -15,19 +15,23 @@ subroutine oce_mixing_pp(mesh) ! SD no if in Kv computations (only minor differences are introduced) ! ! -use MOD_MESH +USE MOD_MESH +USE MOD_PARTIT USE o_PARAM USE o_ARRAYS -USE g_PARSUP USE g_config use i_arrays IMPLICIT NONE -type(t_mesh), intent(in) , target :: mesh -real(kind=WP) :: dz_inv, bv, shear, a, rho_up, rho_dn, t, s, Kv0_b -integer :: node, nz, nzmax, nzmin, elem, elnodes(3), i +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +real(kind=WP) :: dz_inv, bv, shear, a, rho_up, rho_dn, t, s, Kv0_b +integer :: node, nz, nzmax, nzmin, elem, elnodes(3), i -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2d(node) diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 3ef70833c..f74ca11a9 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -1,155 +1,187 @@ module densityJM_components_interface interface - subroutine densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + subroutine densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) USE MOD_MESH - real(kind=WP), intent(IN) :: t,s - real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rhopot - type(t_mesh), intent(in) , target :: mesh + USE MOD_PARTIT + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: t,s + real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rhopot end subroutine end interface end module module density_linear_interface interface - subroutine density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, mesh) + subroutine density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, partit, mesh) USE MOD_MESH - real(kind=WP), intent(IN) :: t,s - real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rho_out - type(t_mesh), intent(in) , target :: mesh + USE MOD_PARTIT + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: t,s + real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rho_out end subroutine end interface end module module pressure_force_4_linfs_fullcell_interface interface - subroutine pressure_force_4_linfs_fullcell(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_linfs_fullcell(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module pressure_force_4_linfs_nemo_interface interface - subroutine pressure_force_4_linfs_nemo(tracers, mesh) - use mod_mesh - use mod_tracer - type(t_tracer), intent(in), target :: tracers - type(t_mesh), intent(in), target :: mesh + subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module module pressure_force_4_linfs_shchepetkin_interface interface - subroutine pressure_force_4_linfs_shchepetkin(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module pressure_force_4_linfs_easypgf_interface interface - subroutine pressure_force_4_linfs_easypgf(tracers, mesh) - use mod_mesh - use mod_tracer - type(t_tracer), intent(in), target :: tracers - type(t_mesh), intent(in), target :: mesh + subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER + type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module pressure_force_4_linfs_cubicspline_interface interface - subroutine pressure_force_4_linfs_cubicspline(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_linfs_cubicspline(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module pressure_force_4_linfs_cavity_interface interface - subroutine pressure_force_4_linfs_cavity(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_linfs_cavity(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module pressure_force_4_zxxxx_shchepetkin_interface interface - subroutine pressure_force_4_zxxxx_shchepetkin(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module pressure_force_4_zxxxx_easypgf_interface interface - subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) - use mod_mesh - use mod_tracer - type(t_tracer), intent(in), target :: tracers - type(t_mesh), intent(in), target :: mesh + subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module module pressure_force_4_zxxxx_cubicspline_interface interface - subroutine pressure_force_4_zxxxx_cubicspline(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module init_ref_density_interface interface - subroutine init_ref_density(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine init_ref_density(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module insitu2pot_interface interface - subroutine insitu2pot(tracers, mesh) - use mod_mesh - use mod_tracer - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers + subroutine insitu2pot(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module module pressure_bv_interface interface - subroutine pressure_bv(tracers, mesh) - use mod_mesh - use mod_tracer - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + subroutine pressure_bv(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module module pressure_force_4_linfs_interface interface - subroutine pressure_force_4_linfs(tracers, mesh) - use mod_mesh - use mod_tracer - type(t_tracer), intent(in), target :: tracers - type(t_mesh), intent(in), target :: mesh + subroutine pressure_force_4_linfs(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module module pressure_force_4_zxxxx_interface interface - subroutine pressure_force_4_zxxxx(tracers, mesh) - use mod_mesh - use mod_tracer - type(t_tracer), intent(in), target :: tracers - type(t_mesh), intent(in), target :: mesh + subroutine pressure_force_4_zxxxx(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module ! ! !=============================================================================== -subroutine pressure_bv(tracers, mesh) +subroutine pressure_bv(tracers, partit, mesh) ! fill in the hydrostatic pressure and the Brunt-Vaisala frequency ! in a single pass the using split form of the equation of state ! as proposed by NR @@ -157,25 +189,28 @@ subroutine pressure_bv(tracers, mesh) USE o_PARAM USE MOD_MESH USE MOD_TRACER + USE MOD_PARTIT USE o_ARRAYS - USE g_PARSUP use i_arrays USE o_mixing_KPP_mod, only: dbsfc USE diagnostics, only: ldiag_dMOC use densityJM_components_interface use density_linear_interface IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers - real(kind=WP) :: dz_inv, bv, a, rho_up, rho_dn, t, s - integer :: node, nz, nl1, nzmax, nzmin - real(kind=WP) :: rhopot(mesh%nl), bulk_0(mesh%nl), bulk_pz(mesh%nl), bulk_pz2(mesh%nl), rho(mesh%nl), dbsfc1(mesh%nl), db_max - real(kind=WP) :: bulk_up, bulk_dn, smallvalue, buoyancy_crit, rho_surf, aux_rho, aux_rho1 - real(kind=WP) :: sigma_theta_crit=0.125_WP !kg/m3, Levitus threshold for computing MLD2 - logical :: flag1, flag2, mixing_kpp - - real(kind=WP), dimension(:,:), pointer :: temp, salt -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + real(kind=WP) :: dz_inv, bv, a, rho_up, rho_dn, t, s + integer :: node, nz, nl1, nzmax, nzmin + real(kind=WP) :: rhopot(mesh%nl), bulk_0(mesh%nl), bulk_pz(mesh%nl), bulk_pz2(mesh%nl), rho(mesh%nl), dbsfc1(mesh%nl), db_max + real(kind=WP) :: bulk_up, bulk_dn, smallvalue, buoyancy_crit, rho_surf, aux_rho, aux_rho1 + real(kind=WP) :: sigma_theta_crit=0.125_WP !kg/m3, Levitus threshold for computing MLD2 + logical :: flag1, flag2, mixing_kpp + real(kind=WP), dimension(:,:), pointer :: temp, salt +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) smallvalue=1.0e-20 @@ -231,9 +266,9 @@ subroutine pressure_bv(tracers, mesh) s=salt(nz, node) select case(state_equation) case(0) - call density_linear(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), mesh) + call density_linear(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case(1) - call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), mesh) + call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -295,9 +330,9 @@ subroutine pressure_bv(tracers, mesh) do nz=1, nzmin-1 select case(state_equation) case(0) - call density_linear(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), mesh) + call density_linear(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case(1) - call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), mesh) + call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -420,10 +455,10 @@ end subroutine pressure_bv ! !=============================================================================== ! Calculate pressure gradient force (PGF) for linear free surface case -subroutine pressure_force_4_linfs(tracers, mesh) +subroutine pressure_force_4_linfs(tracers, partit, mesh) use g_config - use g_PARSUP use mod_mesh + use MOD_PARTIT use mod_tracer use pressure_force_4_linfs_fullcell_interface use pressure_force_4_linfs_nemo_interface @@ -432,7 +467,8 @@ subroutine pressure_force_4_linfs(tracers, mesh) use pressure_force_4_linfs_cavity_interface use pressure_force_4_linfs_easypgf_interface implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers real(kind=WP), dimension(:,:), pointer :: temp, salt temp=>tracers%data(1)%values(:,:) @@ -440,15 +476,15 @@ subroutine pressure_force_4_linfs(tracers, mesh) !___________________________________________________________________________ ! calculate pressure gradient force (PGF) for linfs with full cells if ( .not. use_partial_cell .and. .not. use_cavity_partial_cell) then - call pressure_force_4_linfs_fullcell(mesh) + call pressure_force_4_linfs_fullcell(partit, mesh) elseif (use_cavity .and. use_cavity_partial_cell ) then if (trim(which_pgf)=='sergey') then - call pressure_force_4_linfs_cavity(mesh) + call pressure_force_4_linfs_cavity(partit, mesh) elseif (trim(which_pgf)=='shchepetkin') then - call pressure_force_4_linfs_shchepetkin(mesh) + call pressure_force_4_linfs_shchepetkin(partit, mesh) elseif (trim(which_pgf)=='easypgf') then - call pressure_force_4_linfs_easypgf(tracers, mesh) + call pressure_force_4_linfs_easypgf(tracers, partit, mesh) else write(*,*) '________________________________________________________' write(*,*) ' --> ERROR: the choosen form of pressure gradient ' @@ -463,13 +499,13 @@ subroutine pressure_force_4_linfs(tracers, mesh) ! calculate pressure gradient force (PGF) for linfs with partiall cells else ! --> (trim(which_ale)=='linfs' .and. use_partial_cell ) if (trim(which_pgf)=='nemo') then - call pressure_force_4_linfs_nemo(tracers, mesh) + call pressure_force_4_linfs_nemo(tracers, partit, mesh) elseif (trim(which_pgf)=='shchepetkin') then - call pressure_force_4_linfs_shchepetkin(mesh) + call pressure_force_4_linfs_shchepetkin(partit, mesh) elseif (trim(which_pgf)=='cubicspline') then - call pressure_force_4_linfs_cubicspline(mesh) + call pressure_force_4_linfs_cubicspline(partit, mesh) elseif (trim(which_pgf)=='easypgf') then - call pressure_force_4_linfs_easypgf(tracers, mesh) + call pressure_force_4_linfs_easypgf(tracers, partit, mesh) else write(*,*) '________________________________________________________' write(*,*) ' --> ERROR: the choosen form of pressure gradient ' @@ -486,19 +522,21 @@ end subroutine pressure_force_4_linfs ! !=============================================================================== ! calculate pressure gradient force for linfs in case full cells -subroutine pressure_force_4_linfs_fullcell(mesh) +subroutine pressure_force_4_linfs_fullcell(partit, mesh) use o_PARAM use MOD_MESH + use MOD_PARTIT use o_ARRAYS - use g_PARSUP use g_config implicit none - - integer :: elem, elnodes(3), nle, ule, nlz - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nlz -#include "associate_mesh.h" - +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments do elem=1, myDim_elem2D @@ -533,27 +571,30 @@ end subroutine pressure_force_4_linfs_fullcell ! Calculate pressure gradient force (PGF) like in NEMO based on NEMO ocean engine ! Gurvan Madec, and the NEMO team gurvan.madec@locean-ipsl.umpc.fr, nemo st@locean-ipsl.umpc.fr ! November 2015, – version 3.6 stable – -subroutine pressure_force_4_linfs_nemo(tracers, mesh) +subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) use o_PARAM use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_ARRAYS - use g_PARSUP use g_config use densityJM_components_interface use density_linear_interface implicit none - - logical :: do_interpTS=.true. - integer :: elem, elnodes(3), nle, ule, nlz, nln(3), uln(3), ni, nlc, nlce - real(kind=WP) :: hpress_n_bottom(3) - real(kind=WP) :: interp_n_dens(3), interp_n_temp, interp_n_salt, & - dZn, dZn_i, dh, dval, mean_e_rho,dZn_rho_grad(2) - real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + logical :: do_interpTS=.true. + integer :: elem, elnodes(3), nle, ule, nlz, nln(3), uln(3), ni, nlc, nlce + real(kind=WP) :: hpress_n_bottom(3) + real(kind=WP) :: interp_n_dens(3), interp_n_temp, interp_n_salt, & + dZn, dZn_i, dh, dval, mean_e_rho,dZn_rho_grad(2) + real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 real(kind=WP), dimension(:,:), pointer :: temp, salt -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ @@ -661,9 +702,9 @@ subroutine pressure_force_4_linfs_nemo(tracers, mesh) ! salinity select case(state_equation) case(0) - call density_linear(interp_n_temp, interp_n_salt, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + call density_linear(interp_n_temp, interp_n_salt, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) case(1) - call densityJM_components(interp_n_temp, interp_n_salt, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + call densityJM_components(interp_n_temp, interp_n_salt, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -706,19 +747,22 @@ end subroutine pressure_force_4_linfs_nemo ! --> based on density jacobian method ... ! calculate PGF for linfs with partiell cell on/off ! First coded by P. Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_linfs_shchepetkin(mesh) +subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) use o_PARAM use MOD_MESH + use MOD_PARTIT use o_ARRAYS - use g_PARSUP use g_config implicit none - - integer :: elem, elnodes(3), nle, ule, nlz, idx(3),ni - real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum - real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nlz, idx(3),ni + real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum + real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments do elem=1, myDim_elem2D @@ -957,30 +1001,32 @@ end subroutine pressure_force_4_linfs_shchepetkin !=============================================================================== ! Calculate pressure gradient force (PGF) ! First coded by P. Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_linfs_easypgf(tracers, mesh) +subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) use o_PARAM use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_ARRAYS - use g_PARSUP use g_config use densityJM_components_interface use density_linear_interface implicit none - - integer :: elem, elnodes(3), nle, ule, nlz, idx(3),ni - real(kind=WP) :: int_dp_dx(2), drho_dx, aux_sum - real(kind=WP) :: dx10(3), dx20(3), dx21(3) - real(kind=WP) :: t0(3), dt10(3), dt21(3) - real(kind=WP) :: s0(3), ds10(3), ds21(3) - real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref - real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) - real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 - - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + integer :: elem, elnodes(3), nle, ule, nlz, idx(3),ni + real(kind=WP) :: int_dp_dx(2), drho_dx, aux_sum + real(kind=WP) :: dx10(3), dx20(3), dx21(3) + real(kind=WP) :: t0(3), dt10(3), dt21(3) + real(kind=WP) :: s0(3), ds10(3), ds21(3) + real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref + real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) + real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 real(kind=WP), dimension(:,:), pointer :: temp, salt -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) @@ -1013,9 +1059,9 @@ subroutine pressure_force_4_linfs_easypgf(tracers, mesh) if (use_cavity .and. .not. use_density_ref) then select case(state_equation) case(0) - call density_linear(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, mesh) + call density_linear(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case(1) - call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, mesh) + call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -1103,9 +1149,9 @@ subroutine pressure_force_4_linfs_easypgf(tracers, mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -1142,9 +1188,9 @@ subroutine pressure_force_4_linfs_easypgf(tracers, mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -1245,9 +1291,9 @@ subroutine pressure_force_4_linfs_easypgf(tracers, mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -1284,9 +1330,9 @@ subroutine pressure_force_4_linfs_easypgf(tracers, mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -1317,23 +1363,26 @@ end subroutine pressure_force_4_linfs_easypgf !=============================================================================== ! Calculate pressure gradient force (PGF) via cubicspline used in FEOSM1.4 ! First coded by Q. Wang for FESOM1.4, adapted by P. Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_linfs_cubicspline(mesh) +subroutine pressure_force_4_linfs_cubicspline(partit, mesh) use o_PARAM use MOD_MESH + use MOD_PARTIT use o_ARRAYS - use g_PARSUP use g_config implicit none - - integer :: elem, elnodes(3), nle, ule, nlz, nlc, ni, node, nln(3), uln(3), dd - real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, drho_dz, auxp - real(kind=WP) :: dx10, dx20, dx21, df10, df21 - real(kind=WP) :: interp_n_dens(3) - integer :: s_ind(4) - real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, s_dup, s_dlo - real(kind=WP) :: a, b, c, d, dz - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nlz, nlc, ni, node, nln(3), uln(3), dd + real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, drho_dz, auxp + real(kind=WP) :: dx10, dx20, dx21, df10, df21 + real(kind=WP) :: interp_n_dens(3) + integer :: s_ind(4) + real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, s_dup, s_dlo + real(kind=WP) :: a, b, c, d, dz +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments do elem=1, myDim_elem2D @@ -1516,20 +1565,23 @@ end subroutine pressure_force_4_linfs_cubicspline !=============================================================================== ! calculate pressure gradient force for linfs in case cavities are used with ! surface partial cells or bottom partial cells -subroutine pressure_force_4_linfs_cavity(mesh) +subroutine pressure_force_4_linfs_cavity(partit, mesh) use o_PARAM use MOD_MESH + use MOD_PARTIT use o_ARRAYS - use g_PARSUP use g_config implicit none - - integer :: elem, elnodes(3), nle, ule, nlz, idx(3), ni - real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum - real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nlz, idx(3), ni + real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum + real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments @@ -1726,25 +1778,25 @@ end subroutine pressure_force_4_linfs_cavity ! !=============================================================================== ! Calculate pressure gradient force (PGF) for full free surface case zlevel and zstar -subroutine pressure_force_4_zxxxx(tracers, mesh) - use g_PARSUP - use g_config +subroutine pressure_force_4_zxxxx(tracers, partit, mesh) use mod_mesh + use MOD_PARTIT use mod_tracer + use g_config use pressure_force_4_zxxxx_shchepetkin_interface use pressure_force_4_zxxxx_cubicspline_interface use pressure_force_4_zxxxx_easypgf_interface implicit none - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers - + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers !___________________________________________________________________________ if (trim(which_pgf)=='shchepetkin') then - call pressure_force_4_zxxxx_shchepetkin(mesh) + call pressure_force_4_zxxxx_shchepetkin(partit, mesh) elseif (trim(which_pgf)=='cubicspline') then - call pressure_force_4_zxxxx_cubicspline(mesh) + call pressure_force_4_zxxxx_cubicspline(partit, mesh) elseif (trim(which_pgf)=='easypgf' ) then - call pressure_force_4_zxxxx_easypgf(tracers, mesh) + call pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) else write(*,*) '________________________________________________________' write(*,*) ' --> ERROR: the choosen form of pressure gradient ' @@ -1765,22 +1817,25 @@ end subroutine pressure_force_4_zxxxx ! interpolation. ! First coded by Q. Wang for FESOM1.4, adapted by P. Scholz for FESOM2.0 ! 26.04.2018 -subroutine pressure_force_4_zxxxx_cubicspline(mesh) +subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) use o_PARAM use MOD_MESH + use MOD_PARTIT use o_ARRAYS - use g_PARSUP use g_config implicit none - - integer :: elem, elnodes(3), nle, ule, nln(3), uln(3), nlz, nlc,dd - integer :: ni, node, dens_ind,kk - real(kind=WP) :: ze - integer :: s_ind(4) - real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, aux(2), s_dup, s_dlo - real(kind=WP) :: a, b, c, d, dz, rho_n(3), rhograd_e(2), p_grad(2) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nln(3), uln(3), nlz, nlc,dd + integer :: ni, node, dens_ind,kk + real(kind=WP) :: ze + integer :: s_ind(4) + real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, aux(2), s_dup, s_dlo + real(kind=WP) :: a, b, c, d, dz, rho_n(3), rhograd_e(2), p_grad(2) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments do elem=1, myDim_elem2D @@ -1946,22 +2001,25 @@ end subroutine pressure_force_4_zxxxx_cubicspline ! --> based on density jacobian method ... ! calculate PGF for linfs with partiell cell on/off ! First coded by P. Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_zxxxx_shchepetkin(mesh) +subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) use o_PARAM use MOD_MESH + use MOD_PARTIT use o_ARRAYS - use g_PARSUP use g_config use densityJM_components_interface use density_linear_interface implicit none - - integer :: elem, elnodes(3), nle,ule, nlz, nln(3), ni, nlc, nlce, idx(3) - real(kind=WP) :: int_dp_dx(2), drho_dx, drho_dy, drho_dz(3), dz_dx, dz_dy, aux_sum - real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3) - real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle,ule, nlz, nln(3), ni, nlc, nlce, idx(3) + real(kind=WP) :: int_dp_dx(2), drho_dx, drho_dy, drho_dz(3), dz_dx, dz_dy, aux_sum + real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3) + real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments @@ -2184,30 +2242,33 @@ end subroutine pressure_force_4_zxxxx_shchepetkin ! --> based on density jacobian method ... ! calculate PGF for linfs with partiell cell on/off ! First coded by P. Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) +subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) use o_PARAM use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_ARRAYS - use g_PARSUP use g_config use densityJM_components_interface use density_linear_interface implicit none - - integer :: elem, elnodes(3), nle,ule, nlz, nln(3), ni, nlc, nlce, idx(3) - real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, drho_dy, dz_dy,aux_sum - real(kind=WP) :: dx10(3), dx20(3), dx21(3) - real(kind=WP) :: f0(3), df10(3), df21(3) - real(kind=WP) :: t0(3), dt10(3), dt21(3) - real(kind=WP) :: s0(3), ds10(3), ds21(3) - real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref - real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) - real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + integer :: elem, elnodes(3), nle,ule, nlz, nln(3), ni, nlc, nlce, idx(3) + real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, drho_dy, dz_dy,aux_sum + real(kind=WP) :: dx10(3), dx20(3), dx21(3) + real(kind=WP) :: f0(3), df10(3), df21(3) + real(kind=WP) :: t0(3), dt10(3), dt21(3) + real(kind=WP) :: s0(3), ds10(3), ds21(3) + real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref + real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) + real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 real(kind=WP), dimension(:,:), pointer :: temp, salt -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ @@ -2239,9 +2300,9 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) if (use_cavity .and. .not. use_density_ref) then select case(state_equation) case(0) - call density_linear(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, mesh) + call density_linear(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case(1) - call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, mesh) + call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -2318,9 +2379,9 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -2367,9 +2428,9 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -2447,13 +2508,13 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(1), salt_at_Zn(1), bulk_0(1), bulk_pz(1), bulk_pz2(1), rhopot(1), mesh) - call density_linear(temp_at_Zn(2), salt_at_Zn(2), bulk_0(2), bulk_pz(2), bulk_pz2(2), rhopot(2), mesh) - call density_linear(temp_at_Zn(3), salt_at_Zn(3), bulk_0(3), bulk_pz(3), bulk_pz2(3), rhopot(3), mesh) + call density_linear(temp_at_Zn(1), salt_at_Zn(1), bulk_0(1), bulk_pz(1), bulk_pz2(1), rhopot(1), partit, mesh) + call density_linear(temp_at_Zn(2), salt_at_Zn(2), bulk_0(2), bulk_pz(2), bulk_pz2(2), rhopot(2), partit, mesh) + call density_linear(temp_at_Zn(3), salt_at_Zn(3), bulk_0(3), bulk_pz(3), bulk_pz2(3), rhopot(3), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(1), salt_at_Zn(1), bulk_0(1), bulk_pz(1), bulk_pz2(1), rhopot(1), mesh) - call densityJM_components(temp_at_Zn(2), salt_at_Zn(2), bulk_0(2), bulk_pz(2), bulk_pz2(2), rhopot(2), mesh) - call densityJM_components(temp_at_Zn(3), salt_at_Zn(3), bulk_0(3), bulk_pz(3), bulk_pz2(3), rhopot(3), mesh) + call densityJM_components(temp_at_Zn(1), salt_at_Zn(1), bulk_0(1), bulk_pz(1), bulk_pz2(1), rhopot(1), partit, mesh) + call densityJM_components(temp_at_Zn(2), salt_at_Zn(2), bulk_0(2), bulk_pz(2), bulk_pz2(2), rhopot(2), partit, mesh) + call densityJM_components(temp_at_Zn(3), salt_at_Zn(3), bulk_0(3), bulk_pz(3), bulk_pz2(3), rhopot(3), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -2532,9 +2593,9 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -2581,9 +2642,9 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' call par_ex(1) @@ -2624,11 +2685,11 @@ end subroutine pressure_force_4_zxxxx_easypgf ! ! !=============================================================================== -SUBROUTINE densityJM_local(t, s, pz, rho_out, mesh) +SUBROUTINE densityJM_local(t, s, pz, rho_out, partit, mesh) USE MOD_MESH +use MOD_PARTIT !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM -use g_PARSUP !, only: par_ex,pe_status use densityJM_components_interface IMPLICIT NONE @@ -2640,16 +2701,19 @@ SUBROUTINE densityJM_local(t, s, pz, rho_out, mesh) ! - has been derived from the SPEM subroutine rhocal ! !--------------------------------------------------------------------------- - - real(kind=WP), intent(IN) :: t,s,pz - real(kind=WP), intent(OUT) :: rho_out - real(kind=WP) :: rhopot, bulk - real(kind=WP) :: bulk_0, bulk_pz, bulk_pz2 - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: t,s,pz + real(kind=WP), intent(OUT) :: rho_out + real(kind=WP) :: rhopot, bulk + real(kind=WP) :: bulk_0, bulk_pz, bulk_pz2 +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !compute secant bulk modulus - call densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + call densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) bulk = bulk_0 + pz*(bulk_pz + pz*bulk_pz2) @@ -2659,11 +2723,11 @@ end subroutine densityJM_local ! ! !=============================================================================== -SUBROUTINE densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) +SUBROUTINE densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) USE MOD_MESH +use MOD_PARTIT !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM -use g_PARSUP !, only: par_ex,pe_status IMPLICIT NONE ! @@ -2677,9 +2741,11 @@ SUBROUTINE densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) !--------------------------------------------------------------------------- ! N. Rakowski 2014 the split form !--------------------------------------------------------------------------- - real(kind=WP), intent(IN) :: t,s - real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rhopot - real(kind=WP) :: s_sqrt + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: t,s + real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rhopot + real(kind=WP) :: s_sqrt real(kind=WP), parameter :: a0 = 19092.56, at = 209.8925 real(kind=WP), parameter :: at2 = -3.041638, at3 = -1.852732e-3 @@ -2705,9 +2771,11 @@ SUBROUTINE densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) real(kind=WP), parameter :: bst4 = 5.38750e-9 real(kind=WP), parameter :: bss = -5.72466e-3, bsst = 1.02270e-4 real(kind=WP), parameter :: bsst2 = -1.65460e-6,bss2 = 4.8314e-4 - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !compute secant bulk modulus @@ -2809,7 +2877,7 @@ end function atg ! ! !=============================================================================== -subroutine sw_alpha_beta(TF1,SF1, mesh) +subroutine sw_alpha_beta(TF1,SF1, partit, mesh) ! DESCRIPTION: ! A function to calculate the thermal expansion coefficient ! and saline contraction coefficient. (elementwise) @@ -2834,19 +2902,23 @@ subroutine sw_alpha_beta(TF1,SF1, mesh) ! a_over_b=0.34765 psu*C^-1 @ S=40.0psu, ptmp=10.0C, p=4000db !----------------------------------------------------------------- use mod_mesh + use MOD_PARTIT use o_arrays - use g_parsup use o_param use g_comm_auto implicit none ! - type(t_mesh), intent(in) , target :: mesh - integer :: n, nz, nzmin, nzmax - real(kind=WP) :: t1,t1_2,t1_3,t1_4,p1,p1_2,p1_3,s1,s35,s35_2 - real(kind=WP) :: a_over_b - real(kind=WP) :: TF1(mesh%nl-1, myDim_nod2D+eDim_nod2D),SF1(mesh%nl-1, myDim_nod2D+eDim_nod2D) + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: n, nz, nzmin, nzmax + real(kind=WP) :: t1,t1_2,t1_3,t1_4,p1,p1_2,p1_3,s1,s35,s35_2 + real(kind=WP) :: a_over_b + real(kind=WP) :: TF1(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D),SF1(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" do n = 1,myDim_nod2d nzmin = ulevels_nod2d(n) @@ -2892,14 +2964,14 @@ subroutine sw_alpha_beta(TF1,SF1, mesh) sw_alpha(nz,n) = a_over_b*sw_beta(nz,n) end do end do -call exchange_nod(sw_alpha) -call exchange_nod(sw_beta) +call exchange_nod(sw_alpha, partit) +call exchange_nod(sw_beta, partit) end subroutine sw_alpha_beta ! ! ! !=============================================================================== -subroutine compute_sigma_xy(TF1,SF1, mesh) +subroutine compute_sigma_xy(TF1,SF1, partit, mesh) !-------------------------------------------------------------------- ! DESCRIPTION: ! computes density gradient @@ -2908,23 +2980,27 @@ subroutine compute_sigma_xy(TF1,SF1, mesh) ! SF = salinity [psu (PSS-78)] ! TF = potential temperature [degree C (ITS-90)] ! - ! OUTPUT: + ! OUTPUT:7 ! based on thermal expansion and saline contraction coefficients ! computes density gradient sigma_xy !------------------------------------------------------------------- use mod_mesh + use MOD_PARTIT use o_param use o_arrays - use g_parsup use g_comm_auto implicit none ! - type(t_mesh), intent(in) , target :: mesh - real(kind=WP), intent(IN) :: TF1(mesh%nl-1, myDim_nod2D+eDim_nod2D), SF1(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP) :: tx(mesh%nl-1), ty(mesh%nl-1), sx(mesh%nl-1), sy(mesh%nl-1), vol(mesh%nl-1), testino(2) - integer :: n, nz, elnodes(3),el, k, nln, uln, nle, ule + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: TF1(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D), SF1(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: tx(mesh%nl-1), ty(mesh%nl-1), sx(mesh%nl-1), sy(mesh%nl-1), vol(mesh%nl-1), testino(2) + integer :: n, nz, elnodes(3),el, k, nln, uln, nle, ule -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! DO n=1, myDim_nod2D nln = nlevels_nod2D(n)-1 @@ -2972,27 +3048,31 @@ subroutine compute_sigma_xy(TF1,SF1, mesh) sigma_xy(2,uln:nln,n) = (-sw_alpha(uln:nln,n)*ty(uln:nln)+sw_beta(uln:nln,n)*sy(uln:nln))/vol(uln:nln)*density_0 END DO - call exchange_nod(sigma_xy) + call exchange_nod(sigma_xy, partit) end subroutine compute_sigma_xy ! ! ! !=============================================================================== -subroutine compute_neutral_slope(mesh) +subroutine compute_neutral_slope(partit, mesh) use o_ARRAYS - use g_PARSUP + use MOD_PARTIT use MOD_MESH use o_param use g_config use g_comm_auto IMPLICIT NONE - real(kind=WP) :: deltaX1,deltaY1,deltaX2,deltaY2 - integer :: edge - integer :: n,nz,nl1,ul1,el(2),elnodes(3),enodes(2) - real(kind=WP) :: c, ro_z_inv,eps,S_cr,S_d - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP) :: deltaX1,deltaY1,deltaX2,deltaY2 + integer :: edge + integer :: n,nz,nl1,ul1,el(2),elnodes(3),enodes(2) + real(kind=WP) :: c, ro_z_inv,eps,S_cr,S_d -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !if sigma_xy is not computed eps=5.0e-6_WP S_cr=1.0e-2_WP @@ -3017,8 +3097,8 @@ subroutine compute_neutral_slope(mesh) enddo enddo - call exchange_nod(neutral_slope) - call exchange_nod(slope_tapered) + call exchange_nod(neutral_slope, partit) + call exchange_nod(slope_tapered, partit) end subroutine compute_neutral_slope ! ! @@ -3026,22 +3106,25 @@ end subroutine compute_neutral_slope !=============================================================================== !converts insitu temperature to a potential one ! tracers%data(1)%values will be modified! -subroutine insitu2pot(tracers, mesh) +subroutine insitu2pot(tracers, partit, mesh) use mod_mesh + use MOD_PARTIT use mod_tracer use o_param use o_arrays use g_config - use g_PARSUP implicit none - real(kind=WP), external :: ptheta - real(kind=WP) :: pp, pr, tt, ss - integer :: n, nz, nzmin,nzmax - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers - real(kind=WP), dimension(:,:), pointer :: temp, salt - -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + real(kind=WP), external :: ptheta + real(kind=WP) :: pp, pr, tt, ss + integer :: n, nz, nzmin,nzmax + real(kind=WP), dimension(:,:), pointer :: temp, salt +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) ! Convert in situ temperature into potential temperature @@ -3069,21 +3152,24 @@ end subroutine insitu2pot ! ! !=============================================================================== -SUBROUTINE density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, mesh) +SUBROUTINE density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, partit, mesh) !coded by Margarita Smolentseva, 21.05.2020 USE MOD_MESH +use MOD_PARTIT !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM -use g_PARSUP !, only: par_ex,pe_status use g_config !, only: which_toy, toy_ocean IMPLICIT NONE - - real(kind=WP), intent(IN) :: t,s - real(kind=WP), intent(OUT) :: rho_out - real(kind=WP) :: rhopot, bulk - real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2 - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: t,s + real(kind=WP), intent(OUT) :: rho_out + real(kind=WP) :: rhopot, bulk + real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2 +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !compute secant bulk modulus bulk_0 = 1 @@ -3101,25 +3187,29 @@ end subroutine density_linear ! ! !=============================================================================== -subroutine init_ref_density(mesh) +subroutine init_ref_density(partit, mesh) ! compute reference density ! Coded by Qiang Wang ! Reviewed by ?? !___________________________________________________________________________ USE MOD_MESH + use MOD_PARTIT use o_PARAM use o_ARRAYS - use g_PARSUP use densityJM_components_interface implicit none !___________________________________________________________________________ - type(t_mesh), intent(in) , target :: mesh - integer :: node, nz, nzmin, nzmax - real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2, rho - real(kind=8) :: T, S, auxz + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: node, nz, nzmin, nzmax + real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2, rho + real(kind=8) :: T, S, auxz -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ !!PS S=34. @@ -3134,7 +3224,7 @@ subroutine init_ref_density(mesh) auxz=min(0.0,Z_3d_n(nzmin,node)) !_______________________________________________________________________ - call densityJM_components(density_ref_T, density_ref_S, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + call densityJM_components(density_ref_T, density_ref_S, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) rho = bulk_0 + auxz*bulk_pz + auxz*bulk_pz2 density_ref(nzmin,node) = rho*rhopot/(rho+0.1_WP*auxz) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 233b3f17d..ab9350f3b 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1,101 +1,116 @@ module diff_part_hor_redi_interface interface - subroutine diff_part_hor_redi(tr_num, tracer, mesh) + subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) use mod_mesh + use mod_partit use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module adv_tracers_ale_interface interface - subroutine adv_tracers_ale(dt, tr_num, tracer, mesh) + subroutine adv_tracers_ale(dt, tr_num, tracer, partit, mesh) use mod_mesh + use mod_partit use mod_tracer real(kind=WP), intent(in), target :: dt integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module diff_ver_part_expl_ale_interface interface - subroutine diff_ver_part_expl_ale(tr_num, tracer, mesh) + subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) use mod_mesh + use mod_partit use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module diff_ver_part_redi_expl_interface interface - subroutine diff_ver_part_redi_expl(tr_num, tracer, mesh) + subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) use mod_mesh + use mod_partit use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module diff_ver_part_impl_ale_interface interface - subroutine diff_ver_part_impl_ale(tr_num, tracer, mesh) + subroutine diff_ver_part_impl_ale(tr_num, tracer, partit, mesh) use mod_mesh + use mod_partit use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module diff_tracers_ale_interface interface - subroutine diff_tracers_ale(tr_num, tracer, mesh) + subroutine diff_tracers_ale(tr_num, tracer, partit, mesh) use mod_mesh + use mod_partit use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module bc_surface_interface interface - function bc_surface(n, id, sval, mesh) + function bc_surface(n, id, sval, partit) use mod_mesh - integer , intent(in) :: n, id - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: bc_surface - real(kind=WP), intent(in) :: sval + use mod_partit + integer , intent(in) :: n, id + type(t_partit), intent(inout), target :: partit + real(kind=WP) :: bc_surface + real(kind=WP), intent(in) :: sval end function end interface end module module diff_part_bh_interface interface - subroutine diff_part_bh(tr_num, tracer, mesh) - use g_PARSUP + subroutine diff_part_bh(tr_num, tracer, partit, mesh) use mod_mesh + use mod_partit use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module solve_tracers_ale_interface interface - subroutine solve_tracers_ale(tracers, mesh) - use g_PARSUP + subroutine solve_tracers_ale(tracers, partit, mesh) use mod_mesh + use mod_partit use mod_tracer type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module @@ -103,12 +118,12 @@ subroutine solve_tracers_ale(tracers, mesh) ! !=============================================================================== ! Driving routine Here with ALE changes!!! -subroutine solve_tracers_ale(tracers, mesh) +subroutine solve_tracers_ale(tracers, partit, mesh) use g_config - use g_parsup use o_PARAM, only: SPP, Fer_GM use o_arrays use mod_mesh + use mod_partit use mod_tracer use g_comm_auto use o_tracers @@ -119,12 +134,16 @@ subroutine solve_tracers_ale(tracers, mesh) implicit none type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: tr_num, node, nzmax, nzmin -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ - if (SPP) call cal_rejected_salt(mesh) - if (SPP) call app_rejected_salt(tracers%data(2)%values, mesh) + if (SPP) call cal_rejected_salt(partit, mesh) + if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) !___________________________________________________________________________ ! update 3D velocities with the bolus velocities: ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 @@ -140,22 +159,22 @@ subroutine solve_tracers_ale(tracers, mesh) ! do tracer AB (Adams-Bashfort) interpolation only for advectiv part ! needed if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call init_tracers_AB'//achar(27)//'[0m' - call init_tracers_AB(tr_num, tracers, mesh) + call init_tracers_AB(tr_num, tracers, partit, mesh) ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' - call adv_tracers_ale(dt, tr_num, tracers, mesh) + call adv_tracers_ale(dt, tr_num, tracers, partit, mesh) ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' - call diff_tracers_ale(tr_num, tracers, mesh) + call diff_tracers_ale(tr_num, tracers, partit, mesh) ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' ! if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then if ((toy_ocean) .AND. ((TRIM(which_toy)=="soufflet"))) then - call relax_zonal_temp(tracers%data(1), mesh) + call relax_zonal_temp(tracers%data(1), partit, mesh) else - call relax_to_clim(tr_num, tracers, mesh) + call relax_to_clim(tr_num, tracers, partit, mesh) end if - call exchange_nod(tracers%data(tr_num)%values(:,:)) + call exchange_nod(tracers%data(tr_num)%values(:,:), partit) end do !___________________________________________________________________________ do tr_num=1, ptracers_restore_total @@ -189,10 +208,10 @@ end subroutine solve_tracers_ale ! ! !=============================================================================== -subroutine adv_tracers_ale(dt, tr_num, tracers, mesh) +subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) use g_config, only: flag_debug - use g_parsup use mod_mesh + use mod_partit use mod_tracer use o_arrays use diagnostics, only: ldiag_DVD, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & @@ -205,6 +224,7 @@ subroutine adv_tracers_ale(dt, tr_num, tracers, mesh) integer :: node, nz integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers ! del_ttf ... initialised and setted to zero in call init_tracers_AB(tr_num) ! --> del_ttf ... equivalent to R_T^n in Danilov etal FESOM2: "from finite element @@ -216,8 +236,8 @@ subroutine adv_tracers_ale(dt, tr_num, tracers, mesh) ! if ldiag_DVD=.true. --> compute tracer second moments for the calcualtion ! of discret variance decay if (ldiag_DVD .and. tr_num <= 2) then - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd_2ndmoment'//achar(27)//'[0m' - call compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers,mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd_2ndmoment'//achar(27)//'[0m' + call compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers, partit, mesh) end if !___________________________________________________________________________ @@ -225,7 +245,7 @@ subroutine adv_tracers_ale(dt, tr_num, tracers, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, mesh) + call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -234,18 +254,18 @@ subroutine adv_tracers_ale(dt, tr_num, tracers, mesh) !___________________________________________________________________________ ! compute discrete variance decay after Burchard and Rennau 2008 if (ldiag_DVD .and. tr_num <= 2) then - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd'//achar(27)//'[0m' - call compute_diag_dvd(tr_num, tracers, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd'//achar(27)//'[0m' + call compute_diag_dvd(tr_num, tracers, partit, mesh) end if end subroutine adv_tracers_ale ! ! !=============================================================================== -subroutine diff_tracers_ale(tr_num, tracers, mesh) +subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) use mod_mesh + use mod_partit use mod_tracer - use g_PARSUP use o_arrays use o_tracers use diff_part_hor_redi_interface @@ -259,9 +279,13 @@ subroutine diff_tracers_ale(tr_num, tracers, mesh) integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit real(kind=WP), pointer :: del_ttf(:,:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" del_ttf => tracers%work%del_ttf !___________________________________________________________________________ @@ -274,13 +298,13 @@ subroutine diff_tracers_ale(tr_num, tracers, mesh) ! write there also horizontal diffusion rhs to del_ttf which is equal the R_T^n ! in danilovs srcipt ! includes Redi diffusivity if Redi=.true. - call diff_part_hor_redi(tr_num, tracers, mesh) ! seems to be ~9% faster than diff_part_hor + call diff_part_hor_redi(tr_num, tracers, partit, mesh) ! seems to be ~9% faster than diff_part_hor !___________________________________________________________________________ ! do vertical diffusion: explicite - if (.not. tracers%i_vert_diff) call diff_ver_part_expl_ale(tr_num, tracers, mesh) + if (.not. tracers%i_vert_diff) call diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) ! A projection of horizontal Redi diffussivity onto vertical. This par contains horizontal ! derivatives and has to be computed explicitly! - if (Redi) call diff_ver_part_redi_expl(tr_num, tracers, mesh) + if (Redi) call diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) !___________________________________________________________________________ ! Update tracers --> calculate T* see Danilov etal "FESOM2 from finite elements @@ -307,32 +331,33 @@ subroutine diff_tracers_ale(tr_num, tracers, mesh) !___________________________________________________________________________ if (tracers%i_vert_diff) then ! do vertical diffusion: implicite - call diff_ver_part_impl_ale(tr_num, tracers, mesh) + call diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) end if !We DO not set del_ttf to zero because it will not be used in this timestep anymore !init_tracers will set it to zero for the next timestep !init_tracers will set it to zero for the next timestep if (tracers%smooth_bh_tra) then - call diff_part_bh(tr_num, tracers, mesh) ! alpply biharmonic diffusion (implemented as filter) + call diff_part_bh(tr_num, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) end if end subroutine diff_tracers_ale ! ! !=============================================================================== !Vertical diffusive flux(explicit scheme): -subroutine diff_ver_part_expl_ale(tr_num, tracers, mesh) +subroutine diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) use o_ARRAYS use g_forcing_arrays use MOD_MESH + use MOD_PARTIT use MOD_TRACER - use g_PARSUP use g_config,only: dt implicit none integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit real(kind=WP) :: vd_flux(mesh%nl-1) real(kind=WP) :: rdata,flux,rlx integer :: nz,nl1,ul1,n @@ -340,7 +365,10 @@ subroutine diff_ver_part_expl_ale(tr_num, tracers, mesh) real(kind=WP), pointer :: del_ttf(:,:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" del_ttf => tracers%work%del_ttf @@ -396,13 +424,14 @@ end subroutine diff_ver_part_expl_ale ! !=============================================================================== ! vertical diffusivity augmented with Redi contribution [vertical flux of K(3,3)*d_zT] -subroutine diff_ver_part_impl_ale(tr_num, tracers, mesh) +subroutine diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_PARAM use o_ARRAYS use i_ARRAYS - use g_PARSUP + use MOD_PARTIT use g_CONFIG use g_forcing_arrays use o_mixing_KPP_mod !for ghats _GO_ @@ -413,6 +442,7 @@ subroutine diff_ver_part_impl_ale(tr_num, tracers, mesh) integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) integer :: nz, n, nzmax,nzmin @@ -424,7 +454,10 @@ subroutine diff_ver_part_impl_ale(tr_num, tracers, mesh) real(kind=WP), dimension(:,:), pointer :: trarr -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" trarr=>tracers%data(tr_num)%values(:,:) !___________________________________________________________________________ if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. @@ -801,7 +834,7 @@ subroutine diff_ver_part_impl_ale(tr_num, tracers, mesh) ! (BUT CHECK!) | | | | ! v (+) v (+) ! - tr(nzmin)= tr(nzmin)+bc_surface(n, tracers%data(tr_num)%ID, trarr(mesh%ulevels_nod2D(n),n), mesh) + tr(nzmin)= tr(nzmin)+bc_surface(n, tracers%data(tr_num)%ID, trarr(mesh%ulevels_nod2D(n),n), partit) !_______________________________________________________________________ ! The forward sweep algorithm to solve the three-diagonal matrix @@ -855,10 +888,10 @@ end subroutine diff_ver_part_impl_ale ! ! !=============================================================================== -subroutine diff_ver_part_redi_expl(tr_num, tracers, mesh) +subroutine diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) use o_ARRAYS - use g_PARSUP use MOD_MESH + use MOD_PARTIT use MOD_TRACER USE o_param use g_config @@ -867,13 +900,17 @@ subroutine diff_ver_part_redi_expl(tr_num, tracers, mesh) integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: elem,k integer :: n2,nl1,ul1,nl2,nz,n real(kind=WP) :: Tx, Ty - real(kind=WP) :: tr_xynodes(2,mesh%nl-1,myDim_nod2D+eDim_nod2D), vd_flux(mesh%nl) + real(kind=WP) :: tr_xynodes(2,mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D), vd_flux(mesh%nl) real(kind=WP), pointer :: del_ttf(:,:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" del_ttf => tracers%work%del_ttf @@ -935,10 +972,10 @@ end subroutine diff_ver_part_redi_expl ! ! !=============================================================================== -subroutine diff_part_hor_redi(tr_num, tracers, mesh) +subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) use o_ARRAYS - use g_PARSUP use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_param use g_config @@ -946,6 +983,7 @@ subroutine diff_part_hor_redi(tr_num, tracers, mesh) integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit real(kind=WP) :: deltaX1,deltaY1,deltaX2,deltaY2 integer :: edge integer :: n2,nl1,ul1,nl2,ul2,nl12,ul12,nz,el(2),elnodes(3),n,enodes(2) @@ -954,7 +992,10 @@ subroutine diff_part_hor_redi(tr_num, tracers, mesh) real(kind=WP) :: isredi=0._WP real(kind=WP), pointer :: del_ttf(:,:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" del_ttf => tracers%work%del_ttf @@ -1088,10 +1129,10 @@ end subroutine diff_part_hor_redi ! ! !=============================================================================== -SUBROUTINE diff_part_bh(tr_num, tracers, mesh) +SUBROUTINE diff_part_bh(tr_num, tracers, partit, mesh) use o_ARRAYS - use g_PARSUP use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_param use g_config @@ -1101,11 +1142,15 @@ SUBROUTINE diff_part_bh(tr_num, tracers, mesh) integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit real(kind=WP) :: u1, v1, len, vi, tt, ww integer :: nz, ed, el(2), en(2), k, elem, nl1, ul1 real(kind=WP), allocatable :: temporary_ttf(:,:) real(kind=WP), pointer :: ttf(:,:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ttf => tracers%data(tr_num)%values ed=myDim_nod2D+eDim_nod2D @@ -1132,7 +1177,7 @@ SUBROUTINE diff_part_bh(tr_num, tracers, mesh) temporary_ttf(nz,en(2))=temporary_ttf(nz,en(2))+tt END DO END DO - call exchange_nod(temporary_ttf) + call exchange_nod(temporary_ttf, partit) ! =========== ! Second round: ! =========== @@ -1163,19 +1208,19 @@ end subroutine diff_part_bh !=============================================================================== ! this function returns a boundary conditions for a specified thacer ID and surface node ! ID = 0 and 1 are reserved for temperature and salinity -FUNCTION bc_surface(n, id, sval, mesh) +FUNCTION bc_surface(n, id, sval, partit) use MOD_MESH + use MOD_PARTIT USE o_ARRAYS USE g_forcing_arrays - USE g_PARSUP, only: mype, par_ex USE g_config implicit none - integer, intent(in) :: n, id - real(kind=WP), intent(in) :: sval - type(t_mesh), intent(in) , target :: mesh - REAL(kind=WP) :: bc_surface - character(len=10) :: id_string + integer, intent(in) :: n, id + real(kind=WP), intent(in) :: sval + type(t_partit),intent(inout), target :: partit + REAL(kind=WP) :: bc_surface + character(len=10) :: id_string ! --> is_nonlinfs=1.0 for zelvel,zstar .... ! --> is_nonlinfs=0.0 for linfs @@ -1196,12 +1241,12 @@ FUNCTION bc_surface(n, id, sval, mesh) CASE (303) bc_surface=0.0_WP CASE DEFAULT - if (mype==0) then + if (partit%mype==0) then write (id_string, "(I3)") id - if (mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified in boundary conditions' - if (mype==0) write(*,*) 'the model will stop!' + if (partit%mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified in boundary conditions' + if (partit%mype==0) write(*,*) 'the model will stop!' end if - call par_ex + call par_ex(partit) stop END SELECT RETURN diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index d13945542..ce2b8b0e6 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -1,8 +1,10 @@ module momentum_adv_scalar_interface interface - subroutine momentum_adv_scalar(mesh) + subroutine momentum_adv_scalar(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module @@ -10,13 +12,13 @@ subroutine momentum_adv_scalar(mesh) ! ! !_______________________________________________________________________________ -subroutine compute_vel_rhs(mesh) +subroutine compute_vel_rhs(partit, mesh) use MOD_MESH + use MOD_PARTIT use o_ARRAYS use i_ARRAYS use i_therm_param use o_PARAM - use g_PARSUP use g_CONFIG use g_forcing_param, only: use_virt_salt use g_forcing_arrays, only: press_air @@ -25,7 +27,8 @@ subroutine compute_vel_rhs(mesh) use momentum_adv_scalar_interface implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: elem, elnodes(3), nz, nzmax, nzmin real(kind=WP) :: ff, mm real(kind=WP) :: Fx, Fy, pre(3) @@ -33,8 +36,10 @@ subroutine compute_vel_rhs(mesh) real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" t1=MPI_Wtime() use_pice=0 @@ -114,7 +119,7 @@ subroutine compute_vel_rhs(mesh) if (mype==0) write(*,*) 'in moment not adapted mom_adv advection typ for ALE, check your namelist' call par_ex(1) elseif (mom_adv==2) then - call momentum_adv_scalar(mesh) + call momentum_adv_scalar(partit, mesh) end if t3=MPI_Wtime() @@ -151,22 +156,26 @@ END SUBROUTINE compute_vel_rhs ! Momentum advection on scalar control volumes with ALE adaption--> exchange zinv(nz) ! against hnode(nz,node) !_______________________________________________________________________________ -subroutine momentum_adv_scalar(mesh) +subroutine momentum_adv_scalar(partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE o_ARRAYS USE o_PARAM -USE g_PARSUP use g_comm_auto IMPLICIT NONE -type(t_mesh), intent(in) , target :: mesh +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit integer :: n, nz, el1, el2 integer :: nl1, nl2, ul1, ul2, nod(2), el, ed, k, nle, ule real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) -real(kind=WP) :: Unode_rhs(2,mesh%nl-1,myDim_nod2d+eDim_nod2D) +real(kind=WP) :: Unode_rhs(2,mesh%nl-1,partit%myDim_nod2d+partit%eDim_nod2D) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz @@ -327,7 +336,7 @@ subroutine momentum_adv_scalar(mesh) end do !-->do n=1,myDim_nod2d !___________________________________________________________________________ - call exchange_nod(Unode_rhs) + call exchange_nod(Unode_rhs, partit) !___________________________________________________________________________ ! convert total nodal advection from vertice --> elements diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 317041029..6943fb344 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -13,82 +13,102 @@ ! 5. Leith_c=? (need to be adjusted) module h_viscosity_leith_interface interface - subroutine h_viscosity_leith(mesh) + subroutine h_viscosity_leith(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module visc_filt_harmon_interface interface - subroutine visc_filt_harmon(mesh) + subroutine visc_filt_harmon(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module visc_filt_hbhmix_interface interface - subroutine visc_filt_hbhmix(mesh) + subroutine visc_filt_hbhmix(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module visc_filt_biharm_interface interface - subroutine visc_filt_biharm(option, mesh) + subroutine visc_filt_biharm(option, partit, mesh) use mod_mesh + use mod_partit integer :: option - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module visc_filt_bcksct_interface interface - subroutine visc_filt_bcksct(mesh) + subroutine visc_filt_bcksct(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module visc_filt_bilapl_interface interface - subroutine visc_filt_bilapl(mesh) + subroutine visc_filt_bilapl(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module visc_filt_bidiff_interface interface - subroutine visc_filt_bidiff(mesh) + subroutine visc_filt_bidiff(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module visc_filt_dbcksc_interface interface - subroutine visc_filt_dbcksc(mesh) + subroutine visc_filt_dbcksc(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module backscatter_coef_interface interface - subroutine backscatter_coef(mesh) + subroutine backscatter_coef(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module uke_update_interface interface - subroutine uke_update(mesh) + subroutine uke_update(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module @@ -98,20 +118,24 @@ subroutine uke_update(mesh) ! Contains routines needed for computations of dynamics. ! includes: update_vel, compute_vel_nodes ! =================================================================== -SUBROUTINE update_vel(mesh) +SUBROUTINE update_vel(partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE integer :: elem, elnodes(3), nz, m, nzmax, nzmin real(kind=WP) :: eta(3) real(kind=WP) :: Fx, Fy - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -127,21 +151,25 @@ SUBROUTINE update_vel(mesh) END DO END DO eta_n=eta_n+d_eta - call exchange_elem(UV) + call exchange_elem(UV, partit) end subroutine update_vel !========================================================================== -subroutine compute_vel_nodes(mesh) +subroutine compute_vel_nodes(partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_PARAM USE o_ARRAYS - USE g_PARSUP use g_comm_auto IMPLICIT NONE integer :: n, nz, k, elem, nln, uln, nle, ule real(kind=WP) :: tx, ty, tvol - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO n=1, myDim_nod2D uln = ulevels_nod2D(n) @@ -165,13 +193,13 @@ subroutine compute_vel_nodes(mesh) Unode(2,nz,n)=ty/tvol END DO END DO - call exchange_nod(Unode) + call exchange_nod(Unode, partit) end subroutine compute_vel_nodes !=========================================================================== -subroutine viscosity_filter(option, mesh) +subroutine viscosity_filter(option, partit, mesh) use o_PARAM -use g_PARSUP use MOD_MESH +USE MOD_PARTIT use h_viscosity_leith_interface use visc_filt_harmon_interface use visc_filt_hbhmix_interface @@ -182,8 +210,10 @@ subroutine viscosity_filter(option, mesh) use visc_filt_dbcksc_interface use backscatter_coef_interface IMPLICIT NONE -integer :: option -type(t_mesh), intent(in) , target :: mesh +integer :: option +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit + ! Driving routine ! Background viscosity is selected in terms of Vl, where V is ! background velocity scale and l is the resolution. V is 0.005 @@ -198,54 +228,58 @@ subroutine viscosity_filter(option, mesh) ! ==== ! Harmonic Leith parameterization ! ==== - call h_viscosity_leith(mesh) - call visc_filt_harmon(mesh) + call h_viscosity_leith(partit, mesh) + call visc_filt_harmon(partit, mesh) CASE (2) ! === ! Laplacian+Leith+biharmonic background ! === - call h_viscosity_leith(mesh) - call visc_filt_hbhmix(mesh) + call h_viscosity_leith(partit, mesh) + call visc_filt_hbhmix(partit, mesh) CASE (3) ! === ! Biharmonic Leith parameterization ! === - call h_viscosity_leith(mesh) - call visc_filt_biharm(2, mesh) + call h_viscosity_leith(partit, mesh) + call visc_filt_biharm(2, partit, mesh) CASE (4) ! === ! Biharmonic+upwind-type ! === - call visc_filt_biharm(1, mesh) + call visc_filt_biharm(1, partit, mesh) CASE (5) - call visc_filt_bcksct(mesh) + call visc_filt_bcksct(partit, mesh) CASE (6) - call visc_filt_bilapl(mesh) + call visc_filt_bilapl(partit, mesh) CASE (7) - call visc_filt_bidiff(mesh) + call visc_filt_bidiff(partit, mesh) CASE (8) - call backscatter_coef(mesh) - call visc_filt_dbcksc(mesh) + call backscatter_coef(partit, mesh) + call visc_filt_dbcksc(partit, mesh) CASE DEFAULT - if (mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' - call par_ex + if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' + call par_ex(partit) stop END SELECT end subroutine viscosity_filter ! =================================================================== -SUBROUTINE visc_filt_harmon(mesh) +SUBROUTINE visc_filt_harmon(partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE o_ARRAYS USE o_PARAM -USE g_PARSUP USE g_CONFIG IMPLICIT NONE real(kind=WP) :: u1, v1, le(2), len, vi integer :: nz, ed, el(2) , nzmin,nzmax -type(t_mesh), intent(in) , target :: mesh +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area @@ -272,11 +306,11 @@ SUBROUTINE visc_filt_harmon(mesh) END DO end subroutine visc_filt_harmon ! =================================================================== -SUBROUTINE visc_filt_biharm(option, mesh) +SUBROUTINE visc_filt_biharm(option, partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE @@ -286,9 +320,13 @@ SUBROUTINE visc_filt_biharm(option, mesh) real(kind=WP) :: u1, v1, vi, len integer :: ed, el(2), nz, option, nzmin, nzmax real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Filter is applied twice. ed=myDim_elem2D+eDim_elem2D @@ -350,8 +388,8 @@ SUBROUTINE visc_filt_biharm(option, mesh) end do end if - call exchange_elem(U_c) - call exchange_elem(V_c) + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) DO ed=1, myDim_edge2D+eDim_edge2D ! check if its a boudnary edge if(myList_edge2D(ed)>edge2D_in) cycle @@ -373,11 +411,11 @@ SUBROUTINE visc_filt_biharm(option, mesh) end subroutine visc_filt_biharm ! =================================================================== -SUBROUTINE visc_filt_hbhmix(mesh) +SUBROUTINE visc_filt_hbhmix(partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE @@ -389,9 +427,13 @@ SUBROUTINE visc_filt_hbhmix(mesh) real(kind=WP) :: u1, v1, vi, len, crosslen, le(2) integer :: ed, el(2), nz, nzmin, nzmax real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! Filter is applied twice. ed=myDim_elem2D+eDim_elem2D @@ -434,8 +476,8 @@ SUBROUTINE visc_filt_hbhmix(mesh) V_c(nz,ed)=-V_c(nz,ed)*vi END DO end do - call exchange_elem(U_c) - call exchange_elem(V_c) + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) DO ed=1, myDim_edge2D+eDim_edge2D ! check if its a boudnary edge if(myList_edge2D(ed)>edge2D_in) cycle @@ -458,13 +500,13 @@ SUBROUTINE visc_filt_hbhmix(mesh) end subroutine visc_filt_hbhmix ! =================================================================== -SUBROUTINE h_viscosity_leith(mesh) +SUBROUTINE h_viscosity_leith(partit, mesh) ! ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) USE MOD_MESH + USE MOD_PARTIT USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE @@ -472,12 +514,15 @@ SUBROUTINE h_viscosity_leith(mesh) integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 real(kind=WP) :: leithx, leithy real(kind=WP), allocatable :: aux(:,:) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! - if(mom_adv<4) call relative_vorticity(mesh) !!! vorticity array should be allocated + if(mom_adv<4) call relative_vorticity(partit, mesh) !!! vorticity array should be allocated ! Fill in viscosity: Visc = 0.0_WP DO elem=1, myDim_elem2D !! m=1, myDim_elem2D @@ -539,7 +584,7 @@ SUBROUTINE h_viscosity_leith(mesh) aux(nz,n)=vi/dz END DO END DO - call exchange_nod(aux) + call exchange_nod(aux, partit) do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) nl1=nlevels(elem)-1 @@ -556,15 +601,15 @@ SUBROUTINE h_viscosity_leith(mesh) END Do end do end do - call exchange_elem(Visc) + call exchange_elem(Visc, partit) deallocate(aux) END subroutine h_viscosity_leith ! ======================================================================= -SUBROUTINE visc_filt_bcksct(mesh) +SUBROUTINE visc_filt_bcksct(partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_CONFIG USE g_comm_auto IMPLICIT NONE @@ -572,9 +617,13 @@ SUBROUTINE visc_filt_bcksct(mesh) real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax real(kind=8), allocatable :: U_b(:,:), V_b(:,:), U_c(:,:), V_c(:,:) - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. @@ -610,8 +659,8 @@ SUBROUTINE visc_filt_bcksct(mesh) V_b(nz,el(2))=V_b(nz,el(2))+v1/elem_area(el(2)) END DO END DO - call exchange_elem(U_b) - call exchange_elem(V_b) + call exchange_elem(U_b, partit) + call exchange_elem(V_b, partit) ! =========== ! Compute smoothed viscous term: ! =========== @@ -633,8 +682,8 @@ SUBROUTINE visc_filt_bcksct(mesh) V_c(nz,ed)=v1/vi END DO END DO - call exchange_nod(U_c) - call exchange_nod(V_c) + call exchange_nod(U_c, partit) + call exchange_nod(V_c, partit) do ed=1, myDim_elem2D nelem=elem2D_nodes(:,ed) nzmin = ulevels(ed) @@ -655,20 +704,25 @@ end subroutine visc_filt_bcksct ! \nu=|3u_c-u_n1-u_n2-u_n3|*sqrt(S_c)/100. There is an additional term ! in viscosity that is proportional to the velocity amplitude squared. ! The coefficient has to be selected experimentally. -SUBROUTINE visc_filt_bilapl(mesh) +SUBROUTINE visc_filt_bilapl(partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_CONFIG USE g_comm_auto IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax real(kind=8), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" -! + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) U_c=0.0_WP @@ -704,8 +758,8 @@ SUBROUTINE visc_filt_bilapl(mesh) END DO end do - call exchange_elem(U_c) - call exchange_elem(V_c) + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) @@ -731,20 +785,24 @@ end subroutine visc_filt_bilapl ! On each edge, \nu=sqrt(|u_c1-u_c2|*sqrt(S_c1+S_c2)/100) ! The effect is \nu^2 ! Quadratic in velocity term can be introduced if needed. -SUBROUTINE visc_filt_bidiff(mesh) +SUBROUTINE visc_filt_bidiff(partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_CONFIG USE g_comm_auto IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax real(kind=8), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -772,8 +830,8 @@ SUBROUTINE visc_filt_bidiff(mesh) END DO END DO - call exchange_elem(U_c) - call exchange_elem(V_c) + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) @@ -802,11 +860,11 @@ end subroutine visc_filt_bidiff ! =================================================================== -SUBROUTINE visc_filt_dbcksc(mesh) +SUBROUTINE visc_filt_dbcksc(partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE o_ARRAYS USE o_PARAM -USE g_PARSUP USE g_CONFIG USE g_comm_auto USE g_support @@ -817,8 +875,13 @@ SUBROUTINE visc_filt_dbcksc(mesh) integer :: nz, ed, el(2) real(kind=8), allocatable :: U_c(:,:), V_c(:,:), UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) real(kind=8), allocatable :: uuu(:) -type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. @@ -868,8 +931,8 @@ SUBROUTINE visc_filt_dbcksc(mesh) end do - call exchange_elem(U_c) - call exchange_elem(V_c) + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle @@ -919,16 +982,16 @@ SUBROUTINE visc_filt_dbcksc(mesh) END DO END DO -call exchange_elem(UV_back) +call exchange_elem(UV_back, partit) DO nz=1, nl-1 uuu=0.0_8 uuu=UV_back(1,nz,:) - call smooth_elem(uuu,smooth_back_tend, mesh) + call smooth_elem(uuu,smooth_back_tend, partit, mesh) UV_back(1,nz,:)=uuu uuu=0.0_8 uuu=UV_back(2,nz,:) - call smooth_elem(uuu,smooth_back_tend, mesh) + call smooth_elem(uuu,smooth_back_tend, partit, mesh) UV_back(2,nz,:)=uuu END DO @@ -944,7 +1007,7 @@ SUBROUTINE visc_filt_dbcksc(mesh) UV_back_tend=UV_back uke_dif=uke_d - call uke_update(mesh) + call uke_update(partit, mesh) deallocate(V_c,U_c) deallocate(UV_dis,UV_back) deallocate(uke_d) @@ -953,17 +1016,21 @@ SUBROUTINE visc_filt_dbcksc(mesh) end subroutine visc_filt_dbcksc !=========================================================================== -SUBROUTINE backscatter_coef(mesh) +SUBROUTINE backscatter_coef(partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE o_ARRAYS USE o_PARAM -USE g_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE -type(t_mesh), intent(in) , target :: mesh -integer :: elem, nz -#include "associate_mesh.h" +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +integer :: elem, nz +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !Potentially add the Rossby number scaling to the script... !check if sign is right! Different in the Jansen paper @@ -979,16 +1046,16 @@ SUBROUTINE backscatter_coef(mesh) END DO END DO -call exchange_elem(v_back) +call exchange_elem(v_back, partit) end subroutine backscatter_coef !=========================================================================== -SUBROUTINE uke_update(mesh) +SUBROUTINE uke_update(partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE o_ARRAYS USE o_PARAM -USE g_PARSUP USE g_CONFIG use g_comm_auto USE g_support @@ -999,18 +1066,19 @@ SUBROUTINE uke_update(mesh) !Why is it necessary to implement the length of the array? It doesn't work without! !integer, intent(in) :: t_levels -type(t_mesh), intent(in) , target :: mesh -!real(kind=8), dimension(:,:,:), intent(in) :: UV_dis, UV_back -!real(kind=8), dimension(:,:), intent(in) :: UV_dif -!real(kind=8), intent(in) :: UV_dis(nl-1,myDim_elem2D+eDim_elem2D), UV_back(nl-1, myDim_elem2D+eDim_elem2D) -!real(kind=8), intent(in) :: UV_dif(nl-1,myDim_elem2D+eDim_elem2D) +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit real(kind=8) :: hall, h1_eta, hnz, vol integer :: elnodes(3), nz, ed, edi, node, j, elem, q real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) integer :: kk, nzmax, el real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered !rossby_num=2 @@ -1029,7 +1097,7 @@ SUBROUTINE uke_update(mesh) DO nz=1,nl-1 uuu=0.0_8 uuu=uke_back(nz,:) - call smooth_elem(uuu,smooth_back, mesh) !3) ? + call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? uke_back(nz,:)=uuu END DO @@ -1042,7 +1110,7 @@ SUBROUTINE uke_update(mesh) allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) allocate(work_uv(myDim_nod2D+eDim_nod2D)) allocate(rosb_array(nl-1,ed)) -call exchange_elem(UV) +call exchange_elem(UV, partit) rosb_array=0._8 DO nz=1, nl-1 work_uv=0._WP @@ -1060,10 +1128,10 @@ SUBROUTINE uke_update(mesh) V_work(nz,node)=U_work(nz,node)/vol END DO work_uv=U_work(nz,:) - call exchange_nod(work_uv) + call exchange_nod(work_uv, partit) U_work(nz,:)=work_uv work_uv=V_work(nz,:) - call exchange_nod(work_uv) + call exchange_nod(work_uv, partit) V_work(nz,:)=work_uv END DO @@ -1131,11 +1199,10 @@ SUBROUTINE uke_update(mesh) deallocate(U_work, V_work) deallocate(rosb_array) deallocate(work_uv) -call exchange_elem(uke_dis) -!call exchange_elem(uke_dif) +call exchange_elem(uke_dis, partit) DO nz=1, nl-1 uuu=uke_dis(nz,:) - call smooth_elem(uuu,smooth_dis, mesh) + call smooth_elem(uuu,smooth_dis, partit, mesh) uke_dis(nz,:)=uuu END DO DO ed=1, myDim_elem2D @@ -1145,7 +1212,7 @@ SUBROUTINE uke_update(mesh) uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) END DO END DO -call exchange_elem(uke) +call exchange_elem(uke, partit) deallocate(uuu) end subroutine uke_update diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index f83eb04b3..4da9ea2c1 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -5,22 +5,26 @@ ! fer_gamma2vel ! fer_compute_C_K ! this subroutine shall be a subject of future tuning (with respect to fer_k) !=========================================================================== -subroutine fer_solve_Gamma(mesh) +subroutine fer_solve_Gamma(partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_PARAM USE o_ARRAYS, ONLY: sigma_xy, fer_gamma, bvfreq, fer_c, fer_K - USE g_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh integer :: nz, n, nzmax, nzmin real(kind=WP) :: zinv1,zinv2, zinv, m, r real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(2,mesh%nl) real(kind=WP), dimension(:,:), pointer :: tr -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO n=1,myDim_nod2D tr=>fer_gamma(:,:,n) @@ -116,17 +120,17 @@ subroutine fer_solve_Gamma(mesh) end do END DO !!! cycle over nodes - call exchange_nod(fer_gamma) + call exchange_nod(fer_gamma, partit) END subroutine fer_solve_Gamma ! ! ! !==================================================================== -subroutine fer_gamma2vel(mesh) +subroutine fer_gamma2vel(partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_PARAM USE o_ARRAYS, ONLY: fer_gamma, fer_uv - USE g_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE @@ -134,9 +138,13 @@ subroutine fer_gamma2vel(mesh) integer :: nz, nzmax, el, elnod(3), nzmin real(kind=WP) :: zinv real(kind=WP) :: onethird=1._WP/3._WP - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO el=1, myDim_elem2D elnod=elem2D_nodes(:,el) @@ -150,21 +158,22 @@ subroutine fer_gamma2vel(mesh) fer_uv(2,nz,el)=sum(fer_gamma(2,nz,elnod)-fer_gamma(2,nz+1,elnod))*zinv END DO END DO - call exchange_elem(fer_uv) + call exchange_elem(fer_uv, partit) end subroutine fer_gamma2vel ! ! ! !=============================================================================== -subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi +subroutine init_Redi_GM(partit, mesh) !fer_compute_C_K_Redi USE MOD_MESH USE o_PARAM USE o_ARRAYS, ONLY: fer_c, fer_k, fer_scal, Ki, bvfreq, MLD1_ind, neutral_slope, coriolis_node - USE g_PARSUP + USE MOD_PARTIT USE g_CONFIG use g_comm_auto IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n, nz, nzmax, nzmin real(kind=WP) :: reso, c1, rosb, scaling, rr_ratio, aux_zz(mesh%nl) real(kind=WP) :: x0=1.5_WP, sigma=.15_WP ! Fermi function parameters to cut off GM where Rossby radius is resolved @@ -172,7 +181,10 @@ subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi real(kind=WP) :: zscaling(mesh%nl) real(kind=WP) :: bvref -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! fill arrays for 3D Redi and GM coefficients: F1(xy)*F2(z) !******************************* F1(x,y) *********************************** @@ -334,8 +346,8 @@ subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi end if end do - if (Fer_GM) call exchange_nod(fer_c) - if (Fer_GM) call exchange_nod(fer_k) - if (Redi) call exchange_nod(Ki) + if (Fer_GM) call exchange_nod(fer_c, partit) + if (Fer_GM) call exchange_nod(fer_k, partit) + if (Redi) call exchange_nod(Ki, partit) end subroutine init_Redi_GM !==================================================================== diff --git a/src/oce_local.F90 b/src/oce_local.F90 index 3c4793e3e..76fed1b6e 100755 --- a/src/oce_local.F90 +++ b/src/oce_local.F90 @@ -1,22 +1,28 @@ module com_global2local_interface interface - subroutine com_global2local(mesh) + subroutine com_global2local(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module !============================================================================= -SUBROUTINE com_global2local(mesh) -USE g_PARSUP +SUBROUTINE com_global2local(partit, mesh) use MOD_MESH +use MOD_PARTIT IMPLICIT NONE -type(t_mesh), intent(in) , target :: mesh +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit + INTEGER :: n, m INTEGER, ALLOCATABLE, DIMENSION(:) :: temp +#include "associate_part_def.h" +#include "associate_part_ass.h" #include "associate_mesh_ini.h" allocate(temp(max(nod2D, elem2D))) @@ -116,15 +122,16 @@ SUBROUTINE com_global2local(mesh) deallocate(temp) END SUBROUTINE com_global2local !============================================================================= -SUBROUTINE save_dist_mesh(mesh) +SUBROUTINE save_dist_mesh(partit, mesh) USE g_CONFIG USE MOD_MESH - USE o_ARRAYS - USE g_PARSUP + USE MOD_PARTIT + USE o_ARRAYS use com_global2local_interface IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit Integer n, m, q, q2, counter, fileID, nend, nini,ed(2) character*10 mype_string,npes_string character(MAX_PATH) file_name @@ -132,7 +139,8 @@ SUBROUTINE save_dist_mesh(mesh) integer, allocatable, dimension(:) :: temp, ncount integer n1, n2, flag, eledges(4) -#include "associate_mesh_ini.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" !!$ allocate(temp(nod2D)) ! serves for mapping !!$ allocate(ncount(npes+1)) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 7d37a8372..f063ba2d3 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -1,56 +1,70 @@ module read_mesh_interface interface - subroutine read_mesh(mesh) + subroutine read_mesh(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module find_levels_interface interface - subroutine find_levels(mesh) + subroutine find_levels(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module find_levels_cavity_interface interface - subroutine find_levels_cavity(mesh) + subroutine find_levels_cavity(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module test_tri_interface interface - subroutine test_tri(mesh) + subroutine test_tri(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module load_edges_interface interface - subroutine load_edges(mesh) + subroutine load_edges(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module find_neighbors_interface interface - subroutine find_neighbors(mesh) + subroutine find_neighbors(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module mesh_areas_interface interface - subroutine mesh_areas(mesh) + subroutine mesh_areas(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module @@ -58,43 +72,51 @@ module elem_center_interface interface subroutine elem_center(elem, x, y, mesh) use mod_mesh - integer :: elem + use mod_partit + integer :: elem real(kind=WP) :: x, y - type(t_mesh), intent(inout) , target :: mesh + type(t_mesh), intent(inout), target :: mesh end subroutine end interface end module module edge_center_interface interface subroutine edge_center(n1, n2, x, y, mesh) - USE MOD_MESH + use mod_mesh + use mod_partit integer :: n1, n2 real(kind=WP) :: x, y - type(t_mesh), intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh end subroutine end interface end module module mesh_auxiliary_arrays_interface interface - subroutine mesh_auxiliary_arrays(mesh) + subroutine mesh_auxiliary_arrays(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module find_levels_min_e2n_interface interface - subroutine find_levels_min_e2n(mesh) + subroutine find_levels_min_e2n(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module check_total_volume_interface interface - subroutine check_total_volume(mesh) + subroutine check_total_volume(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module @@ -105,9 +127,9 @@ subroutine check_total_volume(mesh) ! At the beginning of each routine I list arrays it initializes. ! Array sizes vary (sometimes we need only myDim, yet sometimes more)! ! S. Danilov, 2012 -SUBROUTINE mesh_setup(mesh) +SUBROUTINE mesh_setup(partit, mesh) USE MOD_MESH -USE g_parsup +USE MOD_PARTIT USE g_ROTATE_grid use read_mesh_interface use find_levels_interface @@ -119,42 +141,44 @@ SUBROUTINE mesh_setup(mesh) use find_neighbors_interface use mesh_areas_interface IMPLICIT NONE + type(t_mesh), intent(inout) :: mesh + type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(inout) :: mesh +write(*,*) 'CP 1' call set_mesh_transform_matrix !(rotated grid) - call read_mesh(mesh) - call set_par_support(mesh) -!!PS call find_levels(mesh) + call read_mesh(partit, mesh) + call set_par_support(partit, mesh) +!!PS call find_levels(partit, mesh) !!PS -!!PS if (use_cavity) call find_levels_cavity(mesh) +!!PS if (use_cavity) call find_levels_cavity(partit, mesh) !!PS - call test_tri(mesh) - call load_edges(mesh) - call find_neighbors(mesh) +write(*,*) 'CP 2' + call test_tri(partit, mesh) + call load_edges(partit, mesh) + call find_neighbors(partit, mesh) - call find_levels(mesh) - if (use_cavity) call find_levels_cavity(mesh) + call find_levels(partit, mesh) + if (use_cavity) call find_levels_cavity(partit, mesh) - call find_levels_min_e2n(mesh) - call mesh_areas(mesh) - call mesh_auxiliary_arrays(mesh) + call find_levels_min_e2n(partit, mesh) + call mesh_areas(partit, mesh) + call mesh_auxiliary_arrays(partit, mesh) END SUBROUTINE mesh_setup !====================================================================== ! Reads distributed mesh ! The mesh will be read only by 0 proc and broadcasted to the others. -SUBROUTINE read_mesh(mesh) +SUBROUTINE read_mesh(partit, mesh) USE o_PARAM USE g_CONFIG USE MOD_MESH +USE MOD_PARTIT USE o_ARRAYS -USE g_PARSUP USE g_rotate_grid IMPLICIT NONE - -type(t_mesh), intent(inout), target :: mesh - +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit integer :: n, nn, k, m, fileID integer :: error_status !0/1=no error/error integer :: vert_nodes(1000) @@ -173,8 +197,8 @@ SUBROUTINE read_mesh(mesh) integer fileunit, iostat character(32) mesh_checksum -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" !mesh related files will be read in chunks of chunk_size chunk_size=100000 @@ -237,23 +261,25 @@ SUBROUTINE read_mesh(mesh) open(fileID, file=trim(file_name)) read(fileID,*) n - read(fileID,*) myDim_nod2D - read(fileID,*) eDim_nod2D - allocate(myList_nod2D(myDim_nod2D+eDim_nod2D)) + read(fileID,*) partit%myDim_nod2D + read(fileID,*) partit%eDim_nod2D + allocate(partit%myList_nod2D(partit%myDim_nod2D+partit%eDim_nod2D)) read(fileID,*) myList_nod2D - read(fileID,*) myDim_elem2D - read(fileID,*) eDim_elem2D - read(fileID,*) eXDim_elem2D - allocate(myList_elem2D(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) + read(fileID,*) partit%myDim_elem2D + read(fileID,*) partit%eDim_elem2D + read(fileID,*) partit%eXDim_elem2D + allocate(partit%myList_elem2D(partit%myDim_elem2D+partit%eDim_elem2D+partit%eXDim_elem2D)) read(fileID,*) myList_elem2D - read(fileID,*) myDim_edge2D - read(fileID,*) eDim_edge2D - allocate(myList_edge2D(myDim_edge2D+eDim_edge2D)) - read(fileID,*) myList_edge2D ! m + read(fileID,*) partit%myDim_edge2D + read(fileID,*) partit%eDim_edge2D + allocate(partit%myList_edge2D(partit%myDim_edge2D+partit%eDim_edge2D)) + read(fileID,*) partit%myList_edge2D ! m close(fileID) +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (mype==0) write(*,*) 'myLists are read' !============================== @@ -696,15 +722,16 @@ END subroutine read_mesh ! load fesom2.0 mesh files: nlvls.out and elvls.out that are created during the ! partitioning !_______________________________________________________________________________ -subroutine find_levels(mesh) +subroutine find_levels(partit, mesh) use MOD_MESH + USE MOD_PARTIT use o_PARAM - use g_PARSUP use g_config ! implicit none ! - type(t_mesh), intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit character(len=MAX_PATH) :: file_name integer :: ierror ! MPI return error code integer :: k, n, fileID @@ -712,10 +739,8 @@ subroutine find_levels(mesh) integer, allocatable, dimension(:) :: mapping integer, allocatable, dimension(:) :: ibuff real(kind=WP) :: t0, t1 - -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" - +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() !___________________________________________________________________________ allocate(mesh%nlevels(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) @@ -898,15 +923,16 @@ end subroutine find_levels ! cavity_elvls.out that are created during the partitioning when namelist.config flag ! use_cavity=.True. !_______________________________________________________________________________ -subroutine find_levels_cavity(mesh) +subroutine find_levels_cavity(partit, mesh) use MOD_MESH + USE MOD_PARTIT use o_PARAM - use g_PARSUP use g_config ! implicit none ! - type(t_mesh), intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit character(MAX_PATH) :: file_name integer :: ierror ! MPI return error code integer :: k, n, fileID @@ -917,8 +943,8 @@ subroutine find_levels_cavity(mesh) logical :: file_exist=.False. integer :: elem, elnodes(3), ule, uln(3), node, j, nz integer, allocatable, dimension(:) :: numelemtonode -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() !___________________________________________________________________________ @@ -1329,21 +1355,22 @@ end subroutine find_levels_cavity ! cavity_elvls.out that are created during the partitioning when namelist.config flag ! use_cavity=.True. !_______________________________________________________________________________ -subroutine find_levels_min_e2n(mesh) +subroutine find_levels_min_e2n(partit, mesh) use MOD_MESH + USE MOD_PARTIT use o_PARAM - use g_PARSUP use g_config use g_comm_auto ! implicit none ! - type(t_mesh), intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: node, k real(kind=WP) :: t0, t1 -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() !___________________________________________________________________________ @@ -1355,8 +1382,8 @@ subroutine find_levels_min_e2n(mesh) mesh%nlevels_nod2D_min(node)=minval(mesh%nlevels(mesh%nod_in_elem2D(1:k,node))) mesh%ulevels_nod2D_max(node)=maxval(mesh%ulevels(mesh%nod_in_elem2D(1:k,node))) end do - call exchange_nod(mesh%nlevels_nod2D_min) - call exchange_nod(mesh%ulevels_nod2D_max) + call exchange_nod(mesh%nlevels_nod2D_min, partit) + call exchange_nod(mesh%ulevels_nod2D_max, partit) !___________________________________________________________________________ t1=MPI_Wtime() @@ -1370,20 +1397,25 @@ end subroutine find_levels_min_e2n ! ! !=========================================================================== -SUBROUTINE test_tri(mesh) +SUBROUTINE test_tri(partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE o_PARAM -USE g_PARSUP USE g_CONFIG use g_rotate_grid IMPLICIT NONE ! Check the order of nodes in triangles; correct it if necessary to make ! it same sense (clockwise) -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit real(kind=WP) :: a(2), b(2), c(2), r integer :: n, nx, elnodes(3) real(kind=WP) :: t0, t1 +#include "associate_part_def.h" +#include "associate_part_ass.h" + + t0=MPI_Wtime() DO n=1, myDim_elem2D @@ -1416,13 +1448,14 @@ SUBROUTINE test_tri(mesh) END SUBROUTINE test_tri !========================================================================= -SUBROUTINE load_edges(mesh) +SUBROUTINE load_edges(partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE o_PARAM -USE g_PARSUP USE g_CONFIG IMPLICIT NONE -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit character(MAX_PATH) :: file_name integer :: counter, n, m, nn, k, q, fileID integer :: elems(2), elem @@ -1434,8 +1467,8 @@ SUBROUTINE load_edges(mesh) integer, allocatable, dimension(:,:) :: ibuff integer :: ierror ! return error code -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() @@ -1647,7 +1680,7 @@ SUBROUTINE load_edges(mesh) END SUBROUTINE load_edges !=========================================================================== -SUBROUTINE find_neighbors(mesh) +SUBROUTINE find_neighbors(partit, mesh) ! For each element three its element neighbors are found ! For each node the elements containing it are found ! Allocated are: @@ -1658,20 +1691,21 @@ SUBROUTINE find_neighbors(mesh) USE o_PARAM USE MOD_MESH -USE g_PARSUP +USE MOD_PARTIT USE g_ROTATE_grid use g_comm_auto use elem_center_interface implicit none -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit integer :: elem, eledges(3), elem1, j, n, node, enum,elems(3),count1,count2,exit_flag,i,nz integer, allocatable :: temp_i(:) -integer :: mymax(npes), rmax(npes) +integer :: mymax(partit%npes), rmax(partit%npes) real(kind=WP) :: gx,gy,rx,ry real(kind=WP) :: t0, t1 -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_part_ass.h" CALL MPI_BARRIER(MPI_COMM_FESOM, MPIerr) t0=MPI_Wtime() @@ -1727,7 +1761,7 @@ SUBROUTINE find_neighbors(mesh) end do end do - call exchange_nod(mesh%nod_in_elem2D_num) + call exchange_nod(mesh%nod_in_elem2D_num, partit) allocate (temp_i(myDim_nod2D+eDim_nod2D)) temp_i=0 DO n=1, maxval(rmax) @@ -1735,7 +1769,7 @@ SUBROUTINE find_neighbors(mesh) do j=1,myDim_nod2D if (mesh%nod_in_elem2D(n,j)>0) temp_i(j)=myList_elem2D(mesh%nod_in_elem2D(n,j)) enddo - call exchange_nod(temp_i) + call exchange_nod(temp_i, partit) mesh%nod_in_elem2D(n,:)=temp_i END DO deallocate(temp_i) @@ -1802,9 +1836,10 @@ subroutine edge_center(n1, n2, x, y, mesh) USE o_PARAM USE g_CONFIG implicit none -integer :: n1, n2 ! nodes of the edge -real(kind=WP) :: x, y, a(2), b(2) -type(t_mesh), intent(inout), target :: mesh +integer :: n1, n2 ! nodes of the edge +real(kind=WP), intent(inout) :: x, y +type(t_mesh), intent(in) :: mesh +real(kind=WP) :: a(2), b(2) a=mesh%coord_nod2D(:,n1) b=mesh%coord_nod2D(:,n2) @@ -1820,13 +1855,13 @@ subroutine elem_center(elem, x, y, mesh) USE o_PARAM USE g_CONFIG implicit none -integer :: elem, elnodes(3), k -real(kind=WP) :: x, y, ax(3), amin - -type(t_mesh), intent(inout), target :: mesh +real(kind=WP), intent(inout) :: x, y +type(t_mesh), intent(in) :: mesh +integer :: elem, elnodes(3), k +real(kind=WP) :: ax(3), amin elnodes=mesh%elem2D_nodes(:,elem) - ax=mesh%coord_nod2D(1, elnodes) + ax=mesh%coord_nod2D(1, elnodes) amin=minval(ax) DO k=1,3 if(ax(k)-amin>=cyclic_length/2.0_WP) ax(k)=ax(k)-cyclic_length @@ -1837,11 +1872,11 @@ subroutine elem_center(elem, x, y, mesh) end subroutine elem_center !========================================================================== -SUBROUTINE mesh_areas(mesh) +SUBROUTINE mesh_areas(partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_PARAM USE o_arrays, only: dum_3d_n - USE g_PARSUP USE g_ROTATE_GRID use g_comm_auto IMPLICIT NONE @@ -1855,10 +1890,12 @@ SUBROUTINE mesh_areas(mesh) real(kind=WP), allocatable,dimension(:) :: work_array integer, allocatable,dimension(:,:) :: cavity_contribut real(kind=WP) :: t0, t1 - type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit - !NR Cannot include the pointers before the targets are allocated... - !NR #include "associate_mesh.h" + +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() @@ -1892,7 +1929,7 @@ SUBROUTINE mesh_areas(mesh) b(1)=b(1)*ay mesh%elem_area(n)=0.5_WP*abs(a(1)*b(2)-b(1)*a(2)) end do - call exchange_elem(mesh%elem_area) + call exchange_elem(mesh%elem_area, partit) !___compute areas of upper/lower scalar cell edge___________________________ ! areas at different levels (there can be partly land) @@ -1994,8 +2031,8 @@ SUBROUTINE mesh_areas(mesh) mesh%area = mesh%area *r_earth*r_earth mesh%areasvol = mesh%areasvol *r_earth*r_earth - call exchange_nod(mesh%area) - call exchange_nod(mesh%areasvol) + call exchange_nod(mesh%area, partit) + call exchange_nod(mesh%areasvol, partit) !___compute inverse area____________________________________________________ mesh%area_inv = 0.0_WP @@ -2053,7 +2090,7 @@ SUBROUTINE mesh_areas(mesh) do n=1,myDim_nod2D mesh%mesh_resolution(n)=work_array(n) end do - call exchange_nod(mesh%mesh_resolution) + call exchange_nod(mesh%mesh_resolution, partit) end do deallocate(work_array) @@ -2094,7 +2131,7 @@ END SUBROUTINE mesh_areas !=================================================================== -SUBROUTINE mesh_auxiliary_arrays(mesh) +SUBROUTINE mesh_auxiliary_arrays(partit, mesh) ! Collects auxiliary information needed to speed up computations ! of gradients, div. This also makes implementation of cyclicity ! much more straightforward @@ -2108,9 +2145,9 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) ! coriolis(myDim_elem2D) USE MOD_MESH +USE MOD_PARTIT USE o_PARAM USE i_PARAM -USE g_PARSUP USE o_ARRAYS USE g_ROTATE_grid use g_comm_auto @@ -2125,10 +2162,11 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) real(kind=WP), allocatable :: center_x(:), center_y(:), temp(:) real(kind=WP) :: t0, t1 integer :: i, nn, ns -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() allocate(mesh%edge_dxdy(2,myDim_edge2D+eDim_edge2D)) @@ -2183,10 +2221,10 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) mesh%metric_factor=tan(ay)/r_earth END DO - call exchange_elem(mesh%metric_factor) - call exchange_elem(mesh%elem_cos) - call exchange_elem(center_x) - call exchange_elem(center_y) + call exchange_elem(mesh%metric_factor, partit) + call exchange_elem(mesh%elem_cos, partit) + call exchange_elem(center_x, partit) + call exchange_elem(center_y, partit) if (cartesian) then mesh%elem_cos=1.0_WP mesh%metric_factor=0.0_WP @@ -2453,10 +2491,10 @@ END SUBROUTINE mesh_auxiliary_arrays ! ! !_______________________________________________________________________________ -SUBROUTINE check_mesh_consistency(mesh) +SUBROUTINE check_mesh_consistency(partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE o_PARAM -USE g_PARSUP USE g_ROTATE_GRID use g_comm_auto IMPLICIT NONE @@ -2464,10 +2502,12 @@ SUBROUTINE check_mesh_consistency(mesh) ! Allocated and filled in are: ! elem_area(myDim_elem2D) ! area(nl, myDim_nod2D) -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit integer :: nz, n, elem , elnodes(3) real(kind=WP) :: vol_n(mesh%nl), vol_e(mesh%nl), aux(mesh%nl) - +#include "associate_part_def.h" +#include "associate_part_ass.h" vol_n=0._WP vol_e=0._WP @@ -2506,19 +2546,23 @@ END SUBROUTINE check_mesh_consistency ! ! !_______________________________________________________________________________ -subroutine check_total_volume(mesh) +subroutine check_total_volume(partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE o_PARAM - USE g_PARSUP use g_comm_auto use o_ARRAYS IMPLICIT NONE - type(t_mesh), intent(inout), target :: mesh - integer :: nz, n, elem , elnodes(3) - real(kind=WP) :: vol_n, vol_e, aux + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: nz, n, elem , elnodes(3) + real(kind=WP) :: vol_n, vol_e, aux -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ vol_n=0._WP diff --git a/src/oce_mo_conv.F90 b/src/oce_mo_conv.F90 index f9ddac0c4..cdcff4857 100644 --- a/src/oce_mo_conv.F90 +++ b/src/oce_mo_conv.F90 @@ -1,20 +1,25 @@ ! ! !_______________________________________________________________________________ -subroutine mo_convect(mesh) +subroutine mo_convect(partit, mesh) USE o_PARAM USE MOD_MESH + USE MOD_PARTIT USE o_ARRAYS - USE g_PARSUP USE g_config use i_arrays use g_comm_auto IMPLICIT NONE - integer :: node, elem, nz, elnodes(3), nzmin, nzmax - type(t_mesh), intent(in) , target :: mesh + integer :: node, elem, nz, elnodes(3), nzmin, nzmax + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" -#include "associate_mesh.h" !___________________________________________________________________________ ! add vertical mixing scheme of Timmermann and Beckmann, 2004,"Parameterization ! of vertical mixing in the Weddell Sea! diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index 29970add2..8b73c738f 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -1,9 +1,11 @@ module find_up_downwind_triangles_interface interface - subroutine find_up_downwind_triangles(twork, mesh) + subroutine find_up_downwind_triangles(twork, partit, mesh) use MOD_MESH + use MOD_PARTIT use MOD_TRACER type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer_work), intent(inout), target :: twork end subroutine end interface @@ -26,12 +28,12 @@ subroutine find_up_downwind_triangles(twork, mesh) ! find_up_downwind_triangles ! fill_up_dn_grad ! adv_tracer_muscl -subroutine muscl_adv_init(twork, mesh) +subroutine muscl_adv_init(twork, partit, mesh) use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_ARRAYS use o_PARAM - use g_PARSUP use g_comm_auto use g_config use find_up_downwind_triangles_interface @@ -40,13 +42,17 @@ subroutine muscl_adv_init(twork, mesh) integer :: nz type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer_work), intent(inout), target :: twork -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! find upwind and downwind triangle for each local edge - call find_up_downwind_triangles(twork, mesh) + call find_up_downwind_triangles(twork, partit, mesh) !___________________________________________________________________________ nn_size=0 @@ -112,12 +118,12 @@ end SUBROUTINE muscl_adv_init ! ! !_______________________________________________________________________________ -SUBROUTINE find_up_downwind_triangles(twork, mesh) +SUBROUTINE find_up_downwind_triangles(twork, partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE MOD_TRACER USE o_ARRAYS USE o_PARAM -USE g_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE @@ -127,8 +133,12 @@ SUBROUTINE find_up_downwind_triangles(twork, mesh) integer, allocatable :: temp_i(:), e_nodes(:,:) type(t_mesh), intent(in) , target :: mesh +type(t_partit), intent(inout), target :: partit type(t_tracer_work), intent(inout), target :: twork -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" allocate(twork%edge_up_dn_tri(2,myDim_edge2D)) allocate(twork%edge_up_dn_grad(4,nl-1,myDim_edge2D)) @@ -144,7 +154,7 @@ SUBROUTINE find_up_downwind_triangles(twork, mesh) do el=1,myDim_elem2D temp(el)=coord_nod2D(k,elem2D_nodes(n,el)) end do - call exchange_elem(temp) + call exchange_elem(temp, partit) coord_elem(k,n,:)=temp(:) END DO END DO @@ -156,7 +166,7 @@ SUBROUTINE find_up_downwind_triangles(twork, mesh) do el=1,myDim_elem2D temp_i(el)=myList_nod2D(elem2D_nodes(n,el)) end do - call exchange_elem(temp_i) + call exchange_elem(temp_i, partit) e_nodes(n,:)=temp_i(:) END DO deallocate(temp_i) @@ -274,19 +284,23 @@ end SUBROUTINE find_up_downwind_triangles ! ! !_______________________________________________________________________________ -SUBROUTINE fill_up_dn_grad(twork, mesh) +SUBROUTINE fill_up_dn_grad(twork, partit, mesh) ! ttx, tty elemental gradient of tracer USE o_PARAM USE MOD_MESH +USE MOD_PARTIT USE MOD_TRACER USE o_ARRAYS -USE g_PARSUP IMPLICIT NONE integer :: n, nz, elem, k, edge, ednodes(2), nzmin, nzmax real(kind=WP) :: tvol, tx, ty type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit type(t_tracer_work), intent(inout), target :: twork -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over edge segments diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index ba3ec8ac7..0d83734d6 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -1,39 +1,47 @@ module oce_initial_state_interface interface - subroutine oce_initial_state(tracers, mesh) - use mod_mesh + subroutine oce_initial_state(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT use mod_tracer type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module module tracer_init_interface interface - subroutine tracer_init(tracers, mesh) - use mod_mesh + subroutine tracer_init(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT use mod_tracer type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module module ocean_setup_interface interface - subroutine ocean_setup(tracers, mesh) - use mod_mesh + subroutine ocean_setup(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT use mod_tracer type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module module before_oce_step_interface interface - subroutine before_oce_step(tracers, mesh) - use mod_mesh + subroutine before_oce_step(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT use mod_tracer type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers end subroutine end interface @@ -42,11 +50,11 @@ subroutine before_oce_step(tracers, mesh) ! ! !_______________________________________________________________________________ -subroutine ocean_setup(tracers, mesh) +subroutine ocean_setup(tracers, partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE MOD_TRACER USE o_PARAM -USE g_PARSUP USE o_ARRAYS USE g_config USE g_forcing_param, only: use_virt_salt @@ -60,6 +68,7 @@ subroutine ocean_setup(tracers, mesh) use oce_adv_tra_fct_interfaces IMPLICIT NONE type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers integer :: n !___setup virt_salt_flux____________________________________________________ @@ -79,13 +88,13 @@ subroutine ocean_setup(tracers, mesh) !___________________________________________________________________________ ! initialize arrays for ALE - if (mype==0) then + if (partit%mype==0) then write(*,*) '____________________________________________________________' write(*,*) ' --> initialise ALE arrays + sparse SSH stiff matrix' write(*,*) end if - call init_ale(mesh) - call init_stiff_mat_ale(mesh) !!PS test + call init_ale(partit, mesh) + call init_stiff_mat_ale(partit, mesh) !!PS test !___________________________________________________________________________ ! initialize arrays from cvmix library for CVMIX_KPP, CVMIX_PP, CVMIX_TKE, @@ -107,39 +116,39 @@ subroutine ocean_setup(tracers, mesh) case ('cvmix_TKE+cvmix_IDEMIX') ; mix_scheme_nmb = 56 case default stop "!not existing mixing scheme!" - call par_ex + call par_ex(partit) end select ! initialise fesom1.4 like KPP if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then - call oce_mixing_kpp_init(mesh) + call oce_mixing_kpp_init(partit, mesh) ! initialise fesom1.4 like PP elseif (mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then ! initialise cvmix_KPP elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then - call init_cvmix_kpp(mesh) + call init_cvmix_kpp(partit, mesh) ! initialise cvmix_PP elseif (mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then - call init_cvmix_pp(mesh) + call init_cvmix_pp(partit, mesh) ! initialise cvmix_TKE elseif (mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then - call init_cvmix_tke(mesh) + call init_cvmix_tke(partit, mesh) endif ! initialise additional mixing cvmix_IDEMIX --> only in combination with ! cvmix_TKE+cvmix_IDEMIX or stand alone for debbuging as cvmix_TKE if (mod(mix_scheme_nmb,10)==6) then - call init_cvmix_idemix(mesh) + call init_cvmix_idemix(partit, mesh) ! initialise additional mixing cvmix_TIDAL --> only in combination with ! KPP+cvmix_TIDAL, PP+cvmix_TIDAL, cvmix_KPP+cvmix_TIDAL, cvmix_PP+cvmix_TIDAL ! or stand alone for debbuging as cvmix_TIDAL elseif (mod(mix_scheme_nmb,10)==7) then - call init_cvmix_tidal(mesh) + call init_cvmix_tidal(partit, mesh) end if !___________________________________________________________________________ @@ -150,18 +159,18 @@ subroutine ocean_setup(tracers, mesh) ! compute for all cavity points (ulevels_nod2D>1), which is the closest ! cavity line point to that point --> use their coordinates and depth --> ! use for extrapolation of init state under cavity - if (use_cavity) call compute_nrst_pnt2cavline(mesh) + if (use_cavity) call compute_nrst_pnt2cavline(partit, mesh) - if (use_density_ref) call init_ref_density(mesh) + if (use_density_ref) call init_ref_density(partit, mesh) !___________________________________________________________________________ - if(mype==0) write(*,*) 'Arrays are set' + if(partit%mype==0) write(*,*) 'Arrays are set' !if(open_boundary) call set_open_boundary !TODO - call oce_adv_tra_fct_init(tracers%work, mesh) - call muscl_adv_init(tracers%work, mesh) !!PS test + call oce_adv_tra_fct_init(tracers%work, partit, mesh) + call muscl_adv_init(tracers%work, partit, mesh) !!PS test !===================== ! Initialize fields ! A user-defined routine has to be called here! @@ -170,13 +179,13 @@ subroutine ocean_setup(tracers, mesh) SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase if (mod(mstep, soufflet_forc_update)==0) then - call initial_state_soufflet(tracers, mesh) - call compute_zonal_mean_ini(mesh) - call compute_zonal_mean(tracers, mesh) + call initial_state_soufflet(tracers, partit, mesh) + call compute_zonal_mean_ini(partit, mesh) + call compute_zonal_mean(tracers, partit, mesh) end if END SELECT else - call oce_initial_state(tracers, mesh) ! Use it if not running tests + call oce_initial_state(tracers, partit, mesh) ! Use it if not running tests end if if (.not.r_restart) then @@ -187,16 +196,16 @@ subroutine ocean_setup(tracers, mesh) !___________________________________________________________________________ ! first time fill up array for hnode & helem - if (mype==0) then + if (partit%mype==0) then write(*,*) '____________________________________________________________' write(*,*) ' --> call init_thickness_ale' write(*,*) end if - call init_thickness_ale(mesh) + call init_thickness_ale(partit, mesh) !___________________________________________________________________________ - if(mype==0) write(*,*) 'Initial state' - if (w_split .and. mype==0) then + if(partit%mype==0) write(*,*) 'Initial state' + if (w_split .and. partit%mype==0) then write(*,*) '******************************************************************************' write(*,*) 'vertical velocity will be split onto explicit and implicit constitutes;' write(*,*) 'maximum allowed CDF on explicit W is set to: ', w_max_cfl @@ -204,11 +213,11 @@ subroutine ocean_setup(tracers, mesh) end if end subroutine ocean_setup !_______________________________________________________________________________ -SUBROUTINE tracer_init(tracers, mesh) +SUBROUTINE tracer_init(tracers, partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE MOD_TRACER USE DIAGNOSTICS, only: ldiag_DVD -USE g_PARSUP USE g_ic3d IMPLICIT NONE integer :: elem_size, node_size @@ -221,6 +230,7 @@ SUBROUTINE tracer_init(tracers, mesh) real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra type(t_mesh), intent(in) , target :: mesh +type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers type(nml_tracer_list_type), target, allocatable :: nml_tracer_list(:) @@ -228,7 +238,10 @@ SUBROUTINE tracer_init(tracers, mesh) namelist /tracer_list / nml_tracer_list namelist /tracer_general / smooth_bh_tra, gamma0_tra, gamma1_tra, gamma2_tra, i_vert_diff -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! OPEN and read namelist for I/O open( unit=nm_unit, file='namelist.tra', form='formatted', access='sequential', status='old', iostat=iost ) @@ -236,7 +249,7 @@ SUBROUTINE tracer_init(tracers, mesh) if (mype==0) WRITE(*,*) ' file : ', 'namelist.tra',' open ok' else if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.tra',' ; iostat=',iost - call par_ex + call par_ex(partit) stop end if @@ -297,11 +310,11 @@ END SUBROUTINE tracer_init ! ! !_______________________________________________________________________________ -SUBROUTINE arrays_init(num_tracers, mesh) +SUBROUTINE arrays_init(num_tracers, partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE o_ARRAYS USE o_PARAM -USE g_PARSUP use g_comm_auto use g_config use g_forcing_arrays @@ -309,11 +322,15 @@ SUBROUTINE arrays_init(num_tracers, mesh) USE g_forcing_param, only: use_virt_salt use diagnostics, only: ldiag_dMOC, ldiag_DVD IMPLICIT NONE -integer :: elem_size, node_size -integer :: n -integer, intent(in) :: num_tracers -type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +integer :: elem_size, node_size +integer :: n +integer, intent(in) :: num_tracers +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" elem_size=myDim_elem2D+eDim_elem2D @@ -432,7 +449,7 @@ SUBROUTINE arrays_init(num_tracers, mesh) ! Ki(n)=K_hor*area(1,n)/scale_area Ki(:,n)=K_hor*(mesh_resolution(n)/100000.0_WP)**2 end do -call exchange_nod(Ki) +call exchange_nod(Ki, partit) neutral_slope=0.0_WP slope_tapered=0.0_WP @@ -549,11 +566,11 @@ END SUBROUTINE arrays_init !_______________________________________________________________________________ ! Here the 3D tracers will be initialized. Initialization strategy depends on a tracer ID. ! ID = 0 and 1 are reserved for temperature and salinity -SUBROUTINE oce_initial_state(tracers, mesh) +SUBROUTINE oce_initial_state(tracers, partit, mesh) USE MOD_MESH +USE MOD_PARTIT USE MOD_TRACER USE o_ARRAYS -USE g_PARSUP USE g_config USE g_ic3d ! @@ -563,10 +580,14 @@ SUBROUTINE oce_initial_state(tracers, mesh) integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers real(kind=WP) :: loc, max_temp, min_temp, max_salt, min_salt -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (mype==0) write(*,*) tracers%num_tracers, ' tracers will be used in FESOM' if (mype==0) write(*,*) 'tracer IDs are: ', tracers%data(1:tracers%num_tracers)%ID @@ -575,7 +596,7 @@ SUBROUTINE oce_initial_state(tracers, mesh) ! this must be always done! First two tracers with IDs 0 and 1 are the temperature and salinity. if(mype==0) write(*,*) 'read Temperatur climatology from:', trim(filelist(1)) if(mype==0) write(*,*) 'read Salt climatology from:', trim(filelist(2)) - call do_ic3d(tracers, mesh) + call do_ic3d(tracers, partit, mesh) Tclim=tracers%data(1)%values Sclim=tracers%data(2)%values @@ -700,7 +721,7 @@ SUBROUTINE oce_initial_state(tracers, mesh) if (mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified for '//trim(i_string)//' th tracer!!!' if (mype==0) write(*,*) 'the model will stop!' end if - call par_ex + call par_ex(partit) stop END SELECT END DO @@ -709,26 +730,30 @@ end subroutine oce_initial_state ! !========================================================================== ! Here we do things (if applicable) before the ocean timestep will be made -SUBROUTINE before_oce_step(tracers, mesh) +SUBROUTINE before_oce_step(tracers, partit, mesh) USE MOD_MESH + USE MOD_PARTIT USE MOD_TRACER USE o_ARRAYS - USE g_PARSUP USE g_config USE Toy_Channel_Soufflet implicit none integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (toy_ocean) then SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase if (mod(mstep, soufflet_forc_update)==0) then - call compute_zonal_mean(tracers, mesh) + call compute_zonal_mean(tracers, partit, mesh) end if END SELECT end if diff --git a/src/oce_shortwave_pene.F90 b/src/oce_shortwave_pene.F90 index 36d1b4b32..f18e926ca 100644 --- a/src/oce_shortwave_pene.F90 +++ b/src/oce_shortwave_pene.F90 @@ -1,13 +1,13 @@ -subroutine cal_shortwave_rad(mesh) +subroutine cal_shortwave_rad(partit, mesh) ! This routine is inherited from FESOM 1.4 and adopted appropreately. It calculates ! shortwave penetration into the ocean assuming the constant chlorophyll concentration. ! No penetration under the ice is applied. A decent way for ice region is to be discussed. ! This routine should be called after ice2oce coupling done if ice model is used. ! Ref.: Morel and Antoine 1994, Sweeney et al. 2005 USE MOD_MESH + USE MOD_PARTIT USE o_PARAM USE o_ARRAYS - USE g_PARSUP USE g_CONFIG use g_forcing_arrays use g_comm_auto @@ -20,9 +20,13 @@ subroutine cal_shortwave_rad(mesh) real(kind=WP):: swsurf, aux real(kind=WP):: c, c2, c3, c4, c5 real(kind=WP):: v1, v2, sc1, sc2 - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" sw_3d=0.0_WP !_____________________________________________________________________________ diff --git a/src/oce_spp.F90 b/src/oce_spp.F90 index df627380e..c4b181b80 100644 --- a/src/oce_spp.F90 +++ b/src/oce_spp.F90 @@ -8,10 +8,10 @@ ! Ref: Duffy1997, Duffy1999, Nguyen2009 ! Originaly coded by Qiang Wang in FESOM 1.4 !-------------------------------------------------------- -subroutine cal_rejected_salt(mesh) -use g_parsup +subroutine cal_rejected_salt(partit, mesh) use o_arrays use mod_mesh +use mod_partit use g_comm_auto use o_tracers use g_forcing_arrays, only: thdgr @@ -22,9 +22,13 @@ subroutine cal_rejected_salt(mesh) integer :: row real(kind=WP) :: aux -type(t_mesh), intent(in), target :: mesh +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(in), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" aux=rhoice/rhowat*dt do row=1, myDim_nod2d +eDim_nod2D! myDim is sufficient @@ -41,12 +45,12 @@ end subroutine cal_rejected_salt ! !---------------------------------------------------------------------------- ! -subroutine app_rejected_salt(ttf, mesh) - use g_parsup +subroutine app_rejected_salt(ttf, partit, mesh) use o_arrays use mod_mesh - use g_comm_auto + use mod_partit use o_tracers + use g_comm_auto implicit none integer :: row, k, nod, nup, nlo, kml, nzmin, nzmax @@ -58,10 +62,14 @@ subroutine app_rejected_salt(ttf, mesh) data n_distr /5/ data rho_cri /0.4_WP/ !kg/m3 !SH !Duffy1999 - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent (inout) :: ttf(mesh%nl-1,myDim_nod2D+eDim_nod2D) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(in), target :: partit + real(kind=WP), intent (inout) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" do row=1,myDim_nod2d+eDim_nod2D ! myDim is sufficient if (ulevels_nod2D(row)>1) cycle diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index e3ee059b0..67c9b7b26 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -2,14 +2,18 @@ MODULE o_tracers USE MOD_MESH USE MOD_TRACER +USE MOD_PARTIT IMPLICIT NONE interface - subroutine tracer_gradient_z(ttf, mesh) - use g_PARSUP, only: myDim_nod2D, eDim_nod2D - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: ttf(mesh%nl-1,myDim_nod2D+eDim_nod2D) + subroutine tracer_gradient_z(ttf, partit, mesh) + USE MOD_MESH + USE MOD_TRACER + USE MOD_PARTIT + IMPLICIT NONE + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) end subroutine end interface @@ -17,20 +21,25 @@ subroutine tracer_gradient_z(ttf, mesh) ! ! !======================================================================= -SUBROUTINE tracer_gradient_elements(ttf, mesh) - !computes elemental gradient of tracer - +SUBROUTINE tracer_gradient_elements(ttf, partit, mesh) + !computes elemental gradient of tracer + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER USE o_PARAM USE o_ARRAYS - USE g_PARSUP IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: ttf(mesh%nl-1,myDim_nod2D+eDim_nod2D) - integer :: elem, elnodes(3) - integer :: n, nz, nzmin, nzmax + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + integer :: elem, elnodes(3) + integer :: n, nz, nzmin, nzmax -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -46,16 +55,17 @@ END SUBROUTINE tracer_gradient_elements ! ! !======================================================================================== -SUBROUTINE init_tracers_AB(tr_num, tracers, mesh) +SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER use g_config, only: flag_debug - use g_parsup use o_arrays use g_comm_auto - use mod_mesh - use mod_tracer IMPLICIT NONE integer, intent(in) :: tr_num - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers integer :: n,nz !filling work arrays @@ -64,40 +74,46 @@ SUBROUTINE init_tracers_AB(tr_num, tracers, mesh) !AB interpolation tracers%data(tr_num)%valuesAB(:,:)=-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:,:)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:,:) - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tracers%data(tr_num)%valuesAB, mesh) - call exchange_elem_begin(tr_xy) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' + call tracer_gradient_elements(tracers%data(tr_num)%valuesAB, partit, mesh) + call exchange_elem_begin(tr_xy, partit) - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' - call tracer_gradient_z(tracers%data(tr_num)%values, mesh) !WHY NOT AB HERE? DSIDOREN! - call exchange_elem_end() ! tr_xy used in fill_up_dn_grad - call exchange_nod_begin(tr_z) ! not used in fill_up_dn_grad + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' + call tracer_gradient_z(tracers%data(tr_num)%values, partit, mesh) !WHY NOT AB HERE? DSIDOREN! + call exchange_elem_end(partit) ! tr_xy used in fill_up_dn_grad + call exchange_nod_begin(tr_z, partit) ! not used in fill_up_dn_grad - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call fill_up_dn_grad'//achar(27)//'[0m' - call fill_up_dn_grad(tracers%work, mesh) - call exchange_nod_end() ! tr_z halos should have arrived by now. + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call fill_up_dn_grad'//achar(27)//'[0m' + call fill_up_dn_grad(tracers%work, partit, mesh) + call exchange_nod_end(partit) ! tr_z halos should have arrived by now. - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tracers%data(tr_num)%values, mesh) !redefine tr_arr to the current timestep - call exchange_elem(tr_xy) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' + call tracer_gradient_elements(tracers%data(tr_num)%values, partit, mesh) !redefine tr_arr to the current timestep + call exchange_elem(tr_xy, partit) END SUBROUTINE init_tracers_AB ! ! !======================================================================================== -SUBROUTINE relax_to_clim(tr_num, tracers, mesh) +SUBROUTINE relax_to_clim(tr_num, tracers, partit, mesh) use g_config,only: dt - USE g_PARSUP use o_arrays + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER IMPLICIT NONE integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers integer :: n,nz, nzmin, nzmax real(kind=WP), dimension(:,:), pointer :: trarr -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" trarr=>tracers%data(tr_num)%values(:,:) if ((clim_relax>1.0e-8_WP).and.(tracers%data(tr_num)%ID==1)) then @@ -123,20 +139,25 @@ END MODULE o_tracers ! ! !======================================================================================== -SUBROUTINE tracer_gradient_z(ttf, mesh) +SUBROUTINE tracer_gradient_z(ttf, partit, mesh) !computes vertical gradient of tracer - USE o_PARAM USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER + USE o_PARAM USE o_ARRAYS - USE g_PARSUP USE g_CONFIG IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: ttf(mesh%nl-1,myDim_nod2D+eDim_nod2D) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: dz integer :: n, nz, nzmin, nzmax -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO n=1, myDim_nod2D+eDim_nod2D !!PS nlev=nlevels_nod2D(n) diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index 46881e065..1b5a1694b 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -1,8 +1,10 @@ module relative_vorticity_interface interface - subroutine relative_vorticity(mesh) + subroutine relative_vorticity(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + use mod_partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module @@ -11,17 +13,20 @@ subroutine relative_vorticity(mesh) ! (curl u+f)\times u+grad(u^2/2)+w du/dz ! ! =================================================================== -subroutine relative_vorticity(mesh) +subroutine relative_vorticity(partit, mesh) USE o_ARRAYS USE MOD_MESH - USE g_PARSUP + USE MOD_PARTIT use g_comm_auto IMPLICIT NONE integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !!PS DO n=1,myDim_nod2D !!PS nl1 = nlevels_nod2D(n)-1 @@ -96,30 +101,35 @@ subroutine relative_vorticity(mesh) END DO END DO - call exchange_nod(vorticity) + call exchange_nod(vorticity, partit) ! Now it the relative vorticity known on neighbors too end subroutine relative_vorticity ! ========================================================================== -subroutine compute_vel_rhs_vinv(mesh) !vector invariant +subroutine compute_vel_rhs_vinv(partit, mesh) !vector invariant USE o_PARAM USE o_ARRAYS USE MOD_MESH - USE g_PARSUP + USE MOD_PARTIT USE g_CONFIG use g_comm_auto use relative_vorticity_interface IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n, n1, nz, elem, elnodes(3), nl1, j, nzmin,nzmax real(kind=WP) :: a, b, c, da, db, dc, dg, ff(3), gg, eta(3), pre(3), Fx, Fy,w real(kind=WP) :: uvert(mesh%nl,2), umean, vmean, friction logical, save :: lfirst=.true. - real(kind=WP) :: KE_node(mesh%nl-1,myDim_nod2D+eDim_nod2D) + real(kind=WP) :: KE_node(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv real(kind=WP) :: density0_inv = 1./density_0 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + w = 0.0_WP uvert=0.0_WP @@ -165,7 +175,7 @@ subroutine compute_vel_rhs_vinv(mesh) !vector invariant endif end DO - call exchange_nod(KE_node) + call exchange_nod(KE_node, partit) ! Now gradients of KE will be correct on myDim_elem2D ! ================== @@ -182,7 +192,7 @@ subroutine compute_vel_rhs_vinv(mesh) !vector invariant END DO END DO - call relative_vorticity(mesh) + call relative_vorticity(partit, mesh) ! ==================== ! Sea level and pressure contribution -\nabla(g\eta +hpressure/rho_0+V^2/2) ! and the Coriolis force (elemental part) diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index 3f799eeca..fa64b9e13 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -1,9 +1,9 @@ MODULE Toy_Channel_Soufflet - use mod_mesh - use mod_tracer + USE MOD_MESH + USE MOD_PARTIT + USE MOD_TRACER USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_config use g_comm_auto @@ -43,12 +43,16 @@ MODULE Toy_Channel_Soufflet ! !-------------------------------------------------------------------------------------------- ! -subroutine relax_zonal_vel(mesh) +subroutine relax_zonal_vel(partit, mesh) implicit none integer :: elem, nz, nn, nn1 real(kind=WP) :: a, yy, uzon type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" DO elem=1, myDim_elem2D ! ======== @@ -72,18 +76,20 @@ subroutine relax_zonal_vel(mesh) UV_rhs(1,nz,elem) = UV_rhs(1,nz,elem)+dt*tau_inv*(Uclim(nz,elem)-Uzon) END DO END DO - call exchange_elem(UV_rhs) - + call exchange_elem(UV_rhs, partit) end subroutine relax_zonal_vel !========================================================================== -subroutine relax_zonal_temp(tdata, mesh) +subroutine relax_zonal_temp(tdata, partit, mesh) implicit none integer :: n, nz, nn, nn1 real(kind=WP) :: yy, a, Tzon type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer_data), intent(inout), target :: tdata - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" do n=1, myDim_nod2D+eDim_nod2D yy=coord_nod2D(2,n)-lat0 @@ -105,14 +111,17 @@ subroutine relax_zonal_temp(tdata, mesh) end do end subroutine relax_zonal_temp !========================================================================== -subroutine compute_zonal_mean_ini(mesh) +subroutine compute_zonal_mean_ini(partit, mesh) implicit none real(kind=8) :: ymean, Ly integer :: elem, nz, m, elnodes(3) - real(kind=8), allocatable :: zvel1D(:), znum1D(:) + real(kind=8), allocatable :: zvel1D(:), znum1D(:) type(t_mesh), intent(in), target :: mesh - -#include "associate_mesh.h" + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" Ly=ysize/r_earth ! The meridional lenght in radians dy=Ly/real(nybins) @@ -161,15 +170,17 @@ subroutine compute_zonal_mean_ini(mesh) ! no division by 0 is occurring end subroutine compute_zonal_mean_ini !========================================================================== -subroutine compute_zonal_mean(tracers, mesh) +subroutine compute_zonal_mean(tracers, partit, mesh) implicit none integer :: elem, nz, m, elnodes(3) real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers - -#include "associate_mesh.h" - + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ztem=0. zvel=0. @@ -223,10 +234,11 @@ subroutine compute_zonal_mean(tracers, mesh) end subroutine compute_zonal_mean ! ==================================================================================== -subroutine initial_state_soufflet(tracers, mesh) +subroutine initial_state_soufflet(tracers, partit, mesh) ! Profiles Soufflet 2016 (OM) implicit none type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers integer :: n, nz, elnodes(3) @@ -234,7 +246,10 @@ subroutine initial_state_soufflet(tracers, mesh) ! real(kind=8) :: Ljet,rhomax,Sb, drho_No, drho_So ! real(kind=8) :: z_No, z_So,dz_No,dz_So, drhosurf_No, drhosurf_So, zsurf real(kind=8) :: d_No(mesh%nl-1), d_So(mesh%nl-1), rho_No(mesh%nl-1), rho_So(mesh%nl-1) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" dy=ysize/nybins/r_earth @@ -332,20 +347,26 @@ subroutine initial_state_soufflet(tracers, mesh) UV(1,nz,n)=UV(1,nz+1,n)+d_No(nz+1)*(zbar(nz+1)-Z(nz+1))+d_No(nz)*(Z(nz)-zbar(nz+1)) END DO END DO - call exchange_elem(UV) + call exchange_elem(UV, partit) allocate(Uclim(nl-1,myDim_elem2D+eDim_elem2D)) Uclim=UV(1,:,:) write(*,*) mype, 'Vel', maxval(UV(1,:,:)), minval(UV(1,:,:)) END subroutine initial_state_soufflet ! =============================================================================== -subroutine energy_out_soufflet(mesh) +subroutine energy_out_soufflet(partit, mesh) implicit none real(kind=8) :: tke(2), aux(2), ww, wwaux integer :: elem, nz, m, elnodes(3), nybins - real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + real(kind=8), allocatable :: zvel1D(:), znum1D(:) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + nybins=100 zvel=0. diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 68b7960cf..ffd6e86dc 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,34 +1,38 @@ module write_step_info_interface interface - subroutine write_step_info(istep,outfreq,tracers,mesh) + subroutine write_step_info(istep,outfreq,tracers,partit,mesh) use MOD_MESH + use MOD_PARTIT use MOD_TRACER - integer :: istep,outfreq - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + integer :: istep,outfreq + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module module check_blowup_interface interface - subroutine check_blowup(istep, tracers, mesh) + subroutine check_blowup(istep, tracers,partit,mesh) use MOD_MESH + use MOD_PARTIT use MOD_TRACER - integer :: istep - type(t_tracer), intent(in), target :: tracers - type(t_mesh), intent(in), target :: mesh + integer :: istep + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module ! ! !=============================================================================== -subroutine write_step_info(istep, outfreq, tracers, mesh) +subroutine write_step_info(istep, outfreq, tracers, partit, mesh) use g_config, only: dt, use_ice use MOD_MESH + use MOD_PARTIT use MOD_TRACER use o_PARAM - use g_PARSUP use o_ARRAYS use i_ARRAYS use g_comm_auto @@ -45,9 +49,13 @@ subroutine write_step_info(istep, outfreq, tracers, mesh) max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av real(kind=WP) :: int_deta , int_dhbar real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (mod(istep,outfreq)==0) then !_______________________________________________________________________ @@ -231,12 +239,12 @@ end subroutine write_step_info ! ! !=============================================================================== -subroutine check_blowup(istep, tracers, mesh) +subroutine check_blowup(istep, tracers, partit, mesh) use g_config, only: logfile_outfreq, which_ALE use MOD_MESH use MOD_TRACER + use MOD_PARTIT use o_PARAM - use g_PARSUP use o_ARRAYS use i_ARRAYS use g_comm_auto @@ -246,11 +254,15 @@ subroutine check_blowup(istep, tracers, mesh) use write_step_info_interface implicit none - integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 - integer :: el, elidx - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers -#include "associate_mesh.h" + integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 + integer :: el, elidx + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then ! ! if (mype==0) then @@ -493,7 +505,7 @@ subroutine check_blowup(istep, tracers, mesh) ! moment only over CPU mype==0 call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) if (found_blowup==1) then - call write_step_info(istep,1,tracers,mesh) + call write_step_info(istep,1,tracers,partit,mesh) if (mype==0) then call sleep(1) write(*,*) @@ -513,7 +525,7 @@ subroutine check_blowup(istep, tracers, mesh) write(*,*) ' _____.,-#%&$@%#&#~,._____' write(*,*) end if - call blowup(istep, tracers, mesh) + call blowup(istep, tracers, partit, mesh) if (mype==0) write(*,*) ' --> finished writing blow up file' call par_ex endif From 0d6f70b9d2cab0a24565772d0a2490bc8f787ca2 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 7 Oct 2021 22:33:46 +0200 Subject: [PATCH 387/909] surprisingly but it runs...some problems with IO and threads but otherwise ...just works --- src/associate_mesh_ass.h | 1 + src/fvom_main.F90 | 23 ++++++++++++---------- src/io_meandata.F90 | 3 ++- src/oce_ale.F90 | 26 +++++++++++++------------ src/oce_mesh.F90 | 42 ++++++++++++++++++---------------------- 5 files changed, 49 insertions(+), 46 deletions(-) diff --git a/src/associate_mesh_ass.h b/src/associate_mesh_ass.h index 591aef4a4..ebeb51a51 100644 --- a/src/associate_mesh_ass.h +++ b/src/associate_mesh_ass.h @@ -4,6 +4,7 @@ edge2D => mesh%edge2D edge2D_in => mesh%edge2D_in ocean_area => mesh%ocean_area nl => mesh%nl +nn_size => mesh%nn_size coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 8d9ff12d4..2b23fc667 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -118,7 +118,7 @@ program main ! and additional arrays needed for ! fancy advection etc. !===================== - call check_mesh_consistency(mesh) + call check_mesh_consistency(partit, mesh) if (mype==0) t2=MPI_Wtime() call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") @@ -129,7 +129,7 @@ program main write(*,*) 'FESOM ocean_setup... complete' t3=MPI_Wtime() endif - call forcing_setup(mesh) + call forcing_setup(partit, mesh) if (mype==0) t4=MPI_Wtime() if (use_ice) then @@ -197,6 +197,7 @@ program main endif DUMP_DIR='DUMP/' + INQUIRE(file=trim(dump_dir), EXIST=L_EXISTS) if (.not. L_EXISTS) call system('mkdir '//trim(dump_dir)) write (dump_filename, "(A7,I7.7)") "t_mesh.", mype @@ -217,8 +218,8 @@ program main ! read (mype+300) tracers_copy ! close (mype+300) -call par_ex(partit) -stop +!call par_ex(partit) +!stop ! ! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value @@ -249,7 +250,6 @@ program main if (use_global_tides) then call foreph_ini(yearnew, month, partit) end if - do n=1, nsteps if (use_global_tides) then call foreph(partit, mesh) @@ -264,10 +264,9 @@ program main #if defined (__oifs) || defined (__oasis) seconds_til_now=INT(dt)*(n-1) #endif - call clock - + call clock !___compute horizontal velocity on nodes (originaly on elements)________ - call compute_vel_nodes(mesh) + call compute_vel_nodes(partit, mesh) !___model sea-ice step__________________________________________________ t1 = MPI_Wtime() if(use_ice) then @@ -299,15 +298,19 @@ program main t2 = MPI_Wtime() !___model ocean step____________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' + call oce_timestep_ale(n, tracers, partit, mesh) + t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' call compute_diagnostics(1, tracers, partit, mesh) + t4 = MPI_Wtime() !___prepare output______________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' call output (n, tracers, partit, mesh) + t5 = MPI_Wtime() call restart(n, .false., .false., tracers, partit, mesh) t6 = MPI_Wtime() @@ -320,7 +323,7 @@ program main end do call finalize_output() - + !___FINISH MODEL RUN________________________________________________________ call MPI_Barrier(MPI_COMM_FESOM, MPIERR) @@ -329,7 +332,7 @@ program main runtime_alltimesteps = real(t1-t0,real32) write(*,*) 'FESOM Run is finished, updating clock' endif - + mean_rtime(1) = rtime_oce mean_rtime(2) = rtime_oce_mixpres mean_rtime(3) = rtime_oce_dyn diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 98ac5b228..fe287b3c5 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -770,7 +770,6 @@ subroutine write_mean(entry, entry_index) end if end do end if - end subroutine @@ -927,7 +926,9 @@ subroutine do_output_callback(entry_index) entry%mypartit%mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) call write_mean(entry, entry_index) +write(*,*) 1111111 if(entry%mypartit%mype == entry%root_rank) call assert_nf( nf_sync(entry%ncid), __LINE__ ) ! flush the file to disk after each write +write(*,*) 2222222 end subroutine diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 95c70702c..c3161c063 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2685,7 +2685,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) #include "associate_mesh_ass.h" t0=MPI_Wtime() - + ! water_flux = 0.0_WP ! heat_flux = 0.0_WP ! stress_surf= 0.0_WP @@ -2695,7 +2695,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) ! calculate equation of state, density, pressure and mixed layer depths if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call pressure_bv'//achar(27)//'[0m' call pressure_bv(tracers, partit, mesh) !!!!! HeRE change is made. It is linear EoS now. - + !___________________________________________________________________________ ! calculate calculate pressure gradient force if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call pressure_force_4_...'//achar(27)//'[0m' @@ -2704,18 +2704,19 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) else call pressure_force_4_zxxxx(tracers, partit, mesh) end if + !___________________________________________________________________________ ! calculate alpha and beta ! it will be used for KPP, Redi, GM etc. Shall we keep it on in general case? call sw_alpha_beta(tracers%data(1)%values, tracers%data(2)%values, partit, mesh) - + ! computes the xy gradient of a neutral surface; will be used by Redi, GM etc. call compute_sigma_xy(tracers%data(1)%values,tracers%data(2)%values, partit, mesh) - + ! compute both: neutral slope and tapered neutral slope. Can be later combined with compute_sigma_xy ! will be primarily used for computing Redi diffusivities. etc? - call compute_neutral_slope(partit, partit, mesh) - + call compute_neutral_slope(partit, mesh) + !___________________________________________________________________________ ! call status_check(partit) !___________________________________________________________________________ @@ -2740,7 +2741,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_idemix'//achar(27)//'[0m' call calc_cvmix_idemix(partit, mesh) end if - + !___MAIN MIXING SCHEMES_____________________________________________________ ! use FESOM2.0 tuned k-profile parameterization for vertical mixing if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then @@ -2780,7 +2781,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) call mo_convect(partit, mesh) end if - + !___EXTENSION OF MIXING SCHEMES_____________________________________________ ! add CVMIX TIDAL mixing scheme of Simmons et al. 2004 "Tidally driven mixing ! in a numerical model of the ocean general circulation", ocean modelling to @@ -2802,7 +2803,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) !!PS if (any(ssh_rhs/=ssh_rhs)) write(*,*) n, mype,' --> found NaN ssh_rhs before compute_vel_rhs' !!PS if (any(ssh_rhs_old/=ssh_rhs_old)) write(*,*) n, mype,' --> found NaN ssh_rhs_old before compute_vel_rhs' !!PS if (any(abs(Wvel_e)>1.0e20)) write(*,*) n, mype,' --> found Inf Wvel_e before compute_vel_rhs' - + if(mom_adv/=3) then call compute_vel_rhs(partit, mesh) else @@ -2827,7 +2828,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) ! ssh_rhs=-alpha*\nabla\int(U_n+U_rhs)dz-(1-alpha)*... ! see "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (18) rhs call compute_ssh_rhs_ale(partit, mesh) - + ! Take updated ssh matrix and solve --> new ssh! t30=MPI_Wtime() call solve_ssh_ale(partit, mesh) @@ -2846,7 +2847,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) ! Update to hbar(n+3/2) and compute dhe to be used on the next step if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_hbar_ale'//achar(27)//'[0m' call compute_hbar_ale(partit, mesh) - + !___________________________________________________________________________ ! - Current dynamic elevation alpha*hbar(n+1/2)+(1-alpha)*hbar(n-1/2) ! equation (14) Danlov et.al "the finite volume sea ice ocean model FESOM2 @@ -2882,7 +2883,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call vert_vel_ale'//achar(27)//'[0m' call vert_vel_ale(partit, mesh) t7=MPI_Wtime() - + !___________________________________________________________________________ ! solve tracer equation if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call solve_tracers_ale'//achar(27)//'[0m' @@ -2902,6 +2903,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) ! togeather around 2.5% of model runtime call check_blowup(n, tracers, partit, mesh) t10=MPI_Wtime() + !___________________________________________________________________________ ! write out execution times for ocean step parts rtime_oce = rtime_oce + (t10-t0)-(t10-t9) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index f063ba2d3..932a6ccd4 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -144,16 +144,9 @@ SUBROUTINE mesh_setup(partit, mesh) type(t_mesh), intent(inout) :: mesh type(t_partit), intent(inout), target :: partit - -write(*,*) 'CP 1' call set_mesh_transform_matrix !(rotated grid) call read_mesh(partit, mesh) call set_par_support(partit, mesh) -!!PS call find_levels(partit, mesh) -!!PS -!!PS if (use_cavity) call find_levels_cavity(partit, mesh) -!!PS -write(*,*) 'CP 2' call test_tri(partit, mesh) call load_edges(partit, mesh) call find_neighbors(partit, mesh) @@ -190,15 +183,18 @@ SUBROUTINE read_mesh(partit, mesh) character(len=MAX_PATH) :: dist_mesh_dir integer :: flag_wrongaux3d=0 integer :: ierror ! return error code - integer, allocatable, dimension(:) :: mapping - integer, allocatable, dimension(:,:) :: ibuff + integer, allocatable, dimension(:) :: mapping + integer, allocatable, dimension(:,:) :: ibuff real(kind=WP), allocatable, dimension(:,:) :: rbuff - integer, allocatable, dimension(:,:) :: auxbuff ! will be used for reading aux3d.out - integer fileunit, iostat - character(32) mesh_checksum + integer, allocatable, dimension(:,:) :: auxbuff ! will be used for reading aux3d.out + integer :: fileunit, iostat + character(32) :: mesh_checksum #include "associate_part_def.h" #include "associate_mesh_def.h" +mype=>partit%mype +npes=>partit%npes +MPI_COMM_FESOM=>partit%MPI_COMM_FESOM !mesh related files will be read in chunks of chunk_size chunk_size=100000 @@ -208,14 +204,12 @@ SUBROUTINE read_mesh(partit, mesh) !============================== allocate(mapping(chunk_size)) allocate(ibuff(chunk_size,4), rbuff(chunk_size,3)) - mapping=0 !============================== t0=MPI_Wtime() write(mype_string,'(i5.5)') mype - write(npes_string,"(I10)") npes + write(npes_string,"(I10)") npes dist_mesh_dir=trim(meshpath)//'dist_'//trim(ADJUSTL(npes_string))//'/' - !======================= ! rank partitioning vector ! will be read by 0 proc @@ -224,7 +218,8 @@ SUBROUTINE read_mesh(partit, mesh) file_name=trim(dist_mesh_dir)//'rpart.out' fileID=10 open(fileID, file=trim(file_name)) - allocate(part(npes+1)) + allocate(partit%part(npes+1)) + part=>partit%part read(fileID,*) n error_status=0 if (n/=npes) error_status=1 !set the error status for consistency in rpart @@ -240,16 +235,16 @@ SUBROUTINE read_mesh(partit, mesh) if (error_status/=0) then write(*,*) n write(*,*) 'error: NPES does not coincide with that of the mesh' - call par_ex(1) + call par_ex(partit, 1) STOP end if ! broadcasting partitioning vector to the other procs if (mype/=0) then - allocate(part(npes+1)) + allocate(partit%part(npes+1)) + part=>partit%part end if call MPI_BCast(part, npes+1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) if (mype==0) write(*,*) mype,'rpart is read' - !=========================== ! Lists of nodes and elements in global indexing. ! every proc reads its file @@ -264,13 +259,13 @@ SUBROUTINE read_mesh(partit, mesh) read(fileID,*) partit%myDim_nod2D read(fileID,*) partit%eDim_nod2D allocate(partit%myList_nod2D(partit%myDim_nod2D+partit%eDim_nod2D)) - read(fileID,*) myList_nod2D + read(fileID,*) partit%myList_nod2D read(fileID,*) partit%myDim_elem2D read(fileID,*) partit%eDim_elem2D read(fileID,*) partit%eXDim_elem2D allocate(partit%myList_elem2D(partit%myDim_elem2D+partit%eDim_elem2D+partit%eXDim_elem2D)) - read(fileID,*) myList_elem2D + read(fileID,*) partit%myList_elem2D read(fileID,*) partit%myDim_edge2D read(fileID,*) partit%eDim_edge2D @@ -301,7 +296,7 @@ SUBROUTINE read_mesh(partit, mesh) if (error_status/=0) then write(*,*) n write(*,*) 'error: nod2D/=part(npes+1)-1' - call par_ex(1) + call par_ex(partit, 1) STOP end if @@ -604,7 +599,8 @@ SUBROUTINE read_mesh(partit, mesh) read(fileID,*) com_nod2D%rPE(1:com_nod2D%rPEnum) !!$ ALLOCATE(com_nod2D%rptr(com_nod2D%rPEnum+1)) read(fileID,*) com_nod2D%rptr(1:com_nod2D%rPEnum+1) - ALLOCATE(com_nod2D%rlist(eDim_nod2D)) + ALLOCATE(partit%com_nod2D%rlist(eDim_nod2D)) + read(fileID,*) com_nod2D%rlist read(fileID,*) com_nod2D%sPEnum From 511082541719d39efd51e71aa7236b747f9c1406 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 8 Oct 2021 10:03:23 +0200 Subject: [PATCH 388/909] status_check has been moved into gen_partitioning.F90 --- src/fvom_main.F90 | 2 +- src/gen_modules_partitioning.F90 | 30 +++++++++++++++++++----------- src/io_meandata.F90 | 31 +++++++++++++++---------------- src/oce_ale.F90 | 2 +- src/oce_mesh.F90 | 1 + 5 files changed, 37 insertions(+), 29 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 2b23fc667..90464e4c5 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -31,7 +31,7 @@ program main use update_atm_forcing_interface use before_oce_step_interface use oce_timestep_ale_interface -use par_support_interfaces +!use par_support_interfaces use read_mesh_interface use fesom_version_info_module use command_line_options_module diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index cc7d3c080..3e1b8dc21 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -22,11 +22,9 @@ subroutine set_par_support(partit, mesh) type(t_mesh), intent(in), target :: mesh end subroutine - subroutine init_gatherLists(partit, mesh) - USE MOD_MESH + subroutine init_gatherLists(partit) USE MOD_PARTIT implicit none - type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface @@ -118,6 +116,7 @@ subroutine par_ex(partit, abort) ! finalizes MPI end subroutine par_ex !======================================================================= subroutine set_par_support(partit, mesh) + use par_support_interfaces use MOD_MESH use MOD_PARTIT implicit none @@ -441,24 +440,18 @@ subroutine set_par_support(partit, mesh) endif - call init_gatherLists(partit, mesh) + call init_gatherLists(partit) if(mype==0) write(*,*) 'Communication arrays are set' end subroutine set_par_support - - !=================================================================== -subroutine init_gatherLists(partit, mesh) - USE MOD_MESH +subroutine init_gatherLists(partit) USE MOD_PARTIT implicit none - type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit integer :: n2D, e2D, sum_loc_elem2D integer :: n, estart, nstart #include "associate_part_def.h" -#include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" if (mype==0) then if (npes > 1) then @@ -506,3 +499,18 @@ subroutine init_gatherLists(partit, mesh) endif end subroutine init_gatherLists +!=================================================================== +subroutine status_check(partit) +use g_config +use mod_partit +implicit none +type(t_partit), intent(in), target :: partit +integer :: res +res=0 +call MPI_Allreduce (partit%pe_status, res, 1, MPI_INTEGER, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) +if (res /= 0 ) then + if (partit%mype==0) write(*,*) 'Something Broke. Flushing and stopping...' + call par_ex(partit, 1) +endif +end subroutine status_check + diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index fe287b3c5..e13de9b38 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -16,7 +16,7 @@ module io_MEANDATA type Meandata private - type(t_partit), pointer :: mypartit + type(t_partit), pointer :: p_partit integer :: ndim integer :: glsize(2) integer :: accuracy @@ -711,7 +711,6 @@ subroutine assoc_ids(entry) ! subroutine write_mean(entry, entry_index) use mod_mesh - use mod_partit use io_gather_module implicit none type(Meandata), intent(inout) :: entry @@ -722,7 +721,7 @@ subroutine write_mean(entry, entry_index) ! Serial output implemented so far - if (entry%mypartit%mype==entry%root_rank) then + if (entry%p_partit%mype==entry%root_rank) then write(*,*) 'writing mean record for ', trim(entry%name), '; rec. count = ', entry%rec_count call assert_nf( nf_put_vara_double(entry%ncid, entry%Tid, entry%rec_count, 1, entry%ctime_copy, 1), __LINE__) end if @@ -732,16 +731,16 @@ subroutine write_mean(entry, entry_index) tag = 2 ! we can use a fixed tag here as we have an individual communicator for each output field !___________writing 8 byte real_________________________________________ if (entry%accuracy == i_real8) then - if(entry%mypartit%mype==entry%root_rank) then + if(entry%p_partit%mype==entry%root_rank) then if(.not. allocated(entry%aux_r8)) allocate(entry%aux_r8(size2)) end if do lev=1, size1 if(.not. entry%is_elem_based) then - call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%mypartit) + call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%p_partit) else - call gather_elem2D(entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%mypartit) + call gather_elem2D(entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%p_partit) end if - if (entry%mypartit%mype==entry%root_rank) then + if (entry%p_partit%mype==entry%root_rank) then if (entry%ndim==1) then call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__) elseif (entry%ndim==2) then @@ -752,16 +751,16 @@ subroutine write_mean(entry, entry_index) !___________writing 4 byte real _________________________________________ else if (entry%accuracy == i_real4) then - if(entry%mypartit%mype==entry%root_rank) then + if(entry%p_partit%mype==entry%root_rank) then if(.not. allocated(entry%aux_r4)) allocate(entry%aux_r4(size2)) end if do lev=1, size1 if(.not. entry%is_elem_based) then - call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%mypartit) + call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%p_partit) else - call gather_real4_elem2D(entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%mypartit) + call gather_real4_elem2D(entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%p_partit) end if - if (entry%mypartit%mype==entry%root_rank) then + if (entry%p_partit%mype==entry%root_rank) then if (entry%ndim==1) then call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__) elseif (entry%ndim==2) then @@ -923,12 +922,12 @@ subroutine do_output_callback(entry_index) entry=>io_stream(entry_index) - entry%mypartit%mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) + entry%p_partit%mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) call write_mean(entry, entry_index) -write(*,*) 1111111 - if(entry%mypartit%mype == entry%root_rank) call assert_nf( nf_sync(entry%ncid), __LINE__ ) ! flush the file to disk after each write -write(*,*) 2222222 +write(*,*) 1111111, entry%p_partit%mype, entry%root_rank + if(entry%p_partit%mype == entry%root_rank) call assert_nf( nf_sync(entry%ncid), __LINE__ ) ! flush the file to disk after each write +write(*,*) 2222222, entry%p_partit%mype, entry%root_rank end subroutine @@ -1159,7 +1158,7 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call entry%thread%disable_async() entry%mype_workaround = partit%mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads - entry%mypartit=>partit + entry%p_partit=>partit end subroutine diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index c3161c063..d387993cd 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2718,7 +2718,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) call compute_neutral_slope(partit, mesh) !___________________________________________________________________________ - ! call status_check(partit) + call status_check(partit) !___________________________________________________________________________ ! >>>>>> <<<<<< ! >>>>>> calculate vertical mixing coefficients for tracer (Kv) <<<<<< diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 932a6ccd4..9cb698524 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -140,6 +140,7 @@ SUBROUTINE mesh_setup(partit, mesh) use find_levels_min_e2n_interface use find_neighbors_interface use mesh_areas_interface +use par_support_interfaces IMPLICIT NONE type(t_mesh), intent(inout) :: mesh type(t_partit), intent(inout), target :: partit From f071b73bd4d1119202fbae199a7788dfcefd4136 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 8 Oct 2021 10:30:45 +0200 Subject: [PATCH 389/909] is not compiling with gfortran, par_ex needs an interface --- src/fvom_main.F90 | 1 - src/gen_comm.F90 | 16 ---------------- src/gen_modules_partitioning.F90 | 21 +++++++++++++-------- src/oce_mesh.F90 | 4 +++- 4 files changed, 16 insertions(+), 26 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 90464e4c5..f0b99e732 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -31,7 +31,6 @@ program main use update_atm_forcing_interface use before_oce_step_interface use oce_timestep_ale_interface -!use par_support_interfaces use read_mesh_interface use fesom_version_info_module use command_line_options_module diff --git a/src/gen_comm.F90 b/src/gen_comm.F90 index 1baab3fcb..6024e9345 100755 --- a/src/gen_comm.F90 +++ b/src/gen_comm.F90 @@ -654,19 +654,3 @@ subroutine mymesh(partit, mesh) ! shared edges which mype updates end subroutine mymesh !================================================================= -#ifndef FVOM_INIT -subroutine status_check(partit) -use g_config -use mod_partit -implicit none -type(t_partit), intent(in), target :: partit -integer :: res -res=0 -call MPI_Allreduce (partit%pe_status, res, 1, MPI_INTEGER, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) -if (res /= 0 ) then - if (partit%mype==0) write(*,*) 'Something Broke. Flushing and stopping...' -!!! a restart file must be written here !!! - call par_ex(partit, 1) -endif -end subroutine status_check -#endif diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 3e1b8dc21..c7471bc25 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -14,7 +14,7 @@ subroutine par_ex(partit, abort) integer,optional :: abort end subroutine - subroutine set_par_support(partit, mesh) + subroutine init_mpi_types(partit, mesh) use MOD_MESH use MOD_PARTIT implicit none @@ -30,6 +30,16 @@ subroutine init_gatherLists(partit) end interface end module +module mod_parsup + interface + subroutine par_ex(partit, abort) + USE MOD_PARTIT + implicit none + type(t_partit), intent(inout), target :: partit + integer,optional :: abort + end subroutine +end module mod_parsup + subroutine par_init(partit) ! initializes MPI USE o_PARAM USE MOD_PARTIT @@ -115,8 +125,7 @@ subroutine par_ex(partit, abort) ! finalizes MPI end subroutine par_ex !======================================================================= -subroutine set_par_support(partit, mesh) - use par_support_interfaces +subroutine init_mpi_types(partit, mesh) use MOD_MESH use MOD_PARTIT implicit none @@ -437,12 +446,8 @@ subroutine set_par_support(partit, mesh) deallocate(blocklen, displace) deallocate(blocklen_tmp, displace_tmp) - endif - - call init_gatherLists(partit) - if(mype==0) write(*,*) 'Communication arrays are set' -end subroutine set_par_support +end subroutine init_mpi_types !=================================================================== subroutine init_gatherLists(partit) USE MOD_PARTIT diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 9cb698524..b4146b117 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -147,7 +147,9 @@ SUBROUTINE mesh_setup(partit, mesh) call set_mesh_transform_matrix !(rotated grid) call read_mesh(partit, mesh) - call set_par_support(partit, mesh) + call init_mpi_types(partit, mesh) + call init_gatherLists(partit) + if(mype==0) write(*,*) 'Communication arrays are set' call test_tri(partit, mesh) call load_edges(partit, mesh) call find_neighbors(partit, mesh) From ff2cb5971296dd687ba1cc6de4e6abd587f988bd Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 8 Oct 2021 10:40:56 +0200 Subject: [PATCH 390/909] changing case for all USE MOD_PARTIT --- src/cavity_param.F90 | 14 ++--- src/cpl_driver.F90 | 6 +- src/gen_bulk_formulae.F90 | 2 +- src/gen_comm.F90 | 6 +- src/gen_events.F90 | 2 +- src/gen_forcing_couple.F90 | 20 +++--- src/gen_forcing_init.F90 | 6 +- src/gen_halo_exchange.F90 | 98 ++++++++++++++--------------- src/gen_interpolation.F90 | 6 +- src/gen_model_setup.F90 | 4 +- src/gen_modules_clock.F90 | 2 +- src/gen_modules_cvmix_idemix.F90 | 2 +- src/gen_modules_cvmix_kpp.F90 | 2 +- src/gen_modules_cvmix_pp.F90 | 2 +- src/gen_modules_cvmix_tidal.F90 | 2 +- src/gen_modules_cvmix_tke.F90 | 2 +- src/gen_modules_diag.F90 | 2 +- src/gen_modules_partitioning.F90 | 6 +- src/gen_support.F90 | 6 +- src/ice_fct.F90 | 26 ++++---- src/ice_maEVP.F90 | 26 ++++---- src/ice_oce_coupling.F90 | 10 +-- src/ice_setup_step.F90 | 14 ++--- src/ice_thermo_oce.F90 | 4 +- src/io_gather.F90 | 14 ++--- src/io_meandata.F90 | 18 +++--- src/io_netcdf_workaround_module.F90 | 2 +- src/io_restart.F90 | 2 +- src/oce_adv_tra_driver.F90 | 8 +-- src/oce_adv_tra_fct.F90 | 8 +-- src/oce_adv_tra_hor.F90 | 12 ++-- src/oce_adv_tra_ver.F90 | 20 +++--- src/oce_ale.F90 | 52 +++++++-------- src/oce_ale_pressure_bv.F90 | 38 +++++------ src/oce_ale_tracer.F90 | 38 +++++------ src/oce_ale_vel_rhs.F90 | 4 +- src/oce_dyn.F90 | 20 +++--- src/oce_local.F90 | 4 +- src/oce_mesh.F90 | 24 +++---- src/oce_muscl_adv.F90 | 4 +- src/oce_spp.F90 | 4 +- src/oce_vel_rhs_vinv.F90 | 2 +- src/write_step_info.F90 | 8 +-- 43 files changed, 276 insertions(+), 276 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 24c91bfb5..c0334fe2c 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -2,7 +2,7 @@ module cavity_heat_water_fluxes_3eq_interface interface subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -19,7 +19,7 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) ! cavity line point to that point --> use their coordinates and depth subroutine compute_nrst_pnt2cavline(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_PARAM , only: WP implicit none @@ -137,7 +137,7 @@ end subroutine compute_nrst_pnt2cavline ! adapted by P. SCholz for FESOM2.0 subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_PARAM , only: density_0, WP use o_ARRAYS, only: heat_flux, water_flux, Unode, density_m_rho0,density_ref @@ -326,7 +326,7 @@ end subroutine cavity_heat_water_fluxes_3eq ! Reviewed by Qiang Wang subroutine cavity_heat_water_fluxes_2eq(tracers, partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_PARAM , only: WP use o_ARRAYS, only: heat_flux, water_flux @@ -380,7 +380,7 @@ end subroutine cavity_heat_water_fluxes_2eq ! Moved to this separated routine by Qiang, 20.1.2012 subroutine cavity_momentum_fluxes(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_PARAM , only: density_0, C_d, WP use o_ARRAYS, only: UV, Unode, stress_surf, stress_node_surf use i_ARRAYS, only: u_w, v_w @@ -429,7 +429,7 @@ end subroutine cavity_momentum_fluxes !_______________________________________________________________________________ subroutine cavity_ice_clean_vel(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use i_ARRAYS, only: U_ice, V_ice implicit none type(t_partit), intent(inout), target :: partit @@ -453,7 +453,7 @@ end subroutine cavity_ice_clean_vel !_______________________________________________________________________________ subroutine cavity_ice_clean_ma(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use i_ARRAYS, only: m_ice, m_snow, a_ice implicit none type(t_partit), intent(inout), target :: partit diff --git a/src/cpl_driver.F90 b/src/cpl_driver.F90 index ccefeb7f8..3eb6e6ec4 100755 --- a/src/cpl_driver.F90 +++ b/src/cpl_driver.F90 @@ -165,7 +165,7 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh) use mod_oasis_method, ONLY: oasis_get_debug, oasis_set_debug #endif use mod_mesh - use mod_partit + USE MOD_PARTIT use g_rotate_grid use mod_oasis, only: oasis_write_area, oasis_write_mask implicit none @@ -509,7 +509,7 @@ end subroutine cpl_oasis3mct_define_unstr subroutine cpl_oasis3mct_send(ind, data_array, action, partit) use o_param - use MOD_PARTIT + USE MOD_PARTIT implicit none save !--------------------------------------------------------------------- @@ -579,7 +579,7 @@ end subroutine cpl_oasis3mct_send subroutine cpl_oasis3mct_recv(ind, data_array, action, partit) use o_param use g_comm_auto - use MOD_PARTIT + USE MOD_PARTIT implicit none save !--------------------------------------------------------------------- diff --git a/src/gen_bulk_formulae.F90 b/src/gen_bulk_formulae.F90 index 61535feb3..2472ac322 100755 --- a/src/gen_bulk_formulae.F90 +++ b/src/gen_bulk_formulae.F90 @@ -1,7 +1,7 @@ MODULE gen_bulk ! Compute heat and momentum exchange coefficients use mod_mesh - use mod_partit + USE MOD_PARTIT use i_therm_param use i_arrays use g_forcing_arrays diff --git a/src/gen_comm.F90 b/src/gen_comm.F90 index 6024e9345..0447d5ac6 100755 --- a/src/gen_comm.F90 +++ b/src/gen_comm.F90 @@ -7,7 +7,7 @@ !======================================================================= subroutine communication_nodn(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -218,7 +218,7 @@ end subroutine communication_nodn !========================================================================== subroutine communication_elemn(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT implicit none type(t_mesh), intent(in), target :: mesh @@ -526,7 +526,7 @@ end subroutine communication_elemn !========================================================================== subroutine mymesh(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT implicit none type(t_mesh), intent(in), target :: mesh diff --git a/src/gen_events.F90 b/src/gen_events.F90 index 977a386eb..ab037ef25 100644 --- a/src/gen_events.F90 +++ b/src/gen_events.F90 @@ -91,7 +91,7 @@ end subroutine step_event !-------------------------------------------------------------------------------------------- ! subroutine handle_err(errcode, partit) - use mod_partit + USE MOD_PARTIT implicit none #include "netcdf.inc" diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 682685162..24737a40d 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -2,7 +2,7 @@ module force_flux_consv_interface interface subroutine force_flux_consv(field2d, mask, n, h, do_stats, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), intent (inout) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) @@ -16,7 +16,7 @@ module compute_residual_interface interface subroutine compute_residual(field2d, mask, n, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), intent (in) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) @@ -29,7 +29,7 @@ module integrate_2D_interface interface subroutine integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), intent (out) :: flux_global(2), flux_local(2) @@ -44,7 +44,7 @@ module update_atm_forcing_interface interface subroutine update_atm_forcing(istep, tracers, partit,mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer integer, intent(in) :: istep type(t_tracer), intent(in), target :: tracers @@ -57,7 +57,7 @@ subroutine update_atm_forcing(istep, tracers, partit,mesh) module net_rec_from_atm_interface interface subroutine net_rec_from_atm(action, partit) - use mod_partit + USE MOD_PARTIT logical, intent(in) :: action type(t_partit), intent(inout), target :: partit end subroutine @@ -68,7 +68,7 @@ subroutine net_rec_from_atm(action, partit) subroutine update_atm_forcing(istep, tracers, partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_arrays use i_arrays @@ -393,7 +393,7 @@ SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, partit, mesh) flux_correction_north, flux_correction_south, & flux_correction_total use mod_mesh - use mod_partit + USE MOD_PARTIT use cpl_driver, only : nrecv, cpl_recv, a2o_fcorr_stat use o_PARAM, only : mstep, WP use compute_residual_interface @@ -514,7 +514,7 @@ SUBROUTINE compute_residual(field2d, mask, n, partit, mesh) flux_correction_total use o_PARAM, only : WP use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use integrate_2D_interface IMPLICIT NONE @@ -549,7 +549,7 @@ END SUBROUTINE compute_residual ! SUBROUTINE integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_PARAM, only: WP IMPLICIT NONE type(t_mesh), intent(in), target :: mesh @@ -614,7 +614,7 @@ SUBROUTINE net_rec_from_atm(action, partit) use g_forcing_arrays use cpl_driver use o_PARAM, only: WP - use mod_partit + USE MOD_PARTIT IMPLICIT NONE LOGICAL, INTENT (IN) :: action diff --git a/src/gen_forcing_init.F90 b/src/gen_forcing_init.F90 index 0e2f6ecaa..227123e4a 100755 --- a/src/gen_forcing_init.F90 +++ b/src/gen_forcing_init.F90 @@ -2,7 +2,7 @@ module forcing_array_setup_interfaces interface subroutine forcing_array_setup(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -17,7 +17,7 @@ subroutine forcing_setup(partit, mesh) use g_CONFIG use g_sbf, only: sbc_ini use mod_mesh -use mod_partit +USE MOD_PARTIT use forcing_array_setup_interfaces implicit none type(t_mesh), intent(in), target :: mesh @@ -36,7 +36,7 @@ subroutine forcing_array_setup(partit, mesh) !inializing forcing fields use o_param use mod_mesh - use mod_partit + USE MOD_PARTIT use i_arrays use g_forcing_arrays use g_forcing_param diff --git a/src/gen_halo_exchange.F90 b/src/gen_halo_exchange.F90 index 7b9f66e6b..63dbb1116 100755 --- a/src/gen_halo_exchange.F90 +++ b/src/gen_halo_exchange.F90 @@ -27,7 +27,7 @@ module g_comm ! Only needed in debug mode subroutine check_mpi_comm(rn, sn, r_mpitype, s_mpitype, rPE, sPE, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer, intent(in) :: sn, rn, r_mpitype(:), s_mpitype(:), rPE(:), sPE(:) @@ -56,7 +56,7 @@ END SUBROUTINE check_mpi_comm subroutine exchange_nod2D_i(nod_array2D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer, intent(inout) :: nod_array2D(:) @@ -72,7 +72,7 @@ END SUBROUTINE exchange_nod2D_i ! General version of the communication routine for 2D nodal fields subroutine exchange_nod2D_i_begin(nod_array2D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer, intent(inout) :: nod_array2D(:) @@ -112,7 +112,7 @@ END SUBROUTINE exchange_nod2D_i_begin ! General version of the communication routine for 2D nodal fields subroutine exchange_nod2D(nod_array2D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array2D(:) @@ -130,7 +130,7 @@ END SUBROUTINE exchange_nod2D ! General version of the communication routine for 2D nodal fields subroutine exchange_nod2D_begin(nod_array2D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array2D(:) @@ -167,7 +167,7 @@ END SUBROUTINE exchange_nod2D_begin ! General version of the communication routine for 2D nodal fields subroutine exchange_nod2D_2fields(nod1_array2D, nod2_array2D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array2D(:) @@ -187,7 +187,7 @@ END SUBROUTINE exchange_nod2D_2fields ! General version of the communication routine for 2D nodal fields subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array2D(:) @@ -232,7 +232,7 @@ END SUBROUTINE exchange_nod2D_2fields_begin subroutine exchange_nod2D_3fields(nod1_array2D, nod2_array2D, nod3_array2D, partit) ! General version of the communication routine for 2D nodal fields use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array2D(:) @@ -253,7 +253,7 @@ END SUBROUTINE exchange_nod2D_3fields subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D, partit) ! General version of the communication routine for 2D nodal fields use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array2D(:) @@ -306,7 +306,7 @@ END SUBROUTINE exchange_nod2D_3fields_begin ! stored in (vertical, horizontal) format subroutine exchange_nod3D(nod_array3D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array3D(:,:) @@ -323,7 +323,7 @@ END SUBROUTINE exchange_nod3D ! stored in (vertical, horizontal) format subroutine exchange_nod3D_begin(nod_array3D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array3D(:,:) @@ -369,7 +369,7 @@ END SUBROUTINE exchange_nod3D_begin ! stored in (vertical, horizontal) format subroutine exchange_nod3D_2fields(nod1_array3D,nod2_array3D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array3D(:,:) @@ -388,7 +388,7 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) ! General version of the communication routine for 3D nodal fields ! stored in (vertical, horizontal) format use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array3D(:,:) @@ -449,7 +449,7 @@ END SUBROUTINE exchange_nod3D_2fields_begin ! ======================================================================== subroutine exchange_nod3D_n(nod_array3D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array3D(:,:,:) @@ -465,7 +465,7 @@ END SUBROUTINE exchange_nod3D_n ! stored in (vertical, horizontal) format subroutine exchange_nod3D_n_begin(nod_array3D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array3D(:,:,:) @@ -523,7 +523,7 @@ END SUBROUTINE exchange_nod3D_n_begin SUBROUTINE exchange_nod_end(partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit @@ -534,7 +534,7 @@ END SUBROUTINE exchange_nod_end SUBROUTINE exchange_elem_end(partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit #include "associate_part_def.h" @@ -553,7 +553,7 @@ END SUBROUTINE exchange_elem_end !============================================================================= subroutine exchange_elem3D(elem_array3D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array3D(:,:) @@ -569,7 +569,7 @@ END SUBROUTINE exchange_elem3D ! stored in (vertical, horizontal) format subroutine exchange_elem3D_begin(elem_array3D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array3D(:,:) @@ -697,7 +697,7 @@ END SUBROUTINE exchange_elem3D_begin ! stored in (vertical, horizontal) format subroutine exchange_elem3D_n(elem_array3D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array3D(:,:,:) @@ -714,7 +714,7 @@ subroutine exchange_elem3D_n_begin(elem_array3D, partit) ! General version of the communication routine for 3D elemental fields ! stored in (vertical, horizontal) format use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array3D(:,:,:) @@ -800,7 +800,7 @@ END SUBROUTINE exchange_elem3D_n_begin ! stored in (vertical, horizontal) format subroutine exchange_elem2D(elem_array2D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array2D(:) @@ -818,7 +818,7 @@ END SUBROUTINE exchange_elem2D ! stored in (vertical, horizontal) format subroutine exchange_elem2D_begin(elem_array2D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array2D(:) @@ -884,7 +884,7 @@ END SUBROUTINE exchange_elem2D_begin !Exchange with ALL(!) the neighbours subroutine exchange_elem2D_i(elem_array2D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer, intent(inout) :: elem_array2D(:) @@ -902,7 +902,7 @@ END SUBROUTINE exchange_elem2D_i !Exchange with ALL(!) the neighbours subroutine exchange_elem2D_i_begin(elem_array2D, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer, intent(inout) :: elem_array2D(:) @@ -946,7 +946,7 @@ END SUBROUTINE exchange_elem2D_i_begin subroutine broadcast_nod3D(arr3D, arr3Dglobal, partit) ! Distribute the nodal information available on 0 PE to other PEs use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nz, counter,nl1 @@ -1012,7 +1012,7 @@ end subroutine broadcast_nod3D subroutine broadcast_nod2D(arr2D, arr2Dglobal, partit) ! A 2D version of the previous routine use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(in), target :: partit real(real64) :: arr2D(:) @@ -1062,7 +1062,7 @@ end subroutine broadcast_nod2D subroutine broadcast_elem3D(arr3D, arr3Dglobal, partit) ! Distribute the elemental information available on 0 PE to other PEs use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(in), target :: partit INTEGER :: nz, counter,nl1 @@ -1130,7 +1130,7 @@ end subroutine broadcast_elem3D subroutine broadcast_elem2D(arr2D, arr2Dglobal, partit) ! A 2D version of the previous routine use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(in), target :: partit integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) @@ -1180,7 +1180,7 @@ end subroutine broadcast_elem2D ! Use only with 3D arrays stored in (vertical, horizontal) way subroutine gather_nod3D(arr3D, arr3D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1241,7 +1241,7 @@ subroutine gather_real4_nod3D(arr3D, arr3D_global, partit) ! ! Use only with 3D arrays stored in (vertical, horizontal) way use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1301,7 +1301,7 @@ subroutine gather_int2_nod3D(arr3D, arr3D_global, partit) ! ! Use only with 3D arrays stored in (vertical, horizontal) way use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1358,7 +1358,7 @@ end subroutine gather_int2_nod3D subroutine gather_nod2D(arr2D, arr2D_global, partit) ! Make nodal information available to master PE use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1411,7 +1411,7 @@ end subroutine gather_nod2D subroutine gather_real4_nod2D(arr2D, arr2D_global, partit) ! Make nodal information available to master PE use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1465,7 +1465,7 @@ end subroutine gather_real4_nod2D ! Make nodal information available to master PE subroutine gather_int2_nod2D(arr2D, arr2D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1521,7 +1521,7 @@ subroutine gather_elem3D(arr3D, arr3D_global, partit) ! ! Use only with 3D arrays stored in (vertical, horizontal) way use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1584,7 +1584,7 @@ end subroutine gather_elem3D ! Use only with 3D arrays stored in (vertical, horizontal) way subroutine gather_real4_elem3D(arr3D, arr3D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1648,7 +1648,7 @@ end subroutine gather_real4_elem3D ! Use only with 3D arrays stored in (vertical, horizontal) way subroutine gather_int2_elem3D(arr3D, arr3D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1711,7 +1711,7 @@ end subroutine gather_int2_elem3D ! Make element information available to master PE subroutine gather_elem2D(arr2D, arr2D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1768,7 +1768,7 @@ end subroutine gather_elem2D ! Make element information available to master PE subroutine gather_real4_elem2D(arr2D, arr2D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1826,7 +1826,7 @@ end subroutine gather_real4_elem2D ! Make element information available to master PE subroutine gather_int2_elem2D(arr2D, arr2D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1885,7 +1885,7 @@ end subroutine gather_int2_elem2D ! Use only with 3D arrays stored in (vertical, horizontal) way subroutine gather_real8to4_nod3D(arr3D, arr3D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1947,7 +1947,7 @@ end subroutine gather_real8to4_nod3D ! Make nodal information available to master PE subroutine gather_real8to4_nod2D(arr2D, arr2D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -2001,7 +2001,7 @@ subroutine gather_real8to4_elem3D(arr3D, arr3D_global, partit) ! Make element information available to master PE ! Use only with 3D arrays stored in (vertical, horizontal) way use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -2060,7 +2060,7 @@ end subroutine gather_real8to4_elem3D ! Make element information available to master PE subroutine gather_real8to4_elem2D(arr2D, arr2D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -2117,7 +2117,7 @@ end subroutine gather_real8to4_elem2D subroutine gather_elem2D_i(arr2D, arr2D_global, partit) ! Make element information available to master PE use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -2156,7 +2156,7 @@ end subroutine gather_elem2D_i ! Make nodal information available to master PE subroutine gather_nod2D_i(arr2D, arr2D_global, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -2209,7 +2209,7 @@ end subroutine gather_nod2D_i ! A 2D version of the previous routine subroutine gather_edg2D(arr2D, arr2Dglobal, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(in), target :: partit real(real64) :: arr2D(:) @@ -2250,7 +2250,7 @@ end subroutine gather_edg2D ! A 2D version of the previous routine subroutine gather_edg2D_i(arr2D, arr2Dglobal, partit) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: arr2D(:) diff --git a/src/gen_interpolation.F90 b/src/gen_interpolation.F90 index 3faf7eb9d..c43c78ab0 100755 --- a/src/gen_interpolation.F90 +++ b/src/gen_interpolation.F90 @@ -29,7 +29,7 @@ subroutine interp_2d_field_v2(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_r ! Coded by Qiang Wang ! Reviewed by ?? !------------------------------------------------------------------------------------- - use mod_partit + USE MOD_PARTIT use o_PARAM, only: WP implicit none integer :: n, i, ii, jj, k, nod_find @@ -163,7 +163,7 @@ subroutine interp_2d_field(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, ! Coded by Qiang Wang ! Reviewed by ?? !------------------------------------------------------------------------------------- - use mod_partit + USE MOD_PARTIT use o_PARAM, only: WP implicit none integer :: n, i @@ -313,7 +313,7 @@ subroutine interp_3d_field(num_lon_reg, num_lat_reg, num_lay_reg, & ! Reviewed by ?? !------------------------------------------------------------------------------------- use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_param, only: WP implicit none integer :: n, i, flag,nz diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index 5a5aa0813..d9300b275 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -1,6 +1,6 @@ ! ============================================================== subroutine setup_model(partit) - use mod_partit + USE MOD_PARTIT use o_param use i_param use i_therm_param @@ -115,7 +115,7 @@ subroutine get_run_steps(nsteps, partit) ! Reviewed by ?? !-------------------------------------------------------------- use g_clock - use mod_partit + USE MOD_PARTIT implicit none type(t_partit), intent(in) :: partit diff --git a/src/gen_modules_clock.F90 b/src/gen_modules_clock.F90 index 28cbbfb67..ce243d5be 100755 --- a/src/gen_modules_clock.F90 +++ b/src/gen_modules_clock.F90 @@ -66,7 +66,7 @@ end subroutine clock !-------------------------------------------------------------------------------- ! subroutine clock_init(partit) - use mod_partit + USE MOD_PARTIT use g_config implicit none type(t_partit), intent(in), target :: partit diff --git a/src/gen_modules_cvmix_idemix.F90 b/src/gen_modules_cvmix_idemix.F90 index f8d9e941d..111223754 100644 --- a/src/gen_modules_cvmix_idemix.F90 +++ b/src/gen_modules_cvmix_idemix.F90 @@ -27,7 +27,7 @@ module g_cvmix_idemix use g_config , only: dt use o_param use mod_mesh - use mod_partit + USE MOD_PARTIT use o_arrays use g_comm_auto use g_read_other_NetCDF diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 77019be15..88eb8ea61 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -23,7 +23,7 @@ module g_cvmix_kpp use g_config use o_param use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer use o_arrays use g_comm_auto diff --git a/src/gen_modules_cvmix_pp.F90 b/src/gen_modules_cvmix_pp.F90 index 6654644bd..b81578c14 100644 --- a/src/gen_modules_cvmix_pp.F90 +++ b/src/gen_modules_cvmix_pp.F90 @@ -25,7 +25,7 @@ module g_cvmix_pp use g_config use o_param use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_arrays use g_comm_auto use i_arrays diff --git a/src/gen_modules_cvmix_tidal.F90 b/src/gen_modules_cvmix_tidal.F90 index 8faa154c9..d6f18d180 100644 --- a/src/gen_modules_cvmix_tidal.F90 +++ b/src/gen_modules_cvmix_tidal.F90 @@ -15,7 +15,7 @@ module g_cvmix_tidal use g_config , only: dt use o_param use mod_mesh - use mod_partit + USE MOD_PARTIT use o_arrays use g_comm_auto use g_read_other_NetCDF diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index cd9555a7c..cf58dbe0e 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -26,7 +26,7 @@ module g_cvmix_tke use g_config , only: dt use o_param use mod_mesh - use mod_partit + USE MOD_PARTIT use o_arrays use g_comm_auto implicit none diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index ea324e6e3..98d96955a 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -2,7 +2,7 @@ module diagnostics use g_config use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer use g_clock use g_comm_auto diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index c7471bc25..f7f5e3bd8 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -16,7 +16,7 @@ subroutine par_ex(partit, abort) subroutine init_mpi_types(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT implicit none type(t_partit), intent(in), target :: partit type(t_mesh), intent(in), target :: mesh @@ -127,7 +127,7 @@ end subroutine par_ex !======================================================================= subroutine init_mpi_types(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT implicit none type(t_partit), intent(inout), target :: partit @@ -507,7 +507,7 @@ end subroutine init_gatherLists !=================================================================== subroutine status_check(partit) use g_config -use mod_partit +USE MOD_PARTIT implicit none type(t_partit), intent(in), target :: partit integer :: res diff --git a/src/gen_support.F90 b/src/gen_support.F90 index c8c619c22..6a2760329 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -3,7 +3,7 @@ !2. computing surface integrals of the FESOM fields module g_support USE MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto use o_ARRAYS use g_config, only: dummy @@ -279,7 +279,7 @@ end subroutine smooth_elem3D !-------------------------------------------------------------------------------------------- ! subroutine integrate_nod_2D(data, int2D, partit, mesh) - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto IMPLICIT NONE @@ -308,7 +308,7 @@ end subroutine integrate_nod_2D !-------------------------------------------------------------------------------------------- ! subroutine integrate_nod_3D(data, int3D, partit, mesh) - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto IMPLICIT NONE diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index f6b2864cc..f26b11894 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -2,28 +2,28 @@ module ice_fct_interfaces interface subroutine ice_mass_matrix_fill(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh end subroutine subroutine ice_solve_high_order(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh end subroutine subroutine ice_solve_low_order(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh end subroutine subroutine ice_fem_fct(tr_array_id, partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT integer :: tr_array_id type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -46,7 +46,7 @@ subroutine ice_fem_fct(tr_array_id, partit, mesh) ! ===================================================================== subroutine ice_TG_rhs(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use i_Arrays use i_PARAM use o_PARAM @@ -116,7 +116,7 @@ end subroutine ice_TG_rhs subroutine ice_fct_init(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use i_ARRAYS use ice_fct_interfaces implicit none @@ -166,7 +166,7 @@ end subroutine ice_fct_init ! subroutine ice_fct_solve(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use ice_fct_interfaces implicit none type(t_partit), intent(inout), target :: partit @@ -201,7 +201,7 @@ subroutine ice_solve_low_order(partit, mesh) ! matrices acting on the field from the previous time step. The consistent ! mass matrix on the lhs is replaced with the lumped one. use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use i_ARRAYS use i_PARAM @@ -259,7 +259,7 @@ end subroutine ice_solve_low_order !_______________________________________________________________________________ subroutine ice_solve_high_order(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use i_ARRAYS use o_PARAM @@ -351,7 +351,7 @@ subroutine ice_fem_fct(tr_array_id, partit, mesh) ! Turek. (kuzmin@math.uni-dortmund.de) ! use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use i_arrays use i_param @@ -661,7 +661,7 @@ end subroutine ice_fem_fct SUBROUTINE ice_mass_matrix_fill(partit, mesh) ! Used in ice_fct inherited from FESOM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use i_PARAM use i_ARRAYS @@ -743,7 +743,7 @@ END SUBROUTINE ice_mass_matrix_fill ! subroutine ice_TG_rhs_div(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use i_Arrays use i_PARAM use o_PARAM @@ -838,7 +838,7 @@ end subroutine ice_TG_rhs_div !_______________________________________________________________________________ subroutine ice_update_for_div(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use i_Arrays use i_PARAM diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index a6b0856e7..ec1fe0f41 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -2,35 +2,35 @@ module ice_maEVP_interfaces interface subroutine ssh2rhs(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine stress_tensor_a(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine stress2rhs_m(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine find_alpha_field_a(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine find_beta_field_a(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -49,7 +49,7 @@ subroutine stress_tensor_m(partit, mesh) use o_param use i_param use mod_mesh - use mod_partit + USE MOD_PARTIT use g_config use i_arrays @@ -147,7 +147,7 @@ subroutine ssh2rhs(partit, mesh) use o_param use i_param use mod_mesh - use mod_partit + USE MOD_PARTIT use g_config use i_arrays use i_therm_param @@ -230,7 +230,7 @@ subroutine stress2rhs_m(partit, mesh) use i_param use i_therm_param use mod_mesh - use mod_partit + USE MOD_PARTIT use g_config use i_arrays implicit none @@ -303,7 +303,7 @@ subroutine EVPdynamics_m(partit, mesh) use i_param use i_therm_param use mod_mesh - use mod_partit + USE MOD_PARTIT use g_config use i_arrays use o_arrays @@ -644,7 +644,7 @@ subroutine find_alpha_field_a(partit, mesh) use i_param use i_therm_param use mod_mesh - use mod_partit + USE MOD_PARTIT use g_config use i_arrays @@ -722,7 +722,7 @@ subroutine stress_tensor_a(partit, mesh) use o_param use i_param use mod_mesh - use mod_partit + USE MOD_PARTIT use g_config use i_arrays @@ -823,7 +823,7 @@ subroutine EVPdynamics_a(partit, mesh) use o_param use mod_mesh -use mod_partit +USE MOD_PARTIT use i_arrays USE o_arrays use i_param @@ -931,7 +931,7 @@ subroutine find_beta_field_a(partit, mesh) ! alpha \ne beta, but not aEVP). use mod_mesh -use mod_partit +USE MOD_PARTIT use o_param USE i_param use i_arrays diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index b0c629737..7fa784e9b 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -2,7 +2,7 @@ module ocean2ice_interface interface subroutine ocean2ice(tracers, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -15,7 +15,7 @@ module oce_fluxes_interface interface subroutine oce_fluxes(tracers, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -33,7 +33,7 @@ subroutine oce_fluxes_mom(partit, mesh) use o_PARAM use o_ARRAYS use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use i_ARRAYS use i_PARAM USE g_CONFIG @@ -117,7 +117,7 @@ subroutine ocean2ice(tracers, partit, mesh) use i_ARRAYS use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT USE g_CONFIG use g_comm_auto implicit none @@ -194,7 +194,7 @@ subroutine oce_fluxes(tracers, partit, mesh) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT USE g_CONFIG use o_ARRAYS use i_ARRAYS diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index f1304b814..d57810586 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -2,7 +2,7 @@ module ice_array_setup_interface interface subroutine ice_array_setup(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -14,7 +14,7 @@ module ice_initial_state_interface interface subroutine ice_initial_state(tracers, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -26,7 +26,7 @@ module ice_setup_interface interface subroutine ice_setup(tracers, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -43,7 +43,7 @@ subroutine ice_setup(tracers, partit, mesh) use i_arrays use g_CONFIG use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer use ice_array_setup_interface use ice_initial_state_interface @@ -81,7 +81,7 @@ subroutine ice_array_setup(partit, mesh) use o_param use i_param use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT use i_arrays USE g_CONFIG @@ -192,7 +192,7 @@ end subroutine ice_array_setup ! Sea ice model step subroutine ice_timestep(step, partit, mesh) use mod_mesh -use mod_partit +USE MOD_PARTIT use i_arrays use o_param use g_CONFIG @@ -315,7 +315,7 @@ end subroutine ice_timestep subroutine ice_initial_state(tracers, partit, mesh) use i_ARRAYs use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_PARAM use o_arrays diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 9b6f17981..f15b8d1de 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -3,7 +3,7 @@ subroutine cut_off(partit, mesh) use o_param use i_arrays use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use g_config, only: use_cavity implicit none type(t_mesh), intent(in), target :: mesh @@ -87,7 +87,7 @@ subroutine thermodynamics(partit, mesh) use o_param use mod_mesh - use mod_partit + USE MOD_PARTIT use i_therm_param use i_param use i_arrays diff --git a/src/io_gather.F90 b/src/io_gather.F90 index e4a95454a..114a4f840 100644 --- a/src/io_gather.F90 +++ b/src/io_gather.F90 @@ -1,5 +1,5 @@ module io_gather_module - use MOD_PARTIT + USE MOD_PARTIT implicit none public init_io_gather, gather_nod2D, gather_real4_nod2D, gather_elem2D, gather_real4_elem2D private @@ -16,7 +16,7 @@ module io_gather_module subroutine init_io_gather(partit) - use MOD_PARTIT + USE MOD_PARTIT implicit none type(t_partit), intent(inout), target :: partit integer err @@ -60,7 +60,7 @@ subroutine init_nod2D_lists(partit) subroutine init_elem2D_lists(partit) - use MOD_PARTIT + USE MOD_PARTIT implicit none type(t_partit), intent(inout), target :: partit #include "associate_part_def.h" @@ -96,7 +96,7 @@ subroutine init_elem2D_lists(partit) ! thread-safe procedure subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) - use MOD_PARTIT + USE MOD_PARTIT use, intrinsic :: iso_fortran_env, only: real64 implicit none type(t_partit), intent(inout), target :: partit @@ -123,7 +123,7 @@ subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) ! thread-safe procedure subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) - use MOD_PARTIT + USE MOD_PARTIT use, intrinsic :: iso_fortran_env, only: real32 implicit none type(t_partit), intent(inout), target :: partit @@ -150,7 +150,7 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, part ! thread-safe procedure subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) - use MOD_PARTIT + USE MOD_PARTIT use, intrinsic :: iso_fortran_env, only: real64 implicit none type(t_partit), intent(inout), target :: partit @@ -177,7 +177,7 @@ subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) ! thread-safe procedure subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) - use MOD_PARTIT + USE MOD_PARTIT use, intrinsic :: iso_fortran_env, only: real32 implicit none type(t_partit), intent(inout), target :: partit diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index e13de9b38..d0c237167 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -1,5 +1,5 @@ module io_MEANDATA - use MOD_PARTIT + USE MOD_PARTIT use o_PARAM, only : WP use, intrinsic :: iso_fortran_env, only: real64, real32 use io_data_strategy_module @@ -95,7 +95,7 @@ subroutine destructor(this) subroutine ini_mean_io(tracers, partit, mesh) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use g_cvmix_tke use g_cvmix_idemix use g_cvmix_kpp @@ -544,7 +544,7 @@ subroutine ini_mean_io(tracers, partit, mesh) ! function mesh_dimname_from_dimsize(size, partit, mesh) result(name) use mod_mesh - use mod_partit + USE MOD_PARTIT use diagnostics #if defined (__icepack) use icedrv_main, only: ncat ! number of ice thickness cathegories @@ -580,7 +580,7 @@ function mesh_dimname_from_dimsize(size, partit, mesh) result(name) subroutine create_new_file(entry, partit, mesh) use g_clock use mod_mesh - use mod_partit + USE MOD_PARTIT use fesom_version_info_module use g_config use i_PARAM @@ -804,7 +804,7 @@ subroutine update_means subroutine output(istep, tracers, partit, mesh) use g_clock use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer use io_gather_module #if defined (__icepack) @@ -915,7 +915,7 @@ subroutine output(istep, tracers, partit, mesh) subroutine do_output_callback(entry_index) use mod_mesh -use mod_partit +USE MOD_PARTIT integer, intent(in) :: entry_index ! EO args type(Meandata), pointer :: entry @@ -946,7 +946,7 @@ subroutine finalize_output() ! subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, partit, mesh, flip_array) use mod_mesh - use mod_partit + USE MOD_PARTIT implicit none type(t_partit), intent(inout), target :: partit integer, intent(in) :: glsize(2), lcsize(2) @@ -1011,7 +1011,7 @@ subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, fr ! subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT implicit none integer, intent(in) :: glsize, lcsize character(len=*), intent(in) :: name, description, units @@ -1088,7 +1088,7 @@ subroutine associate_new_stream(name, entry) subroutine def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use io_netcdf_workaround_module type(Meandata), intent(inout) :: entry character(len=*), intent(in) :: name, description, units diff --git a/src/io_netcdf_workaround_module.F90 b/src/io_netcdf_workaround_module.F90 index d572c97aa..562db612e 100644 --- a/src/io_netcdf_workaround_module.F90 +++ b/src/io_netcdf_workaround_module.F90 @@ -7,7 +7,7 @@ module io_netcdf_workaround_module integer function next_io_rank(communicator, async_netcdf_allowed, partit) result(result) - use MOD_PARTIT + USE MOD_PARTIT use mpi_topology_module integer, intent(in) :: communicator logical, intent(out) :: async_netcdf_allowed diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 2bd24a00a..48bbc50e6 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -3,7 +3,7 @@ MODULE io_RESTART use g_clock use g_comm_auto use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer use o_arrays use i_arrays diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 305f128f4..f996280fe 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -3,7 +3,7 @@ module oce_adv_tra_driver_interfaces subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num type(t_partit), intent(inout), target :: partit @@ -22,7 +22,7 @@ module oce_tra_adv_flux2dtracer_interface subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, mesh, use_lo, ttf, lo) !update the solution for vertical and horizontal flux contributions use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT real(kind=WP), intent(in), target :: dt type(t_partit),intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -42,7 +42,7 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto use oce_adv_tra_hor_interfaces use oce_adv_tra_ver_interfaces @@ -208,7 +208,7 @@ end subroutine do_oce_adv_tra subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, mesh, use_lo, ttf, lo) use MOD_MESH use o_ARRAYS - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none real(kind=WP), intent(in), target :: dt diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 5eb7993a9..0ff845918 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -3,7 +3,7 @@ module oce_adv_tra_fct_interfaces subroutine oce_adv_tra_fct_init(twork, partit, mesh) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit),intent(inout), target :: partit type(t_tracer_work), intent(inout), target :: twork @@ -11,7 +11,7 @@ subroutine oce_adv_tra_fct_init(twork, partit, mesh) subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT real(kind=WP), intent(in), target :: dt type(t_partit),intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -33,7 +33,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, subroutine oce_adv_tra_fct_init(twork, partit, mesh) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT implicit none integer :: my_size type(t_mesh), intent(in) , target :: mesh @@ -75,7 +75,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, ! Adds limited fluxes to the LO solution use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none real(kind=WP), intent(in), target :: dt diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 714eccf68..6d38992b4 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -11,7 +11,7 @@ module oce_adv_tra_hor_interfaces subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) @@ -28,7 +28,7 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) ! flux is not multiplied with dt subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution @@ -43,7 +43,7 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g ! it runs with FCT option only subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution @@ -60,7 +60,7 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr !=============================================================================== subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none type(t_partit),intent(in), target :: partit @@ -217,7 +217,7 @@ end subroutine adv_tra_hor_upw1 subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none type(t_partit),intent(in), target :: partit @@ -489,7 +489,7 @@ end subroutine adv_tra_hor_muscl subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none type(t_partit),intent(in), target :: partit diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index eab9847a8..b4985e9c6 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -4,7 +4,7 @@ module oce_adv_tra_ver_interfaces ! updates the input tracer ttf subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) use mod_mesh - use MOD_PARTIT + USE MOD_PARTIT real(kind=WP), intent(in), target :: dt type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh @@ -19,7 +19,7 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) ! flux is not multiplied with dt subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) @@ -35,7 +35,7 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) ! flux is not multiplied with dt subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution @@ -52,7 +52,7 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) ! flux is not multiplied with dt subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT real(kind=WP), intent(in), target :: dt type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh @@ -70,7 +70,7 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) ! flux is not multiplied with dt subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh integer :: n, nz, nl1 @@ -86,7 +86,7 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none @@ -234,7 +234,7 @@ end subroutine adv_tra_vert_impl subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none @@ -292,7 +292,7 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) use MOD_MESH use o_ARRAYS use o_PARAM - use MOD_PARTIT + USE MOD_PARTIT implicit none type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh @@ -367,7 +367,7 @@ end subroutine adv_tra_ver_qr4c subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none real(kind=WP), intent(in), target :: dt @@ -550,7 +550,7 @@ end subroutine adv_tra_vert_ppm subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none type(t_partit),intent(in), target :: partit diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index d387993cd..e8d306042 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2,77 +2,77 @@ module oce_ale_interfaces interface subroutine init_bottom_elem_thickness(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine init_bottom_node_thickness(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine init_surface_elem_depth(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine init_surface_node_depth(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine impl_vert_visc_ale(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine update_stiff_mat_ale(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine compute_ssh_rhs_ale(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine solve_ssh_ale(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine compute_hbar_ale(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine vert_vel_ale(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine subroutine update_thickness_ale(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -83,7 +83,7 @@ module oce_timestep_ale_interface interface subroutine oce_timestep_ale(n, tracers, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer integer, intent(in) :: n type(t_mesh), intent(in), target :: mesh @@ -255,7 +255,7 @@ end subroutine init_ale subroutine init_bottom_elem_thickness(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config,only: use_partial_cell, partial_cell_thresh use g_comm_auto @@ -377,7 +377,7 @@ end subroutine init_bottom_elem_thickness subroutine init_bottom_node_thickness(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config,only: use_partial_cell use g_comm_auto @@ -487,7 +487,7 @@ end subroutine init_bottom_node_thickness subroutine init_surface_elem_depth(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config,only: use_cavity, use_cavity_partial_cell, cavity_partial_cell_thresh use g_comm_auto @@ -564,7 +564,7 @@ end subroutine init_surface_elem_depth subroutine init_surface_node_depth(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config,only: use_cavity, use_cavity_partial_cell use g_comm_auto @@ -629,7 +629,7 @@ subroutine init_thickness_ale(partit, mesh) use g_config,only: dt, which_ale use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS implicit none integer :: n, nz, elem, elnodes(3), nzmin, nzmax @@ -877,7 +877,7 @@ end subroutine init_thickness_ale subroutine update_thickness_ale(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode implicit none @@ -1078,7 +1078,7 @@ end subroutine update_thickness_ale subroutine restart_thickness_ale(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode implicit none @@ -1183,7 +1183,7 @@ end subroutine restart_thickness_ale subroutine init_stiff_mat_ale(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use g_CONFIG implicit none @@ -1471,7 +1471,7 @@ subroutine update_stiff_mat_ale(partit, mesh) use o_PARAM use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS ! implicit none @@ -1582,7 +1582,7 @@ subroutine compute_ssh_rhs_ale(partit, mesh) use MOD_MESH use o_ARRAYS use o_PARAM - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none @@ -1694,7 +1694,7 @@ subroutine compute_hbar_ale(partit, mesh) use MOD_MESH use o_ARRAYS use o_PARAM - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto implicit none @@ -1805,7 +1805,7 @@ subroutine vert_vel_ale(partit, mesh) use MOD_MESH use o_ARRAYS use o_PARAM - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto use io_RESTART !!PS use i_arrays !!PS @@ -2325,7 +2325,7 @@ subroutine solve_ssh_ale(partit, mesh) use o_PARAM use MOD_MESH use o_ARRAYS -use MOD_PARTIT +USE MOD_PARTIT use g_comm_auto use g_config, only: which_ale ! @@ -2653,7 +2653,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) use MOD_TRACER use o_ARRAYS use o_PARAM - use MOD_PARTIT + USE MOD_PARTIT use g_comm_auto use io_RESTART !PS use i_ARRAYS !PS diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index f74ca11a9..9b5671a07 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -458,7 +458,7 @@ end subroutine pressure_bv subroutine pressure_force_4_linfs(tracers, partit, mesh) use g_config use mod_mesh - use MOD_PARTIT + USE MOD_PARTIT use mod_tracer use pressure_force_4_linfs_fullcell_interface use pressure_force_4_linfs_nemo_interface @@ -525,7 +525,7 @@ end subroutine pressure_force_4_linfs subroutine pressure_force_4_linfs_fullcell(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config implicit none @@ -574,7 +574,7 @@ end subroutine pressure_force_4_linfs_fullcell subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_ARRAYS use g_config @@ -750,7 +750,7 @@ end subroutine pressure_force_4_linfs_nemo subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config implicit none @@ -1004,7 +1004,7 @@ end subroutine pressure_force_4_linfs_shchepetkin subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_ARRAYS use g_config @@ -1366,7 +1366,7 @@ end subroutine pressure_force_4_linfs_easypgf subroutine pressure_force_4_linfs_cubicspline(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config implicit none @@ -1568,7 +1568,7 @@ end subroutine pressure_force_4_linfs_cubicspline subroutine pressure_force_4_linfs_cavity(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config implicit none @@ -1780,7 +1780,7 @@ end subroutine pressure_force_4_linfs_cavity ! Calculate pressure gradient force (PGF) for full free surface case zlevel and zstar subroutine pressure_force_4_zxxxx(tracers, partit, mesh) use mod_mesh - use MOD_PARTIT + USE MOD_PARTIT use mod_tracer use g_config use pressure_force_4_zxxxx_shchepetkin_interface @@ -1820,7 +1820,7 @@ end subroutine pressure_force_4_zxxxx subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config implicit none @@ -2004,7 +2004,7 @@ end subroutine pressure_force_4_zxxxx_cubicspline subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use g_config use densityJM_components_interface @@ -2245,7 +2245,7 @@ end subroutine pressure_force_4_zxxxx_shchepetkin subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) use o_PARAM use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_ARRAYS use g_config @@ -2687,7 +2687,7 @@ end subroutine pressure_force_4_zxxxx_easypgf !=============================================================================== SUBROUTINE densityJM_local(t, s, pz, rho_out, partit, mesh) USE MOD_MESH -use MOD_PARTIT !, only: par_ex,pe_status +USE MOD_PARTIT !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM use densityJM_components_interface @@ -2725,7 +2725,7 @@ end subroutine densityJM_local !=============================================================================== SUBROUTINE densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) USE MOD_MESH -use MOD_PARTIT !, only: par_ex,pe_status +USE MOD_PARTIT !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM IMPLICIT NONE @@ -2902,7 +2902,7 @@ subroutine sw_alpha_beta(TF1,SF1, partit, mesh) ! a_over_b=0.34765 psu*C^-1 @ S=40.0psu, ptmp=10.0C, p=4000db !----------------------------------------------------------------- use mod_mesh - use MOD_PARTIT + USE MOD_PARTIT use o_arrays use o_param use g_comm_auto @@ -2985,7 +2985,7 @@ subroutine compute_sigma_xy(TF1,SF1, partit, mesh) ! computes density gradient sigma_xy !------------------------------------------------------------------- use mod_mesh - use MOD_PARTIT + USE MOD_PARTIT use o_param use o_arrays use g_comm_auto @@ -3056,7 +3056,7 @@ end subroutine compute_sigma_xy !=============================================================================== subroutine compute_neutral_slope(partit, mesh) use o_ARRAYS - use MOD_PARTIT + USE MOD_PARTIT use MOD_MESH use o_param use g_config @@ -3108,7 +3108,7 @@ end subroutine compute_neutral_slope ! tracers%data(1)%values will be modified! subroutine insitu2pot(tracers, partit, mesh) use mod_mesh - use MOD_PARTIT + USE MOD_PARTIT use mod_tracer use o_param use o_arrays @@ -3155,7 +3155,7 @@ end subroutine insitu2pot SUBROUTINE density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, partit, mesh) !coded by Margarita Smolentseva, 21.05.2020 USE MOD_MESH -use MOD_PARTIT !, only: par_ex,pe_status +USE MOD_PARTIT !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM use g_config !, only: which_toy, toy_ocean @@ -3193,7 +3193,7 @@ subroutine init_ref_density(partit, mesh) ! Reviewed by ?? !___________________________________________________________________________ USE MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_PARAM use o_ARRAYS use densityJM_components_interface diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index ab9350f3b..48ab23279 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -2,7 +2,7 @@ module diff_part_hor_redi_interface interface subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -15,7 +15,7 @@ module adv_tracers_ale_interface interface subroutine adv_tracers_ale(dt, tr_num, tracer, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer real(kind=WP), intent(in), target :: dt integer, intent(in), target :: tr_num @@ -29,7 +29,7 @@ module diff_ver_part_expl_ale_interface interface subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -42,7 +42,7 @@ module diff_ver_part_redi_expl_interface interface subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -55,7 +55,7 @@ module diff_ver_part_impl_ale_interface interface subroutine diff_ver_part_impl_ale(tr_num, tracer, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -68,7 +68,7 @@ module diff_tracers_ale_interface interface subroutine diff_tracers_ale(tr_num, tracer, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -81,7 +81,7 @@ module bc_surface_interface interface function bc_surface(n, id, sval, partit) use mod_mesh - use mod_partit + USE MOD_PARTIT integer , intent(in) :: n, id type(t_partit), intent(inout), target :: partit real(kind=WP) :: bc_surface @@ -93,7 +93,7 @@ module diff_part_bh_interface interface subroutine diff_part_bh(tr_num, tracer, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -106,7 +106,7 @@ module solve_tracers_ale_interface interface subroutine solve_tracers_ale(tracers, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh @@ -123,7 +123,7 @@ subroutine solve_tracers_ale(tracers, partit, mesh) use o_PARAM, only: SPP, Fer_GM use o_arrays use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer use g_comm_auto use o_tracers @@ -211,7 +211,7 @@ end subroutine solve_tracers_ale subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) use g_config, only: flag_debug use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer use o_arrays use diagnostics, only: ldiag_DVD, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & @@ -264,7 +264,7 @@ end subroutine adv_tracers_ale !=============================================================================== subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT use mod_tracer use o_arrays use o_tracers @@ -349,7 +349,7 @@ subroutine diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) use o_ARRAYS use g_forcing_arrays use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use g_config,only: dt @@ -426,12 +426,12 @@ end subroutine diff_ver_part_expl_ale ! vertical diffusivity augmented with Redi contribution [vertical flux of K(3,3)*d_zT] subroutine diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_PARAM use o_ARRAYS use i_ARRAYS - use MOD_PARTIT + USE MOD_PARTIT use g_CONFIG use g_forcing_arrays use o_mixing_KPP_mod !for ghats _GO_ @@ -891,7 +891,7 @@ end subroutine diff_ver_part_impl_ale subroutine diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) use o_ARRAYS use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER USE o_param use g_config @@ -975,7 +975,7 @@ end subroutine diff_ver_part_redi_expl subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) use o_ARRAYS use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_param use g_config @@ -1132,7 +1132,7 @@ end subroutine diff_part_hor_redi SUBROUTINE diff_part_bh(tr_num, tracers, partit, mesh) use o_ARRAYS use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_param use g_config @@ -1210,7 +1210,7 @@ end subroutine diff_part_bh ! ID = 0 and 1 are reserved for temperature and salinity FUNCTION bc_surface(n, id, sval, partit) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT USE o_ARRAYS USE g_forcing_arrays USE g_config diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index ce2b8b0e6..d03a7941e 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -2,7 +2,7 @@ module momentum_adv_scalar_interface interface subroutine momentum_adv_scalar(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -14,7 +14,7 @@ subroutine momentum_adv_scalar(partit, mesh) !_______________________________________________________________________________ subroutine compute_vel_rhs(partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use o_ARRAYS use i_ARRAYS use i_therm_param diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 6943fb344..20fd081b4 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -15,7 +15,7 @@ module h_viscosity_leith_interface interface subroutine h_viscosity_leith(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -25,7 +25,7 @@ module visc_filt_harmon_interface interface subroutine visc_filt_harmon(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -35,7 +35,7 @@ module visc_filt_hbhmix_interface interface subroutine visc_filt_hbhmix(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -45,7 +45,7 @@ module visc_filt_biharm_interface interface subroutine visc_filt_biharm(option, partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT integer :: option type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -56,7 +56,7 @@ module visc_filt_bcksct_interface interface subroutine visc_filt_bcksct(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -66,7 +66,7 @@ module visc_filt_bilapl_interface interface subroutine visc_filt_bilapl(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -76,7 +76,7 @@ module visc_filt_bidiff_interface interface subroutine visc_filt_bidiff(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -86,7 +86,7 @@ module visc_filt_dbcksc_interface interface subroutine visc_filt_dbcksc(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -96,7 +96,7 @@ module backscatter_coef_interface interface subroutine backscatter_coef(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -106,7 +106,7 @@ module uke_update_interface interface subroutine uke_update(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine diff --git a/src/oce_local.F90 b/src/oce_local.F90 index 76fed1b6e..b20b95598 100755 --- a/src/oce_local.F90 +++ b/src/oce_local.F90 @@ -2,7 +2,7 @@ module com_global2local_interface interface subroutine com_global2local(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -12,7 +12,7 @@ subroutine com_global2local(partit, mesh) !============================================================================= SUBROUTINE com_global2local(partit, mesh) use MOD_MESH -use MOD_PARTIT +USE MOD_PARTIT IMPLICIT NONE type(t_mesh), intent(in), target :: mesh diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index b4146b117..262985e44 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -2,7 +2,7 @@ module read_mesh_interface interface subroutine read_mesh(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -12,7 +12,7 @@ module find_levels_interface interface subroutine find_levels(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -22,7 +22,7 @@ module find_levels_cavity_interface interface subroutine find_levels_cavity(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -32,7 +32,7 @@ module test_tri_interface interface subroutine test_tri(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -42,7 +42,7 @@ module load_edges_interface interface subroutine load_edges(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -52,7 +52,7 @@ module find_neighbors_interface interface subroutine find_neighbors(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -62,7 +62,7 @@ module mesh_areas_interface interface subroutine mesh_areas(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -72,7 +72,7 @@ module elem_center_interface interface subroutine elem_center(elem, x, y, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT integer :: elem real(kind=WP) :: x, y type(t_mesh), intent(inout), target :: mesh @@ -83,7 +83,7 @@ module edge_center_interface interface subroutine edge_center(n1, n2, x, y, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT integer :: n1, n2 real(kind=WP) :: x, y type(t_mesh), intent(inout), target :: mesh @@ -94,7 +94,7 @@ module mesh_auxiliary_arrays_interface interface subroutine mesh_auxiliary_arrays(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -104,7 +104,7 @@ module find_levels_min_e2n_interface interface subroutine find_levels_min_e2n(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -114,7 +114,7 @@ module check_total_volume_interface interface subroutine check_total_volume(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index 8b73c738f..c85bc657d 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -2,7 +2,7 @@ module find_up_downwind_triangles_interface interface subroutine find_up_downwind_triangles(twork, partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit @@ -30,7 +30,7 @@ subroutine find_up_downwind_triangles(twork, partit, mesh) ! adv_tracer_muscl subroutine muscl_adv_init(twork, partit, mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_ARRAYS use o_PARAM diff --git a/src/oce_spp.F90 b/src/oce_spp.F90 index c4b181b80..efb0e0b4d 100644 --- a/src/oce_spp.F90 +++ b/src/oce_spp.F90 @@ -11,7 +11,7 @@ subroutine cal_rejected_salt(partit, mesh) use o_arrays use mod_mesh -use mod_partit +USE MOD_PARTIT use g_comm_auto use o_tracers use g_forcing_arrays, only: thdgr @@ -48,7 +48,7 @@ end subroutine cal_rejected_salt subroutine app_rejected_salt(ttf, partit, mesh) use o_arrays use mod_mesh - use mod_partit + USE MOD_PARTIT use o_tracers use g_comm_auto implicit none diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index 1b5a1694b..08c39289a 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -2,7 +2,7 @@ module relative_vorticity_interface interface subroutine relative_vorticity(partit, mesh) use mod_mesh - use mod_partit + USE MOD_PARTIT type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index ffd6e86dc..3d248f3cf 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -2,7 +2,7 @@ module write_step_info_interface interface subroutine write_step_info(istep,outfreq,tracers,partit,mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER integer :: istep,outfreq type(t_mesh), intent(in), target :: mesh @@ -15,7 +15,7 @@ module check_blowup_interface interface subroutine check_blowup(istep, tracers,partit,mesh) use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER integer :: istep type(t_mesh), intent(in), target :: mesh @@ -30,7 +30,7 @@ subroutine check_blowup(istep, tracers,partit,mesh) subroutine write_step_info(istep, outfreq, tracers, partit, mesh) use g_config, only: dt, use_ice use MOD_MESH - use MOD_PARTIT + USE MOD_PARTIT use MOD_TRACER use o_PARAM use o_ARRAYS @@ -243,7 +243,7 @@ subroutine check_blowup(istep, tracers, partit, mesh) use g_config, only: logfile_outfreq, which_ALE use MOD_MESH use MOD_TRACER - use MOD_PARTIT + USE MOD_PARTIT use o_PARAM use o_ARRAYS use i_ARRAYS From 8e93e8cda898ac24d2de88bde269e7cc240ae0c2 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 8 Oct 2021 10:43:59 +0200 Subject: [PATCH 391/909] minor fixes before making it to work with gfortran --- src/gen_modules_partitioning.F90 | 1 + src/oce_mesh.F90 | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index f7f5e3bd8..a5c9e258e 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -38,6 +38,7 @@ subroutine par_ex(partit, abort) type(t_partit), intent(inout), target :: partit integer,optional :: abort end subroutine + end interface end module mod_parsup subroutine par_init(partit) ! initializes MPI diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 262985e44..6977ac2f2 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -149,7 +149,7 @@ SUBROUTINE mesh_setup(partit, mesh) call read_mesh(partit, mesh) call init_mpi_types(partit, mesh) call init_gatherLists(partit) - if(mype==0) write(*,*) 'Communication arrays are set' + if(partit%mype==0) write(*,*) 'Communication arrays are set' call test_tri(partit, mesh) call load_edges(partit, mesh) call find_neighbors(partit, mesh) From 5f8f18b253557d9949f1ac3915cb18888ad2b912 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 8 Oct 2021 11:26:10 +0200 Subject: [PATCH 392/909] adding module mod_parsup which shall contains the interfaces to the subroutins which use partit --- src/cavity_param.F90 | 7 + src/cpl_driver.F90 | 3 + src/fvom_main.F90 | 1 + src/gen_bulk_formulae.F90 | 3 +- src/gen_comm.F90 | 3 + src/gen_events.F90 | 1 + src/gen_forcing_couple.F90 | 10 + src/gen_forcing_init.F90 | 3 + src/gen_halo_exchange.F90 | 49 + src/gen_ic3d.F90 | 1 + src/gen_interpolation.F90 | 13 +- src/gen_model_setup.F90 | 2 + src/gen_modules_clock.F90 | 1 + src/gen_modules_cvmix_idemix.F90 | 1 + src/gen_modules_cvmix_kpp.F90 | 1 + src/gen_modules_cvmix_pp.F90 | 1 + src/gen_modules_cvmix_tidal.F90 | 1 + src/gen_modules_cvmix_tke.F90 | 1 + src/gen_modules_diag.F90 | 1 + src/gen_modules_gpot.F90 | 1 + src/gen_modules_partitioning.F90 | 37 +- src/gen_modules_read_NetCDF.F90 | 9 +- src/gen_support.F90 | 3 + src/gen_surface_forcing.F90 | 1 + src/ice_EVP.F90 | 7 + src/ice_fct.F90 | 13 + src/ice_maEVP.F90 | 13 + src/ice_oce_coupling.F90 | 5 + src/ice_setup_step.F90 | 7 + src/ice_thermo_oce.F90 | 2 + src/io_blowup.F90 | 1 + src/io_gather.F90 | 7 + src/io_meandata.F90 | 9 + src/io_mesh_info.F90 | 1 + src/io_netcdf_workaround_module.F90 | 1 + src/io_restart.F90 | 1 + src/oce_adv_tra_driver.F90 | 4 + src/oce_adv_tra_fct.F90 | 4 + src/oce_adv_tra_hor.F90 | 6 + src/oce_adv_tra_ver.F90 | 10 + src/oce_ale.F90 | 28 + src/oce_ale_mixing_kpp.F90 | 1 + src/oce_ale_mixing_pp.F90 | 1 + src/oce_ale_pressure_bv.F90 | 42 +- src/oce_ale_tracer.F90 | 19 + src/oce_ale_vel_rhs.F90 | 3 + src/oce_dyn.F90 | 23 + src/oce_fer_gm.F90 | 3 + src/oce_local.F90 | 3 + src/oce_mesh.F90 | 24 + src/oce_mo_conv.F90 | 1 + src/oce_muscl_adv.F90 | 4 + src/oce_setup_step.F90 | 9 + src/oce_shortwave_pene.F90 | 1 + src/oce_spp.F90 | 2 + src/oce_tracer_mod.F90 | 6 + src/oce_vel_rhs_vinv.F90 | 3 + src/temp/MOD_MESH.F90 | 329 ++++ src/temp/MOD_PARTIT.F90 | 189 ++ src/temp/MOD_READ_BINARY_ARRAYS.F90 | 118 ++ src/temp/MOD_TRACER.F90 | 228 +++ src/temp/MOD_WRITE_BINARY_ARRAYS.F90 | 160 ++ src/temp/gen_halo_exchange.F90 | 2381 +++++++++++++++++++++++++ src/temp/gen_modules_partitioning.F90 | 508 ++++++ src/temp/oce_adv_tra_driver.F90 | 278 +++ src/temp/oce_adv_tra_fct.F90 | 365 ++++ src/temp/oce_adv_tra_hor.F90 | 739 ++++++++ src/temp/oce_adv_tra_ver.F90 | 598 +++++++ src/temp/oce_modules.F90 | 267 +++ src/toy_channel_soufflet.F90 | 1 + src/write_step_info.F90 | 4 + 71 files changed, 6553 insertions(+), 30 deletions(-) create mode 100644 src/temp/MOD_MESH.F90 create mode 100644 src/temp/MOD_PARTIT.F90 create mode 100644 src/temp/MOD_READ_BINARY_ARRAYS.F90 create mode 100644 src/temp/MOD_TRACER.F90 create mode 100644 src/temp/MOD_WRITE_BINARY_ARRAYS.F90 create mode 100755 src/temp/gen_halo_exchange.F90 create mode 100644 src/temp/gen_modules_partitioning.F90 create mode 100644 src/temp/oce_adv_tra_driver.F90 create mode 100644 src/temp/oce_adv_tra_fct.F90 create mode 100644 src/temp/oce_adv_tra_hor.F90 create mode 100644 src/temp/oce_adv_tra_ver.F90 create mode 100755 src/temp/oce_modules.F90 diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index c0334fe2c..eb8591754 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -3,6 +3,7 @@ module cavity_heat_water_fluxes_3eq_interface subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -20,6 +21,7 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) subroutine compute_nrst_pnt2cavline(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM , only: WP implicit none @@ -138,6 +140,7 @@ end subroutine compute_nrst_pnt2cavline subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_PARAM , only: density_0, WP use o_ARRAYS, only: heat_flux, water_flux, Unode, density_m_rho0,density_ref @@ -327,6 +330,7 @@ end subroutine cavity_heat_water_fluxes_3eq subroutine cavity_heat_water_fluxes_2eq(tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_PARAM , only: WP use o_ARRAYS, only: heat_flux, water_flux @@ -381,6 +385,7 @@ end subroutine cavity_heat_water_fluxes_2eq subroutine cavity_momentum_fluxes(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM , only: density_0, C_d, WP use o_ARRAYS, only: UV, Unode, stress_surf, stress_node_surf use i_ARRAYS, only: u_w, v_w @@ -430,6 +435,7 @@ end subroutine cavity_momentum_fluxes subroutine cavity_ice_clean_vel(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use i_ARRAYS, only: U_ice, V_ice implicit none type(t_partit), intent(inout), target :: partit @@ -454,6 +460,7 @@ end subroutine cavity_ice_clean_vel subroutine cavity_ice_clean_ma(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use i_ARRAYS, only: m_ice, m_snow, a_ice implicit none type(t_partit), intent(inout), target :: partit diff --git a/src/cpl_driver.F90 b/src/cpl_driver.F90 index 3eb6e6ec4..d52faf1f5 100755 --- a/src/cpl_driver.F90 +++ b/src/cpl_driver.F90 @@ -166,6 +166,7 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh) #endif use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use g_rotate_grid use mod_oasis, only: oasis_write_area, oasis_write_mask implicit none @@ -510,6 +511,7 @@ end subroutine cpl_oasis3mct_define_unstr subroutine cpl_oasis3mct_send(ind, data_array, action, partit) use o_param USE MOD_PARTIT + USE MOD_PARSUP implicit none save !--------------------------------------------------------------------- @@ -580,6 +582,7 @@ subroutine cpl_oasis3mct_recv(ind, data_array, action, partit) use o_param use g_comm_auto USE MOD_PARTIT + USE MOD_PARSUP implicit none save !--------------------------------------------------------------------- diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index f0b99e732..8ead2c9bc 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -10,6 +10,7 @@ program main USE MOD_MESH USE MOD_TRACER USE MOD_PARTIT +USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE i_PARAM diff --git a/src/gen_bulk_formulae.F90 b/src/gen_bulk_formulae.F90 index 2472ac322..12075a59e 100755 --- a/src/gen_bulk_formulae.F90 +++ b/src/gen_bulk_formulae.F90 @@ -1,7 +1,8 @@ MODULE gen_bulk ! Compute heat and momentum exchange coefficients use mod_mesh - USE MOD_PARTIT + USE MOD_PARTIT + USE MOD_PARSUP use i_therm_param use i_arrays use g_forcing_arrays diff --git a/src/gen_comm.F90 b/src/gen_comm.F90 index 0447d5ac6..82d9f0afa 100755 --- a/src/gen_comm.F90 +++ b/src/gen_comm.F90 @@ -8,6 +8,7 @@ subroutine communication_nodn(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -219,6 +220,7 @@ end subroutine communication_nodn subroutine communication_elemn(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_mesh), intent(in), target :: mesh @@ -527,6 +529,7 @@ end subroutine communication_elemn subroutine mymesh(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_mesh), intent(in), target :: mesh diff --git a/src/gen_events.F90 b/src/gen_events.F90 index ab037ef25..52d5decea 100644 --- a/src/gen_events.F90 +++ b/src/gen_events.F90 @@ -92,6 +92,7 @@ end subroutine step_event ! subroutine handle_err(errcode, partit) USE MOD_PARTIT + USE MOD_PARSUP implicit none #include "netcdf.inc" diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 24737a40d..af1d2469d 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -3,6 +3,7 @@ module force_flux_consv_interface subroutine force_flux_consv(field2d, mask, n, h, do_stats, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), intent (inout) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) @@ -17,6 +18,7 @@ module compute_residual_interface subroutine compute_residual(field2d, mask, n, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), intent (in) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) @@ -30,6 +32,7 @@ module integrate_2D_interface subroutine integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), intent (out) :: flux_global(2), flux_local(2) @@ -45,6 +48,7 @@ module update_atm_forcing_interface subroutine update_atm_forcing(istep, tracers, partit,mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer integer, intent(in) :: istep type(t_tracer), intent(in), target :: tracers @@ -58,6 +62,7 @@ module net_rec_from_atm_interface interface subroutine net_rec_from_atm(action, partit) USE MOD_PARTIT + USE MOD_PARSUP logical, intent(in) :: action type(t_partit), intent(inout), target :: partit end subroutine @@ -69,6 +74,7 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_arrays use i_arrays @@ -394,6 +400,7 @@ SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, partit, mesh) flux_correction_total use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use cpl_driver, only : nrecv, cpl_recv, a2o_fcorr_stat use o_PARAM, only : mstep, WP use compute_residual_interface @@ -515,6 +522,7 @@ SUBROUTINE compute_residual(field2d, mask, n, partit, mesh) use o_PARAM, only : WP use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use integrate_2D_interface IMPLICIT NONE @@ -550,6 +558,7 @@ END SUBROUTINE compute_residual SUBROUTINE integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM, only: WP IMPLICIT NONE type(t_mesh), intent(in), target :: mesh @@ -615,6 +624,7 @@ SUBROUTINE net_rec_from_atm(action, partit) use cpl_driver use o_PARAM, only: WP USE MOD_PARTIT + USE MOD_PARSUP IMPLICIT NONE LOGICAL, INTENT (IN) :: action diff --git a/src/gen_forcing_init.F90 b/src/gen_forcing_init.F90 index 227123e4a..95dd108a7 100755 --- a/src/gen_forcing_init.F90 +++ b/src/gen_forcing_init.F90 @@ -3,6 +3,7 @@ module forcing_array_setup_interfaces subroutine forcing_array_setup(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -18,6 +19,7 @@ subroutine forcing_setup(partit, mesh) use g_sbf, only: sbc_ini use mod_mesh USE MOD_PARTIT +USE MOD_PARSUP use forcing_array_setup_interfaces implicit none type(t_mesh), intent(in), target :: mesh @@ -37,6 +39,7 @@ subroutine forcing_array_setup(partit, mesh) use o_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use i_arrays use g_forcing_arrays use g_forcing_param diff --git a/src/gen_halo_exchange.F90 b/src/gen_halo_exchange.F90 index 63dbb1116..cd74a7a60 100755 --- a/src/gen_halo_exchange.F90 +++ b/src/gen_halo_exchange.F90 @@ -28,6 +28,7 @@ module g_comm subroutine check_mpi_comm(rn, sn, r_mpitype, s_mpitype, rPE, sPE, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer, intent(in) :: sn, rn, r_mpitype(:), s_mpitype(:), rPE(:), sPE(:) @@ -57,6 +58,7 @@ END SUBROUTINE check_mpi_comm subroutine exchange_nod2D_i(nod_array2D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer, intent(inout) :: nod_array2D(:) @@ -73,6 +75,7 @@ END SUBROUTINE exchange_nod2D_i subroutine exchange_nod2D_i_begin(nod_array2D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer, intent(inout) :: nod_array2D(:) @@ -113,6 +116,7 @@ END SUBROUTINE exchange_nod2D_i_begin subroutine exchange_nod2D(nod_array2D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array2D(:) @@ -131,6 +135,7 @@ END SUBROUTINE exchange_nod2D subroutine exchange_nod2D_begin(nod_array2D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array2D(:) @@ -168,6 +173,7 @@ END SUBROUTINE exchange_nod2D_begin subroutine exchange_nod2D_2fields(nod1_array2D, nod2_array2D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array2D(:) @@ -188,6 +194,7 @@ END SUBROUTINE exchange_nod2D_2fields subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array2D(:) @@ -233,6 +240,7 @@ subroutine exchange_nod2D_3fields(nod1_array2D, nod2_array2D, nod3_array2D, part ! General version of the communication routine for 2D nodal fields use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array2D(:) @@ -254,6 +262,7 @@ subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D ! General version of the communication routine for 2D nodal fields use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array2D(:) @@ -307,6 +316,7 @@ END SUBROUTINE exchange_nod2D_3fields_begin subroutine exchange_nod3D(nod_array3D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array3D(:,:) @@ -324,6 +334,7 @@ END SUBROUTINE exchange_nod3D subroutine exchange_nod3D_begin(nod_array3D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array3D(:,:) @@ -370,6 +381,7 @@ END SUBROUTINE exchange_nod3D_begin subroutine exchange_nod3D_2fields(nod1_array3D,nod2_array3D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array3D(:,:) @@ -389,6 +401,7 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) ! stored in (vertical, horizontal) format use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod1_array3D(:,:) @@ -450,6 +463,7 @@ END SUBROUTINE exchange_nod3D_2fields_begin subroutine exchange_nod3D_n(nod_array3D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array3D(:,:,:) @@ -466,6 +480,7 @@ END SUBROUTINE exchange_nod3D_n subroutine exchange_nod3D_n_begin(nod_array3D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: nod_array3D(:,:,:) @@ -524,6 +539,7 @@ END SUBROUTINE exchange_nod3D_n_begin SUBROUTINE exchange_nod_end(partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit @@ -535,6 +551,7 @@ END SUBROUTINE exchange_nod_end SUBROUTINE exchange_elem_end(partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit #include "associate_part_def.h" @@ -554,6 +571,7 @@ END SUBROUTINE exchange_elem_end subroutine exchange_elem3D(elem_array3D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array3D(:,:) @@ -570,6 +588,7 @@ END SUBROUTINE exchange_elem3D subroutine exchange_elem3D_begin(elem_array3D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array3D(:,:) @@ -698,6 +717,7 @@ END SUBROUTINE exchange_elem3D_begin subroutine exchange_elem3D_n(elem_array3D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array3D(:,:,:) @@ -715,6 +735,7 @@ subroutine exchange_elem3D_n_begin(elem_array3D, partit) ! stored in (vertical, horizontal) format use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array3D(:,:,:) @@ -801,6 +822,7 @@ END SUBROUTINE exchange_elem3D_n_begin subroutine exchange_elem2D(elem_array2D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array2D(:) @@ -819,6 +841,7 @@ END SUBROUTINE exchange_elem2D subroutine exchange_elem2D_begin(elem_array2D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit real(real64), intent(inout) :: elem_array2D(:) @@ -885,6 +908,7 @@ END SUBROUTINE exchange_elem2D_begin subroutine exchange_elem2D_i(elem_array2D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer, intent(inout) :: elem_array2D(:) @@ -903,6 +927,7 @@ END SUBROUTINE exchange_elem2D_i subroutine exchange_elem2D_i_begin(elem_array2D, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer, intent(inout) :: elem_array2D(:) @@ -947,6 +972,7 @@ subroutine broadcast_nod3D(arr3D, arr3Dglobal, partit) ! Distribute the nodal information available on 0 PE to other PEs use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nz, counter,nl1 @@ -1013,6 +1039,7 @@ subroutine broadcast_nod2D(arr2D, arr2Dglobal, partit) ! A 2D version of the previous routine use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(in), target :: partit real(real64) :: arr2D(:) @@ -1063,6 +1090,7 @@ subroutine broadcast_elem3D(arr3D, arr3Dglobal, partit) ! Distribute the elemental information available on 0 PE to other PEs use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(in), target :: partit INTEGER :: nz, counter,nl1 @@ -1131,6 +1159,7 @@ subroutine broadcast_elem2D(arr2D, arr2Dglobal, partit) ! A 2D version of the previous routine use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(in), target :: partit integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) @@ -1181,6 +1210,7 @@ end subroutine broadcast_elem2D subroutine gather_nod3D(arr3D, arr3D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1242,6 +1272,7 @@ subroutine gather_real4_nod3D(arr3D, arr3D_global, partit) ! Use only with 3D arrays stored in (vertical, horizontal) way use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1302,6 +1333,7 @@ subroutine gather_int2_nod3D(arr3D, arr3D_global, partit) ! Use only with 3D arrays stored in (vertical, horizontal) way use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1359,6 +1391,7 @@ subroutine gather_nod2D(arr2D, arr2D_global, partit) ! Make nodal information available to master PE use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1412,6 +1445,7 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global, partit) ! Make nodal information available to master PE use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1466,6 +1500,7 @@ end subroutine gather_real4_nod2D subroutine gather_int2_nod2D(arr2D, arr2D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1522,6 +1557,7 @@ subroutine gather_elem3D(arr3D, arr3D_global, partit) ! Use only with 3D arrays stored in (vertical, horizontal) way use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1585,6 +1621,7 @@ end subroutine gather_elem3D subroutine gather_real4_elem3D(arr3D, arr3D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1649,6 +1686,7 @@ end subroutine gather_real4_elem3D subroutine gather_int2_elem3D(arr3D, arr3D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1712,6 +1750,7 @@ end subroutine gather_int2_elem3D subroutine gather_elem2D(arr2D, arr2D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1769,6 +1808,7 @@ end subroutine gather_elem2D subroutine gather_real4_elem2D(arr2D, arr2D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1827,6 +1867,7 @@ end subroutine gather_real4_elem2D subroutine gather_int2_elem2D(arr2D, arr2D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -1886,6 +1927,7 @@ end subroutine gather_int2_elem2D subroutine gather_real8to4_nod3D(arr3D, arr3D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -1948,6 +1990,7 @@ end subroutine gather_real8to4_nod3D subroutine gather_real8to4_nod2D(arr2D, arr2D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -2002,6 +2045,7 @@ subroutine gather_real8to4_elem3D(arr3D, arr3D_global, partit) ! Use only with 3D arrays stored in (vertical, horizontal) way use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit INTEGER :: nl1 @@ -2061,6 +2105,7 @@ end subroutine gather_real8to4_elem3D subroutine gather_real8to4_elem2D(arr2D, arr2D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -2118,6 +2163,7 @@ subroutine gather_elem2D_i(arr2D, arr2D_global, partit) ! Make element information available to master PE use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -2157,6 +2203,7 @@ end subroutine gather_elem2D_i subroutine gather_nod2D_i(arr2D, arr2D_global, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: n @@ -2210,6 +2257,7 @@ end subroutine gather_nod2D_i subroutine gather_edg2D(arr2D, arr2Dglobal, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(in), target :: partit real(real64) :: arr2D(:) @@ -2251,6 +2299,7 @@ end subroutine gather_edg2D subroutine gather_edg2D_i(arr2D, arr2Dglobal, partit) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_partit), intent(inout), target :: partit integer :: arr2D(:) diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 3dc343132..2f283c9ed 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -14,6 +14,7 @@ MODULE g_ic3d USE o_ARRAYS USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER USE o_PARAM USE g_comm_auto diff --git a/src/gen_interpolation.F90 b/src/gen_interpolation.F90 index c43c78ab0..b4db77324 100755 --- a/src/gen_interpolation.F90 +++ b/src/gen_interpolation.F90 @@ -30,12 +30,13 @@ subroutine interp_2d_field_v2(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_r ! Reviewed by ?? !------------------------------------------------------------------------------------- USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM, only: WP implicit none integer :: n, i, ii, jj, k, nod_find integer :: ind_lat_h, ind_lat_l, ind_lon_h, ind_lon_l integer, intent(in) :: num_lon_reg, num_lat_reg, num_mod - type(t_partit), intent(in) :: partit + type(t_partit), intent(inout) :: partit real(kind=WP) :: x, y, diff, d, dmin real(kind=WP) :: rt_lat1, rt_lat2, rt_lon1, rt_lon2 real(kind=WP) :: data(2,2) @@ -164,13 +165,14 @@ subroutine interp_2d_field(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, ! Reviewed by ?? !------------------------------------------------------------------------------------- USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM, only: WP implicit none integer :: n, i integer :: ind_lat_h, ind_lat_l, ind_lon_h, ind_lon_l integer, intent(in) :: num_lon_reg, num_lat_reg, num_mod integer, intent(in) :: phase_flag - type(t_partit), intent(in) :: partit + type(t_partit), intent(inout) :: partit real(kind=WP) :: x, y, diff real(kind=WP) :: rt_lat1, rt_lat2, rt_lon1, rt_lon2 real(kind=WP) :: data_ll, data_lh, data_hl, data_hh @@ -183,7 +185,7 @@ subroutine interp_2d_field(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, if(lon_reg(1)<0.0_WP .or. lon_reg(num_lon_reg)>360._WP) then write(*,*) 'Error in 2D interpolation!' write(*,*) 'The regular grid is not in the proper longitude range.' - call par_ex + call par_ex(partit) stop end if @@ -314,6 +316,7 @@ subroutine interp_3d_field(num_lon_reg, num_lat_reg, num_lay_reg, & !------------------------------------------------------------------------------------- use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_param, only: WP implicit none integer :: n, i, flag,nz @@ -332,8 +335,8 @@ subroutine interp_3d_field(num_lon_reg, num_lat_reg, num_lay_reg, & real(kind=WP), intent(in) :: data_reg(num_lon_reg, num_lat_reg, num_lay_reg) real(kind=WP), intent(in) :: lon_mod(num_mod), lat_mod(num_mod), lay_mod(num_mod) real(kind=WP), intent(out) :: data_mod(num_mod_z,num_mod) - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index d9300b275..1df49dd5b 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -1,6 +1,7 @@ ! ============================================================== subroutine setup_model(partit) USE MOD_PARTIT + USE MOD_PARSUP use o_param use i_param use i_therm_param @@ -116,6 +117,7 @@ subroutine get_run_steps(nsteps, partit) !-------------------------------------------------------------- use g_clock USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit), intent(in) :: partit diff --git a/src/gen_modules_clock.F90 b/src/gen_modules_clock.F90 index ce243d5be..8443e2633 100755 --- a/src/gen_modules_clock.F90 +++ b/src/gen_modules_clock.F90 @@ -67,6 +67,7 @@ end subroutine clock ! subroutine clock_init(partit) USE MOD_PARTIT + USE MOD_PARSUP use g_config implicit none type(t_partit), intent(in), target :: partit diff --git a/src/gen_modules_cvmix_idemix.F90 b/src/gen_modules_cvmix_idemix.F90 index 111223754..a7d4c1601 100644 --- a/src/gen_modules_cvmix_idemix.F90 +++ b/src/gen_modules_cvmix_idemix.F90 @@ -28,6 +28,7 @@ module g_cvmix_idemix use o_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use o_arrays use g_comm_auto use g_read_other_NetCDF diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 88eb8ea61..caacc5105 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -24,6 +24,7 @@ module g_cvmix_kpp use o_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use o_arrays use g_comm_auto diff --git a/src/gen_modules_cvmix_pp.F90 b/src/gen_modules_cvmix_pp.F90 index b81578c14..39dfa5673 100644 --- a/src/gen_modules_cvmix_pp.F90 +++ b/src/gen_modules_cvmix_pp.F90 @@ -26,6 +26,7 @@ module g_cvmix_pp use o_param use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_arrays use g_comm_auto use i_arrays diff --git a/src/gen_modules_cvmix_tidal.F90 b/src/gen_modules_cvmix_tidal.F90 index d6f18d180..8ee7937d8 100644 --- a/src/gen_modules_cvmix_tidal.F90 +++ b/src/gen_modules_cvmix_tidal.F90 @@ -16,6 +16,7 @@ module g_cvmix_tidal use o_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use o_arrays use g_comm_auto use g_read_other_NetCDF diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index cf58dbe0e..c286cf5f4 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -27,6 +27,7 @@ module g_cvmix_tke use o_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use o_arrays use g_comm_auto implicit none diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 98d96955a..231345f2d 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -3,6 +3,7 @@ module diagnostics use g_config use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use g_clock use g_comm_auto diff --git a/src/gen_modules_gpot.F90 b/src/gen_modules_gpot.F90 index 72fb2ffd2..d62414902 100644 --- a/src/gen_modules_gpot.F90 +++ b/src/gen_modules_gpot.F90 @@ -25,6 +25,7 @@ MODULE mo_tidal USE o_ARRAYS, only : ssh_gp USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE g_config, only : dt USE g_clock IMPLICIT NONE diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index a5c9e258e..495dc23b0 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -1,22 +1,28 @@ -module par_support_interfaces +module mod_parsup interface - subroutine par_init(partit) - USE o_PARAM + subroutine par_ex(partit, abort) USE MOD_PARTIT implicit none type(t_partit), intent(inout), target :: partit + integer,optional :: abort end subroutine + end interface +end module mod_parsup - subroutine par_ex(partit, abort) +module par_support_interfaces + interface + subroutine par_init(partit) + USE o_PARAM USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit), intent(inout), target :: partit - integer,optional :: abort end subroutine subroutine init_mpi_types(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit), intent(in), target :: partit type(t_mesh), intent(in), target :: mesh @@ -24,26 +30,17 @@ subroutine init_mpi_types(partit, mesh) subroutine init_gatherLists(partit) USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit), intent(inout), target :: partit end subroutine end interface end module -module mod_parsup - interface - subroutine par_ex(partit, abort) - USE MOD_PARTIT - implicit none - type(t_partit), intent(inout), target :: partit - integer,optional :: abort - end subroutine - end interface -end module mod_parsup - subroutine par_init(partit) ! initializes MPI USE o_PARAM USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit), intent(inout), target :: partit integer :: i @@ -80,6 +77,7 @@ end subroutine par_init !================================================================= subroutine par_ex(partit, abort) ! finalizes MPI USE MOD_PARTIT +USE MOD_PARSUP #ifndef __oifs !For standalone and coupled ECHAM runs #if defined (__oasis) @@ -129,6 +127,7 @@ end subroutine par_ex subroutine init_mpi_types(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit), intent(inout), target :: partit @@ -452,6 +451,7 @@ end subroutine init_mpi_types !=================================================================== subroutine init_gatherLists(partit) USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit), intent(inout), target :: partit integer :: n2D, e2D, sum_loc_elem2D @@ -509,9 +509,10 @@ end subroutine init_gatherLists subroutine status_check(partit) use g_config USE MOD_PARTIT +USE MOD_PARSUP implicit none -type(t_partit), intent(in), target :: partit -integer :: res +type(t_partit), intent(inout), target :: partit +integer :: res res=0 call MPI_Allreduce (partit%pe_status, res, 1, MPI_INTEGER, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) if (res /= 0 ) then diff --git a/src/gen_modules_read_NetCDF.F90 b/src/gen_modules_read_NetCDF.F90 index 7faf8ec59..40491f84a 100755 --- a/src/gen_modules_read_NetCDF.F90 +++ b/src/gen_modules_read_NetCDF.F90 @@ -17,6 +17,7 @@ subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, part use o_param USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP implicit none #include "netcdf.inc" @@ -52,7 +53,7 @@ subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, part if (status.ne.nf_noerr)then print*,'ERROR: CANNOT READ runoff FILE CORRECTLY !!!!!' print*,'Error in opening netcdf file'//file - call par_ex + call par_ex(partit) stop endif @@ -166,6 +167,7 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, partit use o_param USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use g_rotate_grid implicit none #include "netcdf.inc" @@ -200,7 +202,7 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, partit if (status.ne.nf_noerr)then print*,'ERROR: CANNOT READ runoff FILE CORRECTLY !!!!!' print*,'Error in opening netcdf file'//file - call par_ex + call par_ex(partit) stop endif @@ -287,6 +289,7 @@ subroutine read_2ddata_on_grid_NetCDF(file, vari, itime, model_2Darray, partit, use o_param USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use g_rotate_grid implicit none @@ -316,7 +319,7 @@ subroutine read_2ddata_on_grid_NetCDF(file, vari, itime, model_2Darray, partit, if (status.ne.nf_noerr)then print*,'ERROR: CANNOT READ runoff FILE CORRECTLY !!!!!' print*,'Error in opening netcdf file'//file - call par_ex + call par_ex(partit) stop endif diff --git a/src/gen_support.F90 b/src/gen_support.F90 index 6a2760329..9c0f11681 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -4,6 +4,7 @@ module g_support USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto use o_ARRAYS use g_config, only: dummy @@ -280,6 +281,7 @@ end subroutine smooth_elem3D ! subroutine integrate_nod_2D(data, int2D, partit, mesh) USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto IMPLICIT NONE @@ -309,6 +311,7 @@ end subroutine integrate_nod_2D ! subroutine integrate_nod_3D(data, int3D, partit, mesh) USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto IMPLICIT NONE diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index c53908e70..fdcdcc1db 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -35,6 +35,7 @@ MODULE g_sbf !! USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_comm_auto diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index a0c04c597..690118d7d 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -3,6 +3,7 @@ module ice_EVP_interfaces subroutine stress_tensor(ice_strength, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) @@ -11,6 +12,7 @@ subroutine stress_tensor(ice_strength, partit, mesh) subroutine stress2rhs(inv_areamass, ice_strength, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit REAL(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) @@ -32,6 +34,7 @@ subroutine stress_tensor(ice_strength, partit, mesh) USE g_CONFIG USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength @@ -146,6 +149,7 @@ subroutine stress_tensor_no1(ice_strength, partit, mesh) USE g_CONFIG USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -253,6 +257,7 @@ subroutine stress2rhs_e(partit, mesh) use g_config, only: use_cavity USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_mesh), intent(in), target :: mesh @@ -335,6 +340,7 @@ subroutine stress2rhs(inv_areamass, ice_strength, partit, mesh) USE i_arrays USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_mesh), intent(in), target :: mesh @@ -413,6 +419,7 @@ subroutine EVPdynamics(partit, mesh) use ice_EVP_interfaces USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index f26b11894..e6b1acd60 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -3,6 +3,7 @@ module ice_fct_interfaces subroutine ice_mass_matrix_fill(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh end subroutine @@ -10,6 +11,7 @@ subroutine ice_mass_matrix_fill(partit, mesh) subroutine ice_solve_high_order(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh end subroutine @@ -17,6 +19,7 @@ subroutine ice_solve_high_order(partit, mesh) subroutine ice_solve_low_order(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh end subroutine @@ -24,6 +27,7 @@ subroutine ice_solve_low_order(partit, mesh) subroutine ice_fem_fct(tr_array_id, partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP integer :: tr_array_id type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -47,6 +51,7 @@ subroutine ice_fem_fct(tr_array_id, partit, mesh) subroutine ice_TG_rhs(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use i_Arrays use i_PARAM use o_PARAM @@ -117,6 +122,7 @@ subroutine ice_fct_init(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use i_ARRAYS use ice_fct_interfaces implicit none @@ -167,6 +173,7 @@ end subroutine ice_fct_init subroutine ice_fct_solve(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use ice_fct_interfaces implicit none type(t_partit), intent(inout), target :: partit @@ -202,6 +209,7 @@ subroutine ice_solve_low_order(partit, mesh) ! mass matrix on the lhs is replaced with the lumped one. use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use i_ARRAYS use i_PARAM @@ -260,6 +268,7 @@ end subroutine ice_solve_low_order subroutine ice_solve_high_order(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use i_ARRAYS use o_PARAM @@ -352,6 +361,7 @@ subroutine ice_fem_fct(tr_array_id, partit, mesh) ! use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use i_arrays use i_param @@ -662,6 +672,7 @@ SUBROUTINE ice_mass_matrix_fill(partit, mesh) ! Used in ice_fct inherited from FESOM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use i_PARAM use i_ARRAYS @@ -744,6 +755,7 @@ END SUBROUTINE ice_mass_matrix_fill subroutine ice_TG_rhs_div(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use i_Arrays use i_PARAM use o_PARAM @@ -839,6 +851,7 @@ end subroutine ice_TG_rhs_div subroutine ice_update_for_div(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use i_Arrays use i_PARAM diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index ec1fe0f41..749bb2b31 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -3,6 +3,7 @@ module ice_maEVP_interfaces subroutine ssh2rhs(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -10,6 +11,7 @@ subroutine ssh2rhs(partit, mesh) subroutine stress_tensor_a(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -17,6 +19,7 @@ subroutine stress_tensor_a(partit, mesh) subroutine stress2rhs_m(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -24,6 +27,7 @@ subroutine stress2rhs_m(partit, mesh) subroutine find_alpha_field_a(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -31,6 +35,7 @@ subroutine find_alpha_field_a(partit, mesh) subroutine find_beta_field_a(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -50,6 +55,7 @@ subroutine stress_tensor_m(partit, mesh) use i_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use g_config use i_arrays @@ -148,6 +154,7 @@ subroutine ssh2rhs(partit, mesh) use i_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use g_config use i_arrays use i_therm_param @@ -231,6 +238,7 @@ subroutine stress2rhs_m(partit, mesh) use i_therm_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use g_config use i_arrays implicit none @@ -304,6 +312,7 @@ subroutine EVPdynamics_m(partit, mesh) use i_therm_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use g_config use i_arrays use o_arrays @@ -645,6 +654,7 @@ subroutine find_alpha_field_a(partit, mesh) use i_therm_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use g_config use i_arrays @@ -723,6 +733,7 @@ subroutine stress_tensor_a(partit, mesh) use i_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use g_config use i_arrays @@ -824,6 +835,7 @@ subroutine EVPdynamics_a(partit, mesh) use o_param use mod_mesh USE MOD_PARTIT +USE MOD_PARSUP use i_arrays USE o_arrays use i_param @@ -932,6 +944,7 @@ subroutine find_beta_field_a(partit, mesh) use mod_mesh USE MOD_PARTIT +USE MOD_PARSUP use o_param USE i_param use i_arrays diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 7fa784e9b..30dedc505 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -3,6 +3,7 @@ module ocean2ice_interface subroutine ocean2ice(tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -16,6 +17,7 @@ module oce_fluxes_interface subroutine oce_fluxes(tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -34,6 +36,7 @@ subroutine oce_fluxes_mom(partit, mesh) use o_ARRAYS use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use i_ARRAYS use i_PARAM USE g_CONFIG @@ -118,6 +121,7 @@ subroutine ocean2ice(tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP USE g_CONFIG use g_comm_auto implicit none @@ -195,6 +199,7 @@ subroutine oce_fluxes(tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP USE g_CONFIG use o_ARRAYS use i_ARRAYS diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index d57810586..e6bc76fc4 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -3,6 +3,7 @@ module ice_array_setup_interface subroutine ice_array_setup(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -15,6 +16,7 @@ module ice_initial_state_interface subroutine ice_initial_state(tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -27,6 +29,7 @@ module ice_setup_interface subroutine ice_setup(tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -44,6 +47,7 @@ subroutine ice_setup(tracers, partit, mesh) use g_CONFIG use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use ice_array_setup_interface use ice_initial_state_interface @@ -82,6 +86,7 @@ subroutine ice_array_setup(partit, mesh) use i_param use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP use i_arrays USE g_CONFIG @@ -193,6 +198,7 @@ end subroutine ice_array_setup subroutine ice_timestep(step, partit, mesh) use mod_mesh USE MOD_PARTIT +USE MOD_PARSUP use i_arrays use o_param use g_CONFIG @@ -316,6 +322,7 @@ subroutine ice_initial_state(tracers, partit, mesh) use i_ARRAYs use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_PARAM use o_arrays diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index f15b8d1de..5a4b82043 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -4,6 +4,7 @@ subroutine cut_off(partit, mesh) use i_arrays use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use g_config, only: use_cavity implicit none type(t_mesh), intent(in), target :: mesh @@ -88,6 +89,7 @@ subroutine thermodynamics(partit, mesh) use o_param use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use i_therm_param use i_param use i_arrays diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 0d64878c1..1960ad72c 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -4,6 +4,7 @@ MODULE io_BLOWUP use g_comm_auto USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER use o_arrays use i_arrays diff --git a/src/io_gather.F90 b/src/io_gather.F90 index 114a4f840..155ddf9fe 100644 --- a/src/io_gather.F90 +++ b/src/io_gather.F90 @@ -1,5 +1,6 @@ module io_gather_module USE MOD_PARTIT + USE MOD_PARSUP implicit none public init_io_gather, gather_nod2D, gather_real4_nod2D, gather_elem2D, gather_real4_elem2D private @@ -17,6 +18,7 @@ module io_gather_module subroutine init_io_gather(partit) USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit), intent(inout), target :: partit integer err @@ -61,6 +63,7 @@ subroutine init_nod2D_lists(partit) subroutine init_elem2D_lists(partit) USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit), intent(inout), target :: partit #include "associate_part_def.h" @@ -97,6 +100,7 @@ subroutine init_elem2D_lists(partit) ! thread-safe procedure subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) USE MOD_PARTIT + USE MOD_PARSUP use, intrinsic :: iso_fortran_env, only: real64 implicit none type(t_partit), intent(inout), target :: partit @@ -124,6 +128,7 @@ subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) ! thread-safe procedure subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) USE MOD_PARTIT + USE MOD_PARSUP use, intrinsic :: iso_fortran_env, only: real32 implicit none type(t_partit), intent(inout), target :: partit @@ -151,6 +156,7 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, part ! thread-safe procedure subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) USE MOD_PARTIT + USE MOD_PARSUP use, intrinsic :: iso_fortran_env, only: real64 implicit none type(t_partit), intent(inout), target :: partit @@ -178,6 +184,7 @@ subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) ! thread-safe procedure subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) USE MOD_PARTIT + USE MOD_PARSUP use, intrinsic :: iso_fortran_env, only: real32 implicit none type(t_partit), intent(inout), target :: partit diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index d0c237167..9c1443377 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -1,5 +1,6 @@ module io_MEANDATA USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM, only : WP use, intrinsic :: iso_fortran_env, only: real64, real32 use io_data_strategy_module @@ -96,6 +97,7 @@ subroutine ini_mean_io(tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use g_cvmix_tke use g_cvmix_idemix use g_cvmix_kpp @@ -545,6 +547,7 @@ subroutine ini_mean_io(tracers, partit, mesh) function mesh_dimname_from_dimsize(size, partit, mesh) result(name) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use diagnostics #if defined (__icepack) use icedrv_main, only: ncat ! number of ice thickness cathegories @@ -581,6 +584,7 @@ subroutine create_new_file(entry, partit, mesh) use g_clock use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use fesom_version_info_module use g_config use i_PARAM @@ -805,6 +809,7 @@ subroutine output(istep, tracers, partit, mesh) use g_clock use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use io_gather_module #if defined (__icepack) @@ -916,6 +921,7 @@ subroutine output(istep, tracers, partit, mesh) subroutine do_output_callback(entry_index) use mod_mesh USE MOD_PARTIT +USE MOD_PARSUP integer, intent(in) :: entry_index ! EO args type(Meandata), pointer :: entry @@ -947,6 +953,7 @@ subroutine finalize_output() subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, partit, mesh, flip_array) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit), intent(inout), target :: partit integer, intent(in) :: glsize(2), lcsize(2) @@ -1012,6 +1019,7 @@ subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, fr subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP implicit none integer, intent(in) :: glsize, lcsize character(len=*), intent(in) :: name, description, units @@ -1089,6 +1097,7 @@ subroutine associate_new_stream(name, entry) subroutine def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use io_netcdf_workaround_module type(Meandata), intent(inout) :: entry character(len=*), intent(in) :: name, description, units diff --git a/src/io_mesh_info.F90 b/src/io_mesh_info.F90 index a682e753e..4b80bf9d0 100644 --- a/src/io_mesh_info.F90 +++ b/src/io_mesh_info.F90 @@ -1,6 +1,7 @@ module io_mesh_info USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP use g_config use g_comm_auto use o_PARAM diff --git a/src/io_netcdf_workaround_module.F90 b/src/io_netcdf_workaround_module.F90 index 562db612e..17271a9cb 100644 --- a/src/io_netcdf_workaround_module.F90 +++ b/src/io_netcdf_workaround_module.F90 @@ -8,6 +8,7 @@ module io_netcdf_workaround_module integer function next_io_rank(communicator, async_netcdf_allowed, partit) result(result) USE MOD_PARTIT + USE MOD_PARSUP use mpi_topology_module integer, intent(in) :: communicator logical, intent(out) :: async_netcdf_allowed diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 48bbc50e6..cab75b985 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -4,6 +4,7 @@ MODULE io_RESTART use g_comm_auto use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use o_arrays use i_arrays diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index f996280fe..98e7a4fb6 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -4,6 +4,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num type(t_partit), intent(inout), target :: partit @@ -23,6 +24,7 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, !update the solution for vertical and horizontal flux contributions use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP real(kind=WP), intent(in), target :: dt type(t_partit),intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -43,6 +45,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto use oce_adv_tra_hor_interfaces use oce_adv_tra_ver_interfaces @@ -209,6 +212,7 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, use MOD_MESH use o_ARRAYS USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none real(kind=WP), intent(in), target :: dt diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 0ff845918..d76c3aebc 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -4,6 +4,7 @@ subroutine oce_adv_tra_fct_init(twork, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit),intent(inout), target :: partit type(t_tracer_work), intent(inout), target :: twork @@ -12,6 +13,7 @@ subroutine oce_adv_tra_fct_init(twork, partit, mesh) subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP real(kind=WP), intent(in), target :: dt type(t_partit),intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh @@ -34,6 +36,7 @@ subroutine oce_adv_tra_fct_init(twork, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP implicit none integer :: my_size type(t_mesh), intent(in) , target :: mesh @@ -76,6 +79,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none real(kind=WP), intent(in), target :: dt diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 6d38992b4..441372ba8 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -12,6 +12,7 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) @@ -29,6 +30,7 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution @@ -44,6 +46,7 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution @@ -61,6 +64,7 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none type(t_partit),intent(in), target :: partit @@ -218,6 +222,7 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none type(t_partit),intent(in), target :: partit @@ -490,6 +495,7 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none type(t_partit),intent(in), target :: partit diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index b4985e9c6..84ee55173 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -5,6 +5,7 @@ module oce_adv_tra_ver_interfaces subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP real(kind=WP), intent(in), target :: dt type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh @@ -20,6 +21,7 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) @@ -36,6 +38,7 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution @@ -53,6 +56,7 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP real(kind=WP), intent(in), target :: dt type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh @@ -71,6 +75,7 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh integer :: n, nz, nl1 @@ -87,6 +92,7 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none @@ -235,6 +241,7 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none @@ -293,6 +300,7 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) use o_ARRAYS use o_PARAM USE MOD_PARTIT + USE MOD_PARSUP implicit none type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh @@ -368,6 +376,7 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none real(kind=WP), intent(in), target :: dt @@ -551,6 +560,7 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none type(t_partit),intent(in), target :: partit diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index e8d306042..271ebe324 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -3,6 +3,7 @@ module oce_ale_interfaces subroutine init_bottom_elem_thickness(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -10,6 +11,7 @@ subroutine init_bottom_elem_thickness(partit, mesh) subroutine init_bottom_node_thickness(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -17,6 +19,7 @@ subroutine init_bottom_node_thickness(partit, mesh) subroutine init_surface_elem_depth(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -24,6 +27,7 @@ subroutine init_surface_elem_depth(partit, mesh) subroutine init_surface_node_depth(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -31,6 +35,7 @@ subroutine init_surface_node_depth(partit, mesh) subroutine impl_vert_visc_ale(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -38,6 +43,7 @@ subroutine impl_vert_visc_ale(partit, mesh) subroutine update_stiff_mat_ale(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -45,6 +51,7 @@ subroutine update_stiff_mat_ale(partit, mesh) subroutine compute_ssh_rhs_ale(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -52,6 +59,7 @@ subroutine compute_ssh_rhs_ale(partit, mesh) subroutine solve_ssh_ale(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -59,6 +67,7 @@ subroutine solve_ssh_ale(partit, mesh) subroutine compute_hbar_ale(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -66,6 +75,7 @@ subroutine compute_hbar_ale(partit, mesh) subroutine vert_vel_ale(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -73,6 +83,7 @@ subroutine vert_vel_ale(partit, mesh) subroutine update_thickness_ale(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -84,6 +95,7 @@ module oce_timestep_ale_interface subroutine oce_timestep_ale(n, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer integer, intent(in) :: n type(t_mesh), intent(in), target :: mesh @@ -118,6 +130,7 @@ subroutine init_ale(partit, mesh) USE o_PARAM USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE g_config, only: which_ale, use_cavity, use_partial_cell USE g_forcing_param, only: use_virt_salt @@ -256,6 +269,7 @@ subroutine init_bottom_elem_thickness(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config,only: use_partial_cell, partial_cell_thresh use g_comm_auto @@ -378,6 +392,7 @@ subroutine init_bottom_node_thickness(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config,only: use_partial_cell use g_comm_auto @@ -488,6 +503,7 @@ subroutine init_surface_elem_depth(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config,only: use_cavity, use_cavity_partial_cell, cavity_partial_cell_thresh use g_comm_auto @@ -565,6 +581,7 @@ subroutine init_surface_node_depth(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config,only: use_cavity, use_cavity_partial_cell use g_comm_auto @@ -630,6 +647,7 @@ subroutine init_thickness_ale(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS implicit none integer :: n, nz, elem, elnodes(3), nzmin, nzmax @@ -878,6 +896,7 @@ subroutine update_thickness_ale(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode implicit none @@ -1079,6 +1098,7 @@ subroutine restart_thickness_ale(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode implicit none @@ -1184,6 +1204,7 @@ subroutine init_stiff_mat_ale(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use g_CONFIG implicit none @@ -1472,6 +1493,7 @@ subroutine update_stiff_mat_ale(partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS ! implicit none @@ -1583,6 +1605,7 @@ subroutine compute_ssh_rhs_ale(partit, mesh) use o_ARRAYS use o_PARAM USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none @@ -1695,6 +1718,7 @@ subroutine compute_hbar_ale(partit, mesh) use o_ARRAYS use o_PARAM USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none @@ -1806,6 +1830,7 @@ subroutine vert_vel_ale(partit, mesh) use o_ARRAYS use o_PARAM USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto use io_RESTART !!PS use i_arrays !!PS @@ -2326,6 +2351,7 @@ subroutine solve_ssh_ale(partit, mesh) use MOD_MESH use o_ARRAYS USE MOD_PARTIT +USE MOD_PARSUP use g_comm_auto use g_config, only: which_ale ! @@ -2474,6 +2500,7 @@ subroutine impl_vert_visc_ale(partit, mesh) USE o_PARAM USE o_ARRAYS USE MOD_PARTIT +USE MOD_PARSUP USE g_CONFIG,only: dt IMPLICIT NONE @@ -2654,6 +2681,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) use o_ARRAYS use o_PARAM USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto use io_RESTART !PS use i_ARRAYS !PS diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 93784e168..5c62871e1 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -9,6 +9,7 @@ MODULE o_mixing_KPP_mod USE o_PARAM USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER USE o_ARRAYS USE g_config diff --git a/src/oce_ale_mixing_pp.F90 b/src/oce_ale_mixing_pp.F90 index baed158c7..b4c7958d2 100644 --- a/src/oce_ale_mixing_pp.F90 +++ b/src/oce_ale_mixing_pp.F90 @@ -17,6 +17,7 @@ subroutine oce_mixing_pp(partit, mesh) ! USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_PARAM USE o_ARRAYS USE g_config diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 9b5671a07..04e5a8654 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -3,6 +3,7 @@ module densityJM_components_interface subroutine densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), intent(IN) :: t,s @@ -16,6 +17,7 @@ module density_linear_interface subroutine density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), intent(IN) :: t,s @@ -29,6 +31,7 @@ module pressure_force_4_linfs_fullcell_interface subroutine pressure_force_4_linfs_fullcell(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -39,6 +42,7 @@ module pressure_force_4_linfs_nemo_interface subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit @@ -51,6 +55,7 @@ module pressure_force_4_linfs_shchepetkin_interface subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -61,6 +66,7 @@ module pressure_force_4_linfs_easypgf_interface subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER type(t_tracer), intent(in), target :: tracers type(t_partit), intent(inout), target :: partit @@ -73,6 +79,7 @@ module pressure_force_4_linfs_cubicspline_interface subroutine pressure_force_4_linfs_cubicspline(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -83,6 +90,7 @@ module pressure_force_4_linfs_cavity_interface subroutine pressure_force_4_linfs_cavity(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -93,6 +101,7 @@ module pressure_force_4_zxxxx_shchepetkin_interface subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -103,6 +112,7 @@ module pressure_force_4_zxxxx_easypgf_interface subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit @@ -115,6 +125,7 @@ module pressure_force_4_zxxxx_cubicspline_interface subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -125,6 +136,7 @@ module init_ref_density_interface subroutine init_ref_density(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -135,6 +147,7 @@ module insitu2pot_interface subroutine insitu2pot(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit @@ -147,6 +160,7 @@ module pressure_bv_interface subroutine pressure_bv(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit @@ -159,6 +173,7 @@ module pressure_force_4_linfs_interface subroutine pressure_force_4_linfs(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit @@ -171,6 +186,7 @@ module pressure_force_4_zxxxx_interface subroutine pressure_force_4_zxxxx(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit @@ -190,6 +206,7 @@ subroutine pressure_bv(tracers, partit, mesh) USE MOD_MESH USE MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS use i_arrays USE o_mixing_KPP_mod, only: dbsfc @@ -459,6 +476,7 @@ subroutine pressure_force_4_linfs(tracers, partit, mesh) use g_config use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use pressure_force_4_linfs_fullcell_interface use pressure_force_4_linfs_nemo_interface @@ -526,6 +544,7 @@ subroutine pressure_force_4_linfs_fullcell(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config implicit none @@ -575,6 +594,7 @@ subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_ARRAYS use g_config @@ -751,6 +771,7 @@ subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config implicit none @@ -1005,6 +1026,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_ARRAYS use g_config @@ -1367,6 +1389,7 @@ subroutine pressure_force_4_linfs_cubicspline(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config implicit none @@ -1569,6 +1592,7 @@ subroutine pressure_force_4_linfs_cavity(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config implicit none @@ -1781,6 +1805,7 @@ end subroutine pressure_force_4_linfs_cavity subroutine pressure_force_4_zxxxx(tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use g_config use pressure_force_4_zxxxx_shchepetkin_interface @@ -1821,6 +1846,7 @@ subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config implicit none @@ -2005,6 +2031,7 @@ subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config use densityJM_components_interface @@ -2246,6 +2273,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_ARRAYS use g_config @@ -2687,7 +2715,8 @@ end subroutine pressure_force_4_zxxxx_easypgf !=============================================================================== SUBROUTINE densityJM_local(t, s, pz, rho_out, partit, mesh) USE MOD_MESH -USE MOD_PARTIT !, only: par_ex,pe_status +USE MOD_PARTIT +USE MOD_PARSUP !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM use densityJM_components_interface @@ -2725,7 +2754,8 @@ end subroutine densityJM_local !=============================================================================== SUBROUTINE densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) USE MOD_MESH -USE MOD_PARTIT !, only: par_ex,pe_status +USE MOD_PARTIT +USE MOD_PARSUP !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM IMPLICIT NONE @@ -2903,6 +2933,7 @@ subroutine sw_alpha_beta(TF1,SF1, partit, mesh) !----------------------------------------------------------------- use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use o_arrays use o_param use g_comm_auto @@ -2986,6 +3017,7 @@ subroutine compute_sigma_xy(TF1,SF1, partit, mesh) !------------------------------------------------------------------- use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use o_param use o_arrays use g_comm_auto @@ -3057,6 +3089,7 @@ end subroutine compute_sigma_xy subroutine compute_neutral_slope(partit, mesh) use o_ARRAYS USE MOD_PARTIT + USE MOD_PARSUP use MOD_MESH use o_param use g_config @@ -3109,6 +3142,7 @@ end subroutine compute_neutral_slope subroutine insitu2pot(tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use o_param use o_arrays @@ -3155,7 +3189,8 @@ end subroutine insitu2pot SUBROUTINE density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, partit, mesh) !coded by Margarita Smolentseva, 21.05.2020 USE MOD_MESH -USE MOD_PARTIT !, only: par_ex,pe_status +USE MOD_PARTIT +USE MOD_PARSUP !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM use g_config !, only: which_toy, toy_ocean @@ -3194,6 +3229,7 @@ subroutine init_ref_density(partit, mesh) !___________________________________________________________________________ USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM use o_ARRAYS use densityJM_components_interface diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 48ab23279..699a85c67 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -3,6 +3,7 @@ module diff_part_hor_redi_interface subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -16,6 +17,7 @@ module adv_tracers_ale_interface subroutine adv_tracers_ale(dt, tr_num, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer real(kind=WP), intent(in), target :: dt integer, intent(in), target :: tr_num @@ -30,6 +32,7 @@ module diff_ver_part_expl_ale_interface subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -43,6 +46,7 @@ module diff_ver_part_redi_expl_interface subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -56,6 +60,7 @@ module diff_ver_part_impl_ale_interface subroutine diff_ver_part_impl_ale(tr_num, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -69,6 +74,7 @@ module diff_tracers_ale_interface subroutine diff_tracers_ale(tr_num, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -82,6 +88,7 @@ module bc_surface_interface function bc_surface(n, id, sval, partit) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP integer , intent(in) :: n, id type(t_partit), intent(inout), target :: partit real(kind=WP) :: bc_surface @@ -94,6 +101,7 @@ module diff_part_bh_interface subroutine diff_part_bh(tr_num, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer @@ -107,6 +115,7 @@ module solve_tracers_ale_interface subroutine solve_tracers_ale(tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh @@ -124,6 +133,7 @@ subroutine solve_tracers_ale(tracers, partit, mesh) use o_arrays use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use g_comm_auto use o_tracers @@ -212,6 +222,7 @@ subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) use g_config, only: flag_debug use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use o_arrays use diagnostics, only: ldiag_DVD, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & @@ -265,6 +276,7 @@ end subroutine adv_tracers_ale subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer use o_arrays use o_tracers @@ -350,6 +362,7 @@ subroutine diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) use g_forcing_arrays use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use g_config,only: dt @@ -427,11 +440,13 @@ end subroutine diff_ver_part_expl_ale subroutine diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_PARAM use o_ARRAYS use i_ARRAYS USE MOD_PARTIT + USE MOD_PARSUP use g_CONFIG use g_forcing_arrays use o_mixing_KPP_mod !for ghats _GO_ @@ -892,6 +907,7 @@ subroutine diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) use o_ARRAYS use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER USE o_param use g_config @@ -976,6 +992,7 @@ subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) use o_ARRAYS use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_param use g_config @@ -1133,6 +1150,7 @@ SUBROUTINE diff_part_bh(tr_num, tracers, partit, mesh) use o_ARRAYS use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_param use g_config @@ -1211,6 +1229,7 @@ end subroutine diff_part_bh FUNCTION bc_surface(n, id, sval, partit) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE g_forcing_arrays USE g_config diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index d03a7941e..3ab04ac1a 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -3,6 +3,7 @@ module momentum_adv_scalar_interface subroutine momentum_adv_scalar(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -15,6 +16,7 @@ subroutine momentum_adv_scalar(partit, mesh) subroutine compute_vel_rhs(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use i_ARRAYS use i_therm_param @@ -159,6 +161,7 @@ END SUBROUTINE compute_vel_rhs subroutine momentum_adv_scalar(partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_ARRAYS USE o_PARAM use g_comm_auto diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 20fd081b4..355ab9734 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -16,6 +16,7 @@ module h_viscosity_leith_interface subroutine h_viscosity_leith(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -26,6 +27,7 @@ module visc_filt_harmon_interface subroutine visc_filt_harmon(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -36,6 +38,7 @@ module visc_filt_hbhmix_interface subroutine visc_filt_hbhmix(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -46,6 +49,7 @@ module visc_filt_biharm_interface subroutine visc_filt_biharm(option, partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP integer :: option type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -57,6 +61,7 @@ module visc_filt_bcksct_interface subroutine visc_filt_bcksct(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -67,6 +72,7 @@ module visc_filt_bilapl_interface subroutine visc_filt_bilapl(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -77,6 +83,7 @@ module visc_filt_bidiff_interface subroutine visc_filt_bidiff(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -87,6 +94,7 @@ module visc_filt_dbcksc_interface subroutine visc_filt_dbcksc(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -97,6 +105,7 @@ module backscatter_coef_interface subroutine backscatter_coef(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -107,6 +116,7 @@ module uke_update_interface subroutine uke_update(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -121,6 +131,7 @@ subroutine uke_update(partit, mesh) SUBROUTINE update_vel(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -157,6 +168,7 @@ end subroutine update_vel subroutine compute_vel_nodes(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_PARAM USE o_ARRAYS use g_comm_auto @@ -200,6 +212,7 @@ subroutine viscosity_filter(option, partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP use h_viscosity_leith_interface use visc_filt_harmon_interface use visc_filt_hbhmix_interface @@ -266,6 +279,7 @@ end subroutine viscosity_filter SUBROUTINE visc_filt_harmon(partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -309,6 +323,7 @@ end subroutine visc_filt_harmon SUBROUTINE visc_filt_biharm(option, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -414,6 +429,7 @@ end subroutine visc_filt_biharm SUBROUTINE visc_filt_hbhmix(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -505,6 +521,7 @@ SUBROUTINE h_viscosity_leith(partit, mesh) ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -608,6 +625,7 @@ END subroutine h_viscosity_leith SUBROUTINE visc_filt_bcksct(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -707,6 +725,7 @@ end subroutine visc_filt_bcksct SUBROUTINE visc_filt_bilapl(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -788,6 +807,7 @@ end subroutine visc_filt_bilapl SUBROUTINE visc_filt_bidiff(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -863,6 +883,7 @@ end subroutine visc_filt_bidiff SUBROUTINE visc_filt_dbcksc(partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -1019,6 +1040,7 @@ end subroutine visc_filt_dbcksc SUBROUTINE backscatter_coef(partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -1054,6 +1076,7 @@ end subroutine backscatter_coef SUBROUTINE uke_update(partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_ARRAYS USE o_PARAM USE g_CONFIG diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index 4da9ea2c1..ab12e49ae 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -8,6 +8,7 @@ subroutine fer_solve_Gamma(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_PARAM USE o_ARRAYS, ONLY: sigma_xy, fer_gamma, bvfreq, fer_c, fer_K USE g_CONFIG @@ -129,6 +130,7 @@ END subroutine fer_solve_Gamma subroutine fer_gamma2vel(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_PARAM USE o_ARRAYS, ONLY: fer_gamma, fer_uv USE g_CONFIG @@ -169,6 +171,7 @@ subroutine init_Redi_GM(partit, mesh) !fer_compute_C_K_Redi USE o_PARAM USE o_ARRAYS, ONLY: fer_c, fer_k, fer_scal, Ki, bvfreq, MLD1_ind, neutral_slope, coriolis_node USE MOD_PARTIT + USE MOD_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE diff --git a/src/oce_local.F90 b/src/oce_local.F90 index b20b95598..0b9d1ac21 100755 --- a/src/oce_local.F90 +++ b/src/oce_local.F90 @@ -3,6 +3,7 @@ module com_global2local_interface subroutine com_global2local(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -13,6 +14,7 @@ subroutine com_global2local(partit, mesh) SUBROUTINE com_global2local(partit, mesh) use MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE type(t_mesh), intent(in), target :: mesh @@ -126,6 +128,7 @@ SUBROUTINE save_dist_mesh(partit, mesh) USE g_CONFIG USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS use com_global2local_interface IMPLICIT NONE diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 6977ac2f2..994ca69c8 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -3,6 +3,7 @@ module read_mesh_interface subroutine read_mesh(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -13,6 +14,7 @@ module find_levels_interface subroutine find_levels(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -23,6 +25,7 @@ module find_levels_cavity_interface subroutine find_levels_cavity(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -33,6 +36,7 @@ module test_tri_interface subroutine test_tri(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -43,6 +47,7 @@ module load_edges_interface subroutine load_edges(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -53,6 +58,7 @@ module find_neighbors_interface subroutine find_neighbors(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -63,6 +69,7 @@ module mesh_areas_interface subroutine mesh_areas(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -73,6 +80,7 @@ module elem_center_interface subroutine elem_center(elem, x, y, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP integer :: elem real(kind=WP) :: x, y type(t_mesh), intent(inout), target :: mesh @@ -84,6 +92,7 @@ module edge_center_interface subroutine edge_center(n1, n2, x, y, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP integer :: n1, n2 real(kind=WP) :: x, y type(t_mesh), intent(inout), target :: mesh @@ -95,6 +104,7 @@ module mesh_auxiliary_arrays_interface subroutine mesh_auxiliary_arrays(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -105,6 +115,7 @@ module find_levels_min_e2n_interface subroutine find_levels_min_e2n(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -115,6 +126,7 @@ module check_total_volume_interface subroutine check_total_volume(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -130,6 +142,7 @@ subroutine check_total_volume(partit, mesh) SUBROUTINE mesh_setup(partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE g_ROTATE_grid use read_mesh_interface use find_levels_interface @@ -170,6 +183,7 @@ SUBROUTINE read_mesh(partit, mesh) USE g_CONFIG USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_ARRAYS USE g_rotate_grid IMPLICIT NONE @@ -724,6 +738,7 @@ END subroutine read_mesh subroutine find_levels(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM use g_config ! @@ -925,6 +940,7 @@ end subroutine find_levels subroutine find_levels_cavity(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM use g_config ! @@ -1357,6 +1373,7 @@ end subroutine find_levels_cavity subroutine find_levels_min_e2n(partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM use g_config use g_comm_auto @@ -1399,6 +1416,7 @@ end subroutine find_levels_min_e2n SUBROUTINE test_tri(partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_PARAM USE g_CONFIG use g_rotate_grid @@ -1450,6 +1468,7 @@ END SUBROUTINE test_tri SUBROUTINE load_edges(partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_PARAM USE g_CONFIG IMPLICIT NONE @@ -1691,6 +1710,7 @@ SUBROUTINE find_neighbors(partit, mesh) USE o_PARAM USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE g_ROTATE_grid use g_comm_auto use elem_center_interface @@ -1874,6 +1894,7 @@ end subroutine elem_center SUBROUTINE mesh_areas(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_PARAM USE o_arrays, only: dum_3d_n USE g_ROTATE_GRID @@ -2145,6 +2166,7 @@ SUBROUTINE mesh_auxiliary_arrays(partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_PARAM USE i_PARAM USE o_ARRAYS @@ -2493,6 +2515,7 @@ END SUBROUTINE mesh_auxiliary_arrays SUBROUTINE check_mesh_consistency(partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_PARAM USE g_ROTATE_GRID use g_comm_auto @@ -2548,6 +2571,7 @@ END SUBROUTINE check_mesh_consistency subroutine check_total_volume(partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_PARAM use g_comm_auto use o_ARRAYS diff --git a/src/oce_mo_conv.F90 b/src/oce_mo_conv.F90 index cdcff4857..f8866f633 100644 --- a/src/oce_mo_conv.F90 +++ b/src/oce_mo_conv.F90 @@ -5,6 +5,7 @@ subroutine mo_convect(partit, mesh) USE o_PARAM USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE g_config use i_arrays diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index c85bc657d..10507e9dd 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -3,6 +3,7 @@ module find_up_downwind_triangles_interface subroutine find_up_downwind_triangles(twork, partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit @@ -31,6 +32,7 @@ subroutine find_up_downwind_triangles(twork, partit, mesh) subroutine muscl_adv_init(twork, partit, mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_ARRAYS use o_PARAM @@ -121,6 +123,7 @@ end SUBROUTINE muscl_adv_init SUBROUTINE find_up_downwind_triangles(twork, partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE MOD_TRACER USE o_ARRAYS USE o_PARAM @@ -289,6 +292,7 @@ SUBROUTINE fill_up_dn_grad(twork, partit, mesh) USE o_PARAM USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE MOD_TRACER USE o_ARRAYS IMPLICIT NONE diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 0d83734d6..3a54aeef8 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -3,6 +3,7 @@ module oce_initial_state_interface subroutine oce_initial_state(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit @@ -15,6 +16,7 @@ module tracer_init_interface subroutine tracer_init(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -27,6 +29,7 @@ module ocean_setup_interface subroutine ocean_setup(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -39,6 +42,7 @@ module before_oce_step_interface subroutine before_oce_step(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use mod_tracer type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -53,6 +57,7 @@ subroutine before_oce_step(tracers, partit, mesh) subroutine ocean_setup(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE MOD_TRACER USE o_PARAM USE o_ARRAYS @@ -216,6 +221,7 @@ end subroutine ocean_setup SUBROUTINE tracer_init(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE MOD_TRACER USE DIAGNOSTICS, only: ldiag_DVD USE g_ic3d @@ -313,6 +319,7 @@ END SUBROUTINE tracer_init SUBROUTINE arrays_init(num_tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE o_ARRAYS USE o_PARAM use g_comm_auto @@ -569,6 +576,7 @@ END SUBROUTINE arrays_init SUBROUTINE oce_initial_state(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT +USE MOD_PARSUP USE MOD_TRACER USE o_ARRAYS USE g_config @@ -733,6 +741,7 @@ end subroutine oce_initial_state SUBROUTINE before_oce_step(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER USE o_ARRAYS USE g_config diff --git a/src/oce_shortwave_pene.F90 b/src/oce_shortwave_pene.F90 index f18e926ca..d548d8156 100644 --- a/src/oce_shortwave_pene.F90 +++ b/src/oce_shortwave_pene.F90 @@ -6,6 +6,7 @@ subroutine cal_shortwave_rad(partit, mesh) ! Ref.: Morel and Antoine 1994, Sweeney et al. 2005 USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE o_PARAM USE o_ARRAYS USE g_CONFIG diff --git a/src/oce_spp.F90 b/src/oce_spp.F90 index efb0e0b4d..f59d09b9c 100644 --- a/src/oce_spp.F90 +++ b/src/oce_spp.F90 @@ -12,6 +12,7 @@ subroutine cal_rejected_salt(partit, mesh) use o_arrays use mod_mesh USE MOD_PARTIT +USE MOD_PARSUP use g_comm_auto use o_tracers use g_forcing_arrays, only: thdgr @@ -49,6 +50,7 @@ subroutine app_rejected_salt(ttf, partit, mesh) use o_arrays use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP use o_tracers use g_comm_auto implicit none diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index 67c9b7b26..5b0528724 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -3,6 +3,7 @@ MODULE o_tracers USE MOD_MESH USE MOD_TRACER USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE interface @@ -10,6 +11,7 @@ subroutine tracer_gradient_z(ttf, partit, mesh) USE MOD_MESH USE MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP IMPLICIT NONE type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -25,6 +27,7 @@ SUBROUTINE tracer_gradient_elements(ttf, partit, mesh) !computes elemental gradient of tracer USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER USE o_PARAM USE o_ARRAYS @@ -58,6 +61,7 @@ END SUBROUTINE tracer_gradient_elements SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER use g_config, only: flag_debug use o_arrays @@ -100,6 +104,7 @@ SUBROUTINE relax_to_clim(tr_num, tracers, partit, mesh) use o_arrays USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER IMPLICIT NONE @@ -143,6 +148,7 @@ SUBROUTINE tracer_gradient_z(ttf, partit, mesh) !computes vertical gradient of tracer USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER USE o_PARAM USE o_ARRAYS diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index 08c39289a..b81ccf727 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -3,6 +3,7 @@ module relative_vorticity_interface subroutine relative_vorticity(partit, mesh) use mod_mesh USE MOD_PARTIT + USE MOD_PARSUP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -17,6 +18,7 @@ subroutine relative_vorticity(partit, mesh) USE o_ARRAYS USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto IMPLICIT NONE integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 @@ -111,6 +113,7 @@ subroutine compute_vel_rhs_vinv(partit, mesh) !vector invariant USE o_ARRAYS USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE g_CONFIG use g_comm_auto use relative_vorticity_interface diff --git a/src/temp/MOD_MESH.F90 b/src/temp/MOD_MESH.F90 new file mode 100644 index 000000000..4eb0c23e1 --- /dev/null +++ b/src/temp/MOD_MESH.F90 @@ -0,0 +1,329 @@ +!========================================================== +MODULE MOD_MESH +USE O_PARAM +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +USE, intrinsic :: ISO_FORTRAN_ENV +IMPLICIT NONE +SAVE +integer, parameter :: MAX_ADJACENT=32 ! Max allowed number of adjacent nodes + +TYPE SPARSE_MATRIX + integer :: nza + integer :: dim + real(kind=WP), allocatable, dimension(:) :: values + integer(int32), allocatable, dimension(:) :: colind + integer(int32), allocatable, dimension(:) :: rowptr + integer(int32), allocatable, dimension(:) :: colind_loc + integer(int32), allocatable, dimension(:) :: rowptr_loc +END TYPE SPARSE_MATRIX + +TYPE T_MESH +integer :: nod2D ! the number of 2D nodes +real(kind=WP) :: ocean_area, ocean_areawithcav +real(kind=WP), allocatable, dimension(:,:) :: coord_nod2D, geo_coord_nod2D +integer :: edge2D ! the number of 2D edges +integer :: edge2D_in ! the number of internal 2D edges +integer :: elem2D ! the number of 2D elements +integer, allocatable, dimension(:,:) :: elem2D_nodes ! elem2D_nodes(:,n) lists; 3 nodes of element n +integer, allocatable, dimension(:,:) :: edges ! edge(:,n) lists 2 nodes; edge n +integer, allocatable, dimension(:,:) :: edge_tri ! edge_tri(:,n) lists 2 + ! elements containing edge n: the first one is to left + ! of the line directed to the second node +integer, allocatable, dimension(:,:) :: elem_edges ! elem_edges(:,n) are edges of element n. +real(kind=WP), allocatable, dimension(:) :: elem_area +real(kind=WP), allocatable, dimension(:,:) :: edge_dxdy, edge_cross_dxdy +real(kind=WP), allocatable, dimension(:) :: elem_cos, metric_factor +integer, allocatable, dimension(:,:) :: elem_neighbors +integer, allocatable, dimension(:,:) :: nod_in_elem2D +real(kind=WP), allocatable, dimension(:,:) :: x_corners, y_corners ! cornes for the scalar points +integer, allocatable, dimension(:) :: nod_in_elem2D_num +real(kind=WP), allocatable, dimension(:) :: depth ! depth(n) is the depths at node n +real(kind=WP), allocatable, dimension(:,:) :: gradient_vec + ! coefficients of linear reconstruction + ! of velocities on elements +real(kind=WP), allocatable, dimension(:,:) :: gradient_sca ! Coefficients to compute gradient of scalars + ! on elements +INTEGER, ALLOCATABLE, DIMENSION(:) :: bc_index_nod2D(:) + ! vertical structure +! +! +!___vertical mesh info__________________________________________________________ +! total number of layers +integer :: nl + +! initial layer, mid-depth layer and element depth +real(kind=WP), allocatable, dimension(:) :: zbar, Z,elem_depth + +! upper boudnary index of all vertical vertice/element loops, default==1 but when +! cavity is used becomes index of cavity-ocean boundary at vertices and elements +integer, allocatable, dimension(:) :: ulevels, ulevels_nod2D, ulevels_nod2D_max + +! number of levels at elem and vertices considering bottom topography +integer, allocatable, dimension(:) :: nlevels, nlevels_nod2D, nlevels_nod2D_min + +! +! +!___horizontal mesh info________________________________________________________ +real(kind=WP), allocatable, dimension(:,:) :: area, area_inv, areasvol, areasvol_inv +real(kind=WP), allocatable, dimension(:) :: mesh_resolution + +! +! +!___cavity mesh info____________________________________________________________ +! level index of cavity-ocean boundary at vertices and elements +! --> see: ulevels, ulevels_nod2D (fvom_main) + +! vertice/element yes=1/no=0 flag if cavity exists +integer, allocatable, dimension(:) :: cavity_flag_n, cavity_flag_e + +! depth of cavity-ocean interface +real(kind=WP), allocatable, dimension(:) :: cavity_depth + + +real(kind=WP), allocatable, dimension(:,:) :: cavity_nrst_cavlpnt_xyz + +! +! +!___Elevation stiffness matrix__________________________________________________ +type(sparse_matrix) :: ssh_stiff + +!#if defined (__oasis) +real(kind=WP), allocatable, dimension(:) :: lump2d_south, lump2d_north +integer, allocatable, dimension(:) :: ind_south, ind_north +!#endif + +integer :: nn_size +integer, allocatable, dimension(:) :: nn_num +integer, allocatable, dimension(:,:) :: nn_pos + +!_______________________________________________________________________________ +! Arrays added for ALE implementation: +! --> layer thinkness at node and depthlayer for t=n and t=n+1 +real(kind=WP), allocatable,dimension(:,:) :: hnode, hnode_new, zbar_3d_n, Z_3d_n + +! --> layer thinkness at elements, interpolated from hnode +real(kind=WP), allocatable,dimension(:,:) :: helem + +! --> thinkness of bottom elem (important for partial cells) +real(kind=WP), allocatable,dimension(:) :: bottom_elem_thickness +real(kind=WP), allocatable,dimension(:) :: bottom_node_thickness + +! --> The increment of total fluid depth on elements. It is used to update the matrix +real(kind=WP), allocatable,dimension(:) :: dhe + +! --> hbar, hbar_old: correspond to the elevation, but on semi-integer time steps. +real(kind=WP), allocatable,dimension(:) :: hbar, hbar_old + +! --> auxiliary array to store depth of layers and depth of mid level due to changing +! layer thinkness at every node +real(kind=WP), allocatable,dimension(:) :: zbar_n, Z_n + +! new bottom depth at node and element due to partial cells +real(kind=WP), allocatable,dimension(:) :: zbar_n_bot +real(kind=WP), allocatable,dimension(:) :: zbar_e_bot + +! new depth of cavity-ocean interface at node and element due to partial cells +real(kind=WP), allocatable,dimension(:) :: zbar_n_srf +real(kind=WP), allocatable,dimension(:) :: zbar_e_srf + +character(:), allocatable :: representative_checksum + +contains + procedure write_t_mesh + procedure read_t_mesh + generic :: write(unformatted) => write_t_mesh + generic :: read(unformatted) => read_t_mesh +END TYPE T_MESH + +contains + +! Unformatted writing for t_mesh +subroutine write_t_mesh(mesh, unit, iostat, iomsg) + IMPLICIT NONE + class(t_mesh), intent(in) :: mesh + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i, j, k + integer :: s1, s2, s3 + ! write records (giving sizes for the allocation for arrays) + write(unit, iostat=iostat, iomsg=iomsg) mesh%nod2D + write(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_area + write(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_areawithcav + write(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D + write(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D_in + write(unit, iostat=iostat, iomsg=iomsg) mesh%elem2D + call write_bin_array(mesh%elem2D_nodes, unit, iostat, iomsg) + call write_bin_array(mesh%edges, unit, iostat, iomsg) + call write_bin_array(mesh%edge_tri, unit, iostat, iomsg) + call write_bin_array(mesh%elem_edges, unit, iostat, iomsg) + call write_bin_array(mesh%elem_area, unit, iostat, iomsg) + call write_bin_array(mesh%edge_dxdy, unit, iostat, iomsg) + + call write_bin_array(mesh%edge_cross_dxdy, unit, iostat, iomsg) + call write_bin_array(mesh%elem_cos, unit, iostat, iomsg) + call write_bin_array(mesh%metric_factor, unit, iostat, iomsg) + call write_bin_array(mesh%elem_neighbors, unit, iostat, iomsg) + call write_bin_array(mesh%nod_in_elem2D, unit, iostat, iomsg) + call write_bin_array(mesh%x_corners, unit, iostat, iomsg) + call write_bin_array(mesh%y_corners, unit, iostat, iomsg) + call write_bin_array(mesh%nod_in_elem2D_num, unit, iostat, iomsg) + call write_bin_array(mesh%depth, unit, iostat, iomsg) + call write_bin_array(mesh%gradient_vec, unit, iostat, iomsg) + call write_bin_array(mesh%gradient_sca, unit, iostat, iomsg) + call write_bin_array(mesh%bc_index_nod2D, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) mesh%nl + + call write_bin_array(mesh%zbar, unit, iostat, iomsg) + call write_bin_array(mesh%Z, unit, iostat, iomsg) + call write_bin_array(mesh%elem_depth, unit, iostat, iomsg) + call write_bin_array(mesh%ulevels, unit, iostat, iomsg) + call write_bin_array(mesh%ulevels_nod2D, unit, iostat, iomsg) + call write_bin_array(mesh%ulevels_nod2D_max, unit, iostat, iomsg) + call write_bin_array(mesh%nlevels, unit, iostat, iomsg) + call write_bin_array(mesh%nlevels_nod2D, unit, iostat, iomsg) + call write_bin_array(mesh%nlevels_nod2D_min, unit, iostat, iomsg) + call write_bin_array(mesh%area, unit, iostat, iomsg) + call write_bin_array(mesh%area_inv, unit, iostat, iomsg) + call write_bin_array(mesh%areasvol, unit, iostat, iomsg) + call write_bin_array(mesh%areasvol_inv, unit, iostat, iomsg) + call write_bin_array(mesh%mesh_resolution, unit, iostat, iomsg) + + call write_bin_array(mesh%cavity_flag_n, unit, iostat, iomsg) + call write_bin_array(mesh%cavity_flag_e, unit, iostat, iomsg) + call write_bin_array(mesh%cavity_depth, unit, iostat, iomsg) + call write_bin_array(mesh%cavity_nrst_cavlpnt_xyz, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%dim + write(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%nza + + call write_bin_array(mesh%ssh_stiff%rowptr, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%colind, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%values, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%colind_loc, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%rowptr_loc, unit, iostat, iomsg) + + call write_bin_array(mesh%lump2d_south, unit, iostat, iomsg) + call write_bin_array(mesh%lump2d_north, unit, iostat, iomsg) + call write_bin_array(mesh%ind_south, unit, iostat, iomsg) + call write_bin_array(mesh%ind_north, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) mesh%nn_size + call write_bin_array(mesh%nn_num, unit, iostat, iomsg) + call write_bin_array(mesh%nn_pos, unit, iostat, iomsg) + call write_bin_array(mesh%hnode, unit, iostat, iomsg) + call write_bin_array(mesh%hnode_new, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_3d_n, unit, iostat, iomsg) + call write_bin_array(mesh%Z_3d_n, unit, iostat, iomsg) + call write_bin_array(mesh%helem, unit, iostat, iomsg) + call write_bin_array(mesh%bottom_elem_thickness, unit, iostat, iomsg) + call write_bin_array(mesh%bottom_node_thickness, unit, iostat, iomsg) + call write_bin_array(mesh%dhe, unit, iostat, iomsg) + call write_bin_array(mesh%hbar, unit, iostat, iomsg) + call write_bin_array(mesh%hbar_old, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_n, unit, iostat, iomsg) + call write_bin_array(mesh%Z_n, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_n_bot, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_e_bot, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg) +! call write_bin_array(mesh%representative_checksum, unit, iostat, iomsg) +end subroutine write_t_mesh + +! Unformatted reading for t_mesh +subroutine read_t_mesh(mesh, unit, iostat, iomsg) + IMPLICIT NONE + class(t_mesh), intent(inout) :: mesh + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i, j, k + integer :: s1, s2, s3 + ! write records (giving sizes for the allocation for arrays) + read(unit, iostat=iostat, iomsg=iomsg) mesh%nod2D + read(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_area + read(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_areawithcav + read(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D + read(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D_in + read(unit, iostat=iostat, iomsg=iomsg) mesh%elem2D + + call read_bin_array(mesh%elem2D_nodes, unit, iostat, iomsg) + call read_bin_array(mesh%edges, unit, iostat, iomsg) + call read_bin_array(mesh%edge_tri, unit, iostat, iomsg) + call read_bin_array(mesh%elem_edges, unit, iostat, iomsg) + call read_bin_array(mesh%elem_area, unit, iostat, iomsg) + call read_bin_array(mesh%edge_dxdy, unit, iostat, iomsg) + + call read_bin_array(mesh%edge_cross_dxdy, unit, iostat, iomsg) + call read_bin_array(mesh%elem_cos, unit, iostat, iomsg) + call read_bin_array(mesh%metric_factor, unit, iostat, iomsg) + call read_bin_array(mesh%elem_neighbors, unit, iostat, iomsg) + call read_bin_array(mesh%nod_in_elem2D, unit, iostat, iomsg) + call read_bin_array(mesh%x_corners, unit, iostat, iomsg) + call read_bin_array(mesh%y_corners, unit, iostat, iomsg) + call read_bin_array(mesh%nod_in_elem2D_num, unit, iostat, iomsg) + call read_bin_array(mesh%depth, unit, iostat, iomsg) + call read_bin_array(mesh%gradient_vec, unit, iostat, iomsg) + call read_bin_array(mesh%gradient_sca, unit, iostat, iomsg) + call read_bin_array(mesh%bc_index_nod2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) mesh%nl + + call read_bin_array(mesh%zbar, unit, iostat, iomsg) + call read_bin_array(mesh%Z, unit, iostat, iomsg) + call read_bin_array(mesh%elem_depth, unit, iostat, iomsg) + call read_bin_array(mesh%ulevels, unit, iostat, iomsg) + call read_bin_array(mesh%ulevels_nod2D, unit, iostat, iomsg) + call read_bin_array(mesh%ulevels_nod2D_max, unit, iostat, iomsg) + call read_bin_array(mesh%nlevels, unit, iostat, iomsg) + call read_bin_array(mesh%nlevels_nod2D, unit, iostat, iomsg) + call read_bin_array(mesh%nlevels_nod2D_min, unit, iostat, iomsg) + call read_bin_array(mesh%area, unit, iostat, iomsg) + call read_bin_array(mesh%area_inv, unit, iostat, iomsg) + call read_bin_array(mesh%areasvol, unit, iostat, iomsg) + call read_bin_array(mesh%areasvol_inv, unit, iostat, iomsg) + call read_bin_array(mesh%mesh_resolution, unit, iostat, iomsg) + + call read_bin_array(mesh%cavity_flag_n, unit, iostat, iomsg) + call read_bin_array(mesh%cavity_flag_e, unit, iostat, iomsg) + call read_bin_array(mesh%cavity_depth, unit, iostat, iomsg) + call read_bin_array(mesh%cavity_nrst_cavlpnt_xyz, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%dim + read(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%nza + + call read_bin_array(mesh%ssh_stiff%rowptr, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%colind, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%values, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%colind_loc, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%rowptr_loc, unit, iostat, iomsg) + + call read_bin_array(mesh%lump2d_south, unit, iostat, iomsg) + call read_bin_array(mesh%lump2d_north, unit, iostat, iomsg) + call read_bin_array(mesh%ind_south, unit, iostat, iomsg) + call read_bin_array(mesh%ind_north, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) mesh%nn_size + call read_bin_array(mesh%nn_num, unit, iostat, iomsg) + call read_bin_array(mesh%nn_pos, unit, iostat, iomsg) + call read_bin_array(mesh%hnode, unit, iostat, iomsg) + call read_bin_array(mesh%hnode_new, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_3d_n, unit, iostat, iomsg) + call read_bin_array(mesh%Z_3d_n, unit, iostat, iomsg) + call read_bin_array(mesh%helem, unit, iostat, iomsg) + call read_bin_array(mesh%bottom_elem_thickness, unit, iostat, iomsg) + call read_bin_array(mesh%bottom_node_thickness, unit, iostat, iomsg) + call read_bin_array(mesh%dhe, unit, iostat, iomsg) + call read_bin_array(mesh%hbar, unit, iostat, iomsg) + call read_bin_array(mesh%hbar_old, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n, unit, iostat, iomsg) + call read_bin_array(mesh%Z_n, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n_bot, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_e_bot, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg) +! call read_bin_array(mesh%representative_checksum, unit, iostat, iomsg) +end subroutine read_t_mesh +end module MOD_MESH +!========================================================== + diff --git a/src/temp/MOD_PARTIT.F90 b/src/temp/MOD_PARTIT.F90 new file mode 100644 index 000000000..bd3b7dec2 --- /dev/null +++ b/src/temp/MOD_PARTIT.F90 @@ -0,0 +1,189 @@ +!========================================================== +! Variables to organize parallel work +module MOD_PARTIT +USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE +include 'mpif.h' +integer, parameter :: MAX_LAENDERECK=16 +integer, parameter :: MAX_NEIGHBOR_PARTITIONS=32 + + +type com_struct + integer :: rPEnum ! the number of PE I receive info from + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: rPE ! their list + integer, dimension(MAX_NEIGHBOR_PARTITIONS+1) :: rptr ! allocatables to the list of nodes + integer, dimension(:), allocatable :: rlist ! the list of nodes + integer :: sPEnum ! send part + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sPE + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sptr + integer, dimension(:), allocatable :: slist + integer, dimension(:), allocatable :: req ! request for MPI_Wait + integer :: nreq ! number of requests for MPI_Wait + ! (to combine halo exchange of several fields) + contains + procedure WRITE_T_COM_STRUCT + procedure READ_T_COM_STRUCT + generic :: write(unformatted) => WRITE_T_COM_STRUCT + generic :: read(unformatted) => READ_T_COM_STRUCT +end type com_struct + +TYPE T_PARTIT + integer :: MPI_COMM_FESOM ! FESOM communicator (for ocean only runs if often a copy of MPI_COMM_WORLD) + + type(com_struct) :: com_nod2D + type(com_struct) :: com_elem2D + type(com_struct) :: com_elem2D_full + + ! MPI Datatypes for interface exchange + ! Element fields (2D; 2D integer; 3D with nl-1 or nl levels, 1 - 4 values) + ! small halo and / or full halo + !!! s(r)_mpitype_* are constructed during the runtime ans shall not be dumped!!! + integer, allocatable :: s_mpitype_elem2D(:,:), r_mpitype_elem2D(:,:) + integer, allocatable :: s_mpitype_elem2D_full_i(:), r_mpitype_elem2D_full_i(:) + integer, allocatable :: s_mpitype_elem2D_full(:,:), r_mpitype_elem2D_full(:,:) + integer, allocatable :: s_mpitype_elem3D(:,:,:), r_mpitype_elem3D(:,:,:) + integer, allocatable :: s_mpitype_elem3D_full(:,:,:),r_mpitype_elem3D_full(:,:,:) + + ! Nodal fields (2D; 2D integer; 3D with nl-1 or nl levels, one, two, or three values) + integer, allocatable :: s_mpitype_nod2D(:), r_mpitype_nod2D(:) + integer, allocatable :: s_mpitype_nod2D_i(:), r_mpitype_nod2D_i(:) + integer, allocatable :: s_mpitype_nod3D(:,:,:), r_mpitype_nod3D(:,:,:) + + ! general MPI part + integer :: MPIERR + integer :: npes + integer :: mype + integer :: maxPEnum=100 + integer, allocatable, dimension(:) :: part + + ! Mesh partition + integer :: myDim_nod2D, eDim_nod2D + integer, allocatable, dimension(:) :: myList_nod2D + integer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D + integer, allocatable, dimension(:) :: myList_elem2D + integer :: myDim_edge2D, eDim_edge2D + integer, allocatable, dimension(:) :: myList_edge2D + + integer :: pe_status = 0 ! if /=0 then something is wrong + !!! remPtr_* are constructed during the runtime ans shall not be dumped!!! + integer, allocatable :: remPtr_nod2D(:), remList_nod2D(:) + integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) + + logical :: elem_full_flag + contains + procedure WRITE_T_PARTIT + procedure READ_T_PARTIT + generic :: write(unformatted) => WRITE_T_PARTIT + generic :: read(unformatted) => READ_T_PARTIT +END TYPE T_PARTIT +contains + +! Unformatted writing for COM_STRUCT TYPE +subroutine WRITE_T_COM_STRUCT(tstruct, unit, iostat, iomsg) + IMPLICIT NONE + class(COM_STRUCT), intent(in) :: tstruct + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + write(unit, iostat=iostat, iomsg=iomsg) tstruct%rPEnum + call write1d_int_static(tstruct%rPE, unit, iostat, iomsg) + call write1d_int_static(tstruct%rptr, unit, iostat, iomsg) + call write_bin_array(tstruct%rlist, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tstruct%sPEnum + call write1d_int_static(tstruct%sPE, unit, iostat, iomsg) + call write1d_int_static(tstruct%sptr, unit, iostat, iomsg) + call write_bin_array(tstruct%slist, unit, iostat, iomsg) + ! req is constructed during the runtime + ! call write_bin_array(tstruct%req, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tstruct%nreq +end subroutine WRITE_T_COM_STRUCT + +subroutine READ_T_COM_STRUCT(tstruct, unit, iostat, iomsg) + IMPLICIT NONE + class(COM_STRUCT), intent(inout) :: tstruct + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + read(unit, iostat=iostat, iomsg=iomsg) tstruct%rPEnum + call read1d_int_static(tstruct%rPE, unit, iostat, iomsg) + call read1d_int_static(tstruct%rptr, unit, iostat, iomsg) + call read_bin_array(tstruct%rlist, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) tstruct%sPEnum + call read1d_int_static(tstruct%sPE, unit, iostat, iomsg) + call read1d_int_static(tstruct%sptr, unit, iostat, iomsg) + call read_bin_array(tstruct%slist, unit, iostat, iomsg) +! req is constructed during the runtime +! call read_bin_array(tstruct%req, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) tstruct%nreq +end subroutine READ_T_COM_STRUCT + +! Unformatted writing for T_PARTIT +subroutine WRITE_T_PARTIT(partit, unit, iostat, iomsg) + IMPLICIT NONE + class(T_PARTIT), intent(in) :: partit + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + write(unit, iostat=iostat, iomsg=iomsg) partit%com_nod2D + write(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D + write(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D_full + + write(unit, iostat=iostat, iomsg=iomsg) partit%npes + write(unit, iostat=iostat, iomsg=iomsg) partit%mype + write(unit, iostat=iostat, iomsg=iomsg) partit%maxPEnum + call write_bin_array(partit%part, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_nod2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_nod2D + call write_bin_array(partit%myList_nod2D, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_elem2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_elem2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eXDim_elem2D + call write_bin_array(partit%myList_elem2D, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_edge2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_edge2D + call write_bin_array(partit%myList_edge2D, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) partit%pe_status +end subroutine WRITE_T_PARTIT +! Unformatted reading for T_PARTIT +subroutine READ_T_PARTIT(partit, unit, iostat, iomsg) + IMPLICIT NONE + class(T_PARTIT), intent(inout) :: partit + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + read(unit, iostat=iostat, iomsg=iomsg) partit%com_nod2D + read(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D_full + + read(unit, iostat=iostat, iomsg=iomsg) partit%npes + read(unit, iostat=iostat, iomsg=iomsg) partit%mype + read(unit, iostat=iostat, iomsg=iomsg) partit%maxPEnum + call read_bin_array(partit%part, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_nod2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_nod2D + call read_bin_array(partit%myList_nod2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eXDim_elem2D + call read_bin_array(partit%myList_elem2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_edge2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_edge2D + call read_bin_array(partit%myList_edge2D, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) partit%pe_status +end subroutine READ_T_PARTIT + +end module MOD_PARTIT diff --git a/src/temp/MOD_READ_BINARY_ARRAYS.F90 b/src/temp/MOD_READ_BINARY_ARRAYS.F90 new file mode 100644 index 000000000..87f0b2389 --- /dev/null +++ b/src/temp/MOD_READ_BINARY_ARRAYS.F90 @@ -0,0 +1,118 @@ +!========================================================== +! +!------------------------------------------------------------------------------------------ +! useful interface (read_bin_array) for reading arbitary binary arrays into an opened file +MODULE MOD_READ_BINARY_ARRAYS +use o_PARAM +private +public :: read_bin_array, read1d_int_static +INTERFACE read_bin_array + MODULE PROCEDURE read1d_real, read1d_int, read1d_char, read2d_real, read2d_int, read3d_real, read3d_int +END INTERFACE +contains +subroutine read1d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(inout), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + allocate(arr(s1)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_real + +subroutine read1d_int(arr, unit, iostat, iomsg) + integer, intent(inout), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + allocate(arr(s1)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_int + +subroutine read1d_char(arr, unit, iostat, iomsg) + character, intent(inout), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + allocate(arr(s1)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_char + +subroutine read1d_int_static(arr, unit, iostat, iomsg) + IMPLICIT NONE + integer, intent(inout) :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_int_static + +subroutine read2d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(inout), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2 + if ((s1==0) .or. (s2==0)) return + allocate(arr(s1, s2)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) +end subroutine read2d_real + +subroutine read2d_int(arr, unit, iostat, iomsg) + integer, intent(inout), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2 + if ((s1==0) .or. (s2==0)) return + allocate(arr(s1, s2)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) +end subroutine read2d_int + +subroutine read3d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(inout), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + if ((s1==0) .or. (s2==0) .or. (s3==0)) return + allocate(arr(s1,s2,s3)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) +end subroutine read3d_real + +subroutine read3d_int(arr, unit, iostat, iomsg) + integer, intent(inout), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + if ((s1==0) .or. (s2==0) .or. (s3==0)) return + allocate(arr(s1,s2,s3)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) +end subroutine read3d_int +end module MOD_READ_BINARY_ARRAYS +!========================================================== + diff --git a/src/temp/MOD_TRACER.F90 b/src/temp/MOD_TRACER.F90 new file mode 100644 index 000000000..8e8247830 --- /dev/null +++ b/src/temp/MOD_TRACER.F90 @@ -0,0 +1,228 @@ +!========================================================== +MODULE MOD_TRACER +USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE + +TYPE T_TRACER_DATA +real(kind=WP), allocatable, dimension(:,:) :: values, valuesAB ! instant values & Adams-Bashfort interpolation +logical :: smooth_bh_tra=.false. +real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra +logical :: i_vert_diff =.false. +character(20) :: tra_adv_hor, tra_adv_ver, tra_adv_lim ! type of the advection scheme for this tracer +real(kind=WP) :: tra_adv_ph = 1. ! a parameter to be used in horizontal advection (for MUSCL it is the fraction of fourth-order contribution in the solution) +real(kind=WP) :: tra_adv_pv = 1. ! a parameter to be used in horizontal advection (for QR4C it is the fraction of fourth-order contribution in the solution) +integer :: ID + +contains + procedure WRITE_T_TRACER_DATA + procedure READ_T_TRACER_DATA + generic :: write(unformatted) => WRITE_T_TRACER_DATA + generic :: read(unformatted) => READ_T_TRACER_DATA +END TYPE T_TRACER_DATA + + +TYPE T_TRACER_WORK +!auxuary arrays to work with tracers: +real(kind=WP), allocatable :: del_ttf(:,:) +real(kind=WP), allocatable :: del_ttf_advhoriz(:,:),del_ttf_advvert(:,:) +!_______________________________________________________________________________ +! in case ldiag_DVD=.true. --> calculate discrete variance decay (DVD) +real(kind=WP), allocatable :: tr_dvd_horiz(:,:,:), tr_dvd_vert(:,:,:) +! The fct part +real(kind=WP),allocatable,dimension(:,:) :: fct_LO ! Low-order solution +real(kind=WP),allocatable,dimension(:,:) :: adv_flux_hor ! Antidif. horiz. contrib. from edges / backup for iterafive fct scheme +real(kind=WP),allocatable,dimension(:,:) :: adv_flux_ver ! Antidif. vert. fluxes from nodes / backup for iterafive fct scheme + +real(kind=WP),allocatable,dimension(:,:) :: fct_ttf_max,fct_ttf_min +real(kind=WP),allocatable,dimension(:,:) :: fct_plus,fct_minus +! MUSCL type reconstruction +integer,allocatable,dimension(:) :: nboundary_lay +integer,allocatable,dimension(:,:) :: edge_up_dn_tri +real(kind=WP),allocatable,dimension(:,:,:) :: edge_up_dn_grad + +contains + procedure WRITE_T_TRACER_WORK + procedure READ_T_TRACER_WORK + generic :: write(unformatted) => WRITE_T_TRACER_WORK + generic :: read(unformatted) => READ_T_TRACER_WORK +END TYPE T_TRACER_WORK + +! auxury type for reading namelist.tra +TYPE NML_TRACER_LIST_TYPE + INTEGER :: ID =-1 + CHARACTER(len=4) :: adv_hor ='NONE' + CHARACTER(len=4) :: adv_ver ='NONE' + CHARACTER(len=4) :: adv_lim ='NONE' + REAL(kind=WP) :: adv_ph =1. + REAL(kind=WP) :: adv_pv =1. +END TYPE NML_TRACER_LIST_TYPE + +TYPE T_TRACER +! total number of tracers: +integer :: num_tracers=2 +type(t_tracer_data), allocatable :: data(:) +type(t_tracer_work) :: work +! general options for all tracers (can be moved to T_TRACER is needed) +! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). +logical :: smooth_bh_tra = .false. +real(kind=WP) :: gamma0_tra = 0.0005 +real(kind=WP) :: gamma1_tra = 0.0125 +real(kind=WP) :: gamma2_tra = 0. +logical :: i_vert_diff = .true. + +contains +procedure WRITE_T_TRACER +procedure READ_T_TRACER +generic :: write(unformatted) => WRITE_T_TRACER +generic :: read(unformatted) => READ_T_TRACER +END TYPE T_TRACER + +contains + +! Unformatted writing for T_TRACER_DATA +subroutine WRITE_T_TRACER_DATA(tdata, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER_DATA), intent(in) :: tdata + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call write_bin_array(tdata%values, unit, iostat, iomsg) + call write_bin_array(tdata%valuesAB, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tdata%smooth_bh_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma0_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma1_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma2_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%i_vert_diff + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_hor + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ver + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_lim + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ph + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_pv + write(unit, iostat=iostat, iomsg=iomsg) tdata%ID +end subroutine WRITE_T_TRACER_DATA + +! Unformatted reading for T_TRACER_DATA +subroutine READ_T_TRACER_DATA(tdata, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER_DATA), intent(inout) :: tdata + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call read_bin_array(tdata%values, unit, iostat, iomsg) + call read_bin_array(tdata%valuesAB, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) tdata%smooth_bh_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma0_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma1_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma2_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%i_vert_diff + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_hor + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ver + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_lim + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ph + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_pv + read(unit, iostat=iostat, iomsg=iomsg) tdata%ID +end subroutine READ_T_TRACER_DATA + +! Unformatted writing for T_TRACER_WORK +subroutine WRITE_T_TRACER_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER_WORK), intent(in) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call write_bin_array(twork%del_ttf, unit, iostat, iomsg) + call write_bin_array(twork%del_ttf_advhoriz, unit, iostat, iomsg) + call write_bin_array(twork%del_ttf_advvert, unit, iostat, iomsg) + call write_bin_array(twork%tr_dvd_horiz, unit, iostat, iomsg) + call write_bin_array(twork%tr_dvd_vert, unit, iostat, iomsg) + call write_bin_array(twork%fct_LO, unit, iostat, iomsg) + call write_bin_array(twork%adv_flux_hor, unit, iostat, iomsg) + call write_bin_array(twork%adv_flux_ver, unit, iostat, iomsg) + call write_bin_array(twork%fct_ttf_max, unit, iostat, iomsg) + call write_bin_array(twork%fct_ttf_min, unit, iostat, iomsg) + call write_bin_array(twork%fct_plus, unit, iostat, iomsg) + call write_bin_array(twork%fct_minus, unit, iostat, iomsg) + call write_bin_array(twork%nboundary_lay, unit, iostat, iomsg) + call write_bin_array(twork%edge_up_dn_tri, unit, iostat, iomsg) + call write_bin_array(twork%edge_up_dn_grad, unit, iostat, iomsg) +end subroutine WRITE_T_TRACER_WORK + +! Unformatted reading for T_TRACER_WORK +subroutine READ_T_TRACER_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER_WORK), intent(inout) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call read_bin_array(twork%del_ttf, unit, iostat, iomsg) + call read_bin_array(twork%del_ttf_advhoriz, unit, iostat, iomsg) + call read_bin_array(twork%del_ttf_advvert, unit, iostat, iomsg) + call read_bin_array(twork%tr_dvd_horiz, unit, iostat, iomsg) + call read_bin_array(twork%tr_dvd_vert, unit, iostat, iomsg) + call read_bin_array(twork%fct_LO, unit, iostat, iomsg) + call read_bin_array(twork%adv_flux_hor, unit, iostat, iomsg) + call read_bin_array(twork%adv_flux_ver, unit, iostat, iomsg) + call read_bin_array(twork%fct_ttf_max, unit, iostat, iomsg) + call read_bin_array(twork%fct_ttf_min, unit, iostat, iomsg) + call read_bin_array(twork%fct_plus, unit, iostat, iomsg) + call read_bin_array(twork%fct_minus, unit, iostat, iomsg) + call read_bin_array(twork%nboundary_lay, unit, iostat, iomsg) + call read_bin_array(twork%edge_up_dn_tri, unit, iostat, iomsg) + call read_bin_array(twork%edge_up_dn_grad, unit, iostat, iomsg) +end subroutine READ_T_TRACER_WORK + +! Unformatted writing for T_TRACER +subroutine WRITE_T_TRACER(tracer, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER), intent(in) :: tracer + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + + write(unit, iostat=iostat, iomsg=iomsg) tracer%num_tracers + do i=1, tracer%num_tracers + write(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) + end do + write(unit, iostat=iostat, iomsg=iomsg) tracer%work + write(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff +end subroutine WRITE_T_TRACER + +! Unformatted reading for T_TRACER +subroutine READ_T_TRACER(tracer, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER), intent(inout) :: tracer + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + + read(unit, iostat=iostat, iomsg=iomsg) tracer%num_tracers +! write(*,*) 'number of tracers to read: ', tracer%num_tracers + allocate(tracer%data(tracer%num_tracers)) + do i=1, tracer%num_tracers + read(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) +! write(*,*) 'tracer info:', tracer%data(i)%ID, TRIM(tracer%data(i)%tra_adv_hor), TRIM(tracer%data(i)%tra_adv_ver), TRIM(tracer%data(i)%tra_adv_lim) + end do + read(unit, iostat=iostat, iomsg=iomsg) tracer%work + read(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff +end subroutine READ_T_TRACER +end module MOD_TRACER +!========================================================== + diff --git a/src/temp/MOD_WRITE_BINARY_ARRAYS.F90 b/src/temp/MOD_WRITE_BINARY_ARRAYS.F90 new file mode 100644 index 000000000..4f03b5cea --- /dev/null +++ b/src/temp/MOD_WRITE_BINARY_ARRAYS.F90 @@ -0,0 +1,160 @@ +!========================================================== +! +!------------------------------------------------------------------------------------------ +! useful interface (write_bin_array) for writing arbitary binary arrays into an opened file +MODULE MOD_WRITE_BINARY_ARRAYS +use o_PARAM +private +public :: write_bin_array, write1d_int_static +INTERFACE write_bin_array + MODULE PROCEDURE write1d_real, write1d_int, write1d_char, write2d_real, write2d_int, write3d_real, write3d_int +END INTERFACE +contains + +subroutine write1d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_real + +subroutine write1d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_int + +subroutine write1d_char(arr, unit, iostat, iomsg) + character, intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_char + +subroutine write1d_int_static(arr, unit, iostat, iomsg) + IMPLICIT NONE + integer, intent(in) :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine write1d_int_static + +subroutine write2d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) + else + s1=0 + s2=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + end if +end subroutine write2d_real + +subroutine write2d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) + else + s1=0 + s2=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + end if +end subroutine write2d_int + + +subroutine write3d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + s3=size(arr, 3) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) + else + s1=0 + s2=0 + s3=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + end if +end subroutine write3d_real + +subroutine write3d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + s3=size(arr, 3) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) + else + s1=0 + s2=0 + s3=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + end if +end subroutine write3d_int +end module MOD_WRITE_BINARY_ARRAYS +!========================================================== + diff --git a/src/temp/gen_halo_exchange.F90 b/src/temp/gen_halo_exchange.F90 new file mode 100755 index 000000000..7b9f66e6b --- /dev/null +++ b/src/temp/gen_halo_exchange.F90 @@ -0,0 +1,2381 @@ +! ======================================================================== +! Halo exchange routines + broadcast routines that collect information +! on the entire field (needed for output) +! The routines here are very similar, difference is the data type and +! exchange pattern. +! exchange_nod2D_i(arr(myDim_nod2D+eDim_nod2D)) INTEGER +! exchange_nod2D(arr(myDim_nod2D+eDim_nod2D)) WP +! exchange_nod3D(arr(nl-1,myDim_nod2D+eDim_nod2D)) WP +! exchange_nod3D_full(arr(nl,myDim_nod2D+eDim_nod2D)) WP +! exchange_edge2D(edge_array2D) WP not used currently !!! no buffer!!! +! exchange_edge3D(edge_array3D) WP not used currently !!! no buffer!!! +! exchange_elem3D(elem_array3D) WP +! exchange_elem2d_full +! exchange_elem2d_full_i +! ======================================================================== + +module g_comm + + use, intrinsic :: ISO_FORTRAN_ENV + + implicit none + +contains + +#ifdef DEBUG +! General version of the communication routine for 2D nodal fields +! Only needed in debug mode +subroutine check_mpi_comm(rn, sn, r_mpitype, s_mpitype, rPE, sPE, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(in) :: sn, rn, r_mpitype(:), s_mpitype(:), rPE(:), sPE(:) +integer :: n, sdebug, rdebug, status(MPI_STATUS_SIZE), request +#include "associate_part_def.h" +#include "associate_part_ass.h" +DO n=1,rn + CALL MPI_TYPE_SIZE(r_mpitype(n), rdebug, MPIerr) + CALL MPI_ISEND(rdebug, 1, MPI_INTEGER, rPE(n), 10, MPI_COMM_FESOM, request, MPIerr) +END DO +DO n=1, sn + call MPI_RECV(sdebug, 1, MPI_INTEGER, sPE(n), 10, MPI_COMM_FESOM, & + status, MPIerr) + call MPI_TYPE_SIZE(s_mpitype(n), rdebug, MPIerr) + if (sdebug /= rdebug) then + print *, "Mismatching MPI send/recieve message lengths." + print *,"Send/receive process numbers: ", mype, '/', sPE(n) + print *,"Number of send/receive bytes: ", sdebug, '/', rdebug + call MPI_ABORT( MPI_COMM_FESOM, 1 ) + end if +END DO +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +END SUBROUTINE check_mpi_comm +#endif + + +subroutine exchange_nod2D_i(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: nod_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" +if (npes > 1) then + call exchange_nod2D_i_begin(nod_array2D, partit) + call exchange_nod_end(partit) +endif +END SUBROUTINE exchange_nod2D_i + +!============================================================================= +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_i_begin(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: nod_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod2D_i, s_mpitype_nod2D_i, & + com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + + call MPI_IRECV(nod_array2D, 1, r_mpitype_nod2D_i(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(n), MPIerr) + END DO + + DO n=1, sn + + call MPI_ISEND(nod_array2D, 1, s_mpitype_nod2D_i(n), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(rn+n), MPIerr) + END DO + + com_nod2D%nreq = rn+sn + + endif +END SUBROUTINE exchange_nod2D_i_begin + +! ======================================================================== +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + call exchange_nod2D_begin(nod_array2D, partit) + call exchange_nod_end(partit) + end if + +END SUBROUTINE exchange_nod2D + +! ======================================================================== +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_begin(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod2D, s_mpitype_nod2D, & + com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(nod_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(nod_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(rn+n), MPIerr) + END DO + + com_nod2D%nreq = rn+sn + + end if + +END SUBROUTINE exchange_nod2D_begin +!=============================================== +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_2fields(nod1_array2D, nod2_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + + if (npes > 1) then + call exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D, partit) + call exchange_nod_end(partit) + end if + +END SUBROUTINE exchange_nod2D_2fields + +! ======================================================================== +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes > 1) then + + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod2D, s_mpitype_nod2D, & + com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(nod1_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(2*n-1), MPIerr) + + call MPI_IRECV(nod2_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n)+npes, MPI_COMM_FESOM, com_nod2D%req(2*n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(nod1_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(2*rn+2*n-1), MPIerr) + + call MPI_ISEND(nod2_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype+npes, MPI_COMM_FESOM, com_nod2D%req(2*rn+2*n), MPIerr) + END DO + + com_nod2D%nreq = 2*(rn+sn) + +end if + +END SUBROUTINE exchange_nod2D_2fields_begin + +!=============================================== +subroutine exchange_nod2D_3fields(nod1_array2D, nod2_array2D, nod3_array2D, partit) +! General version of the communication routine for 2D nodal fields +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +real(real64), intent(inout) :: nod3_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + + if (npes > 1) then + call exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D, partit) + call exchange_nod_end(partit) + end if + +END SUBROUTINE exchange_nod2D_3fields + +! ======================================================================== +subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D, partit) +! General version of the communication routine for 2D nodal fields +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +real(real64), intent(inout) :: nod3_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod2D, s_mpitype_nod2D, & + com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(nod1_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(3*n-2), MPIerr) + + call MPI_IRECV(nod2_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n)+npes, MPI_COMM_FESOM, com_nod2D%req(3*n-1), MPIerr) + + call MPI_IRECV(nod3_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n)+2*npes, MPI_COMM_FESOM, com_nod2D%req(3*n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(nod1_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(3*rn+3*n-2), MPIerr) + + call MPI_ISEND(nod2_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype+npes, MPI_COMM_FESOM, com_nod2D%req(3*rn+3*n-1), MPIerr) + + call MPI_ISEND(nod3_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype+2*npes, MPI_COMM_FESOM, com_nod2D%req(3*rn+3*n), MPIerr) + END DO + + com_nod2D%nreq = 3*(rn+sn) + +end if + +END SUBROUTINE exchange_nod2D_3fields_begin + +! ======================================================================== +! General version of the communication routine for 3D nodal fields +! stored in (vertical, horizontal) format +subroutine exchange_nod3D(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:) + +if (partit%npes > 1) then + call exchange_nod3D_begin(nod_array3D, partit) + call exchange_nod_end(partit) +endif + +END SUBROUTINE exchange_nod3D + +! ======================================================================== +! General version of the communication routine for 3D nodal fields +! stored in (vertical, horizontal) format +subroutine exchange_nod3D_begin(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:) +integer :: n, sn, rn +integer :: nz, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + nl1=ubound(nod_array3D,1) + + if ((nl1ubound(r_mpitype_nod3D, 2))) then + if (mype==0) then + print *,'Subroutine exchange_nod3D not implemented for',nl1,'layers.' + print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' + endif + call par_ex(partit, 1) + endif + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod3D(:,nl1,1), s_mpitype_nod3D(:,nl1,1), & + com_nod2D%rPE, com_nod2D%sPE) +#endif + DO n=1,rn + call MPI_IRECV(nod_array3D, 1, r_mpitype_nod3D(n,nl1,1), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(nod_array3D, 1, s_mpitype_nod3D(n,nl1,1), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(rn+n), MPIerr) + END DO + com_nod2D%nreq = rn+sn + + endif +END SUBROUTINE exchange_nod3D_begin + +! ======================================================================== +! General version of the communication routine for 3D nodal fields +! stored in (vertical, horizontal) format +subroutine exchange_nod3D_2fields(nod1_array3D,nod2_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array3D(:,:) +real(real64), intent(inout) :: nod2_array3D(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes > 1) then + call exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) + call exchange_nod_end(partit) +endif +END SUBROUTINE exchange_nod3D_2fields + +! ======================================================================== +subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) +! General version of the communication routine for 3D nodal fields +! stored in (vertical, horizontal) format +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array3D(:,:) +real(real64), intent(inout) :: nod2_array3D(:,:) +integer :: n, sn, rn +integer :: nz, nl1, nl2 +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + nl1 = ubound(nod1_array3D,1) + + if ((nl1ubound(r_mpitype_nod3D, 2))) then + if (mype==0) then + print *,'Subroutine exchange_nod3D not implemented for',nl1,'layers.' + print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' + endif + call par_ex(1) + endif + + nl2 = ubound(nod2_array3D,1) + if ((nl2ubound(r_mpitype_nod3D, 2))) then + if (mype==0) then + print *,'Subroutine exchange_nod3D not implemented for',nl2,'layers.' + print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' + endif + call par_ex(1) + endif + +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod3D(:,nl1,1), s_mpitype_nod3D(:,nl1,1), & + com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(nod1_array3D, 1, r_mpitype_nod3D(n,nl1,1), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(2*n-1), MPIerr) + + call MPI_IRECV(nod2_array3D, 1, r_mpitype_nod3D(n,nl2,1), com_nod2D%rPE(n), & + com_nod2D%rPE(n)+npes, MPI_COMM_FESOM, com_nod2D%req(2*n ), MPIerr) + END DO + + DO n=1, sn + call MPI_ISEND(nod1_array3D, 1, s_mpitype_nod3D(n,nl1,1), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(2*rn+2*n-1), MPIerr) + + call MPI_ISEND(nod2_array3D, 1, s_mpitype_nod3D(n,nl2,1), com_nod2D%sPE(n), & + mype+npes, MPI_COMM_FESOM, com_nod2D%req(2*rn+2*n), MPIerr) + END DO + + com_nod2D%nreq = 2*(rn+sn) + + endif +END SUBROUTINE exchange_nod3D_2fields_begin +! ======================================================================== +subroutine exchange_nod3D_n(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:,:) +if (partit%npes>1) then + call exchange_nod3D_n_begin(nod_array3D, partit) + call exchange_nod_end(partit) +endif + +END SUBROUTINE exchange_nod3D_n + +!================================================= +! General version of the communication routine for 3D nodal fields +! stored in (vertical, horizontal) format +subroutine exchange_nod3D_n_begin(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:,:) +integer :: n, sn, rn +integer :: nz, nl1, n_val +#include "associate_part_def.h" +#include "associate_part_ass.h" +if (npes>1) then + ! nod_array3D(n_val,nl1,nod2D_size) + nl1 = ubound(nod_array3D,2) + n_val = ubound(nod_array3D,1) + if ((nl1ubound(r_mpitype_nod3D, 2)) .or. (n_val > 3)) then + ! This routine also works for swapped dimensions nod_array3D(nl1,n_val, nod2D_size) + nl1 = ubound(nod_array3D,1) + n_val = ubound(nod_array3D,2) + + if ((nl1ubound(r_mpitype_nod3D, 2)) .or. (n_val > 3)) then + if (mype==0) then + print *,'Subroutine exchange_nod3D_n not implemented for' + print *,nl1,'layers and / or ',n_val,'values per element.' + print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' + endif + call par_ex(1) + endif + endif + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod3D(:,nl1,n_val), & + s_mpitype_nod3D(:,nl1,n_val), com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(nod_array3D, 1, r_mpitype_nod3D(n,nl1,n_val), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(n), MPIerr) + END DO + + DO n=1, sn + call MPI_ISEND(nod_array3D, 1, s_mpitype_nod3D(n,nl1,n_val), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(rn+n), MPIerr) + END DO + + com_nod2D%nreq = rn+sn + + endif + + +END SUBROUTINE exchange_nod3D_n_begin + +!======================================= +! AND WAITING +!======================================= + +SUBROUTINE exchange_nod_end(partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit + +if (partit%npes > 1) & + call MPI_WAITALL(partit%com_nod2D%nreq, partit%com_nod2D%req, MPI_STATUSES_IGNORE, partit%MPIerr) + +END SUBROUTINE exchange_nod_end + +SUBROUTINE exchange_elem_end(partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + if (elem_full_flag) then + call MPI_WAITALL(com_elem2D_full%nreq, & + com_elem2D_full%req, MPI_STATUSES_IGNORE, MPIerr) + else + call MPI_WAITALL(com_elem2D%nreq, & + com_elem2D%req, MPI_STATUSES_IGNORE, MPIerr) + endif + end if +END SUBROUTINE exchange_elem_end +!============================================================================= +subroutine exchange_elem3D(elem_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +call exchange_elem3D_begin(elem_array3D, partit) +call exchange_elem_end(partit) + +END SUBROUTINE exchange_elem3D +!=========================================== +! General version of the communication routine for 3D elemental fields +! stored in (vertical, horizontal) format +subroutine exchange_elem3D_begin(elem_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:) +integer :: n, sn, rn, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + + nl1=ubound(elem_array3D,1) + + if (ubound(elem_array3D,2)<=myDim_elem2D+eDim_elem2D) then + + elem_full_flag = .false. + + sn=com_elem2D%sPEnum + rn=com_elem2D%rPEnum + + if (nl1==ubound(r_mpitype_elem3D, 2) .or. nl1==ubound(r_mpitype_elem3D, 2)-1) then + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem3D(:,nl1,1), s_mpitype_elem3D(:,nl1,1), & + com_elem2D%rPE, com_elem2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem3D(n,nl1,1), com_elem2D%rPE(n), & + com_elem2D%rPE(n), MPI_COMM_FESOM, & + com_elem2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem3D(n,nl1,1), com_elem2D%sPE(n), & + mype, MPI_COMM_FESOM, & + com_elem2D%req(rn+n), MPIerr) + END DO + + elseif (nl1 <= 4) then + ! In fact, this is a 2D-array with up to 4 values, e.g. derivatives + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem2D(:,nl1), s_mpitype_elem2D(:,nl1), & + com_elem2D%rPE, com_elem2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem2D(n,nl1), com_elem2D%rPE(n), & + com_elem2D%rPE(n), MPI_COMM_FESOM, & + com_elem2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem2D(n,nl1), com_elem2D%sPE(n), & + mype, MPI_COMM_FESOM, & + com_elem2D%req(rn+n), MPIerr) + END DO + else + if (mype==0) print *,'Sorry, no MPI datatype prepared for',nl1,'values per element (exchange_elem3D)' + call par_ex(1) + endif + + com_elem2D%nreq = rn+sn + + else + + elem_full_flag = .true. + + sn=com_elem2D_full%sPEnum + rn=com_elem2D_full%rPEnum + + if (nl1==ubound(r_mpitype_elem3D_full, 2) .or. nl1==ubound(r_mpitype_elem3D_full, 2)-1) then + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem3D_full(:,nl1,1), & + s_mpitype_elem3D_full(:,nl1,1), com_elem2D_full%rPE, com_elem2D_full%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem3D_full(n,nl1,1), & + com_elem2D_full%rPE(n), & + com_elem2D_full%rPE(n), MPI_COMM_FESOM, & + com_elem2D_full%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem3D_full(n,nl1,1), & + com_elem2D_full%sPE(n), & + mype, MPI_COMM_FESOM, & + com_elem2D_full%req(rn+n), MPIerr) + END DO + elseif (nl1 <= 4) then + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem2D_full(:,nl1), & + s_mpitype_elem2D_full(:,nl1), com_elem2D_full%rPE, com_elem2D_full%sPE) +#endif + + ! In fact, this is a 2D-array with up to 4 values, e.g. derivatives + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem2D_full(n,nl1), & + com_elem2D_full%rPE(n), & + com_elem2D_full%rPE(n), MPI_COMM_FESOM, & + com_elem2D_full%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem2D_full(n,nl1), & + com_elem2D_full%sPE(n), & + mype, MPI_COMM_FESOM, & + com_elem2D_full%req(rn+n), MPIerr) + END DO + else + if (mype==0) print *,'Sorry, no MPI datatype prepared for',nl1,'values per element (exchange_elem3D)' + call par_ex(1) + endif + + com_elem2D_full%nreq = rn+sn + + endif + +endif + +END SUBROUTINE exchange_elem3D_begin + +!============================================================================= +! General version of the communication routine for 3D elemental fields +! stored in (vertical, horizontal) format +subroutine exchange_elem3D_n(elem_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes> 1) then + call exchange_elem3D_n_begin(elem_array3D, partit) + call exchange_elem_end(partit) + endif +END SUBROUTINE exchange_elem3D_n +!============================================================================= +subroutine exchange_elem3D_n_begin(elem_array3D, partit) +! General version of the communication routine for 3D elemental fields +! stored in (vertical, horizontal) format +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:,:) +integer :: n, sn, rn, n_val, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes> 1) then + nl1 = ubound(elem_array3D,2) + n_val = ubound(elem_array3D,1) + + if ((nl1ubound(r_mpitype_elem3D, 2)) .or. (n_val > 4)) then + + ! This routine also works for swapped dimensions elem_array3D(nl1,n_val, elem2D_size) + nl1= ubound(elem_array3D,1) + n_val = ubound(elem_array3D,2) + + if ((nl1ubound(r_mpitype_elem3D, 2)) .or. (n_val > 4)) then + if (mype==0) then + print *,'Subroutine exchange_elem3D_n not implemented for' + print *,nl1,'layers and / or ',n_val,'values per element.' + print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' + endif + call par_ex(1) + endif + endif + + if (ubound(elem_array3D,3)<=myDim_elem2D+eDim_elem2D) then + + elem_full_flag = .false. + + sn=com_elem2D%sPEnum + rn=com_elem2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem3D(:,nl1,n_val), & + s_mpitype_elem3D(:,nl1,n_val), com_elem2D%rPE, com_elem2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem3D(n,nl1,n_val), com_elem2D%rPE(n), & + com_elem2D%rPE(n), MPI_COMM_FESOM, com_elem2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem3D(n,nl1,n_val), com_elem2D%sPE(n), & + mype, MPI_COMM_FESOM, com_elem2D%req(rn+n), MPIerr) + END DO + + com_elem2D%nreq = rn+sn + + else + + elem_full_flag = .true. + + sn=com_elem2D_full%sPEnum + rn=com_elem2D_full%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem3D_full(:,nl1,n_val), & + s_mpitype_elem3D_full(:,nl1,n_val), com_elem2D_full%rPE, com_elem2D_full%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem3D_full(n,nl1,n_val), com_elem2D_full%rPE(n), & + com_elem2D_full%rPE(n), MPI_COMM_FESOM, com_elem2D_full%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem3D_full(n,nl1,n_val), com_elem2D_full%sPE(n), & + mype, MPI_COMM_FESOM, com_elem2D_full%req(rn+n), MPIerr) + END DO + + com_elem2D_full%nreq = rn+sn + + end if + + +endif +END SUBROUTINE exchange_elem3D_n_begin +!======================================================================== +! General version of the communication routine for 3D elemental fields +! stored in (vertical, horizontal) format +subroutine exchange_elem2D(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes> 1) then + call exchange_elem2D_begin(elem_array2D, partit) + call exchange_elem_end(partit) + end if + +END SUBROUTINE exchange_elem2D +!======================================================================== +! General version of the communication routine for 3D elemental fields +! stored in (vertical, horizontal) format +subroutine exchange_elem2D_begin(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + + if (ubound(elem_array2D,1)<=myDim_elem2D+eDim_elem2D) then + + elem_full_flag = .false. + + sn=com_elem2D%sPEnum + rn=com_elem2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem2D(:,1), s_mpitype_elem2D(:,1), & + com_elem2D%rPE, com_elem2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array2D, 1, r_mpitype_elem2D(n,1), com_elem2D%rPE(n), & + com_elem2D%rPE(n), MPI_COMM_FESOM, com_elem2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array2D, 1, s_mpitype_elem2D(n,1), com_elem2D%sPE(n), & + mype, MPI_COMM_FESOM, com_elem2D%req(rn+n), MPIerr) + END DO + + com_elem2D%nreq = rn+sn + + else + elem_full_flag = .true. + + sn=com_elem2D_full%sPEnum + rn=com_elem2D_full%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem2D_full(:,1), s_mpitype_elem2D_full(:,1), & + com_elem2D_full%rPE, com_elem2D_full%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array2D, 1, r_mpitype_elem2D_full(n,1), com_elem2D_full%rPE(n), & + com_elem2D_full%rPE(n), MPI_COMM_FESOM, com_elem2D_full%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array2D, 1, s_mpitype_elem2D_full(n,1), com_elem2D_full%sPE(n), & + mype, MPI_COMM_FESOM, com_elem2D_full%req(rn+n), MPIerr) + END DO + + com_elem2D_full%nreq = rn+sn + + end if + +end if + +END SUBROUTINE exchange_elem2D_begin +! ======================================================================== +!Exchange with ALL(!) the neighbours +subroutine exchange_elem2D_i(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes> 1) then + call exchange_elem2D_i_begin(elem_array2D, partit) + call exchange_elem_end(partit) +end if + +END SUBROUTINE exchange_elem2D_i +!============================================================================= +!Exchange with ALL(!) the neighbours +subroutine exchange_elem2D_i_begin(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes> 1) then + + elem_full_flag = .true. + + sn=com_elem2D_full%sPEnum + rn=com_elem2D_full%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem2D_full_i, s_mpitype_elem2D_full_i, & + com_elem2D_full%rPE, com_elem2D_full%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array2D, 1, r_mpitype_elem2D_full_i(n), com_elem2D_full%rPE(n), & + com_elem2D_full%rPE(n), MPI_COMM_FESOM, com_elem2D_full%req(n), MPIerr) + END DO + + DO n=1, sn + + call MPI_ISEND(elem_array2D, 1, s_mpitype_elem2D_full_i(n), com_elem2D_full%sPE(n), & + mype, MPI_COMM_FESOM, com_elem2D_full%req(rn+n), MPIerr) + END DO + + com_elem2D_full%nreq = rn+sn + +end if + +END SUBROUTINE exchange_elem2D_i_begin +! ======================================================================== +! Broadcast routines +! Many because of different sizes. +! ======================================================================== +subroutine broadcast_nod3D(arr3D, arr3Dglobal, partit) +! Distribute the nodal information available on 0 PE to other PEs +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nz, counter,nl1 +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr3D(:,:) +real(real64) :: arr3Dglobal(:,:) +integer :: node_size +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf, recvbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" + +node_size=myDim_nod2D+eDim_nod2D +nl1=ubound(arr3D,1) +IF ( mype == 0 ) THEN + if (npes>1) then + arr3D(:,1:node_size)=arr3Dglobal(:,myList_nod2D(1:node_size)) + end if + DO n = 1, npes-1 + CALL MPI_RECV( nTS, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(sendbuf(nTS*nl1), irecvbuf(nTS)) + + CALL MPI_RECV(irecvbuf(1), nTS, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + counter=0 + DO i = 1, nTS + DO nz=1, nl1 + counter=counter+1 + sendbuf(counter) = arr3Dglobal(nz,irecvbuf(i)) + ENDDO + ENDDO + + CALL MPI_SEND(sendbuf(1), nTS*nl1, MPI_DOUBLE_PRECISION, & + sender, 2, MPI_COMM_FESOM, MPIerr ) + + DEALLOCATE(irecvbuf, sendbuf) + ENDDO +ELSE + CALL MPI_SEND( node_size, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_nod2D(1), node_size, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + + ALLOCATE(recvbuf(node_size*nl1)) + CALL MPI_RECV( recvbuf(1), node_size*nl1, MPI_DOUBLE_PRECISION, 0, & + 2, MPI_COMM_FESOM, status, MPIerr ) + counter=0 + DO n = 1, node_size + DO nz=1, nl1 + counter=counter+1 + arr3D(nz,n)=recvbuf(counter) + ENDDO + ENDDO + + DEALLOCATE(recvbuf) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine broadcast_nod3D +! +!============================================================================ +! +subroutine broadcast_nod2D(arr2D, arr2Dglobal, partit) +! A 2D version of the previous routine +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(in), target :: partit +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf +integer :: node_size +#include "associate_part_def.h" +#include "associate_part_ass.h" + +node_size=myDim_nod2D+eDim_nod2D + +IF ( mype == 0 ) THEN + if (npes>1) then + arr2D(1:node_size)=arr2Dglobal(myList_nod2D(1:node_size)) + end if + DO n = 1, npes-1 + CALL MPI_RECV( nTS, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(sendbuf(nTS), irecvbuf(nTS)) + + CALL MPI_RECV(irecvbuf(1), nTS, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + DO i = 1, nTS + sendbuf(i) = arr2Dglobal(irecvbuf(i)) + ENDDO + + CALL MPI_SEND(sendbuf(1), nTS, MPI_DOUBLE_PRECISION, & + sender, 2, MPI_COMM_FESOM, MPIerr ) + + DEALLOCATE(irecvbuf, sendbuf) + ENDDO +ELSE + CALL MPI_SEND( node_size, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_nod2D(1), node_size, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + CALL MPI_RECV( arr2D(1), node_size, MPI_DOUBLE_PRECISION, 0, & + 2, MPI_COMM_FESOM, status, MPIerr ) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine broadcast_nod2D +! +!============================================================================ +! +subroutine broadcast_elem3D(arr3D, arr3Dglobal, partit) +! Distribute the elemental information available on 0 PE to other PEs +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(in), target :: partit +INTEGER :: nz, counter,nl1 +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr3D(:,:) +real(real64) :: arr3Dglobal(:,:) +integer :: elem_size + +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf, recvbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" + +elem_size=myDim_elem2D+eDim_elem2D + +nl1=ubound(arr3D,1) +IF ( mype == 0 ) THEN + if (npes>1) then + arr3D(:,1:elem_size)=arr3Dglobal(:,myList_elem2D(1:elem_size)) + end if + DO n = 1, npes-1 + CALL MPI_RECV( nTS, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(sendbuf(nTS*nl1), irecvbuf(nTS)) + + CALL MPI_RECV(irecvbuf(1), nTS, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + counter=0 + DO i = 1, nTS + DO nz=1, nl1 + counter=counter+1 + sendbuf(counter) = arr3Dglobal(nz,irecvbuf(i)) + ENDDO + ENDDO + + CALL MPI_SEND(sendbuf(1), nTS*nl1, MPI_DOUBLE_PRECISION, & + sender, 2, MPI_COMM_FESOM, MPIerr ) + + DEALLOCATE(irecvbuf, sendbuf) + ENDDO +ELSE + CALL MPI_SEND( elem_size, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_elem2D(1), elem_size, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + + ALLOCATE(recvbuf(elem_size*nl1)) + CALL MPI_RECV( recvbuf(1), elem_size*nl1, MPI_DOUBLE_PRECISION, 0, & + 2, MPI_COMM_FESOM, status, MPIerr ) + counter=0 + DO n = 1, elem_size + DO nz=1, nl1 + counter=counter+1 + arr3D(nz,n)=recvbuf(counter) + ENDDO + ENDDO + + DEALLOCATE(recvbuf) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine broadcast_elem3D +! +!============================================================================ +! +subroutine broadcast_elem2D(arr2D, arr2Dglobal, partit) +! A 2D version of the previous routine +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(in), target :: partit +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: elem_size +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" + +elem_size=myDim_elem2D+eDim_elem2D + +IF ( mype == 0 ) THEN + if (npes>1) then + arr2D(1:elem_size)=arr2Dglobal(myList_elem2D(1:elem_size)) + end if + DO n = 1, npes-1 + CALL MPI_RECV( nTS, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(sendbuf(1:nTS), irecvbuf(nTS)) + + CALL MPI_RECV(irecvbuf(1), nTS, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + DO i = 1, nTS + sendbuf(i) = arr2Dglobal(irecvbuf(i)) + ENDDO + + CALL MPI_SEND(sendbuf(1), nTS, MPI_DOUBLE_PRECISION, & + sender, 2, MPI_COMM_FESOM, MPIerr ) + + DEALLOCATE(irecvbuf, sendbuf) + ENDDO +ELSE + CALL MPI_SEND( elem_size, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_elem2D(1), elem_size, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + CALL MPI_RECV( arr2D(1), elem_size, MPI_DOUBLE_PRECISION, 0, & + 2, MPI_COMM_FESOM, status, MPIerr ) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine broadcast_elem2D +! +!============================================================================ +! Make nodal information available to master PE +! Use only with 3D arrays stored in (vertical, horizontal) way +subroutine gather_nod3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real64) :: arr3D_global(:,:) +real(real64), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(nl1,ubound(arr3D_global,2))) + + do n = 1, npes-1 + n3D = (remPtr_nod2D(n+1) - remPtr_nod2D(n))*nl1 + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(1,start), n3D, MPI_DOUBLE_PRECISION, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_nod2D(1:myDim_nod2D)) = arr3D(1:nl1,1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_nod2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_nod2D*nl1, MPI_DOUBLE_PRECISION, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +end if +end subroutine gather_nod3D +! +!============================================================================ +! +subroutine gather_real4_nod3D(arr3D, arr3D_global, partit) + +! Make nodal information available to master PE +! +! Use only with 3D arrays stored in (vertical, horizontal) way +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real32) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +real(real32), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(nl1,ubound(arr3D_global,2))) + + do n = 1, npes-1 + n3D = (remPtr_nod2D(n+1) - remPtr_nod2D(n))*nl1 + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(1,start), n3D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_nod2D(1:myDim_nod2D)) = arr3D(1:nl1,1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_nod2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_nod2D*nl1, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +end if +end subroutine gather_real4_nod3D +!======================================================= + +subroutine gather_int2_nod3D(arr3D, arr3D_global, partit) + +! Make nodal information available to master PE +! +! Use only with 3D arrays stored in (vertical, horizontal) way +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +integer(int16) :: arr3D(:,:) +integer(int16) :: arr3D_global(:,:) +integer(int16), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" + + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(nl1,ubound(arr3D_global,2))) + + do n = 1, npes-1 + n3D = (remPtr_nod2D(n+1) - remPtr_nod2D(n))*nl1 + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(1,start), n3D, MPI_SHORT, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_nod2D(1:myDim_nod2D)) = arr3D(1:nl1,1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_nod2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_nod2D*nl1, MPI_SHORT, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +end if +end subroutine gather_int2_nod3D +!============================================== +subroutine gather_nod2D(arr2D, arr2D_global, partit) +! Make nodal information available to master PE +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real64) :: arr2D_global(:) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(ubound(arr2D_global,1))) + do n = 1, npes-1 + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(start), n2D, MPI_DOUBLE_PRECISION, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_nod2D(1:myDim_nod2D)) = arr2D(1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1 : remPtr_nod2D(npes)-1) + deallocate(recvbuf) + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_nod2D, MPI_DOUBLE_PRECISION, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_nod2D +!============================================== +subroutine gather_real4_nod2D(arr2D, arr2D_global, partit) +! Make nodal information available to master PE +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real32) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(ubound(arr2D_global,1))) + do n = 1, npes-1 + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(start), n2D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_nod2D(1:myDim_nod2D)) = arr2D(1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1 : remPtr_nod2D(npes)-1) + deallocate(recvbuf) + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_nod2D, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_real4_nod2D + +!============================================== +! Make nodal information available to master PE +subroutine gather_int2_nod2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer(int16) :: arr2D(:) +integer(int16) :: arr2D_global(:) +integer(int16), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(ubound(arr2D_global,1))) + do n = 1, npes-1 + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(start), n2D, MPI_SHORT, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_nod2D(1:myDim_nod2D)) = arr2D(1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1 : remPtr_nod2D(npes)-1) + deallocate(recvbuf) + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_nod2D, MPI_SHORT, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_int2_nod2D + +!============================================================================ +subroutine gather_elem3D(arr3D, arr3D_global, partit) +! Make element information available to master PE +! +! Use only with 3D arrays stored in (vertical, horizontal) way +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real64) :: arr3D_global(:,:) +real(real64), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global +! (Carefull with duplicate interface elements, coming from two +! PEs at once!) + +IF ( mype == 0 ) THEN + + if (npes>1) then +! + allocate(recvbuf(nl1,remPtr_elem2D(npes))) + + do n = 1, npes-1 + e3D = (remPtr_elem2D(n+1) - remPtr_elem2D(n))*nl1 + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(1,start), e3D, MPI_DOUBLE_PRECISION, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_elem2D(1:myDim_elem2D)) = arr3D(1:nl1,1:myDim_elem2D) + + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_elem2D*nl1, MPI_DOUBLE_PRECISION, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_elem3D + +!=================================================================== +! Make element information available to master PE +! Use only with 3D arrays stored in (vertical, horizontal) way +subroutine gather_real4_elem3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real32) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +real(real32), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global +! (Carefull with duplicate interface elements, coming from two +! PEs at once!) + +IF ( mype == 0 ) THEN + + if (npes>1) then +! + allocate(recvbuf(nl1,remPtr_elem2D(npes))) + + do n = 1, npes-1 + e3D = (remPtr_elem2D(n+1) - remPtr_elem2D(n))*nl1 + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(1,start), e3D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_elem2D(1:myDim_elem2D)) = arr3D(1:nl1,1:myDim_elem2D) + + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_elem2D*nl1, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_real4_elem3D + + +!=================================================================== +! Make element information available to master PE +! Use only with 3D arrays stored in (vertical, horizontal) way +subroutine gather_int2_elem3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +integer(int16) :: arr3D(:,:) +integer(int16) :: arr3D_global(:,:) +integer(int16), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global +! (Carefull with duplicate interface elements, coming from two +! PEs at once!) + +IF ( mype == 0 ) THEN + + if (npes>1) then +! + allocate(recvbuf(nl1,remPtr_elem2D(npes))) + + do n = 1, npes-1 + e3D = (remPtr_elem2D(n+1) - remPtr_elem2D(n))*nl1 + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(1,start), e3D, MPI_SHORT, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_elem2D(1:myDim_elem2D)) = arr3D(1:nl1,1:myDim_elem2D) + + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_elem2D*nl1, MPI_SHORT, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_int2_elem3D + + +!============================================== +! Make element information available to master PE +subroutine gather_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real64) :: arr2D_global(:) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + + allocate(recvbuf(remPtr_elem2D(npes))) + + do n = 1, npes-1 + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(start), e2D, MPI_DOUBLE_PRECISION, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_elem2D(1:myDim_elem2D)) = arr2D(1:myDim_elem2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_elem2D, MPI_DOUBLE_PRECISION, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF +end if + +end subroutine gather_elem2D + +!================================================ +! Make element information available to master PE +subroutine gather_real4_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real32) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + + allocate(recvbuf(remPtr_elem2D(npes))) + + do n = 1, npes-1 + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(start), e2D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_elem2D(1:myDim_elem2D)) = arr2D(1:myDim_elem2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_elem2D, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF +end if + +end subroutine gather_real4_elem2D + +!================================================ +! Make element information available to master PE +subroutine gather_int2_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer(int16) :: arr2D(:) +integer(int16) :: arr2D_global(:) +integer(int16), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + + allocate(recvbuf(remPtr_elem2D(npes))) + + do n = 1, npes-1 + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(start), e2D, MPI_SHORT, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_elem2D(1:myDim_elem2D)) = arr2D(1:myDim_elem2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_elem2D, MPI_SHORT, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF +end if + +end subroutine gather_int2_elem2D + + +!============================================================================ +! Make nodal information available to master PE +! Use only with 3D arrays stored in (vertical, horizontal) way +subroutine gather_real8to4_nod3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D, ierr +real(real32), allocatable :: recvbuf(:,:) +real(real32), allocatable :: sendbuf(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(nl1, ubound(arr3D_global,2))) + + do n = 1, npes-1 + n3D = (remPtr_nod2D(n+1) - remPtr_nod2D(n))*nl1 + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(1,start), n3D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_nod2D(1:myDim_nod2D)) = arr3D(1:nl1,1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_nod2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + allocate(sendbuf(nl1,myDim_nod2D)) + sendbuf(1:nl1,1:myDim_nod2D) = arr3D(1:nl1,1:myDim_nod2D) + + call MPI_SEND(sendbuf, myDim_nod2D*nl1, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + deallocate(sendbuf) + +ENDIF + +end if + +end subroutine gather_real8to4_nod3D +!============================================== +! Make nodal information available to master PE +subroutine gather_real8to4_nod2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32) :: sendbuf(partit%myDim_nod2D) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +! Consider MPI-datatypes to recv directly into arr2D_global! + + if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(ubound(arr2D_global,1))) + do n = 1, npes-1 + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(start), n2D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_nod2D(1:myDim_nod2D)) = arr2D(1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1 : remPtr_nod2D(npes)-1) + deallocate(recvbuf) + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + sendbuf(1:myDim_nod2D) = real(arr2D(1:myDim_nod2D),real32) + + call MPI_SEND(sendbuf, myDim_nod2D, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +end if +end subroutine gather_real8to4_nod2D +!============================================================================ +subroutine gather_real8to4_elem3D(arr3D, arr3D_global, partit) +! Make element information available to master PE +! Use only with 3D arrays stored in (vertical, horizontal) way +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D +real(real32), allocatable :: recvbuf(:,:) +real(real32), allocatable :: sendbuf(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(nl1,remPtr_elem2D(npes))) + + do n = 1, npes-1 + e3D = (remPtr_elem2D(n+1) - remPtr_elem2D(n))*nl1 + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(1,start), e3D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_elem2D(1:myDim_elem2D)) = arr3D(1:nl1,1:myDim_elem2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + allocate(sendbuf(nl1,myDim_elem2D)) + sendbuf(1:nl1,1:myDim_elem2D) = arr3D(1:nl1,1:myDim_elem2D) + + call MPI_SEND(sendbuf, myDim_elem2D*nl1, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + deallocate(sendbuf) +ENDIF + +end if +end subroutine gather_real8to4_elem3D +!================================================ +! Make element information available to master PE +subroutine gather_real8to4_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +real(real32) :: sendbuf(partit%myDim_elem2D) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +! Consider MPI-datatypes to recv directly into arr2D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(remPtr_elem2D(npes))) + + do n = 1, npes-1 + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(start), e2D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_elem2D(1:myDim_elem2D)) = arr2D(1:myDim_elem2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + sendbuf(1:myDim_elem2D) = real(arr2D(1:myDim_elem2D),real32) + call MPI_SEND(sendbuf, myDim_elem2D, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +end if +end subroutine gather_real8to4_elem2D +!============================================== +subroutine gather_elem2D_i(arr2D, arr2D_global, partit) +! Make element information available to master PE +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer :: arr2D(:) +integer :: arr2D_global(:) +integer, allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + ! Consider MPI-datatypes to recv directly into arr2D_global! + IF ( mype == 0 ) THEN + if (npes > 1) then + allocate(recvbuf(remPtr_elem2D(npes))) + do n = 1, npes-1 + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(start), e2D, MPI_INTEGER, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + arr2D_global(myList_elem2D(1:myDim_elem2D)) = arr2D(1:myDim_elem2D) + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + arr2D_global(remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1 : remPtr_elem2D(npes)-1) + deallocate(recvbuf) + else + arr2D_global(:) = arr2D(:) + endif + ELSE + call MPI_SEND(arr2D, myDim_elem2D, MPI_INTEGER, 0, 2, MPI_COMM_FESOM, MPIerr ) + ENDIF +end subroutine gather_elem2D_i +!============================================== +! Make nodal information available to master PE +subroutine gather_nod2D_i(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer :: arr2D(:) +integer :: arr2D_global(:) +integer, allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(ubound(arr2D_global, 1))) + do n = 1, npes-1 + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(start), n2D, MPI_INTEGER, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_nod2D(1:myDim_nod2D)) = arr2D(1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1 : remPtr_nod2D(npes)-1) + deallocate(recvbuf) + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_nod2D, MPI_INTEGER, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_nod2D_i +!============================================================================ +! A 2D version of the previous routine +subroutine gather_edg2D(arr2D, arr2Dglobal, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(in), target :: partit +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf +REAL(real64), ALLOCATABLE, DIMENSION(:) :: rbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" + +IF ( mype == 0 ) THEN + arr2Dglobal(myList_edge2D(1:myDim_edge2D))=arr2D(1:myDim_edge2D) + DO n = 1, npes-1 + CALL MPI_RECV( buf_size, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(rbuf(buf_size), ibuf(buf_size)) + + CALL MPI_RECV(ibuf(1), buf_size, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + + CALL MPI_RECV(rbuf(1), buf_size, MPI_DOUBLE_PRECISION, sender, & + 2, MPI_COMM_FESOM, status, MPIerr ) + arr2Dglobal(ibuf)=rbuf + DEALLOCATE(ibuf, rbuf) + ENDDO +ELSE + CALL MPI_SEND( myDim_edge2D, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_edge2D(1), myDim_edge2D, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( arr2D(1), myDim_edge2D, MPI_DOUBLE_PRECISION, 0, 2,& + MPI_COMM_FESOM, MPIerr ) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine gather_edg2D +! +!============================================================================ +! A 2D version of the previous routine +subroutine gather_edg2D_i(arr2D, arr2Dglobal, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: arr2D(:) +integer :: arr2Dglobal(:) +integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf, vbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" + +IF ( mype == 0 ) THEN + arr2Dglobal(myList_edge2D(1:myDim_edge2D))=arr2D(1:myDim_edge2D) + DO n = 1, npes-1 + CALL MPI_RECV( buf_size, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(ibuf(buf_size), vbuf(buf_size)) + + CALL MPI_RECV(ibuf(1), buf_size, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + + CALL MPI_RECV(vbuf(1), buf_size, MPI_INTEGER, sender, & + 2, MPI_COMM_FESOM, status, MPIerr ) + arr2Dglobal(ibuf)=vbuf + DEALLOCATE(ibuf, vbuf) + ENDDO +ELSE + CALL MPI_SEND( myDim_edge2D, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_edge2D(1), myDim_edge2D, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( arr2D(1), myDim_edge2D, MPI_INTEGER, 0, 2,& + MPI_COMM_FESOM, MPIerr ) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine gather_edg2D_i +!============================================== + +end module g_comm + + + +module g_comm_auto +use g_comm +implicit none +interface exchange_nod + module procedure exchange_nod2D + module procedure exchange_nod2D_i + module procedure exchange_nod2D_2fields + module procedure exchange_nod2D_3fields + module procedure exchange_nod3D + module procedure exchange_nod3D_2fields + module procedure exchange_nod3D_n +end interface exchange_nod + +interface exchange_nod_begin + module procedure exchange_nod2D_begin + module procedure exchange_nod2D_i_begin + module procedure exchange_nod2D_2fields_begin + module procedure exchange_nod2D_3fields_begin + module procedure exchange_nod3D_begin + module procedure exchange_nod3D_2fields_begin + module procedure exchange_nod3D_n_begin +end interface exchange_nod_begin + +!!$interface exchange_edge +!!$ module procedure exchange_edge2D +!!$! module procedure exchange_edge3D ! not available, not used +!!$end interface exchange_edge + +interface exchange_elem + module procedure exchange_elem3D + module procedure exchange_elem3D_n + module procedure exchange_elem2d + module procedure exchange_elem2d_i +end interface exchange_elem + +interface exchange_elem_begin + module procedure exchange_elem3D_begin + module procedure exchange_elem3D_n_begin + module procedure exchange_elem2d_begin + module procedure exchange_elem2d_i_begin +end interface exchange_elem_begin + + +interface broadcast_nod + module procedure broadcast_nod3D + module procedure broadcast_nod2D +end interface broadcast_nod + +interface broadcast_elem + module procedure broadcast_elem3D + module procedure broadcast_elem2D +end interface broadcast_elem + +interface gather_nod + module procedure gather_nod3D + module procedure gather_nod2D + module procedure gather_real4_nod3D + module procedure gather_real4_nod2D + module procedure gather_int2_nod3D + module procedure gather_int2_nod2D + module procedure gather_real8to4_nod3D + module procedure gather_real8to4_nod2D + module procedure gather_nod2D_i +end interface gather_nod + +interface gather_elem + module procedure gather_elem3D + module procedure gather_elem2D + module procedure gather_real4_elem3D + module procedure gather_real4_elem2D + module procedure gather_int2_elem3D + module procedure gather_int2_elem2D + module procedure gather_real8to4_elem3D + module procedure gather_real8to4_elem2D + module procedure gather_elem2D_i +end interface gather_elem + +interface gather_edge + module procedure gather_edg2D + module procedure gather_edg2D_i +end interface gather_edge + + +private ! hides items not listed on public statement +public :: exchange_nod,exchange_elem,broadcast_nod,broadcast_elem, & + gather_nod, gather_elem, exchange_nod_begin, exchange_nod_end, exchange_elem_begin, & + exchange_elem_end, gather_edge +end module g_comm_auto diff --git a/src/temp/gen_modules_partitioning.F90 b/src/temp/gen_modules_partitioning.F90 new file mode 100644 index 000000000..cc7d3c080 --- /dev/null +++ b/src/temp/gen_modules_partitioning.F90 @@ -0,0 +1,508 @@ +module par_support_interfaces + interface + subroutine par_init(partit) + USE o_PARAM + USE MOD_PARTIT + implicit none + type(t_partit), intent(inout), target :: partit + end subroutine + + subroutine par_ex(partit, abort) + USE MOD_PARTIT + implicit none + type(t_partit), intent(inout), target :: partit + integer,optional :: abort + end subroutine + + subroutine set_par_support(partit, mesh) + use MOD_MESH + use MOD_PARTIT + implicit none + type(t_partit), intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine init_gatherLists(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module + +subroutine par_init(partit) ! initializes MPI + USE o_PARAM + USE MOD_PARTIT + implicit none + type(t_partit), intent(inout), target :: partit + integer :: i + integer :: provided_mpi_thread_support_level + character(:), allocatable :: provided_mpi_thread_support_level_name + +#ifndef __oasis + call MPI_Comm_Size(MPI_COMM_WORLD,partit%npes,i) + call MPI_Comm_Rank(MPI_COMM_WORLD,partit%mype,i) + partit%MPI_COMM_FESOM=MPI_COMM_WORLD +#else + call MPI_Comm_Size(MPI_COMM_FESOM,partit%npes,i) + call MPI_Comm_Rank(MPI_COMM_FESOM,partit%mype,i) +#endif + + if(partit%mype==0) then + call MPI_Query_thread(provided_mpi_thread_support_level, i) + if(provided_mpi_thread_support_level == MPI_THREAD_SINGLE) then + provided_mpi_thread_support_level_name = "MPI_THREAD_SINGLE" + else if(provided_mpi_thread_support_level == MPI_THREAD_FUNNELED) then + provided_mpi_thread_support_level_name = "MPI_THREAD_FUNNELED" + else if(provided_mpi_thread_support_level == MPI_THREAD_SERIALIZED) then + provided_mpi_thread_support_level_name = "MPI_THREAD_SERIALIZED" + else if(provided_mpi_thread_support_level == MPI_THREAD_MULTIPLE) then + provided_mpi_thread_support_level_name = "MPI_THREAD_MULTIPLE" + else + provided_mpi_thread_support_level_name = "unknown" + end if + write(*,*) 'MPI has been initialized, provided MPI thread support level: ', & + provided_mpi_thread_support_level_name,provided_mpi_thread_support_level + write(*, *) 'Running on ', partit%npes, ' PEs' + end if +end subroutine par_init +!================================================================= +subroutine par_ex(partit, abort) ! finalizes MPI +USE MOD_PARTIT +#ifndef __oifs +!For standalone and coupled ECHAM runs +#if defined (__oasis) + use mod_prism +#endif + implicit none + type(t_partit), intent(inout), target :: partit + integer,optional :: abort + +#ifndef __oasis + if (present(abort)) then + if (partit%mype==0) write(*,*) 'Run finished unexpectedly!' + call MPI_ABORT(partit%MPI_COMM_FESOM, 1 ) + else + call MPI_Barrier(partit%MPI_COMM_FESOM,partit%MPIerr) + call MPI_Finalize(partit%MPIerr) + endif +#else + if (.not. present(abort)) then + if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' + call MPI_Barrier(MPI_COMM_WORLD, partit%MPIerr) + end if + call prism_terminate_proto(MPIerr) + if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' + call MPI_Barrier(MPI_COMM_WORLD, partit%MPIerr) + + if (partit%mype==0) print *, 'FESOM calls MPI_Finalize' + call MPI_Finalize(MPIerr) +#endif + if (partit%mype==0) print *, 'fesom should stop with exit status = 0' +#endif +#if defined (__oifs) +!OIFS coupling doesnt call prism_terminate_proto and uses MPI_COMM_FESOM + implicit none + integer,optional :: abort + if (present(abort)) then + if (partit%mype==0) write(*,*) 'Run finished unexpectedly!' + call MPI_ABORT( partit%MPI_COMM_FESOM, 1 ) + else + call MPI_Barrier(partit%MPI_COMM_FESOM,partit%MPIerr) + call MPI_Finalize(partit%MPIerr) + endif +#endif + +end subroutine par_ex +!======================================================================= +subroutine set_par_support(partit, mesh) + use MOD_MESH + use MOD_PARTIT + implicit none + + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + integer :: n, offset + integer :: i, max_nb, nb, nini, nend, nl1, n_val + integer, allocatable :: blocklen(:), displace(:) + integer, allocatable :: blocklen_tmp(:), displace_tmp(:) + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ! + ! In the distributed memory version, most of the job is already done + ! at the initialization phase and is taken into account in read_mesh + ! routine. Here, MPI datatypes are built and buffers for MPI wait requests + ! are allocated. + + if (npes > 1) then + +!================================================ +! MPI REQUEST BUFFERS +!================================================ + if (.not. allocated(com_nod2D%req)) allocate(com_nod2D%req( 3*com_nod2D%rPEnum + 3*com_nod2D%sPEnum)) + if (.not. allocated(com_elem2D%req)) allocate(com_elem2D%req( 3*com_elem2D%rPEnum + 3*com_elem2D%sPEnum)) + if (.not. allocated(com_elem2D_full%req)) allocate(com_elem2D_full%req(3*com_elem2D_full%rPEnum + 3*com_elem2D_full%sPEnum)) +!================================================ +! MPI DATATYPES +!================================================ + ! Build MPI Data types for halo exchange: Elements + allocate(partit%r_mpitype_elem2D(com_elem2D%rPEnum,4)) ! 2D, small halo + allocate(partit%s_mpitype_elem2D(com_elem2D%sPEnum,4)) + allocate(partit%r_mpitype_elem2D_full_i(com_elem2D_full%rPEnum)) ! 2D, wide halo, integer + allocate(partit%s_mpitype_elem2D_full_i(com_elem2D_full%sPEnum)) + allocate(partit%r_mpitype_elem2D_full(com_elem2D_full%rPEnum,4)) ! 2D, wide halo + allocate(partit%s_mpitype_elem2D_full(com_elem2D_full%sPEnum,4)) + allocate(partit%r_mpitype_elem3D(com_elem2D%rPEnum, nl-1:nl,4)) ! 3D, small halo + allocate(partit%s_mpitype_elem3D(com_elem2D%sPEnum, nl-1:nl,4)) + allocate(partit%r_mpitype_elem3D_full(com_elem2D_full%rPEnum, nl-1:nl,4)) ! 3D, wide halo + allocate(partit%s_mpitype_elem3D_full(com_elem2D_full%sPEnum, nl-1:nl,4)) +!after the allocation we just reassotiate ALL pointers again here +#include "associate_part_ass.h" + ! Upper limit for the length of the local interface between the neighbor PEs + max_nb = max( & + maxval(com_elem2D%rptr(2:com_elem2D%rPEnum+1) - com_elem2D%rptr(1:com_elem2D%rPEnum)), & + maxval(com_elem2D%sptr(2:com_elem2D%sPEnum+1) - com_elem2D%sptr(1:com_elem2D%sPEnum)), & + maxval(com_elem2D_full%rptr(2:com_elem2D_full%rPEnum+1) - com_elem2D_full%rptr(1:com_elem2D_full%rPEnum)), & + maxval(com_elem2D_full%sptr(2:com_elem2D_full%sPEnum+1) - com_elem2D_full%sptr(1:com_elem2D_full%sPEnum))) + + allocate(displace(max_nb), blocklen(max_nb)) + allocate(displace_tmp(max_nb), blocklen_tmp(max_nb)) + + + do n=1,com_elem2D%rPEnum + nb = 1 + nini = com_elem2D%rptr(n) + nend = com_elem2D%rptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_elem2D%rlist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_elem2D%rlist(i) /= com_elem2D%rlist(i-1) + 1) then + ! New block + nb = nb+1 + displace(nb) = com_elem2D%rlist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + DO n_val=1,4 + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val + displace_tmp(1:nb) = displace(1:nb)*n_val + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + r_mpitype_elem2D(n,n_val), MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_elem2D(n,n_val), MPIerr) + + DO nl1=nl-1, nl + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + r_mpitype_elem3D(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_elem3D(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + do n=1,com_elem2D%sPEnum + nb = 1 + nini = com_elem2D%sptr(n) + nend = com_elem2D%sptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_elem2D%slist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_elem2D%slist(i) /= com_elem2D%slist(i-1) + 1) then + ! New block + nb = nb+1 + displace(nb) = com_elem2D%slist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + DO n_val=1,4 + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val + displace_tmp(1:nb) = displace(1:nb)*n_val + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + s_mpitype_elem2D(n, n_val), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_elem2D(n, n_val), MPIerr) + + DO nl1=nl-1, nl + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + s_mpitype_elem3D(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_elem3D(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + do n=1,com_elem2D_full%rPEnum + nb = 1 + nini = com_elem2D_full%rptr(n) + nend = com_elem2D_full%rptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_elem2D_full%rlist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_elem2D_full%rlist(i) /= com_elem2D_full%rlist(i-1) + 1) then + ! New block + nb = nb+1 + displace(nb) = com_elem2D_full%rlist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + call MPI_TYPE_INDEXED(nb, blocklen,displace,MPI_INTEGER, r_mpitype_elem2D_full_i(n),MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_elem2D_full_i(n), MPIerr) + + DO n_val=1,4 + + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_DOUBLE_PRECISION, & + r_mpitype_elem2D_full(n,n_val), MPIerr) + call MPI_TYPE_COMMIT(r_mpitype_elem2D_full(n, n_val), MPIerr) + + DO nl1=nl-1, nl + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + r_mpitype_elem3D_full(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_elem3D_full(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + do n=1,com_elem2D_full%sPEnum + nb = 1 + nini = com_elem2D_full%sptr(n) + nend = com_elem2D_full%sptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_elem2D_full%slist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_elem2D_full%slist(i) /= com_elem2D_full%slist(i-1) + 1) then + ! New block + nb = nb+1 + displace(nb) = com_elem2D_full%slist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + call MPI_TYPE_INDEXED(nb, blocklen,displace,MPI_INTEGER, s_mpitype_elem2D_full_i(n), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_elem2D_full_i(n), MPIerr) + + DO n_val=1,4 + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_DOUBLE_PRECISION, & + s_mpitype_elem2D_full(n,n_val),MPIerr) + call MPI_TYPE_COMMIT(s_mpitype_elem2D_full(n,n_val), MPIerr) + + DO nl1=nl-1, nl + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + s_mpitype_elem3D_full(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_elem3D_full(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + deallocate(displace, blocklen) + deallocate(displace_tmp, blocklen_tmp) + + + ! Build MPI Data types for halo exchange: Nodes + + allocate(partit%r_mpitype_nod2D(com_nod2D%rPEnum)) ! 2D + allocate(partit%s_mpitype_nod2D(com_nod2D%sPEnum)) + allocate(partit%r_mpitype_nod2D_i(com_nod2D%rPEnum)) ! 2D integer + allocate(partit%s_mpitype_nod2D_i(com_nod2D%sPEnum)) + + allocate(partit%r_mpitype_nod3D(com_nod2D%rPEnum,nl-1:nl,3)) ! 3D with nl-1 or nl layers, 1-3 values + allocate(partit%s_mpitype_nod3D(com_nod2D%sPEnum,nl-1:nl,3)) +!after the allocation we just reassotiate ALL pointers again here +#include "associate_part_ass.h" + + ! Upper limit for the length of the local interface between the neighbor PEs + max_nb = max(maxval(com_nod2D%rptr(2:com_nod2D%rPEnum+1) - com_nod2D%rptr(1:com_nod2D%rPEnum)), & + maxval(com_nod2D%sptr(2:com_nod2D%sPEnum+1) - com_nod2D%sptr(1:com_nod2D%sPEnum))) + + allocate(displace(max_nb), blocklen(max_nb)) + allocate(displace_tmp(max_nb), blocklen_tmp(max_nb)) + + do n=1,com_nod2D%rPEnum + nb = 1 + nini = com_nod2D%rptr(n) + nend = com_nod2D%rptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_nod2D%rlist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_nod2D%rlist(i) /= com_nod2D%rlist(i-1) + 1) then + ! New block + nb = nb+1 + displace(nb) = com_nod2D%rlist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_DOUBLE_PRECISION, & + r_mpitype_nod2D(n), MPIerr) + + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_INTEGER, & + r_mpitype_nod2D_i(n), MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_nod2D(n), MPIerr) + call MPI_TYPE_COMMIT(r_mpitype_nod2D_i(n), MPIerr) + + DO nl1=nl-1, nl + DO n_val=1,3 + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + r_mpitype_nod3D(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_nod3D(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + do n=1,com_nod2D%sPEnum + nb = 1 + nini = com_nod2D%sptr(n) + nend = com_nod2D%sptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_nod2D%slist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_nod2D%slist(i) /= com_nod2D%slist(i-1) + 1) then + ! New block + nb = nb+1 + displace(nb) = com_nod2D%slist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_DOUBLE_PRECISION, & + s_mpitype_nod2D(n), MPIerr) + + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_INTEGER, & + s_mpitype_nod2D_i(n), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_nod2D(n), MPIerr) + call MPI_TYPE_COMMIT(s_mpitype_nod2D_i(n), MPIerr) + + DO nl1=nl-1, nl + DO n_val=1,3 + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + s_mpitype_nod3D(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_nod3D(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + deallocate(blocklen, displace) + deallocate(blocklen_tmp, displace_tmp) + + endif + + call init_gatherLists(partit, mesh) + if(mype==0) write(*,*) 'Communication arrays are set' +end subroutine set_par_support + + +!=================================================================== +subroutine init_gatherLists(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: n2D, e2D, sum_loc_elem2D + integer :: n, estart, nstart +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + if (mype==0) then + + if (npes > 1) then + + allocate(partit%remPtr_nod2D(npes)) + allocate(partit%remPtr_elem2D(npes)) +!reassociate the pointers to the just allocated arrays +#include "associate_part_ass.h" + remPtr_nod2D(1) = 1 + remPtr_elem2D(1) = 1 + + do n=1, npes-1 + call MPI_RECV(n2D, 1, MPI_INTEGER, n, 0, MPI_COMM_FESOM, MPI_STATUS_IGNORE, MPIerr ) + call MPI_RECV(e2D, 1, MPI_INTEGER, n, 1, MPI_COMM_FESOM, MPI_STATUS_IGNORE, MPIerr ) + + remPtr_nod2D(n+1) = remPtr_nod2D(n) + n2D + remPtr_elem2D(n+1) = remPtr_elem2D(n) + e2D + enddo + + allocate(partit%remList_nod2D(remPtr_nod2D(npes))) ! this should be nod2D - myDim_nod2D + allocate(partit%remList_elem2D(remPtr_elem2D(npes))) ! this is > elem2D, because the elements overlap. + ! Consider optimization: avoid multiple communication + ! of the same elem from different PEs. +!reassociate the pointers to the just allocated arrays +#include "associate_part_ass.h" + + do n=1, npes-1 + nstart = remPtr_nod2D(n) + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + call MPI_RECV(remList_nod2D(nstart), n2D, MPI_INTEGER, n, 2, MPI_COMM_FESOM, & + MPI_STATUS_IGNORE, MPIerr ) + estart = remPtr_elem2D(n) + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + call MPI_RECV(remList_elem2D(estart),e2D, MPI_INTEGER, n, 3, MPI_COMM_FESOM, & + MPI_STATUS_IGNORE, MPIerr ) + + enddo + end if + else + + call MPI_SEND(myDim_nod2D, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + call MPI_SEND(myDim_elem2D, 1, MPI_INTEGER, 0, 1, MPI_COMM_FESOM, MPIerr ) + call MPI_SEND(myList_nod2D, myDim_nod2D, MPI_INTEGER, 0, 2, MPI_COMM_FESOM, MPIerr ) + call MPI_SEND(myList_elem2D, myDim_elem2D, MPI_INTEGER, 0, 3, MPI_COMM_FESOM, MPIerr ) + + endif +end subroutine init_gatherLists diff --git a/src/temp/oce_adv_tra_driver.F90 b/src/temp/oce_adv_tra_driver.F90 new file mode 100644 index 000000000..511e903b7 --- /dev/null +++ b/src/temp/oce_adv_tra_driver.F90 @@ -0,0 +1,278 @@ +module oce_adv_tra_driver_interfaces + interface + subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + integer, intent(in) :: tr_num + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WE(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + end subroutine + end interface +end module + +module oce_tra_adv_flux2dtracer_interface + interface + subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, mesh, use_lo, ttf, lo) + !update the solution for vertical and horizontal flux contributions + use MOD_MESH + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: flux_v(mesh%nl, partit%myDim_nod2D) + logical, optional :: use_lo + real(kind=WP), optional :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), optional :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + end subroutine + end interface +end module +! +! +!=============================================================================== +subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + use oce_adv_tra_hor_interfaces + use oce_adv_tra_ver_interfaces + use oce_adv_tra_fct_interfaces + use oce_tra_adv_flux2dtracer_interface + implicit none + real(kind=WP), intent(in), target :: dt + integer, intent(in) :: tr_num + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WE(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + + real(kind=WP), pointer, dimension (:,:) :: pwvel + real(kind=WP), pointer, dimension (:,:) :: ttf, ttfAB, fct_LO + real(kind=WP), pointer, dimension (:,:) :: adv_flux_hor, adv_flux_ver, dttf_h, dttf_v + real(kind=WP), pointer, dimension (:,:) :: fct_ttf_min, fct_ttf_max + real(kind=WP), pointer, dimension (:,:) :: fct_plus, fct_minus + + integer, pointer, dimension (:) :: nboundary_lay + real(kind=WP), pointer, dimension (:,:,:) :: edge_up_dn_grad + + integer :: el(2), enodes(2), nz, n, e + integer :: nl12, nu12, nl1, nl2, nu1, nu2 + real(kind=WP) :: cLO, cHO, deltaX1, deltaY1, deltaX2, deltaY2 + real(kind=WP) :: qc, qu, qd + real(kind=WP) :: tvert(mesh%nl), tvert_e(mesh%nl), a, b, c, d, da, db, dg, vflux, Tupw1 + real(kind=WP) :: Tmean, Tmean1, Tmean2, num_ord + real(kind=WP) :: opth, optv + logical :: do_zero_flux + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ttf => tracers%data(tr_num)%values + ttfAB => tracers%data(tr_num)%valuesAB + opth = tracers%data(tr_num)%tra_adv_ph + optv = tracers%data(tr_num)%tra_adv_pv + fct_LO => tracers%work%fct_LO + adv_flux_ver => tracers%work%adv_flux_ver + adv_flux_hor => tracers%work%adv_flux_hor + edge_up_dn_grad => tracers%work%edge_up_dn_grad + nboundary_lay => tracers%work%nboundary_lay + fct_ttf_min => tracers%work%fct_ttf_min + fct_ttf_max => tracers%work%fct_ttf_max + fct_plus => tracers%work%fct_plus + fct_minus => tracers%work%fct_minus + dttf_h => tracers%work%del_ttf_advhoriz + dttf_v => tracers%work%del_ttf_advvert + !___________________________________________________________________________ + ! compute FCT horzontal and vertical low order solution as well as lw order + ! part of antidiffusive flux + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then + ! compute the low order upwind horizontal flux + ! init_zero=.true. : zero the horizontal flux before computation + ! init_zero=.false. : input flux will be substracted + call adv_tra_hor_upw1(vel, ttf, partit, mesh, adv_flux_hor, init_zero=.true.) + ! update the LO solution for horizontal contribution + fct_LO=0.0_WP + do e=1, myDim_edge2D + enodes=edges(:,e) + el=edge_tri(:,e) + nl1=nlevels(el(1))-1 + nu1=ulevels(el(1)) + nl2=0 + nu2=0 + if(el(2)>0) then + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + end if + + nl12 = max(nl1,nl2) + nu12 = nu1 + if (nu2>0) nu12 = min(nu1,nu2) + + !!PS do nz=1, max(nl1, nl2) + do nz=nu12, nl12 + fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e) + fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e) + end do + end do + ! compute the low order upwind vertical flux (explicit part only) + ! zero the input/output flux before computation + call adv_tra_ver_upw1(we, ttf, partit, mesh, adv_flux_ver, init_zero=.true.) + ! update the LO solution for vertical contribution + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !!PS do nz=1, nlevels_nod2D(n)-1 + do nz= nu1, nl1-1 + fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n) + end do + end do + if (w_split) then !wvel/=wvel_e + ! update for implicit contribution (w_split option) + call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) + ! compute the low order upwind vertical flux (full vertical velocity) + ! zero the input/output flux before computation + ! --> compute here low order part of vertical anti diffusive fluxes, + ! has to be done on the full vertical velocity w + call adv_tra_ver_upw1(w, ttf, partit, mesh, adv_flux_ver, init_zero=.true.) + end if + call exchange_nod(fct_LO, partit) + end if + + do_zero_flux=.true. + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') do_zero_flux=.false. + !___________________________________________________________________________ + ! do horizontal tracer advection, in case of FCT high order solution + SELECT CASE(trim(tracers%data(tr_num)%tra_adv_hor)) + CASE('MUSCL') + ! compute the untidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above) + call adv_tra_hor_muscl(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, init_zero=do_zero_flux) + CASE('MFCT') + call adv_tra_hor_mfct(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, init_zero=do_zero_flux) + CASE('UPW1') + call adv_tra_hor_upw1(vel, ttfAB, partit, mesh, adv_flux_hor, init_zero=do_zero_flux) + CASE DEFAULT !unknown + if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracers%data(tr_num)%tra_adv_hor), '! Check your namelists!' + call par_ex(partit, 1) + END SELECT + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then + pwvel=>w + else + pwvel=>we + end if + !___________________________________________________________________________ + ! do vertical tracer advection, in case of FCT high order solution + SELECT CASE(trim(tracers%data(tr_num)%tra_adv_ver)) + CASE('QR4C') + ! compute the untidiffusive vertical flux (init_zero=.false.:input is the LO vertical flux computed above) + call adv_tra_ver_qr4c ( pwvel, ttfAB, partit, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) + CASE('CDIFF') + call adv_tra_ver_cdiff( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) + CASE('PPM') + call adv_tra_vert_ppm(dt, pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) + CASE('UPW1') + call adv_tra_ver_upw1 ( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) + CASE DEFAULT !unknown + if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!' + call par_ex(1) + ! --> be aware the vertical implicite part in case without FCT is done in + ! oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, partit, mesh) + ! for do_wimpl=.true. + END SELECT + !___________________________________________________________________________ + ! + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then + !edge_up_dn_grad will be used as an auxuary array here + call oce_tra_adv_fct(dt, ttf, fct_LO, adv_flux_hor, adv_flux_ver, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, edge_up_dn_grad, partit, mesh) + call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, partit, mesh, use_lo=.TRUE., ttf=ttf, lo=fct_LO) + else + call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, partit, mesh) + end if +end subroutine do_oce_adv_tra +! +! +!=============================================================================== +subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, mesh, use_lo, ttf, lo) + use MOD_MESH + use o_ARRAYS + use MOD_PARTIT + use g_comm_auto + implicit none + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: flux_v(mesh%nl, partit%myDim_nod2D) + logical, optional :: use_lo + real(kind=WP), optional :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), optional :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + integer :: n, nz, k, elem, enodes(3), num, el(2), nu12, nl12, nu1, nu2, nl1, nl2, edge +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + !___________________________________________________________________________ + ! c. Update the solution + ! Vertical + if (present(use_lo)) then + if (use_lo) then + do n=1, myDim_nod2d + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !!PS do nz=1,nlevels_nod2D(n)-1 + do nz=nu1, nl1-1 + dttf_v(nz,n)=dttf_v(nz,n)-ttf(nz,n)*hnode(nz,n)+LO(nz,n)*hnode_new(nz,n) + end do + end do + end if + end if + + do n=1, myDim_nod2d + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 + dttf_v(nz,n)=dttf_v(nz,n) + (flux_v(nz,n)-flux_v(nz+1,n))*dt/areasvol(nz,n) + end do + end do + + + ! Horizontal + do edge=1, myDim_edge2D + enodes(1:2)=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + nu1=ulevels(el(1)) + + nl2=0 + nu2=0 + if(el(2)>0) then + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + end if + + nl12 = max(nl1,nl2) + nu12 = nu1 + if (nu2>0) nu12 = min(nu1,nu2) + + !!PS do nz=1, max(nl1, nl2) + do nz=nu12, nl12 + dttf_h(nz,enodes(1))=dttf_h(nz,enodes(1))+flux_h(nz,edge)*dt/areasvol(nz,enodes(1)) + dttf_h(nz,enodes(2))=dttf_h(nz,enodes(2))-flux_h(nz,edge)*dt/areasvol(nz,enodes(2)) + end do + end do +end subroutine oce_tra_adv_flux2dtracer diff --git a/src/temp/oce_adv_tra_fct.F90 b/src/temp/oce_adv_tra_fct.F90 new file mode 100644 index 000000000..5eb7993a9 --- /dev/null +++ b/src/temp/oce_adv_tra_fct.F90 @@ -0,0 +1,365 @@ +module oce_adv_tra_fct_interfaces + interface + subroutine oce_adv_tra_fct_init(twork, partit, mesh) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + type(t_mesh), intent(in), target :: mesh + type(t_partit),intent(inout), target :: partit + type(t_tracer_work), intent(inout), target :: twork + end subroutine + + subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, partit, mesh) + use MOD_MESH + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: adf_v(mesh%nl, partit%myDim_nod2D) + real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: fct_minus(mesh%nl, partit%myDim_nod2D) + real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array + end subroutine + end interface +end module +! +! +!=============================================================================== +subroutine oce_adv_tra_fct_init(twork, partit, mesh) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + implicit none + integer :: my_size + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer_work), intent(inout), target :: twork +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + my_size=myDim_nod2D+eDim_nod2D + allocate(twork%fct_LO(nl-1, my_size)) ! Low-order solution + allocate(twork%adv_flux_hor(nl-1,partit%myDim_edge2D)) ! antidiffusive hor. contributions / from edges + allocate(twork%adv_flux_ver(nl, partit%myDim_nod2D)) ! antidiffusive ver. fluxes / from nodes + + allocate(twork%fct_ttf_max(nl-1, my_size),twork%fct_ttf_min(nl-1, my_size)) + allocate(twork%fct_plus(nl-1, my_size), twork%fct_minus(nl-1, my_size)) + ! Initialize with zeros: + twork%fct_LO=0.0_WP + twork%adv_flux_hor=0.0_WP + twork%adv_flux_ver=0.0_WP + twork%fct_ttf_max=0.0_WP + twork%fct_ttf_min=0.0_WP + twork%fct_plus=0.0_WP + twork%fct_minus=0.0_WP + + if (mype==0) write(*,*) 'FCT is initialized' +end subroutine oce_adv_tra_fct_init + +! +! +!=============================================================================== +subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, partit, mesh) + ! + ! 3D Flux Corrected Transport scheme + ! Limits antidiffusive fluxes==the difference in flux HO-LO + ! LO ==Low-order (first-order upwind) + ! HO ==High-order (3rd/4th order gradient reconstruction method) + ! Adds limited fluxes to the LO solution + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + implicit none + real(kind=WP), intent(in), target :: dt + type(t_mesh), intent(in), target :: mesh + type(t_partit),intent(inout), target :: partit + real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: adf_v(mesh%nl, partit%myDim_nod2D) + real(kind=WP), intent(inout) :: fct_plus (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_minus(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array, let us use twork%edge_up_dn_grad(1:4, 1:NL-2, 1:partit%myDim_edge2D) to save space + integer :: n, nz, k, elem, enodes(3), num, el(2), nl1, nl2, nu1, nu2, nl12, nu12, edge + real(kind=WP) :: flux, ae,tvert_max(mesh%nl-1),tvert_min(mesh%nl-1) + real(kind=WP) :: flux_eps=1e-16 + real(kind=WP) :: bignumber=1e3 + integer :: vlimit=1 + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + ! -------------------------------------------------------------------------- + ! ttf is the tracer field on step n + ! del_ttf is the increment + ! vlimit sets the version of limiting, see below + ! -------------------------------------------------------------------------- + !___________________________________________________________________________ + ! a1. max, min between old solution and updated low-order solution per node + do n=1,myDim_nod2D + edim_nod2d + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1, nl1-1 + fct_ttf_max(nz,n)=max(LO(nz,n), ttf(nz,n)) + fct_ttf_min(nz,n)=min(LO(nz,n), ttf(nz,n)) + end do + end do + + !___________________________________________________________________________ + ! a2. Admissible increments on elements + ! (only layers below the first and above the last layer) + ! look for max, min bounds for each element --> AUX here auxilary array + do elem=1, myDim_elem2D + enodes=elem2D_nodes(:,elem) + nu1 = ulevels(elem) + nl1 = nlevels(elem) + do nz=nu1, nl1-1 + AUX(1,nz,elem)=maxval(fct_ttf_max(nz,enodes)) + AUX(2,nz,elem)=minval(fct_ttf_min(nz,enodes)) + end do + if (nl1<=nl-1) then + do nz=nl1,nl-1 + AUX(1,nz,elem)=-bignumber + AUX(2,nz,elem)= bignumber + end do + endif + end do ! --> do elem=1, myDim_elem2D + + !___________________________________________________________________________ + ! a3. Bounds on clusters and admissible increments + ! Vertical1: In this version we look at the bounds on the clusters + ! above and below, which leaves wide bounds because typically + ! vertical gradients are larger. + if(vlimit==1) then + !Horizontal + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + + !___________________________________________________________________ + do nz=nu1,nl1-1 + ! max,min horizontal bound in cluster around node n in every + ! vertical layer + ! nod_in_elem2D --> elem indices of which node n is surrounded + ! nod_in_elem2D_num --> max number of surrounded elem + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + end do + + !___________________________________________________________________ + ! calc max,min increment of surface layer with respect to low order + ! solution + fct_ttf_max(nu1,n)=tvert_max(nu1)-LO(nu1,n) + fct_ttf_min(nu1,n)=tvert_min(nu1)-LO(nu1,n) + + ! calc max,min increment from nz-1:nz+1 with respect to low order + ! solution at layer nz + do nz=nu1+1,nl1-2 + fct_ttf_max(nz,n)=maxval(tvert_max(nz-1:nz+1))-LO(nz,n) + fct_ttf_min(nz,n)=minval(tvert_min(nz-1:nz+1))-LO(nz,n) + end do + ! calc max,min increment of bottom layer -1 with respect to low order + ! solution + nz=nl1-1 + fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) + fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) + end do + end if + + !___________________________________________________________________________ + ! Vertical2: Similar to the version above, but the vertical bounds are more + ! local + if(vlimit==2) then + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + end do + do nz=nu1+1, nl1-2 + tvert_max(nz)=max(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) + tvert_min(nz)=min(tvert_min(nz),minval(fct_ttf_max(nz-1:nz+1,n))) + end do + do nz=nu1,nl1-1 + fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) + fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) + end do + end do + end if + + !___________________________________________________________________________ + ! Vertical3: Vertical bounds are taken into account only if they are narrower than the + ! horizontal ones + if(vlimit==3) then + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1, nl1-1 + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + end do + do nz=nu1+1, nl1-2 + tvert_max(nz)=min(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) + tvert_min(nz)=max(tvert_min(nz),minval(fct_ttf_max(nz-1:nz+1,n))) + end do + do nz=nu1, nl1-1 + fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) + fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) + end do + end do + end if + + !___________________________________________________________________________ + ! b1. Split positive and negative antidiffusive contributions + ! --> sum all positive (fct_plus), negative (fct_minus) antidiffusive + ! horizontal element and vertical node contribution to node n and layer nz + ! see. R. Löhner et al. "finite element flux corrected transport (FEM-FCT) + ! for the euler and navier stoke equation + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 + fct_plus(nz,n)=0._WP + fct_minus(nz,n)=0._WP + end do + end do + + !Vertical + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 +! fct_plus(nz,n)=fct_plus(nz,n)+ & +! (max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) & +! /hnode(nz,n) +! fct_minus(nz,n)=fct_minus(nz,n)+ & +! (min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) & +! /hnode(nz,n) + fct_plus(nz,n) =fct_plus(nz,n) +(max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) + fct_minus(nz,n)=fct_minus(nz,n)+(min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) + end do + end do + + !Horizontal + do edge=1, myDim_edge2D + enodes(1:2)=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + nu1=ulevels(el(1)) + nl2=0 + nu2=0 + if(el(2)>0) then + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + end if + + nl12 = max(nl1,nl2) + nu12 = nu1 + if (nu2>0) nu12 = min(nu1,nu2) + + do nz=nu12, nl12 + fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) + fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) + fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) + fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) + end do + end do + + !___________________________________________________________________________ + ! b2. Limiting factors + do n=1,myDim_nod2D + nu1=ulevels_nod2D(n) + nl1=nlevels_nod2D(n) + do nz=nu1,nl1-1 + flux=fct_plus(nz,n)*dt/areasvol(nz,n)+flux_eps + fct_plus(nz,n)=min(1.0_WP,fct_ttf_max(nz,n)/flux) + flux=fct_minus(nz,n)*dt/areasvol(nz,n)-flux_eps + fct_minus(nz,n)=min(1.0_WP,fct_ttf_min(nz,n)/flux) + end do + end do + + ! fct_minus and fct_plus must be known to neighbouring PE + call exchange_nod(fct_plus, fct_minus, partit) + + !___________________________________________________________________________ + ! b3. Limiting + !Vertical + do n=1, myDim_nod2D + nu1=ulevels_nod2D(n) + nl1=nlevels_nod2D(n) + + !_______________________________________________________________________ + nz=nu1 + ae=1.0_WP + flux=adf_v(nz,n) + if(flux>=0.0_WP) then + ae=min(ae,fct_plus(nz,n)) + else + ae=min(ae,fct_minus(nz,n)) + end if + adf_v(nz,n)=ae*adf_v(nz,n) + + !_______________________________________________________________________ + do nz=nu1+1,nl1-1 + ae=1.0_WP + flux=adf_v(nz,n) + if(flux>=0._WP) then + ae=min(ae,fct_minus(nz-1,n)) + ae=min(ae,fct_plus(nz,n)) + else + ae=min(ae,fct_plus(nz-1,n)) + ae=min(ae,fct_minus(nz,n)) + end if + adf_v(nz,n)=ae*adf_v(nz,n) + end do + ! the bottom flux is always zero + end do + + call exchange_nod_end(partit) ! fct_plus, fct_minus + + !Horizontal + do edge=1, myDim_edge2D + enodes(1:2)=edges(:,edge) + el=edge_tri(:,edge) + nu1=ulevels(el(1)) + nl1=nlevels(el(1))-1 + nl2=0 + nu2=0 + if(el(2)>0) then + nu2=ulevels(el(2)) + nl2=nlevels(el(2))-1 + end if + + nl12 = max(nl1,nl2) + nu12 = nu1 + if (nu2>0) nu12 = min(nu1,nu2) + + do nz=nu12, nl12 + ae=1.0_WP + flux=adf_h(nz,edge) + + if(flux>=0._WP) then + ae=min(ae,fct_plus(nz,enodes(1))) + ae=min(ae,fct_minus(nz,enodes(2))) + else + ae=min(ae,fct_minus(nz,enodes(1))) + ae=min(ae,fct_plus(nz,enodes(2))) + endif + + adf_h(nz,edge)=ae*adf_h(nz,edge) + end do + end do +end subroutine oce_tra_adv_fct diff --git a/src/temp/oce_adv_tra_hor.F90 b/src/temp/oce_adv_tra_hor.F90 new file mode 100644 index 000000000..714eccf68 --- /dev/null +++ b/src/temp/oce_adv_tra_hor.F90 @@ -0,0 +1,739 @@ +!=============================================================================================================================== +!**************** routines for horizontal tracer advection *********************** +module oce_adv_tra_hor_interfaces + interface +! (low order upwind) +! returns flux given at edges which contributes with +! plus sign into 1st. node and with the minus sign into the 2nd node +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! flux is not multiplied with dt + subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + end subroutine +!=============================================================================== +! MUSCL +! returns flux given at edges which contributes with +! plus sign into 1st. node and with the minus sign into the 2nd node +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! flux is not multiplied with dt + subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) + use MOD_MESH + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + end subroutine +! a not stable version of MUSCL (reconstruction in the vicinity of bottom topography is not upwind) +! it runs with FCT option only + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) + use MOD_MESH + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + end subroutine + end interface +end module +! +! +!=============================================================================== +subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_PARTIT + use g_comm_auto + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 + real(kind=WP) :: a, vflux + integer :: el(2), enodes(2), nz, edge + integer :: nu12, nl12, nl1, nl2, nu1, nu2 + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + ! The result is the low-order solution horizontal fluxes + ! They are put into flux + !___________________________________________________________________________ + do edge=1, myDim_edge2D + ! local indice of nodes that span up edge ed + enodes=edges(:,edge) + + ! local index of element that contribute to edge + el=edge_tri(:,edge) + + ! number of layers -1 at elem el(1) + nl1=nlevels(el(1))-1 + + ! index off surface layer in case of cavity !=1 + nu1=ulevels(el(1)) + + ! edge_cross_dxdy(1:2,ed)... dx,dy distance from element centroid el(1) to + ! center of edge --> needed to calc flux perpedicular to edge from elem el(1) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + a=r_earth*elem_cos(el(1)) + + !_______________________________________________________________________ + ! same parameter but for other element el(2) that contributes to edge ed + ! if el(2)==0 than edge is boundary edge + nl2=0 + nu2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + ! number of layers -1 at elem el(2) + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + a=0.5_WP*(a+r_earth*elem_cos(el(2))) + end if + + !_______________________________________________________________________ + ! nl12 ... minimum number of layers -1 between element el(1) & el(2) that + ! contribute to edge ed + ! nu12 ... upper index of layers between element el(1) & el(2) that + ! contribute to edge ed + ! be carefull !!! --> if ed is a boundary edge than el(1)~=0 and el(2)==0 + ! that means nl1>0, nl2==0, n2=min(nl1,nl2)=0 !!! + nl12=min(nl1,nl2) + nu12=max(nu1,nu2) + + !_______________________________________________________________________ + ! (A) goes only into this loop when the edge has only facing element + ! el(1) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + do nz=nu1, nu12-1 + !____________________________________________________________________ + ! volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + + !____________________________________________________________________ + ! 1st. low order upwind solution + flux(nz, edge)=-0.5_WP*( & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)) & + )-flux(nz, edge) + end do + + !_______________________________________________________________________ + ! (B) goes only into this loop when the edge has only facing elemenmt + ! el(2) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + if (nu2 > 0) then + do nz=nu2, nu12-1 + !___________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + + !___________________________________________________________ + ! 1st. low order upwind solution + flux(nz, edge)=-0.5_WP*( & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) + end do + end if + + !_______________________________________________________________________ + ! (C) Both segments + ! loop over depth layers from top (nu12) to nl12 + ! be carefull !!! --> if ed is a boundary edge, el(2)==0 than nl12=0 so + ! you wont enter in this loop + do nz=nu12, nl12 + !___________________________________________________________________ + ! 1st. low order upwind solution + ! here already assumed that ed is NOT! a boundary edge so el(2) should exist + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) & + +(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + + flux(nz, edge)=-0.5_WP*( & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) + end do + + !_______________________________________________________________________ + ! (D) remaining segments on the left or on the right + do nz=nl12+1, nl1 + !____________________________________________________________________ + ! volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + !____________________________________________________________________ + ! 1st. low order upwind solution + flux(nz, edge)=-0.5_WP*( & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)) & + )-flux(nz, edge) + end do + + !_______________________________________________________________________ + ! (E) remaining segments on the left or on the right + do nz=nl12+1, nl2 + !_______________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + !_______________________________________________________________ + ! 1st. low order upwind solution + flux(nz, edge)=-0.5_WP*( & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) + end do + end do +end subroutine adv_tra_hor_upw1 +! +! +!=============================================================================== +subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 + real(kind=WP) :: Tmean1, Tmean2, cHO + real(kind=WP) :: c_lo(2) + real(kind=WP) :: a, vflux + integer :: el(2), enodes(2), nz, edge + integer :: nu12, nl12, nl1, nl2, nu1, nu2 + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + ! The result is the low-order solution horizontal fluxes + ! They are put into flux + !___________________________________________________________________________ + do edge=1, myDim_edge2D + ! local indice of nodes that span up edge ed + enodes=edges(:,edge) + + ! local index of element that contribute to edge + el=edge_tri(:,edge) + + ! number of layers -1 at elem el(1) + nl1=nlevels(el(1))-1 + + ! index off surface layer in case of cavity !=1 + nu1=ulevels(el(1)) + + ! edge_cross_dxdy(1:2,ed)... dx,dy distance from element centroid el(1) to + ! center of edge --> needed to calc flux perpedicular to edge from elem el(1) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + a=r_earth*elem_cos(el(1)) + + !_______________________________________________________________________ + ! same parameter but for other element el(2) that contributes to edge ed + ! if el(2)==0 than edge is boundary edge + nl2=0 + nu2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + ! number of layers -1 at elem el(2) + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + a=0.5_WP*(a+r_earth*elem_cos(el(2))) + end if + + !_______________________________________________________________________ + ! n2 ... minimum number of layers -1 between element el(1) & el(2) that + ! contribute to edge ed + ! nu12 ... upper index of layers between element el(1) & el(2) that + ! contribute to edge ed + ! be carefull !!! --> if ed is a boundary edge than el(1)~=0 and el(2)==0 + ! that means nl1>0, nl2==0, n2=min(nl1,nl2)=0 !!! + nl12=min(nl1,nl2) + nu12=max(nu1,nu2) + + !_______________________________________________________________________ + ! (A) goes only into this loop when the edge has only facing element + ! el(1) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + do nz=nu1, nu12-1 + c_lo(1)=real(max(sign(1, nboundary_lay(enodes(1))-nz), 0),WP) + c_lo(2)=real(max(sign(1, nboundary_lay(enodes(2))-nz), 0),WP) + + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP*c_lo(2) + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP*c_lo(1) + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (B) goes only into this loop when the edge has only facing elemenmt + ! el(2) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + if (nu2 > 0) then + do nz=nu2, nu12-1 + c_lo(1)=real(max(sign(1, nboundary_lay(enodes(1))-nz), 0),WP) + c_lo(2)=real(max(sign(1, nboundary_lay(enodes(2))-nz), 0),WP) + + !_______________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP*c_lo(2) + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP*c_lo(1) + + !_______________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + end if + + !_______________________________________________________________________ + ! (C) Both segments + ! loop over depth layers from top to n2 + ! be carefull !!! --> if ed is a boundary edge, el(2)==0 than n2=0 so + ! you wont enter in this loop + do nz=nu12, nl12 + c_lo(1)=real(max(sign(1, nboundary_lay(enodes(1))-nz), 0),WP) + c_lo(2)=real(max(sign(1, nboundary_lay(enodes(2))-nz), 0),WP) + + !___________________________________________________________________ + ! MUSCL-type reconstruction + ! check if upwind or downwind triagle is necessary + ! + ! cross product between velocity vector and cross vector edge-elem-center + ! cross product > 0 --> angle vec_v and (dx,dy) --> [0 180] --> upwind triangle + ! cross product < 0 --> angle vec_v and (dx,dy) --> [180 360] --> downwind triangle + ! + ! o o ! o o + ! / \ / \ ! / \ / \ + ! / \ \ vec_v / \ ! / \ / / \ + ! / up \ \ / dn \ ! / up \ / / dn \ + ! o-------o----+---->o-------o ! o-------o----+---->o-------o + ! 1 / 2 ! 1 \vec_v + ! /vec_v ! \ + ! --> downwind triangle ! --> upwind triangle + ! + ! edge_up_dn_grad(1,nz,edge) ... gradTR_x upwind + ! edge_up_dn_grad(2,nz,edge) ... gradTR_x downwind + ! edge_up_dn_grad(3,nz,edge) ... gradTR_y upwind + ! edge_up_dn_grad(4,nz,edge) ... gradTR_y downwind + + !___________________________________________________________________ + ! use downwind triangle to interpolate Tracer to edge center with + ! fancy scheme --> Linear upwind reconstruction + ! T_n+0.5 = T_n+1 - 1/2*deltax*GRADIENT + ! --> GRADIENT = 2/3 GRAD_edgecenter + 1/3 GRAD_downwindtri + ! T_n+0.5 = T_n+1 - 2/6*(T_n+1-T_n) + 1/6*gradT_down + ! --> edge_up_dn_grad ... contains already elemental tracer gradient + ! of up and dn wind triangle + ! --> Tmean2 ... edge center interpolated Tracer using tracer + ! gradient info from upwind triangle + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP*c_lo(2) + + ! use upwind triangle to interpolate Tracer to edge center with + ! fancy scheme --> Linear upwind reconstruction + ! T_n+0.5 = T_n + 1/2*deltax*GRADIENT + ! --> GRADIENT = 2/3 GRAD_edgecenter + 1/3 GRAD_downwindtri + ! T_n+0.5 = T_n + 2/6*(T_n+1-T_n) + 1/6*gradT_down + ! --> Tmean1 ... edge center interpolated Tracer using tracer + ! gradient info from downwind triangle + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP*c_lo(1) + + !___________________________________________________________________ + ! volume flux along the edge segment ed + ! netto volume flux along segment that comes from edge node 1 and 2 + ! + ! + ! C1 (centroid el(1)) --> (u1,v1) + ! x + ! ^ + ! (dx1,dy1) | + ! |---> vec_n1 (dy1,-dx1)--> project vec_u1 onto vec_n1 --> -v1*dx1+u1*dy1 --> + ! | | + ! enodes(1) o----------O---------o enodes(2) |-> calculate volume flux out of/in + ! vflux_________/| | the volume of enode1(enode2) through + ! |---> vec_n2 (dy2,-dx2)--> project vec_u2 onto vec_n2 --> -v2*dx2+u2*dy2 --> sections of dx1,dy1 and dx2,dy2 + ! (dx2,dy2) | --> vflux + ! v + ! x + ! C2 (centroid el(2)) --> (u2,v2) + + ! here already assumed that ed is NOT! a boundary edge so el(2) should exist + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) & + +(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + + !___________________________________________________________________ + ! (1-num_ord) is done with 3rd order upwind + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (D) remaining segments on the left or on the right + do nz=nl12+1, nl1 + c_lo(1)=real(max(sign(1, nboundary_lay(enodes(1))-nz), 0),WP) + c_lo(2)=real(max(sign(1, nboundary_lay(enodes(2))-nz), 0),WP) + + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP*c_lo(2) + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP*c_lo(1) + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (E) remaining segments on the left or on the right + do nz=nl12+1, nl2 + c_lo(1)=real(max(sign(1, nboundary_lay(enodes(1))-nz), 0),WP) + c_lo(2)=real(max(sign(1, nboundary_lay(enodes(2))-nz), 0),WP) + + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP*c_lo(2) + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP*c_lo(1) + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + end do +end subroutine adv_tra_hor_muscl +! +! +!=============================================================================== + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 + real(kind=WP) :: Tmean1, Tmean2, cHO + real(kind=WP) :: a, vflux + integer :: el(2), enodes(2), nz, edge + integer :: nu12, nl12, nl1, nl2, nu1, nu2 + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + ! The result is the low-order solution horizontal fluxes + ! They are put into flux + !___________________________________________________________________________ + do edge=1, myDim_edge2D + ! local indice of nodes that span up edge ed + enodes=edges(:,edge) + + ! local index of element that contribute to edge + el=edge_tri(:,edge) + + ! number of layers -1 at elem el(1) + nl1=nlevels(el(1))-1 + + ! index off surface layer in case of cavity !=1 + nu1=ulevels(el(1)) + + ! edge_cross_dxdy(1:2,ed)... dx,dy distance from element centroid el(1) to + ! center of edge --> needed to calc flux perpedicular to edge from elem el(1) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + a=r_earth*elem_cos(el(1)) + + !_______________________________________________________________________ + ! same parameter but for other element el(2) that contributes to edge ed + ! if el(2)==0 than edge is boundary edge + nl2=0 + nu2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + ! number of layers -1 at elem el(2) + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + a=0.5_WP*(a+r_earth*elem_cos(el(2))) + end if + + !_______________________________________________________________________ + ! n2 ... minimum number of layers -1 between element el(1) & el(2) that + ! contribute to edge ed + ! nu12 ... upper index of layers between element el(1) & el(2) that + ! contribute to edge ed + ! be carefull !!! --> if ed is a boundary edge than el(1)~=0 and el(2)==0 + ! that means nl1>0, nl2==0, n2=min(nl1,nl2)=0 !!! + nl12=min(nl1,nl2) + nu12=max(nu1,nu2) + + !_______________________________________________________________________ + ! (A) goes only into this loop when the edge has only facing element + ! el(1) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + do nz=nu1, nu12-1 + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (B) goes only into this loop when the edge has only facing elemenmt + ! el(2) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + if (nu2 > 0) then + do nz=nu2,nu12-1 + !___________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP + !___________________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + end if + + !_______________________________________________________________________ + ! (C) Both segments + ! loop over depth layers from top to n2 + ! be carefull !!! --> if ed is a boundary edge, el(2)==0 than n2=0 so + ! you wont enter in this loop + do nz=nu12, nl12 + !___________________________________________________________________ + ! MUSCL-type reconstruction + ! check if upwind or downwind triagle is necessary + ! + ! cross product between velocity vector and cross vector edge-elem-center + ! cross product > 0 --> angle vec_v and (dx,dy) --> [0 180] --> upwind triangle + ! cross product < 0 --> angle vec_v and (dx,dy) --> [180 360] --> downwind triangle + ! + ! o o ! o o + ! / \ / \ ! / \ / \ + ! / \ \ vec_v / \ ! / \ / / \ + ! / up \ \ / dn \ ! / up \ / / dn \ + ! o-------o----+---->o-------o ! o-------o----+---->o-------o + ! 1 / 2 ! 1 \vec_v + ! /vec_v ! \ + ! --> downwind triangle ! --> upwind triangle + ! + ! edge_up_dn_grad(1,nz,edge) ... gradTR_x upwind + ! edge_up_dn_grad(2,nz,edge) ... gradTR_x downwind + ! edge_up_dn_grad(3,nz,edge) ... gradTR_y upwind + ! edge_up_dn_grad(4,nz,edge) ... gradTR_y downwind + + !___________________________________________________________________ + ! use downwind triangle to interpolate Tracer to edge center with + ! fancy scheme --> Linear upwind reconstruction + ! T_n+0.5 = T_n+1 - 1/2*deltax*GRADIENT + ! --> GRADIENT = 2/3 GRAD_edgecenter + 1/3 GRAD_downwindtri + ! T_n+0.5 = T_n+1 - 2/6*(T_n+1-T_n) + 1/6*gradT_down + ! --> edge_up_dn_grad ... contains already elemental tracer gradient + ! of up and dn wind triangle + ! --> Tmean2 ... edge center interpolated Tracer using tracer + ! gradient info from upwind triangle + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP + + ! use upwind triangle to interpolate Tracer to edge center with + ! fancy scheme --> Linear upwind reconstruction + ! T_n+0.5 = T_n + 1/2*deltax*GRADIENT + ! --> GRADIENT = 2/3 GRAD_edgecenter + 1/3 GRAD_downwindtri + ! T_n+0.5 = T_n + 2/6*(T_n+1-T_n) + 1/6*gradT_down + ! --> Tmean1 ... edge center interpolated Tracer using tracer + ! gradient info from downwind triangle + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP + !___________________________________________________________________ + ! volume flux along the edge segment ed + ! netto volume flux along segment that comes from edge node 1 and 2 + ! + ! + ! C1 (centroid el(1)) --> (u1,v1) + ! x + ! ^ + ! (dx1,dy1) | + ! |---> vec_n1 (dy1,-dx1)--> project vec_u1 onto vec_n1 --> -v1*dx1+u1*dy1 --> + ! | | + ! enodes(1) o----------O---------o enodes(2) |-> calculate volume flux out of/in + ! vflux_________/| | the volume of enode1(enode2) through + ! |---> vec_n2 (dy2,-dx2)--> project vec_u2 onto vec_n2 --> -v2*dx2+u2*dy2 --> sections of dx1,dy1 and dx2,dy2 + ! (dx2,dy2) | --> vflux + ! v + ! x + ! C2 (centroid el(2)) --> (u2,v2) + + ! here already assumed that ed is NOT! a boundary edge so el(2) should exist + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) & + +(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + + !___________________________________________________________________ + ! (1-num_ord) is done with 3rd order upwind + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (D) remaining segments on the left or on the right + do nz=nl12+1, nl1 + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (E) remaining segments on the left or on the right + do nz=nl12+1, nl2 + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + end do +end subroutine adv_tra_hor_mfct + diff --git a/src/temp/oce_adv_tra_ver.F90 b/src/temp/oce_adv_tra_ver.F90 new file mode 100644 index 000000000..eab9847a8 --- /dev/null +++ b/src/temp/oce_adv_tra_ver.F90 @@ -0,0 +1,598 @@ +module oce_adv_tra_ver_interfaces + interface +! implicit 1st order upwind vertical advection with to solve for fct_LO +! updates the input tracer ttf + subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) + use mod_mesh + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + end subroutine +!=============================================================================== +! 1st order upwind (explicit) +! returns flux given at vertical interfaces of scalar volumes +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! flux is not multiplied with dt + subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + end subroutine +!=============================================================================== +! QR (4th order centerd) +! returns flux given at vertical interfaces of scalar volumes +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! flux is not multiplied with dt + subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) + use MOD_MESH + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + end subroutine +!=============================================================================== +! Vertical advection with PPM reconstruction (5th order) +! returns flux given at vertical interfaces of scalar volumes +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! flux is not multiplied with dt + subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + integer :: n, nz, nl1 + real(kind=WP) :: tvert(mesh%nl), tv + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + end subroutine +! central difference reconstruction (2nd order, use only with FCT) +! returns flux given at vertical interfaces of scalar volumes +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! flux is not multiplied with dt + subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + integer :: n, nz, nl1 + real(kind=WP) :: tvert(mesh%nl), tv + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + end subroutine + end interface +end module +!=============================================================================== +subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + + implicit none + real(kind=WP), intent(in) , target :: dt + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in) , target :: mesh + real(kind=WP), intent(inout) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) + real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) + integer :: nz, n, nzmax, nzmin, tr_num + real(kind=WP) :: m, zinv, dt_inv, dz + real(kind=WP) :: c1, v_adv + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + dt_inv=1.0_WP/dt + + !___________________________________________________________________________ + ! loop over local nodes + do n=1,myDim_nod2D + + ! initialise + a = 0.0_WP + b = 0.0_WP + c = 0.0_WP + tr = 0.0_WP + tp = 0.0_WP + cp = 0.0_WP + + ! max. number of levels at node n + nzmax=nlevels_nod2D(n) + + ! upper surface index, in case of cavity !=1 + nzmin=ulevels_nod2D(n) + + !___________________________________________________________________________ + ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because + ! they be calculate from the actualized mesh with hnode_new + ! calculate new zbar (depth of layers) and Z (mid depths of layers) + ! depending on layer thinkness over depth at node n + ! Be carefull here vertical operation have to be done on NEW vertical mesh !!! + zbar_n=0.0_WP + Z_n=0.0_WP + zbar_n(nzmax)=zbar_n_bot(n) + Z_n(nzmax-1) =zbar_n(nzmax) + hnode_new(nzmax-1,n)/2.0_WP + do nz=nzmax-1,nzmin+1,-1 + zbar_n(nz) = zbar_n(nz+1) + hnode_new(nz,n) + Z_n(nz-1) = zbar_n(nz) + hnode_new(nz-1,n)/2.0_WP + end do + zbar_n(nzmin) = zbar_n(nzmin+1) + hnode_new(nzmin,n) + + !_______________________________________________________________________ + ! Regular part of coefficients: --> surface layer + nz=nzmin + + ! 1/dz(nz) + zinv=1.0_WP*dt ! no .../(zbar(1)-zbar(2)) because of ALE + + !!PS a(nz)=0.0_WP + !!PS v_adv=zinv*areasvol(nz+1,n)/areasvol(nz,n) + !!PS b(nz)= hnode_new(nz,n)+W(nz, n)*zinv-min(0._WP, W(nz+1, n))*v_adv + !!PS c(nz)=-max(0._WP, W(nz+1, n))*v_adv + + a(nz)=0.0_WP + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + b(nz)= hnode_new(nz,n)+W(nz, n)*v_adv + + v_adv=zinv*area(nz+1,n)/areasvol(nz,n) + b(nz)= b(nz)-min(0._WP, W(nz+1, n))*v_adv + c(nz)=-max(0._WP, W(nz+1, n))*v_adv + + !_______________________________________________________________________ + ! Regular part of coefficients: --> 2nd...nl-2 layer + do nz=nzmin+1, nzmax-2 + ! update from the vertical advection + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + a(nz)=min(0._WP, W(nz, n))*v_adv + b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*v_adv + + v_adv=zinv*area(nz+1,n)/areasvol(nz,n) + b(nz)=b(nz)-min(0._WP, W(nz+1, n))*v_adv + c(nz)= -max(0._WP, W(nz+1, n))*v_adv + end do ! --> do nz=2, nzmax-2 + + !_______________________________________________________________________ + ! Regular part of coefficients: --> nl-1 layer + nz=nzmax-1 + ! update from the vertical advection + !!PS a(nz)= min(0._WP, W(nz, n))*zinv + !!PS b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*zinv + !!PS c(nz)=0.0_WP + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + a(nz)= min(0._WP, W(nz, n))*v_adv + b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*v_adv + c(nz)=0.0_WP + + !_______________________________________________________________________ + nz=nzmin + dz=hnode_new(nz,n) ! It would be (zbar(nz)-zbar(nz+1)) if not ALE + tr(nz)=-(b(nz)-dz)*ttf(nz,n)-c(nz)*ttf(nz+1,n) + + do nz=nzmin+1,nzmax-2 + dz=hnode_new(nz,n) + tr(nz)=-a(nz)*ttf(nz-1,n)-(b(nz)-dz)*ttf(nz,n)-c(nz)*ttf(nz+1,n) + end do + nz=nzmax-1 + dz=hnode_new(nz,n) + tr(nz)=-a(nz)*ttf(nz-1,n)-(b(nz)-dz)*ttf(nz,n) + + !_______________________________________________________________________ + nz = nzmin + cp(nz) = c(nz)/b(nz) + tp(nz) = tr(nz)/b(nz) + + ! solve for vectors c-prime and t, s-prime + do nz = nzmin+1,nzmax-1 + m = b(nz)-cp(nz-1)*a(nz) + cp(nz) = c(nz)/m + tp(nz) = (tr(nz)-tp(nz-1)*a(nz))/m + end do + + !_______________________________________________________________________ + ! start with back substitution + tr(nzmax-1) = tp(nzmax-1) + + ! solve for x from the vectors c-prime and d-prime + do nz = nzmax-2, nzmin, -1 + tr(nz) = tp(nz)-cp(nz)*tr(nz+1) + end do + + !_______________________________________________________________________ + ! update tracer + do nz=nzmin,nzmax-1 + ttf(nz,n)=ttf(nz,n)+tr(nz) + end do + end do ! --> do n=1,myDim_nod2D +end subroutine adv_tra_vert_impl +! +! +!=============================================================================== +subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP) :: tvert(mesh%nl) + integer :: n, nz, nzmax, nzmin + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + do n=1, myDim_nod2D + !_______________________________________________________________________ + nzmax=nlevels_nod2D(n) + nzmin=ulevels_nod2D(n) + + !_______________________________________________________________________ + ! vert. flux at surface layer + nz=nzmin + flux(nz,n)=-W(nz,n)*ttf(nz,n)*area(nz,n)-flux(nz,n) + + !_______________________________________________________________________ + ! vert. flux at bottom layer --> zero bottom flux + nz=nzmax + flux(nz,n)= 0.0_WP-flux(nz,n) + + !_______________________________________________________________________ + ! Be carefull have to do vertical tracer advection here on old vertical grid + ! also horizontal advection is done on old mesh (see helem contains old + ! mesh information) + !_______________________________________________________________________ + ! vert. flux at remaining levels + do nz=nzmin+1,nzmax-1 + flux(nz,n)=-0.5*( & + ttf(nz ,n)*(W(nz,n)+abs(W(nz,n)))+ & + ttf(nz-1,n)*(W(nz,n)-abs(W(nz,n))))*area(nz,n)-flux(nz,n) + end do + end do +end subroutine adv_tra_ver_upw1 +! +! +!=============================================================================== +subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) + use MOD_MESH + use o_ARRAYS + use o_PARAM + use MOD_PARTIT + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + real(kind=WP) :: tvert(mesh%nl) + integer :: n, nz, nzmax, nzmin + real(kind=WP) :: Tmean, Tmean1, Tmean2 + real(kind=WP) :: qc, qu, qd + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + do n=1, myDim_nod2D + !_______________________________________________________________________ + nzmax=nlevels_nod2D(n) + nzmin=ulevels_nod2D(n) + !_______________________________________________________________________ + ! vert. flux at surface layer + nz=nzmin + flux(nz,n)=-ttf(nz,n)*W(nz,n)*area(nz,n)-flux(nz,n) + + !_______________________________________________________________________ + ! vert. flux 2nd layer --> centered differences + nz=nzmin+1 + flux(nz,n)=-0.5_WP*(ttf(nz-1,n)+ttf(nz,n))*W(nz,n)*area(nz,n)-flux(nz,n) + + !_______________________________________________________________________ + ! vert. flux at bottom - 1 layer --> centered differences + nz=nzmax-1 + flux(nz,n)=-0.5_WP*(ttf(nz-1,n)+ttf(nz,n))*W(nz,n)*area(nz,n)-flux(nz,n) + + !_______________________________________________________________________ + ! vert. flux at bottom layer --> zero bottom flux + nz=nzmax + flux(nz,n)= 0.0_WP-flux(nz,n) + + !_______________________________________________________________________ + ! Be carefull have to do vertical tracer advection here on old vertical grid + ! also horizontal advection is done on old mesh (see helem contains old + ! mesh information) + !_______________________________________________________________________ + ! vert. flux at remaining levels + do nz=nzmin+2,nzmax-2 + !centered (4th order) + qc=(ttf(nz-1,n)-ttf(nz ,n))/(Z_3d_n(nz-1,n)-Z_3d_n(nz ,n)) + qu=(ttf(nz ,n)-ttf(nz+1,n))/(Z_3d_n(nz ,n)-Z_3d_n(nz+1,n)) + qd=(ttf(nz-2,n)-ttf(nz-1,n))/(Z_3d_n(nz-2,n)-Z_3d_n(nz-1,n)) + + Tmean1=ttf(nz ,n)+(2*qc+qu)*(zbar_3d_n(nz,n)-Z_3d_n(nz ,n))/3.0_WP + Tmean2=ttf(nz-1,n)+(2*qc+qd)*(zbar_3d_n(nz,n)-Z_3d_n(nz-1,n))/3.0_WP + Tmean =(W(nz,n)+abs(W(nz,n)))*Tmean1+(W(nz,n)-abs(W(nz,n)))*Tmean2 + ! flux(nz,n)=-0.5_WP*(num_ord*(Tmean1+Tmean2)*W(nz,n)+(1.0_WP-num_ord)*Tmean)*area(nz,n)-flux(nz,n) + flux(nz,n)=(-0.5_WP*(1.0_WP-num_ord)*Tmean - num_ord*(0.5_WP*(Tmean1+Tmean2))*W(nz,n))*area(nz,n)-flux(nz,n) + end do + end do +end subroutine adv_tra_ver_qr4c +! +! +!=============================================================================== +subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + implicit none + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in) , target :: mesh + real(kind=WP), intent(in) :: ttf (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + real(kind=WP) :: tvert(mesh%nl), tv(mesh%nl), aL, aR, aj, x + real(kind=WP) :: dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1 + integer :: n, nz, nzmax, nzmin + integer :: overshoot_counter, counter + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + ! -------------------------------------------------------------------------- + ! Vertical advection + ! -------------------------------------------------------------------------- + ! A piecewise parabolic scheme for uniformly-spaced layers. + ! See Colella and Woodward, JCP, 1984, 174-201. It can be coded so as to to take + ! non-uniformity into account, but this is more cumbersome. This is the version for AB + ! time stepping + ! -------------------------------------------------------------------------- + overshoot_counter=0 + counter =0 + do n=1, myDim_nod2D + !_______________________________________________________________________ + !Interpolate to zbar...depth levels --> all quantities (tracer ...) are + ! calculated on mid depth levels + ! nzmax ... number of depth levels at node n + nzmax=nlevels_nod2D(n) + nzmin=ulevels_nod2D(n) + + ! tracer at surface level + tv(nzmin)=ttf(nzmin,n) + ! tracer at surface+1 level +! tv(2)=-ttf(1,n)*min(sign(1.0, W(2,n)), 0._WP)+ttf(2,n)*max(sign(1.0, W(2,n)), 0._WP) +! tv(3)=-ttf(2,n)*min(sign(1.0, W(3,n)), 0._WP)+ttf(3,n)*max(sign(1.0, W(3,n)), 0._WP) + tv(nzmin+1)=0.5*(ttf(nzmin, n)+ttf(nzmin+1,n)) + ! tacer at bottom-1 level + tv(nzmax-1)=-ttf(nzmax-2,n)*min(sign(1.0_wp, W(nzmax-1,n)), 0._WP)+ttf(nzmax-1,n)*max(sign(1.0_wp, W(nzmax-1,n)), 0._WP) +! tv(nzmax-1)=0.5_WP*(ttf(nzmax-2,n)+ttf(nzmax-1,n)) + ! tracer at bottom level + tv(nzmax)=ttf(nzmax-1,n) + + !_______________________________________________________________________ + ! calc tracer for surface+2 until depth-2 layer + ! see Colella and Woodward, JCP, 1984, 174-201 --> equation (1.9) + ! loop over layers (segments) + !!PS do nz=3, nzmax-3 + do nz=nzmin+1, nzmax-3 + !___________________________________________________________________ + ! for uniform spaced vertical grids --> piecewise parabolic method (ppm) + ! equation (1.9) + ! tv(nz)=(7.0_WP*(ttf(nz-1,n)+ttf(nz,n))-(ttf(nz-2,n)+ttf(nz+1,n)))/12.0_WP + + !___________________________________________________________________ + ! for non-uniformity spaced vertical grids --> piecewise parabolic + ! method (ppm) see see Colella and Woodward, JCP, 1984, 174-201 + ! --> full equation (1.6), (1.7) and (1.8) + dzjm1 = hnode_new(nz-1,n) + dzj = hnode_new(nz ,n) + dzjp1 = hnode_new(nz+1,n) + dzjp2 = hnode_new(nz+2,n) + ! Be carefull here vertical operation have to be done on NEW vertical mesh !!! + + !___________________________________________________________________ + ! equation (1.7) + ! --> Here deltaj is the average slope in the jth zone of the parabola + ! with zone averages a_(j-1) and a_j, a_(j+1) + ! --> a_j^n + deltaj = dzj/(dzjm1+dzj+dzjp1)* & + ( & + (2._WP*dzjm1+dzj )/(dzjp1+dzj)*(ttf(nz+1,n)-ttf(nz ,n)) + & + (dzj +2._WP*dzjp1)/(dzjm1+dzj)*(ttf(nz ,n)-ttf(nz-1,n)) & + ) + ! --> a_(j+1)^n + deltajp1 = dzjp1/(dzj+dzjp1+dzjp2)* & + ( & + (2._WP*dzj+dzjp1 )/(dzjp2+dzjp1)*(ttf(nz+2,n)-ttf(nz+1,n)) + & + (dzjp1+2._WP*dzjp2)/(dzj +dzjp1)*(ttf(nz+1,n)-ttf(nz ,n)) & + ) + !___________________________________________________________________ + ! condition (1.8) + ! --> This modification leads to a somewhat steeper representation of + ! discontinuities in the solution. It also guarantees that a_(j+0.5) + ! lies in the range of values defined by a_j; and a_(j+1); + if ( (ttf(nz+1,n)-ttf(nz ,n))*(ttf(nz ,n)-ttf(nz-1,n)) > 0._WP ) then + deltaj = min( abs(deltaj), & + 2._WP*abs(ttf(nz+1,n)-ttf(nz ,n)),& + 2._WP*abs(ttf(nz ,n)-ttf(nz-1,n)) & + )*sign(1.0_WP,deltaj) + else + deltaj = 0.0_WP + endif + if ( (ttf(nz+2,n)-ttf(nz+1,n))*(ttf(nz+1,n)-ttf(nz ,n)) > 0._WP ) then + deltajp1 = min( abs(deltajp1),& + 2._WP*abs(ttf(nz+2,n)-ttf(nz+1,n)),& + 2._WP*abs(ttf(nz+1,n)-ttf(nz,n)) & + )*sign(1.0_WP,deltajp1) + else + deltajp1 = 0.0_WP + endif + !___________________________________________________________________ + ! equation (1.6) + ! --> calcualte a_(j+0.5) + ! nz+1 is the interface betweel layers (segments) nz and nz+1 + tv(nz+1)= ttf(nz,n) & + + dzj/(dzj+dzjp1)*(ttf(nz+1,n)-ttf(nz,n)) & + + 1._WP/(dzjm1+dzj+dzjp1+dzjp2) * & + ( & + (2._WP*dzjp1*dzj)/(dzj+dzjp1)* & + ((dzjm1+dzj)/(2._WP*dzj+dzjp1) - (dzjp2+dzjp1)/(2._WP*dzjp1+dzj))*(ttf(nz+1,n)-ttf(nz,n)) & + - dzj*(dzjm1+dzj)/(2._WP*dzj+dzjp1)*deltajp1 & + + dzjp1*(dzjp1+dzjp2)/(dzj+2._WP*dzjp1)*deltaj & + ) + !tv(nz+1)=max(min(ttf(nz, n), ttf(nz+1, n)), min(max(ttf(nz, n), ttf(nz+1, n)), tv(nz+1))) + end do ! --> do nz=2,nzmax-3 + + tvert(1:nzmax)=0._WP + ! loop over layers (segments) + do nz=nzmin, nzmax-1 + if ((W(nz,n)<=0._WP) .AND. (W(nz+1,n)>=0._WP)) CYCLE + counter=counter+1 + aL=tv(nz) + aR=tv(nz+1) + if ((aR-ttf(nz, n))*(ttf(nz, n)-aL)<=0._WP) then + ! write(*,*) aL, ttf(nz, n), aR + overshoot_counter=overshoot_counter+1 + aL =ttf(nz, n) + aR =ttf(nz, n) + end if + if ((aR-aL)*(ttf(nz, n)-0.5_WP*(aL+aR))> (aR-aL)**2/6._WP) then + aL =3._WP*ttf(nz, n)-2._WP*aR + end if + if ((aR-aL)*(ttf(nz, n)-0.5_WP*(aR+aL))<-(aR-aL)**2/6._WP) then + aR =3._WP*ttf(nz, n)-2._WP*aL + end if + + dzj = hnode(nz,n) + aj=6.0_WP*(ttf(nz, n)-0.5_WP*(aL+aR)) + + if (W(nz,n)>0._WP) then + x=min(W(nz,n)*dt/dzj, 1._WP) + tvert(nz )=(-aL-0.5_WP*x*(aR-aL+(1._WP-2._WP/3._WP*x)*aj)) + tvert(nz )=tvert(nz) ! compute 2nd moment for DVD + tvert(nz )=tvert(nz)*area(nz,n)*W(nz,n) + end if + + if (W(nz+1,n)<0._WP) then + x=min(-W(nz+1,n)*dt/dzj, 1._WP) + tvert(nz+1)=(-aR+0.5_WP*x*(aR-aL-(1._WP-2._WP/3._WP*x)*aj)) + tvert(nz+1)=tvert(nz+1) ! compute 2nd moment for DVD + tvert(nz+1)=tvert(nz+1)*area(nz+1,n)*W(nz+1,n) + end if + end do + + !_______________________________________________________________________ + ! Surface flux + tvert(nzmin)= -tv(nzmin)*W(nzmin,n)*area(nzmin,n) + ! Zero bottom flux + tvert(nzmax)=0.0_WP + flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) + end do ! --> do n=1, myDim_nod2D +! if (mype==0) write(*,*) 'PPM overshoot statistics:', real(overshoot_counter)/real(counter) +end subroutine adv_tra_vert_ppm +! +! +!=============================================================================== +subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + integer :: n, nz, nzmax, nzmin + real(kind=WP) :: tvert(mesh%nl), tv +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + do n=1, myDim_nod2D + !_______________________________________________________________________ + nzmax=nlevels_nod2D(n)-1 + nzmin=ulevels_nod2D(n) + + !_______________________________________________________________________ + ! Surface flux + tvert(nzmin)= -W(nzmin,n)*ttf(nzmin,n)*area(nzmin,n) + + !_______________________________________________________________________ + ! Zero bottom flux + tvert(nzmax+1)=0.0_WP + + !_______________________________________________________________________ + ! Other levels + do nz=nzmin+1, nzmax + tv=0.5_WP*(ttf(nz-1,n)+ttf(nz,n)) + tvert(nz)= -tv*W(nz,n)*area(nz,n) + end do + + !_______________________________________________________________________ + flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) + end do ! --> do n=1, myDim_nod2D +end subroutine adv_tra_ver_cdiff diff --git a/src/temp/oce_modules.F90 b/src/temp/oce_modules.F90 new file mode 100755 index 000000000..3576ef01f --- /dev/null +++ b/src/temp/oce_modules.F90 @@ -0,0 +1,267 @@ + +! Modules of cell-vertex ocean model +! S. Danilov, 2012 (sergey.danilov@awi.de) +! SI units are used + +!========================================================== +MODULE o_PARAM +integer, parameter :: WP=8 ! Working precision +integer, parameter :: MAX_PATH=4096 ! Maximum file path length +integer :: mstep +real(kind=WP), parameter :: pi=3.14159265358979 +real(kind=WP), parameter :: rad=pi/180.0_WP +real(kind=WP), parameter :: density_0=1030.0_WP +real(kind=WP), parameter :: density_0_r=1.0_WP/density_0 ! [m^3/kg] +real(kind=WP), parameter :: g=9.81_WP +real(kind=WP), parameter :: r_earth=6367500.0_WP +real(kind=WP), parameter :: omega=2*pi/(3600.0_WP*24.0_WP) +real(kind=WP), parameter :: vcpw=4.2e6 ![J/m^3/K] water heat cap +real(kind=WP), parameter :: inv_vcpw = 1._WP / vcpw ! inverse, to replace divide by multiply +real(kind=WP), parameter :: small=1.0e-8 !small value +integer :: state_equation = 1 !1 - full equation of state, 0 - linear equation of state + +real(kind=WP) :: C_d= 0.0025_WP ! Bottom drag coefficient +real(kind=WP) :: kappa=0.4 !von Karman's constant +real(kind=WP) :: mix_coeff_PP=0.01_WP ! mixing coef for PP scheme +real(kind=WP) :: gamma0=0.01! [m/s], gamma0*len*dt is the background viscosity +real(kind=WP) :: gamma1=0.1! [non dim.], or computation of the flow aware viscosity +real(kind=WP) :: gamma2=10.! [s/m], is only used in easy backscatter option +real(kind=WP) :: Div_c =1.0_WP !modified Leith viscosity weight +real(kind=WP) :: Leith_c=1.0_WP !Leith viscosity weight. It needs vorticity! +real(kind=WP) :: easy_bs_return=1.0 !backscatter option only (how much to return) +real(kind=WP) :: A_ver=0.001_WP ! Vertical harm. visc. +integer :: visc_option=5 +logical :: uke_scaling=.true. +real(kind=WP) :: uke_scaling_factor=1._WP +real(kind=WP) :: rosb_dis=1._WP +integer :: smooth_back=2 +integer :: smooth_dis=2 +integer :: smooth_back_tend=4 +real(kind=WP) :: K_back=600._WP +real(kind=WP) :: c_back=0.1_8 +real(kind=WP) :: K_hor=10._WP +real(kind=WP) :: K_ver=0.00001_WP +real(kind=WP) :: scale_area=2.0e8 +real(kind=WP) :: surf_relax_T= 0.0_WP +real(kind=WP) :: surf_relax_S= 10.0_WP/(60*3600.0_WP*24) +logical :: balance_salt_water =.true. +real(kind=WP) :: clim_relax= 1.0_WP/(10*3600.0_WP*24) +real(kind=WP) :: clim_decay, clim_growth + ! set to 0.0 if no relaxation +logical :: ref_sss_local=.false. +real(kind=WP) :: ref_sss=34.7 +logical :: Fer_GM =.false. !flag for Ferrari et al. (2010) GM scheme +real(kind=WP) :: K_GM_max = 3000. +real(kind=WP) :: K_GM_min = 2.0 +integer :: K_GM_bvref = 2 ! 0...surface, 1...bottom mixlay, 2...mean over mixlay +real(kind=WP) :: K_GM_resscalorder = 2.0 +real(kind=WP) :: K_GM_rampmax = 40.0 ! Resol >K_GM_rampmax[km] GM full +real(kind=WP) :: K_GM_rampmin = 30.0 ! Resol replace string by int comparison +real(KIND=WP) :: Ricr = 0.3_WP ! critical bulk Richardson Number +real(KIND=WP) :: concv = 1.6_WP ! constant for pure convection (eqn. 23) (Large 1.5-1.6; MOM default 1.8) + +logical :: hbl_diag =.false. ! writen boundary layer depth +logical :: use_global_tides=.false. ! tidal potential will be computed and used in the SSH gradient computation +! Time stepping +! real(kind=WP) :: alpha=1.0_WP, theta=1.0_WP ! implicitness for +real(kind=WP) :: alpha=1.0_WP, theta=1.0_WP ! implicitness for + ! elevation and divergence +real(kind=WP) :: epsilon=0.1_WP ! AB2 offset +! Tracers +logical :: i_vert_visc= .true. +logical :: w_split =.false. +real(kind=WP) :: w_max_cfl=1.e-5_WP + +logical :: SPP=.false. + +TYPE tracer_source3d_type + integer :: locID + integer :: ID + integer, allocatable, dimension(:) :: ind2 +END TYPE tracer_source3d_type + +type(tracer_source3d_type), & + allocatable, dimension(:) :: ptracers_restore +integer :: ptracers_restore_total=0 + + +! Momentum +logical :: free_slip=.false. + ! false=no slip +integer :: mom_adv=2 + ! 1 vector control volumes, p1 velocities + ! 2 scalar control volumes + ! 3 vector invariant + +logical :: open_b=.false. ! Reserved + +!_______________________________________________________________________________ +!--> mixing enhancement than can be applied via subroutine mo_convect(mesh) +! additionally to every mixing scheme i.e. KPP, PP, cvmix_KPP, cvmix_PP, cvmix_TKE + +! Switch for Monin-Obukov TB04 mixing --> can be additionally applied for all mixing schemes +! --> definetely recommented for KPP +logical :: use_momix = .true. !.false. !Monin-Obukhov -> TB04 mixing on/off +real(kind=WP) :: momix_lat = -50.0_WP ! latitudinal treshhold to apply mo_on enhanced +! convection +logical :: use_instabmix = .true. +real(kind=WP) :: instabmix_kv = 0.1 + +! Switch for enhanced wind mixing --> nasty trick from pp mixing in FESOM1.4 +logical :: use_windmix = .false. +real(kind=WP) :: windmix_kv = 1.e-3 +integer :: windmix_nl = 2 + +!_______________________________________________________________________________ +! use non-constant reference density if .false. density_ref=density_0 +logical :: use_density_ref = .false. +real(kind=WP) :: density_ref_T = 2.0_WP +real(kind=WP) :: density_ref_S = 34.0_WP + +!_______________________________________________________________________________ +! use k-profile nonlocal fluxes +logical :: use_kpp_nonlclflx = .false. + +!_______________________________________________________________________________ +! *** active tracer cutoff +logical :: limit_salinity=.true. !set an allowed range for salinity +real(kind=WP) :: salinity_min=5.0 !minimal salinity +real(kind=WP) :: coeff_limit_salinity=0.0023 !m/s, coefficient to restore s to s_min + + namelist /tracer_cutoff/ limit_salinity, salinity_min, coeff_limit_salinity + +! *** others *** + real(kind=WP) :: time_sum=0.0 ! for runtime estimate + +!___________________________________________ +! Pressure Gradient Force calculation (pgf) +! calculation of pgf either: +! only linfs: +! > 'nemo' ... like NEMO (interpolate to elemental depth, inter-/extrapolation) +! linfs, zlevel, zstar: +! > 'shchepetkin' ... based on density jacobian +! > 'cubicspline' ... like in FESOM1.4 +! > 'easypgf' ... interpolate pressure on elemental depth +character(20) :: which_pgf='shchepetkin' + + + NAMELIST /oce_dyn/ state_equation, C_d, A_ver, gamma0, gamma1, gamma2, Leith_c, Div_c, easy_bs_return, & + scale_area, mom_adv, free_slip, i_vert_visc, w_split, w_max_cfl, SPP,& + Fer_GM, K_GM_max, K_GM_min, K_GM_bvref, K_GM_resscalorder, K_GM_rampmax, K_GM_rampmin, & + scaling_Ferreira, scaling_Rossby, scaling_resolution, scaling_FESOM14, & + Redi, visc_sh_limit, mix_scheme, Ricr, concv, which_pgf, visc_option, alpha, theta, use_density_ref, & + K_back, c_back, uke_scaling, uke_scaling_factor, smooth_back, smooth_dis, & + smooth_back_tend, rosb_dis + + NAMELIST /tracer_phys/ diff_sh_limit, Kv0_const, double_diffusion, K_ver, K_hor, surf_relax_T, surf_relax_S, & + balance_salt_water, clim_relax, ref_sss_local, ref_sss, & + use_momix, momix_lat, momix_kv, & + use_instabmix, instabmix_kv, & + use_windmix, windmix_kv, windmix_nl, & + use_kpp_nonlclflx + +END MODULE o_PARAM +!========================================================== +MODULE o_ARRAYS +USE o_PARAM +IMPLICIT NONE +! Arrays are described in subroutine array_setup +real(kind=WP), allocatable, target :: Wvel(:,:), Wvel_e(:,:), Wvel_i(:,:) +real(kind=WP), allocatable :: UV(:,:,:) +real(kind=WP), allocatable :: UV_rhs(:,:,:), UV_rhsAB(:,:,:) +real(kind=WP), allocatable :: uke(:,:), v_back(:,:), uke_back(:,:), uke_dis(:,:), uke_dif(:,:) +real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) +real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) +real(kind=WP), allocatable :: UV_dis_posdef_b2(:,:), UV_dis_posdef(:,:), UV_back_posdef(:,:) +real(kind=WP), allocatable :: eta_n(:), d_eta(:) +real(kind=WP), allocatable :: ssh_rhs(:), hpressure(:,:) +real(kind=WP), allocatable :: CFL_z(:,:) +real(kind=WP), allocatable :: stress_surf(:,:) +real(kind=WP), allocatable :: stress_node_surf(:,:) +REAL(kind=WP), ALLOCATABLE :: stress_atmoce_x(:) +REAL(kind=WP), ALLOCATABLE :: stress_atmoce_y(:) +real(kind=WP), allocatable :: heat_flux(:), Tsurf(:) +real(kind=WP), allocatable :: heat_flux_in(:) !to keep the unmodified (by SW penetration etc.) heat flux +real(kind=WP), allocatable :: water_flux(:), Ssurf(:) +real(kind=WP), allocatable :: virtual_salt(:), relax_salt(:) +real(kind=WP), allocatable :: Tclim(:,:), Sclim(:,:) +real(kind=WP), allocatable :: Visc(:,:) +real(kind=WP), allocatable :: Tsurf_t(:,:), Ssurf_t(:,:) +real(kind=WP), allocatable :: tau_x_t(:,:), tau_y_t(:,:) +real(kind=WP), allocatable :: heat_flux_t(:,:), heat_rel_t(:,:), heat_rel(:) +real(kind=WP), allocatable :: coriolis(:), coriolis_node(:) +real(kind=WP), allocatable :: relax2clim(:) +real(kind=WP), allocatable :: MLD1(:), MLD2(:) +integer, allocatable :: MLD1_ind(:), MLD2_ind(:) +real(kind=WP), allocatable :: ssh_gp(:) +!Tracer gradients&RHS +real(kind=WP), allocatable :: ttrhs(:,:) +real(kind=WP), allocatable :: tr_xy(:,:,:) +real(kind=WP), allocatable :: tr_z(:,:) + +! Auxiliary arrays for vector-invariant form of momentum advection +real(kind=WP), allocatable,dimension(:,:) :: vorticity + +!Viscosity and diff coefs +real(kind=WP), allocatable,dimension(:,:) :: Av,Kv +real(kind=WP), allocatable,dimension(:,:,:) :: Kv_double +real(kind=WP), allocatable,dimension(:) :: Kv0 +!Velocities interpolated to nodes +real(kind=WP), allocatable,dimension(:,:,:) :: Unode + +! Auxiliary arrays to store Redi-GM fields +real(kind=WP), allocatable,dimension(:,:,:) :: neutral_slope +real(kind=WP), allocatable,dimension(:,:,:) :: slope_tapered +real(kind=WP), allocatable,dimension(:,:,:) :: sigma_xy +real(kind=WP), allocatable,dimension(:,:) :: sw_beta, sw_alpha +real(kind=WP), allocatable,dimension(:) :: dens_flux +!real(kind=WP), allocatable,dimension(:,:,:) :: tsh, tsv, tsh_nodes +!real(kind=WP), allocatable,dimension(:,:) :: hd_flux,vd_flux +!Isoneutral diffusivities (or xy diffusivities if Redi=.false) +real(kind=WP), allocatable :: Ki(:,:) + +! --> auxiliary array to store an intermediate part of the rhs computations. +real(kind=WP), allocatable,dimension(:) :: ssh_rhs_old !, ssh_rhs_old2 !PS +real(kind=WP) :: is_nonlinfs + +!_______________________________________________________________________________ +! Arrays added for pressure gradient force calculation +real(kind=WP), allocatable,dimension(:,:) :: density_m_rho0 +real(kind=WP), allocatable,dimension(:,:) :: density_m_rho0_slev +real(kind=WP), allocatable,dimension(:,:) :: density_ref +real(kind=WP), allocatable,dimension(:,:) :: density_dmoc +real(kind=WP), allocatable,dimension(:,:) :: pgf_x, pgf_y + +!_______________________________________________________________________________ +!!PS ! dummy arrays +real(kind=WP), allocatable,dimension(:,:) :: dum_3d_n !, dum_3d_e +!!PS real(kind=WP), allocatable,dimension(:) :: dum_2d_n, dum_2d_e + +!_______________________________________________________________________________ +!Monin-Obukhov correction +real(kind=WP),allocatable :: mo(:,:),mixlength(:) +!GM_stuff +real(kind=WP),allocatable :: bvfreq(:,:),mixlay_dep(:),bv_ref(:) + +real(kind=WP), allocatable :: fer_UV(:,:,:), fer_wvel(:,:) +real(kind=WP), target, allocatable :: fer_c(:), fer_scal(:), fer_K(:,:), fer_gamma(:,:,:) + +real(kind=WP), allocatable :: ice_rejected_salt(:) +END MODULE o_ARRAYS +!========================================================== diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index fa64b9e13..bf355e527 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -1,6 +1,7 @@ MODULE Toy_Channel_Soufflet USE MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP USE MOD_TRACER USE o_ARRAYS USE o_PARAM diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 3d248f3cf..73a11cafb 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -3,6 +3,7 @@ module write_step_info_interface subroutine write_step_info(istep,outfreq,tracers,partit,mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER integer :: istep,outfreq type(t_mesh), intent(in), target :: mesh @@ -16,6 +17,7 @@ module check_blowup_interface subroutine check_blowup(istep, tracers,partit,mesh) use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER integer :: istep type(t_mesh), intent(in), target :: mesh @@ -31,6 +33,7 @@ subroutine write_step_info(istep, outfreq, tracers, partit, mesh) use g_config, only: dt, use_ice use MOD_MESH USE MOD_PARTIT + USE MOD_PARSUP use MOD_TRACER use o_PARAM use o_ARRAYS @@ -244,6 +247,7 @@ subroutine check_blowup(istep, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM use o_ARRAYS use i_ARRAYS From 831f932931ec4aa4376ae32ca31f78a03d2c2854 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 8 Oct 2021 11:45:53 +0200 Subject: [PATCH 393/909] correcting minoir things regarding interfaces and arguments for par_ex --- src/fvom_init.F90 | 2 +- src/gen_halo_exchange.F90 | 12 ++++----- src/gen_ic3d.F90 | 34 ++++++++++++------------ src/gen_model_setup.F90 | 2 +- src/gen_modules_cvmix_idemix.F90 | 4 +-- src/gen_modules_cvmix_kpp.F90 | 2 +- src/gen_surface_forcing.F90 | 45 ++++++++++++++++---------------- src/io_blowup.F90 | 2 +- src/io_meandata.F90 | 2 +- src/io_restart.F90 | 12 ++++----- src/oce_adv_tra_driver.F90 | 2 +- src/oce_ale.F90 | 4 +-- src/oce_ale_pressure_bv.F90 | 38 +++++++++++++-------------- src/oce_ale_vel_rhs.F90 | 2 +- src/oce_mesh.F90 | 18 ++++++------- src/oce_setup_step.F90 | 3 --- src/write_step_info.F90 | 2 +- 17 files changed, 92 insertions(+), 94 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 774365f8b..22a0ec4be 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -1172,7 +1172,7 @@ subroutine find_levels_cavity(mesh) print *, achar(27)//'[31m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[7;31m'//' -[ERROR]->: Cavity geometry constrains did not converge !!! *\(>︿<)/*'//achar(27)//'[0m' write(*,*) - call par_ex(0) + call par_ex(partit, 0) else write(*,*) print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' diff --git a/src/gen_halo_exchange.F90 b/src/gen_halo_exchange.F90 index cd74a7a60..ee3bfb9f4 100755 --- a/src/gen_halo_exchange.F90 +++ b/src/gen_halo_exchange.F90 @@ -422,7 +422,7 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) print *,'Subroutine exchange_nod3D not implemented for',nl1,'layers.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(1) + call par_ex(partit, 1) endif nl2 = ubound(nod2_array3D,1) @@ -431,7 +431,7 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) print *,'Subroutine exchange_nod3D not implemented for',nl2,'layers.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(1) + call par_ex(partit, 1) endif #ifdef DEBUG @@ -503,7 +503,7 @@ subroutine exchange_nod3D_n_begin(nod_array3D, partit) print *,nl1,'layers and / or ',n_val,'values per element.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(1) + call par_ex(partit, 1) endif endif sn=com_nod2D%sPEnum @@ -647,7 +647,7 @@ subroutine exchange_elem3D_begin(elem_array3D, partit) END DO else if (mype==0) print *,'Sorry, no MPI datatype prepared for',nl1,'values per element (exchange_elem3D)' - call par_ex(1) + call par_ex(partit, 1) endif com_elem2D%nreq = rn+sn @@ -700,7 +700,7 @@ subroutine exchange_elem3D_begin(elem_array3D, partit) END DO else if (mype==0) print *,'Sorry, no MPI datatype prepared for',nl1,'values per element (exchange_elem3D)' - call par_ex(1) + call par_ex(partit, 1) endif com_elem2D_full%nreq = rn+sn @@ -759,7 +759,7 @@ subroutine exchange_elem3D_n_begin(elem_array3D, partit) print *,nl1,'layers and / or ',n_val,'values per element.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(1) + call par_ex(partit, 1) endif endif diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 2f283c9ed..0e0c9f9b8 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -68,20 +68,20 @@ MODULE g_ic3d SUBROUTINE nc_readGrid(partit) ! Read time array and grid from nc file IMPLICIT NONE - type(t_partit), intent(in) :: partit - integer :: iost !I/O status - integer :: ncid ! netcdf file id - integer :: i + type(t_partit), intent(inout) :: partit + integer :: iost !I/O status + integer :: ncid ! netcdf file id + integer :: i ! ID dimensions and variables: - integer :: id_lon - integer :: id_lat - integer :: id_lond - integer :: id_latd - integer :: id_depth - integer :: id_depthd - integer :: nf_start(4) - integer :: nf_edges(4) - integer :: ierror ! return error code + integer :: id_lon + integer :: id_lat + integer :: id_lond + integer :: id_latd + integer :: id_depth + integer :: id_depthd + integer :: nf_start(4) + integer :: nf_edges(4) + integer :: ierror ! return error code !open file if (partit%mype==0) then @@ -568,11 +568,11 @@ SUBROUTINE nc_end END SUBROUTINE nc_end - SUBROUTINE check_nferr(iost,fname, partit ) + SUBROUTINE check_nferr(iost,fname, partit) IMPLICIT NONE - type(t_partit), intent(in) :: partit - character(len=MAX_PATH), intent(in) :: fname - integer, intent(in) :: iost + type(t_partit), intent(inout) :: partit + character(len=MAX_PATH), intent(in) :: fname + integer, intent(in) :: iost if (iost .ne. NF_NOERR) then write(*,*) 'ERROR: I/O status= "',trim(nf_strerror(iost)),'";',iost,' file= ', trim(fname) call par_ex (partit) diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index 1df49dd5b..b7ded86ba 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -120,7 +120,7 @@ subroutine get_run_steps(nsteps, partit) USE MOD_PARSUP implicit none - type(t_partit), intent(in) :: partit + type(t_partit), intent(inout) :: partit integer, intent(inout) :: nsteps integer :: i, temp_year, temp_mon, temp_fleapyear diff --git a/src/gen_modules_cvmix_idemix.F90 b/src/gen_modules_cvmix_idemix.F90 index a7d4c1601..a949cd2d0 100644 --- a/src/gen_modules_cvmix_idemix.F90 +++ b/src/gen_modules_cvmix_idemix.F90 @@ -237,7 +237,7 @@ subroutine init_cvmix_idemix(partit, mesh) write(*,*) ' idemix_botforc_file' write(*,*) '____________________________________________________________________' end if - call par_ex(0) + call par_ex(partit, 0) end if !_______________________________________________________________________ @@ -260,7 +260,7 @@ subroutine init_cvmix_idemix(partit, mesh) write(*,*) ' idemix_botforc_file' write(*,*) '____________________________________________________________________' end if - call par_ex(0) + call par_ex(partit, 0) end if !_______________________________________________________________________ diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index caacc5105..c0e0dc6c6 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -559,7 +559,7 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) else write(*,*) " --> Error: this kpp_internalmix scheme is not supported" write(*,*) " for the mixing below the OBL, either KPP or PP !" - call par_ex + call par_ex(partit) end if !___________________________________________________________________ diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index fdcdcc1db..55570ee37 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -206,7 +206,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) ! get dimensions if (partit%mype==0) then @@ -222,7 +222,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) if (partit%mype==0) then iost = nf_inq_dimid(ncid, "LON", id_lond) @@ -237,7 +237,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) if (partit%mype==0) then iost = nf_inq_dimid(ncid, "TIME", id_timed) @@ -249,7 +249,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) ! get variable id if (partit%mype==0) then @@ -265,7 +265,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) if (partit%mype==0) then iost = nf_inq_varid(ncid, "LON", id_lon) if (iost .ne. NF_NOERR) then @@ -279,7 +279,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) if (partit%mype==0) then iost = nf_inq_varid(ncid, "TIME", id_time) @@ -291,23 +291,23 @@ SUBROUTINE nc_readTimeGrid(flf, partit) end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) ! get dimensions size if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_latd, flf%nc_Nlat) end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_lond, flf%nc_Nlon) end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_timed,flf%nc_Ntime) end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) flf%nc_Nlon=flf%nc_Nlon+2 !for the halo in case of periodic boundary call MPI_BCast(flf%nc_Nlon, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) call MPI_BCast(flf%nc_Nlat, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) @@ -330,7 +330,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) iost = nf_get_vara_double(ncid, id_lat, nf_start, nf_edges, flf%nc_lat) end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) ! read lon if (partit%mype==0) then @@ -341,7 +341,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) flf%nc_lon(flf%nc_Nlon) =flf%nc_lon(2) end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) !____________________________________________________________________________ ! read time axis from file if (partit%mype==0) then @@ -352,7 +352,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) end if call MPI_BCast(flf%nc_time, flf%nc_Ntime, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) ! digg for calendar attribute in time axis variable if (partit%mype==0 .and. use_flpyrcheck) then @@ -386,7 +386,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) write(*,*) ' message block in gen_surface_forcing.F90.' write(*,*) '____________________________________________________________' print *, achar(27)//'[0m' - call par_ex(0) + call par_ex(partit, 0) end if elseif ((trim(flf%calendar).eq.'julian') .or. & (trim(flf%calendar).eq.'gregorian') .or. & @@ -407,7 +407,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) write(*,*) ' gen_surface_forcing.F90' write(*,*) '____________________________________________________________' print *, achar(27)//'[0m' - call par_ex(0) + call par_ex(partit, 0) end if else print *, achar(27)//'[31m' @@ -426,7 +426,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) write(*,*) ' example with ncdump -h forcing_file.nc ' write(*,*) '____________________________________________________________' print *, achar(27)//'[0m' - call par_ex(0) + call par_ex(partit, 0) end if end if @@ -459,7 +459,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) iost = nf_close(ncid) end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call check_nferr(iost,flf%file_name,partit) if (ic_cyclic) then flf%nc_lon(1) =flf%nc_lon(1)-360._WP @@ -921,7 +921,7 @@ SUBROUTINE sbc_ini(partit, mesh) if (mype==0) WRITE(*,*) ' file : ', 'namelist_bc.nml',' open ok' else if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist_bc.nml',' ; iostat=',iost - call par_ex + call par_ex(partit) stop endif READ( nm_sbc_unit, nml=nam_sbc, iostat=iost ) @@ -1260,14 +1260,15 @@ SUBROUTINE sbc_end & qns, emp, qsr) END SUBROUTINE sbc_end - SUBROUTINE check_nferr(iost,fname) + SUBROUTINE check_nferr(iost,fname, partit) IMPLICIT NONE - character(len=MAX_PATH), intent(in) :: fname - integer, intent(in) :: iost + type(t_partit), intent(inout), target :: partit + character(len=MAX_PATH), intent(in) :: fname + integer, intent(in) :: iost if (iost .ne. NF_NOERR) then write(*,*) 'ERROR: I/O status= "',trim(nf_strerror(iost)),'";',iost,' file= ',fname - call par_ex + call par_ex(partit) stop endif END SUBROUTINE check_nferr diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 1960ad72c..db12a33ce 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -481,7 +481,7 @@ end subroutine assoc_ids subroutine was_error(id, partit) implicit none type(nc_file), intent(inout) :: id - type(t_partit), intent(in) :: partit + type(t_partit), intent(inout) :: partit integer :: k, status, ierror call MPI_BCast(id%error_count, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 9c1443377..885538539 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -128,7 +128,7 @@ subroutine ini_mean_io(tracers, partit, mesh) if (mype==0) WRITE(*,*) ' file : ', 'namelist.io',' open ok' else if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.io',' ; iostat=',iost - call par_ex + call par_ex(partit) stop endif READ(nm_io_unit, nml=nml_listsize, iostat=iost ) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index cab75b985..18a684ab3 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -271,7 +271,7 @@ subroutine restart(istep, l_write, l_read, tracers, partit, mesh) else write(*,*) 'You did not specify a supported outputflag.' write(*,*) 'The program will stop to give you opportunity to do it.' - call par_ex(1) + call par_ex(partit, 1) stop endif @@ -529,7 +529,7 @@ subroutine write_restart(id, istep, partit, mesh) order=2 else if (mype==0) write(*,*) 'the shape of the array in the restart file and the grid size are different' - call par_ex + call par_ex(partit) stop end if if (mype==0) allocate(aux (size_gen)) @@ -555,7 +555,7 @@ subroutine write_restart(id, istep, partit, mesh) if (mype==0) deallocate(aux) else if (mype==0) write(*,*) 'not supported shape of array in restart file' - call par_ex + call par_ex(partit) stop end if call was_error(id, partit); c=1 @@ -607,7 +607,7 @@ subroutine read_restart(id, partit, mesh, arg) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex + call par_ex(partit) end if if (.not. present(arg)) then @@ -664,7 +664,7 @@ subroutine read_restart(id, partit, mesh, arg) order=2 else if (mype==0) write(*,*) 'the shape of the array in the restart file and the grid size are different' - call par_ex + call par_ex(partit) stop end if if (mype==0) allocate(aux (size_gen)) @@ -691,7 +691,7 @@ subroutine read_restart(id, partit, mesh, arg) if (mype==0) deallocate(aux) else if (mype==0) write(*,*) 'not supported shape of array in restart file when reading restart' - call par_ex + call par_ex(partit) stop end if call was_error(id, partit); c=1 diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 98e7a4fb6..875f43052 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -190,7 +190,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) call adv_tra_ver_upw1 ( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!' - call par_ex(1) + call par_ex(partit, 1) ! --> be aware the vertical implicite part in case without FCT is done in ! oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, partit, mesh) ! for do_wimpl=.true. diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 271ebe324..2af4fb229 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -878,7 +878,7 @@ subroutine init_thickness_ale(partit, mesh) write(*,*) write(*,*) '____________________________________________________________' write(*,*) 'The vertical ALE discretisation ', which_ale,' is currently not supported!!!' - call par_ex(1) + call par_ex(partit, 1) end if endif @@ -2267,7 +2267,7 @@ subroutine vert_vel_ale(partit, mesh) write(*,*) end if end do -!!PS call par_ex(1) +!!PS call par_ex(partit, 1) endif !___________________________________________________________________________ diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 04e5a8654..29042954e 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -288,7 +288,7 @@ subroutine pressure_bv(tracers, partit, mesh) call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select end do @@ -352,7 +352,7 @@ subroutine pressure_bv(tracers, partit, mesh) call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select !_______________________________________________________________ rho(nz)= bulk_0(nz) + Z_3d_n(nz,node)*(bulk_pz(nz) + Z_3d_n(nz,node)*bulk_pz2(nz)) @@ -510,7 +510,7 @@ subroutine pressure_force_4_linfs(tracers, partit, mesh) write(*,*) ' see in namelist.oce --> which_pgf = sergey, ' write(*,*) ' shchepetkin, easypgf ' write(*,*) '________________________________________________________' - call par_ex(1) + call par_ex(partit, 1) end if !___________________________________________________________________________ @@ -531,7 +531,7 @@ subroutine pressure_force_4_linfs(tracers, partit, mesh) write(*,*) ' see in namelist.oce --> which_pgf = nemo, ' write(*,*) ' shchepetkin, cubicspline ' write(*,*) '________________________________________________________' - call par_ex(1) + call par_ex(partit, 1) end if end if end subroutine pressure_force_4_linfs @@ -727,7 +727,7 @@ subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) call densityJM_components(interp_n_temp, interp_n_salt, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select interp_n_dens(ni) = bulk_0 + Z_n(nle)*(bulk_pz + Z_n(nle)*bulk_pz2) !!PS interp_n_dens(ni) = interp_n_dens(ni)*rhopot/(interp_n_dens(ni)+0.1_WP*Z_n(nle))*real(state_equation))-density_0 @@ -1086,7 +1086,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select end if @@ -1176,7 +1176,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1215,7 +1215,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1318,7 +1318,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1357,7 +1357,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1830,7 +1830,7 @@ subroutine pressure_force_4_zxxxx(tracers, partit, mesh) write(*,*) ' see in namelist.oce --> which_pgf = ' write(*,*) ' shchepetkin, cubicspline, easypgf ' write(*,*) '________________________________________________________' - call par_ex(1) + call par_ex(partit, 1) end if end subroutine pressure_force_4_zxxxx ! @@ -2333,7 +2333,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select end if @@ -2412,7 +2412,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2461,7 +2461,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2475,7 +2475,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) ! --> this is not wanted !!! write(*,*) ' --> would do second order surface boundary density extrapolation' write(*,*) ' This is not wanted, model stops here' - call par_ex(0) + call par_ex(partit, 0) end if end do !_______________________________________________________________________ @@ -2545,7 +2545,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(3), salt_at_Zn(3), bulk_0(3), bulk_pz(3), bulk_pz2(3), rhopot(3), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select rho_at_Zn = bulk_0 + Z_n(nlz)*(bulk_pz + Z_n(nlz)*bulk_pz2) rho_at_Zn = rho_at_Zn*rhopot/(rho_at_Zn+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2626,7 +2626,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2675,7 +2675,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2692,7 +2692,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) write(*,*) ' idx = ', idx write(*,*) ' nle = ', nle write(*,*) ' nln = ', nlevels_nod2D(elnodes)-1 - call par_ex(0) + call par_ex(partit, 0) end if end do !_______________________________________________________________________ diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 3ab04ac1a..1ca0d1d93 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -119,7 +119,7 @@ subroutine compute_vel_rhs(partit, mesh) ! advection if (mom_adv==1) then if (mype==0) write(*,*) 'in moment not adapted mom_adv advection typ for ALE, check your namelist' - call par_ex(1) + call par_ex(partit, 1) elseif (mom_adv==2) then call momentum_adv_scalar(partit, mesh) end if diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 994ca69c8..e24343e6d 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -412,7 +412,7 @@ SUBROUTINE read_mesh(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(0) + call par_ex(partit, 0) !___________________________________________________________________________ ! check if rotation needs to be applied to an unrotated mesh elseif ((mype==0) .and. (.not. force_rotation) .and. (flag_checkmustrot==1) .and. (.not. toy_ocean)) then @@ -433,7 +433,7 @@ SUBROUTINE read_mesh(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(0) + call par_ex(partit, 0) end if @@ -525,7 +525,7 @@ SUBROUTINE read_mesh(partit, mesh) call MPI_BCast(mesh%nl, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) if (mesh%nl < 3) then write(*,*) '!!!Number of levels is less than 3, model will stop!!!' - call par_ex + call par_ex(partit) stop end if allocate(mesh%zbar(mesh%nl)) ! allocate the array for storing the standard depths @@ -596,7 +596,7 @@ SUBROUTINE read_mesh(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(0) + call par_ex(partit, 0) end if ! ============================== @@ -997,7 +997,7 @@ subroutine find_levels_cavity(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex + call par_ex(partit) end if end if @@ -1085,7 +1085,7 @@ subroutine find_levels_cavity(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex + call par_ex(partit) end if end if @@ -1244,7 +1244,7 @@ subroutine find_levels_cavity(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex + call par_ex(partit) end if end if @@ -1818,7 +1818,7 @@ SUBROUTINE find_neighbors(partit, mesh) END DO if (elem1<2) then write(*,*) 'Insufficient number of neighbors ', myList_elem2D(elem) - call par_ex(1) + call par_ex(partit, 1) STOP end if END DO @@ -2562,7 +2562,7 @@ SUBROUTINE check_mesh_consistency(partit, mesh) write(*,*) '***end level area_test***' end if -!call par_ex +!call par_ex(partit) !stop END SUBROUTINE check_mesh_consistency ! diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 3a54aeef8..171fec405 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -50,9 +50,6 @@ subroutine before_oce_step(tracers, partit, mesh) end subroutine end interface end module - -! -! !_______________________________________________________________________________ subroutine ocean_setup(tracers, partit, mesh) USE MOD_MESH diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 73a11cafb..7c158c281 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -531,6 +531,6 @@ subroutine check_blowup(istep, tracers, partit, mesh) end if call blowup(istep, tracers, partit, mesh) if (mype==0) write(*,*) ' --> finished writing blow up file' - call par_ex + call par_ex(partit) endif end subroutine From 81646243f0754c7e2ab850a72d13df65d049e305 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 8 Oct 2021 11:50:33 +0200 Subject: [PATCH 394/909] USE MOD_PARSUP was used in par_ex which is wrong --- src/gen_modules_partitioning.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 495dc23b0..3740ad0fa 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -77,7 +77,6 @@ end subroutine par_init !================================================================= subroutine par_ex(partit, abort) ! finalizes MPI USE MOD_PARTIT -USE MOD_PARSUP #ifndef __oifs !For standalone and coupled ECHAM runs #if defined (__oasis) From bdbc0bbfc43d968ace19b06f2be2a4b39204dd46 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 8 Oct 2021 12:48:33 +0200 Subject: [PATCH 395/909] ocean_areawithcav was forgotten in associate_mesh_ass.h --- src/associate_mesh_ass.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/associate_mesh_ass.h b/src/associate_mesh_ass.h index ebeb51a51..018f3e347 100644 --- a/src/associate_mesh_ass.h +++ b/src/associate_mesh_ass.h @@ -5,6 +5,7 @@ edge2D_in => mesh%edge2D_in ocean_area => mesh%ocean_area nl => mesh%nl nn_size => mesh%nn_size +ocean_areawithcav => mesh%ocean_areawithcav coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes From f9bf1e1f272119c87dfe54ab2adf0682e4d76576 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 11 Oct 2021 12:17:04 +0200 Subject: [PATCH 396/909] the partitioner shall work now with mod_partit. I see that there a lot of whings whichin partitioner which are not used. it shall be cleaned up next. --- mesh_part/CMakeLists.txt | 2 +- src/associate_mesh_ini.h | 71 ++++++++++ src/associate_part_ass.h | 2 +- src/fort_part.c | 6 +- src/fvom_init.F90 | 291 ++++++++++++++++++--------------------- src/gen_comm.F90 | 32 ++--- src/io_meandata.F90 | 2 - src/oce_local.F90 | 32 ++--- 8 files changed, 241 insertions(+), 197 deletions(-) create mode 100644 src/associate_mesh_ini.h diff --git a/mesh_part/CMakeLists.txt b/mesh_part/CMakeLists.txt index afe0c5e58..54cdc4b59 100644 --- a/mesh_part/CMakeLists.txt +++ b/mesh_part/CMakeLists.txt @@ -4,7 +4,7 @@ project(fesom_ini C Fortran) # get our source files set(src_home ${CMAKE_CURRENT_LIST_DIR}/../src) -set(sources_Fortran ${src_home}/MOD_MESH.F90 ${src_home}/oce_modules.F90 ${src_home}/gen_modules_config.F90 ${src_home}/gen_modules_partitioning.F90 ${src_home}/gen_modules_rotate_grid.F90 ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90 ${src_home}/MOD_READ_BINARY_ARRAYS.F90 ${src_home}/MOD_WRITE_BINARY_ARRAYS.F90) +set(sources_Fortran ${src_home}/MOD_MESH.F90 ${src_home}/oce_modules.F90 ${src_home}/gen_modules_config.F90 ${src_home}/gen_modules_partitioning.F90 ${src_home}/gen_modules_rotate_grid.F90 ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90 ${src_home}/MOD_READ_BINARY_ARRAYS.F90 ${src_home}/MOD_WRITE_BINARY_ARRAYS.F90 ${src_home}/MOD_PARTIT.F90) set(sources_C ${src_home}/fort_part.c) diff --git a/src/associate_mesh_ini.h b/src/associate_mesh_ini.h new file mode 100644 index 000000000..2a89de07a --- /dev/null +++ b/src/associate_mesh_ini.h @@ -0,0 +1,71 @@ +integer , pointer :: nod2D +integer , pointer :: elem2D +integer , pointer :: edge2D +integer , pointer :: edge2D_in +real(kind=WP) , pointer :: ocean_area +integer , pointer :: nl +real(kind=WP), dimension(:,:), pointer :: coord_nod2D, geo_coord_nod2D +integer, dimension(:,:) , pointer :: elem2D_nodes +integer, dimension(:,:) , pointer :: edges +integer, dimension(:,:) , pointer :: edge_tri +integer, dimension(:,:) , pointer :: elem_edges +real(kind=WP), dimension(:) , pointer :: elem_area +real(kind=WP), dimension(:,:), pointer :: edge_dxdy, edge_cross_dxdy +real(kind=WP), dimension(:) , pointer :: elem_cos, metric_factor +integer, dimension(:,:), pointer :: elem_neighbors +integer, dimension(:,:), pointer :: nod_in_elem2D +real(kind=WP), dimension(:,:), pointer :: x_corners, y_corners +integer, dimension(:) , pointer :: nod_in_elem2D_num +real(kind=WP), dimension(:) , pointer :: depth +real(kind=WP), dimension(:,:), pointer :: gradient_vec +real(kind=WP), dimension(:,:), pointer :: gradient_sca +integer, dimension(:) , pointer :: bc_index_nod2D +real(kind=WP), dimension(:) , pointer :: zbar, Z, elem_depth +integer, dimension(:) , pointer :: nlevels, nlevels_nod2D +real(kind=WP), dimension(:,:), pointer :: area, area_inv +real(kind=WP), dimension(:) , pointer :: mesh_resolution +integer, dimension(:) , pointer :: cavity_flag, ulevels_nod2D, ulevels +real(kind=WP), dimension(:) , pointer :: cavity_depth +type(sparse_matrix) , pointer :: ssh_stiff + +nod2D => mesh%nod2D +elem2D => mesh%elem2D +edge2D => mesh%edge2D +edge2D_in => mesh%edge2D_in +ocean_area => mesh%ocean_area +nl => mesh%nl + +coord_nod2D => mesh%coord_nod2D +geo_coord_nod2D => mesh%geo_coord_nod2D +elem2D_nodes => mesh%elem2D_nodes +edges => mesh%edges +edge_tri => mesh%edge_tri +elem_edges => mesh%elem_edges +elem_area => mesh%elem_area +edge_dxdy => mesh%edge_dxdy +edge_cross_dxdy => mesh%edge_cross_dxdy +elem_cos => mesh%elem_cos +metric_factor => mesh%metric_factor +elem_neighbors => mesh%elem_neighbors +nod_in_elem2D => mesh%nod_in_elem2D +x_corners => mesh%x_corners +y_corners => mesh%y_corners +nod_in_elem2D_num => mesh%nod_in_elem2D_num +depth => mesh%depth +gradient_vec => mesh%gradient_vec +gradient_sca => mesh%gradient_sca +bc_index_nod2D => mesh%bc_index_nod2D +zbar => mesh%zbar +Z => mesh%Z +elem_depth => mesh%elem_depth +nlevels => mesh%nlevels +nlevels_nod2D => mesh%nlevels_nod2D +area => mesh%area +area_inv => mesh%area_inv +mesh_resolution => mesh%mesh_resolution +ssh_stiff => mesh%ssh_stiff +!!$cavity_flag_n => mesh%cavity_flag_n +!!$cavity_flag_e => mesh%cavity_flag_e +ulevels_nod2D => mesh%ulevels_nod2D +ulevels => mesh%ulevels +cavity_depth => mesh%cavity_depth diff --git a/src/associate_part_ass.h b/src/associate_part_ass.h index 33d9f27d0..7ded6750f 100644 --- a/src/associate_part_ass.h +++ b/src/associate_part_ass.h @@ -60,4 +60,4 @@ r_mpitype_nod2D_i(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D_i s_mpitype_nod3D(1:com_nod2D%sPEnum, lb:ub, 1:3) => partit%s_mpitype_nod3D r_mpitype_nod3D(1:com_nod2D%rPEnum, lb:ub, 1:3) => partit%r_mpitype_nod3D -part(1:npes+1) => partit%part +part => partit%part diff --git a/src/fort_part.c b/src/fort_part.c index 837596771..9d903a73d 100644 --- a/src/fort_part.c +++ b/src/fort_part.c @@ -44,7 +44,7 @@ #if METIS_VERSION == 5 /* ---------------- METIS 5 part ------------------------ */ #include "metis.h" -void partit(idx_t *n, idx_t *ptr, idx_t *adj, idx_t *wgt, idx_t *np, idx_t *part) +void do_partit(idx_t *n, idx_t *ptr, idx_t *adj, idx_t *wgt, idx_t *np, idx_t *part) { int i, j, wgt_type; idx_t opt[METIS_NOPTIONS]; @@ -278,7 +278,7 @@ void partit(idx_t *n, idx_t *ptr, idx_t *adj, idx_t *wgt, idx_t *np, idx_t *part wgt_loc[vertex_loc]=wgt[j]; } - partit(&n_loc, ptr_loc, adj_loc, wgt_loc, np, part_loc); + do_partit(&n_loc, ptr_loc, adj_loc, wgt_loc, np, part_loc); /* Convert the partitioned graph back to the current level indexing */ for (j=0; j<*n; j++) @@ -302,7 +302,7 @@ void partit(idx_t *n, idx_t *ptr, idx_t *adj, idx_t *wgt, idx_t *np, idx_t *part #elif METIS_VERSION == 4 /* ---------------- METIS 4 part ------------------------ */ -void partit(int *n, int *ptr, int *adj, int *wgt, int *np, int *part) +void do_partit(int *n, int *ptr, int *adj, int *wgt, int *np, int *part) { int opt[5]; int numfl=1; /* 0: C-numbering ; 1: F-numbering*/ diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 22a0ec4be..ded178dbf 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -14,7 +14,7 @@ program MAIN use o_PARAM use MOD_MESH - use g_PARSUP + use MOD_PARTIT use g_CONFIG use g_rotate_grid @@ -58,15 +58,19 @@ subroutine stiff_mat_ini(mesh) end subroutine stiff_mat_ini end interface interface - subroutine set_par_support_ini(mesh) + subroutine set_par_support_ini(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine set_par_support_ini end interface interface - subroutine communication_ini(mesh) + subroutine communication_ini(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine communication_ini end interface @@ -86,7 +90,8 @@ end subroutine find_levels_cavity character(len=MAX_PATH) :: nmlfile !> name of configuration namelist file integer :: start_t, interm_t, finish_t, rate_t - type(t_mesh), target, save :: mesh + type(t_mesh), target, save :: mesh + type(t_partit), target, save :: partit call system_clock(start_t, rate_t) interm_t = start_t @@ -126,12 +131,12 @@ end subroutine find_levels_cavity interm_t = finish_t call stiff_mat_ini(mesh) - call set_par_support_ini(mesh) + call set_par_support_ini(partit, mesh) call system_clock(finish_t) print '("**** Partitioning time = ",f12.3," seconds. ****")', & real(finish_t-interm_t)/real(rate_t) interm_t = finish_t - call communication_ini(mesh) + call communication_ini(partit, mesh) call system_clock(finish_t) print '("**** Storing partitioned mesh time = ",f12.3," seconds. ****")', & real(finish_t-interm_t)/real(rate_t) @@ -144,7 +149,6 @@ end program MAIN subroutine read_mesh_ini(mesh) USE MOD_MESH USE o_PARAM -USE g_PARSUP use g_CONFIG use g_rotate_grid ! @@ -162,11 +166,9 @@ subroutine read_mesh_ini(mesh) ! =================== ! Surface mesh ! =================== - if (mype==0) then - print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;1m' //' -->: read elem2d.out & nod2d.out '//achar(27)//'[0m' - end if - + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: read elem2d.out & nod2d.out '//achar(27)//'[0m' + open (20,file=trim(meshpath)//'nod2d.out', status='old') open (21,file=trim(meshpath)//'elem2d.out', status='old') READ(20,*) mesh%nod2D @@ -202,15 +204,8 @@ subroutine read_mesh_ini(mesh) mesh%elem2D_nodes(1:3,:) = reshape(elem_data, shape(mesh%elem2D_nodes(1:3,:))) mesh%elem2D_nodes(4,:) = mesh%elem2D_nodes(1,:) end if - + deallocate(elem_data) -!!$ do n=1,elem2D -!!$ read(21,*) n1,n2,n3 -!!$ elem2D_nodes(1,n)=n1 -!!$ elem2D_nodes(2,n)=n2 -!!$ elem2D_nodes(3,n)=n3 -!!$ end do - ! CLOSE(21) write(*,*) '=========================' @@ -223,21 +218,18 @@ END SUBROUTINE read_mesh_ini subroutine read_mesh_cavity(mesh) use mod_mesh use o_PARAM - use g_PARSUP use g_CONFIG implicit none type(t_mesh), intent(inout), target :: mesh integer :: node - character(len=MAX_PATH) :: fname + character(len=MAX_PATH) :: fname logical :: file_exist=.False. #include "associate_mesh_ini.h" !___________________________________________________________________________ - if (mype==0) then - print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;1m' //' -->: read cavity depth '//achar(27)//'[0m' - end if + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: read cavity depth '//achar(27)//'[0m' !___________________________________________________________________________ ! read depth of cavity-ocean boundary @@ -249,12 +241,10 @@ subroutine read_mesh_cavity(mesh) allocate(mesh%cavity_depth(mesh%nod2D)) cavity_depth => mesh%cavity_depth else - if (mype==0) then - write(*,*) '____________________________________________________________________' - write(*,*) ' ERROR: could not find cavity file: cavity_depth.out' - write(*,*) ' --> stop partitioning here !' - write(*,*) '____________________________________________________________________' - end if + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: could not find cavity file: cavity_depth.out' + write(*,*) ' --> stop partitioning here !' + write(*,*) '____________________________________________________________________' stop end if @@ -314,7 +304,6 @@ END SUBROUTINE test_tri_ini SUBROUTINE find_edges_ini(mesh) USE MOD_MESH USE o_PARAM -USE g_PARSUP USE g_CONFIG use g_rotate_grid IMPLICIT NONE @@ -341,10 +330,8 @@ end subroutine elem_center ! (a) find edges. To make the procedure fast ! one needs neighbourhood arrays ! ==================== -if (mype==0) then - print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;1m' //' -->: compute edge connectivity '//achar(27)//'[0m' -end if +print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' +print *, achar(27)//'[7;1m' //' -->: compute edge connectivity '//achar(27)//'[0m' allocate(ne_num(nod2d), ne_pos(MAX_ADJACENT, nod2D), nn_num(nod2D)) ne_num=0 @@ -654,7 +641,6 @@ END SUBROUTINE find_edges_ini subroutine find_levels(mesh) use g_config use mod_mesh - use g_parsup implicit none INTEGER :: nodes(3), elems(3), eledges(3) integer :: elem, elem1, j, n, nneighb, q, node, i, nz @@ -666,10 +652,8 @@ subroutine find_levels(mesh) #include "associate_mesh_ini.h" - if (mype==0) then - print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;1m' //' -->: read bottom depth '//achar(27)//'[0m' - end if + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: read bottom depth '//achar(27)//'[0m' ALLOCATE(mesh%depth(nod2D)) depth => mesh%depth !required after the allocation, otherwise the pointer remains undefined @@ -697,11 +681,9 @@ subroutine find_levels(mesh) if(depth(2)>0) depth=-depth ! depth is negative !___________________________________________________________________________ - if (mype==0) then - print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;1m' //' -->: compute elem, vertice bottom depth index '//achar(27)//'[0m' - end if - + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute elem, vertice bottom depth index '//achar(27)//'[0m' + allocate(mesh%nlevels(elem2D)) nlevels => mesh%nlevels !required after the allocation, otherwise the pointer remains undefined allocate(mesh%nlevels_nod2D(nod2D)) @@ -741,16 +723,13 @@ subroutine find_levels(mesh) !___________________________________________________________________________ ! write out vertical level indices before iterative geometric adaption to ! exclude isolated cells - if (mype==0) then - !_______________________________________________________________________ - file_name=trim(meshpath)//'elvls_raw.out' - open(fileID, file=file_name) - do n=1,elem2D - write(fileID,*) nlevels(n) - end do - close(fileID) - endif - + !_______________________________________________________________________ + file_name=trim(meshpath)//'elvls_raw.out' + open(fileID, file=file_name) + do n=1,elem2D + write(fileID,*) nlevels(n) + end do + close(fileID) !___________________________________________________________________________ ! check for isolated cells (cells with at least two boundary faces or three ! boundary vertices) and eliminate them --> FESOM2.0 doesn't like these kind @@ -841,30 +820,27 @@ subroutine find_levels(mesh) !___________________________________________________________________________ ! write vertical level indices into file - if (mype==0) then - !_______________________________________________________________________ - file_name=trim(meshpath)//'elvls.out' - open(fileID, file=file_name) - do n=1,elem2D - write(fileID,*) nlevels(n) - end do - close(fileID) - - !_______________________________________________________________________ - file_name=trim(meshpath)//'nlvls.out' - open(fileID, file=file_name) - do n=1,nod2D - write(fileID,*) nlevels_nod2D(n) - end do - close(fileID) + !_______________________________________________________________________ + file_name=trim(meshpath)//'elvls.out' + open(fileID, file=file_name) + do n=1,elem2D + write(fileID,*) nlevels(n) + end do + close(fileID) + !_______________________________________________________________________ + file_name=trim(meshpath)//'nlvls.out' + open(fileID, file=file_name) + do n=1,nod2D + write(fileID,*) nlevels_nod2D(n) + end do + close(fileID) - !_______________________________________________________________________ - write(*,*) '=========================' - write(*,*) 'Mesh is read : ', 'nod2D=', nod2D,' elem2D=', elem2D, ' nl=', nl - write(*,*) 'Min/max depth on mype: ', -zbar(minval(nlevels)),-zbar(maxval(nlevels)) - write(*,*) '3D tracer nodes on mype ', sum(nlevels_nod2d)-(elem2D) - write(*,*) '=========================' - endif + !_______________________________________________________________________ + write(*,*) '=========================' + write(*,*) 'Mesh is read : ', 'nod2D=', nod2D,' elem2D=', elem2D, ' nl=', nl + write(*,*) 'Min/max depth on mype: ', -zbar(minval(nlevels)),-zbar(maxval(nlevels)) + write(*,*) '3D tracer nodes on mype ', sum(nlevels_nod2d)-(elem2D) + write(*,*) '=========================' end subroutine find_levels ! @@ -875,7 +851,6 @@ end subroutine find_levels subroutine find_levels_cavity(mesh) use mod_mesh use g_config - use g_parsup implicit none integer :: nodes(3), elems(3) integer :: elem, node, nz, j, idx @@ -888,11 +863,8 @@ subroutine find_levels_cavity(mesh) type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" !___________________________________________________________________________ - if (mype==0) then - print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;1m' //' -->: compute elem,vertice cavity depth index '//achar(27)//'[0m' - end if - + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute elem,vertice cavity depth index '//achar(27)//'[0m' !___________________________________________________________________________ allocate(mesh%ulevels(elem2D)) ulevels => mesh%ulevels @@ -931,16 +903,13 @@ subroutine find_levels_cavity(mesh) ! write out cavity mesh files for vertice and elemental position of ! vertical cavity-ocean boundary before the iterative geometric adaption to ! eliminate isolated cells - if (mype==0) then - ! write out elemental cavity-ocean boundary level - file_name=trim(meshpath)//'cavity_elvls_raw.out' - open(20, file=file_name) - do elem=1,elem2D - write(20,*) ulevels(elem) - enddo - close(20) - endif - + ! write out elemental cavity-ocean boundary level + file_name=trim(meshpath)//'cavity_elvls_raw.out' + open(20, file=file_name) + do elem=1,elem2D + write(20,*) ulevels(elem) + enddo + close(20) !___________________________________________________________________________ ! Eliminate cells that have two cavity boundary faces --> should not be ! possible in FESOM2.0 @@ -1056,7 +1025,7 @@ subroutine find_levels_cavity(mesh) do elem=1,elem2D if (ulevels(elem)>=nlevels(elem)) then - if (mype==0) write(*,*) ' -[check]->: elem cavity depth deeper or equal bottom depth, elem=',elem + write(*,*) ' -[check]->: elem cavity depth deeper or equal bottom depth, elem=',elem exit_flag2 = 0 end if @@ -1073,20 +1042,20 @@ subroutine find_levels_cavity(mesh) do node=1,nod2D !___________________________________________________________________ if (ulevels_nod2D(node)>=nlevels_nod2D(node)) then - if (mype==0) write(*,*) ' -[check]->: vertice cavity depth deeper or equal bottom depth, node=', node + write(*,*) ' -[check]->: vertice cavity depth deeper or equal bottom depth, node=', node exit_flag2 = 0 end if !___________________________________________________________________ if (nlevels_nod2D(node)-ulevels_nod2D(node)<3) then - if (mype==0) write(*,*) ' -[check]->: less than three valid vertice ocean layers, node=', node + write(*,*) ' -[check]->: less than three valid vertice ocean layers, node=', node exit_flag2 = 0 end if end do ! --> do node=1,nod2D do elem=1,elem2D if (ulevels(elem)< maxval(ulevels_nod2D(elem2D_nodes(:,elem))) ) then - if (mype==0) write(*,*) ' -[check]->: found elem cavity shallower than its valid maximum cavity vertice depths, elem=', elem2d + write(*,*) ' -[check]->: found elem cavity shallower than its valid maximum cavity vertice depths, elem=', elem2d exit_flag2 = 0 end if end do ! --> do elem=1,elem2D @@ -1172,36 +1141,32 @@ subroutine find_levels_cavity(mesh) print *, achar(27)//'[31m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[7;31m'//' -[ERROR]->: Cavity geometry constrains did not converge !!! *\(>︿<)/*'//achar(27)//'[0m' write(*,*) - call par_ex(partit, 0) + stop else write(*,*) print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' print *, ' -['//achar(27)//'[7;32m'//' OK '//achar(27)//'[0m'//']->: Cavity geometry constrains did converge !!! *\(^o^)/*' - - write(*,*) end if !___________________________________________________________________________ ! write out cavity mesh files for vertice and elemental position of ! vertical cavity-ocean boundary - if (mype==0) then - ! write out elemental cavity-ocean boundary level - file_name=trim(meshpath)//'cavity_elvls.out' - open(20, file=file_name) - do elem=1,elem2D - write(20,*) ulevels(elem) - enddo - close(20) - - ! write out vertice cavity-ocean boundary level + yes/no cavity flag - file_name=trim(meshpath)//'cavity_nlvls.out' - open(20, file=file_name) - do node=1,nod2D - write(20,*) ulevels_nod2D(node) - enddo - close(20) - endif + ! write out elemental cavity-ocean boundary level + file_name=trim(meshpath)//'cavity_elvls.out' + open(20, file=file_name) + do elem=1,elem2D + write(20,*) ulevels(elem) + enddo + close(20) + ! write out vertice cavity-ocean boundary level + yes/no cavity flag + file_name=trim(meshpath)//'cavity_nlvls.out' + open(20, file=file_name) + do node=1,nod2D + write(20,*) ulevels_nod2D(node) + enddo + close(20) + end subroutine find_levels_cavity @@ -1256,7 +1221,6 @@ end subroutine elem_center SUBROUTINE find_elem_neighbors_ini(mesh) ! For each element three its element neighbors are found USE MOD_MESH -USE g_PARSUP implicit none integer :: elem, eledges(3), elem1, j, n, elnodes(3) type(t_mesh), intent(inout), target :: mesh @@ -1289,7 +1253,6 @@ SUBROUTINE find_elem_neighbors_ini(mesh) if (elem1<2) then write(*,*) 'find_elem_neighbors_ini:Insufficient number of neighbors ',elem write(*,*) 'find_elem_neighbors_ini:Elem neighbors ',elem_neighbors(:,elem) - if (mype==0) then write(*,*) '____________________________________________________________________' write(*,*) ' ERROR: The mesh you want to partitioning contains triangles that' write(*,*) ' have just one neighbor, this was OK for FESOM1.4 but not' @@ -1311,8 +1274,7 @@ SUBROUTINE find_elem_neighbors_ini(mesh) write(*,*) ' eliminate these triangles and the corresponding ' write(*,*) ' unconnected vertice and try to re-partitioning again ' write(*,*) '____________________________________________________________________' - end if - STOP + STOP end if END DO @@ -1414,10 +1376,10 @@ end subroutine stiff_mat_ini !=================================================================== ! Setup of communication arrays -subroutine communication_ini(mesh) +subroutine communication_ini(partit, mesh) use MOD_MESH USE g_CONFIG - USE g_PARSUP + USE MOD_PARTIT use omp_lib implicit none @@ -1425,13 +1387,15 @@ subroutine communication_ini(mesh) character*10 :: npes_string character(MAX_PATH) :: dist_mesh_dir LOGICAL :: L_EXISTS - type(t_mesh), intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" #include "associate_mesh_ini.h" +#include "associate_part_ass.h" !only my - if (mype==0) then - print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;1m' //' -->: compute communication arrays '//achar(27)//'[0m' - end if + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute communication arrays '//achar(27)//'[0m' + ! Create the distributed mesh subdirectory write(npes_string,"(I10)") npes dist_mesh_dir=trim(meshpath)//'dist_'//trim(ADJUSTL(npes_string))//'/' @@ -1451,45 +1415,49 @@ subroutine communication_ini(mesh) !$OMP DO do n = 0, npes-1 mype = n ! mype is threadprivate and must not be iterator - call communication_nodn(mesh) - call communication_elemn(mesh) - call save_dist_mesh(mesh) ! Write out communication file com_infoxxxxx.out + call communication_nodn(partit, mesh) + call communication_elemn(partit, mesh) + call save_dist_mesh(partit, mesh) ! Write out communication file com_infoxxxxx.out end do !$OMP END DO !$OMP END PARALLEL deallocate(mesh%elem_neighbors) deallocate(mesh%elem_edges) - deallocate(part) + deallocate(partit%part) write(*,*) 'Communication arrays have been set up' end subroutine communication_ini !================================================================= -subroutine set_par_support_ini(mesh) - use g_PARSUP +subroutine set_par_support_ini(partit, mesh) use iso_c_binding, only: idx_t=>C_INT32_T use MOD_MESH + use MOD_PARTIT use g_config implicit none - interface - subroutine check_partitioning(mesh) + subroutine check_partitioning(partit, mesh) use MOD_MESH - type(t_mesh), intent(inout) , target :: mesh + use MOD_PARTIT + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine check_partitioning end interface integer :: n, j, k, nini, nend, ierr integer(idx_t) :: np(10) - type(t_mesh), intent(inout), target :: mesh - + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit interface - subroutine partit(n,ptr,adj,wgt,np,part) bind(C) + subroutine do_partit(n,ptr,adj,wgt,np,part) bind(C) use iso_c_binding, only: idx_t=>C_INT32_T integer(idx_t), intent(in) :: n, ptr(*), adj(*), wgt(*), np(*) integer(idx_t), intent(out) :: part(*) - end subroutine partit + end subroutine do_partit end interface + +#include "associate_part_def.h" #include "associate_mesh_ini.h" +#include "associate_part_ass.h" if (mype==0) then print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' @@ -1499,7 +1467,7 @@ end subroutine partit ! Construct partitioning vector if (n_levels<1 .OR. n_levels>10) then print *,'Number of hierarchic partition levels is out of range [1-10]! Aborting...' - call MPI_ABORT( MPI_COMM_FESOM, 1 ) + stop end if np(:) = n_part(:) ! Number of partitions on each hierarchy level @@ -1511,7 +1479,8 @@ end subroutine partit np(n_levels+1) = 0 end if - allocate(part(nod2D)) + allocate(partit%part(nod2D)) + part=>partit%part part=0 npes = PRODUCT(np(1:n_levels)) @@ -1521,10 +1490,12 @@ end subroutine partit end if write(*,*) 'Calling partit for npes=', np - call partit(ssh_stiff%dim, ssh_stiff%rowptr, ssh_stiff%colind, & + call do_partit(ssh_stiff%dim, ssh_stiff%rowptr, ssh_stiff%colind, & nlevels_nod2D, np, part) - call check_partitioning(mesh) +write(*,*) 'DONE' +write(*,*) size(partit%part) + call check_partitioning(partit, mesh) write(*,*) 'Partitioning is done.' @@ -1536,7 +1507,7 @@ end subroutine partit deallocate(mesh%nlevels_nod2D) end subroutine set_par_support_ini !======================================================================= -subroutine check_partitioning(mesh) +subroutine check_partitioning(partit, mesh) ! In general, METIS 5 has several advantages compared to METIS 4, e.g., ! * neighbouring tasks get neighbouring partitions (important for multicore computers!) @@ -1550,26 +1521,30 @@ subroutine check_partitioning(mesh) ! trying not to spoil the load balance. use MOD_MESH - use g_PARSUP + use MOD_PARTIT + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh integer :: i, j, k, n, n_iso, n_iter, is, ie, kmax, np, cnt - integer :: nod_per_partition(2,0:npes-1) + integer :: nod_per_partition(2,0:partit%npes-1) integer :: max_nod_per_part(2), min_nod_per_part(2) integer :: average_nod_per_part(2), node_neighb_part(100) logical :: already_counted, found_part integer :: max_adjacent_nodes integer, allocatable :: ne_part(:), ne_part_num(:), ne_part_load(:,:) - type(t_mesh), intent(inout), target :: mesh +#include "associate_part_def.h" #include "associate_mesh_ini.h" - +#include "associate_part_ass.h" !just for partit%part + if (mype==0) then print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[7;1m' //' -->: check partitioning '//achar(27)//'[0m' - end if + end if + ! Check load balancing - do i=0,npes-1 + do i=0, npes-1 nod_per_partition(1,i) = count(part(:) == i) - nod_per_partition(2,i) = sum(nlevels_nod2D,part(:) == i) + nod_per_partition(2,i) = sum(nlevels_nod2D, part(:) == i) enddo min_nod_per_part(1) = minval( nod_per_partition(1,:)) diff --git a/src/gen_comm.F90 b/src/gen_comm.F90 index 82d9f0afa..26cc0c613 100755 --- a/src/gen_comm.F90 +++ b/src/gen_comm.F90 @@ -13,25 +13,29 @@ subroutine communication_nodn(partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit integer :: n, np, prank, el, r_count, s_count, q, i, j, nod, k, l - integer :: num_send(0:npes-1), num_recv(0:npes-1), nd_count + integer :: num_send(0:partit%npes-1), num_recv(0:partit%npes-1), nd_count integer, allocatable :: recv_from_pe(:), send_to_pes(:,:) logical :: max_laendereck_too_small=.false. integer :: IERR #include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ini.h" +#include "associate_part_ass.h" !part only ! Assume we have 2D partitioning vector in part. Find communication rules ! Reduce allocation: find all neighboring PE - nd_count = count(part(1:nod2d) == mype) +write(*,*) nod2d +write(*,*) MAX_LAENDERECK +write(*,*) nd_count +write(*,*) allocated(partit%myList_nod2D) +write(*,*) partit%mype allocate(recv_from_pe(nod2d), send_to_pes(MAX_LAENDERECK,nd_count), & - myList_nod2D(nd_count), STAT=IERR) + partit%myList_nod2D(nd_count), STAT=IERR) if (IERR /= 0) then write (*,*) 'Could not allocate arrays in communication_nodn' stop endif + myList_nod2D=>partit%myList_nod2D nd_count = 0 do n=1,nod2D ! Checks if element el has nodes that belong to different partitions @@ -166,7 +170,6 @@ subroutine communication_nodn(partit, mesh) write (*,*) 'Could not allocate arrays in communication_nodn' stop endif - com_nod2D=>partit%com_nod2D do np = 1,com_nod2D%rPEnum prank = com_nod2D%rPE(np) @@ -229,12 +232,11 @@ subroutine communication_elemn(partit, mesh) logical :: max_laendereck_too_small=.false. integer :: n, k, ep, np, prank, el, nod integer :: p, q, j, elem, i, l, r_count, s_count, el_count - integer :: num_send(0:npes-1), num_recv(0:npes-1) + integer :: num_send(0:partit%npes-1), num_recv(0:partit%npes-1) integer :: IERR #include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ini.h" +#include "associate_part_ass.h" !part only ! Assume we have 2D partitioning vector in part. Find communication ! rules. An elem is external to element n if neither of its nodes ! belongs to PE, but it is among the neighbors. Element n belongs to PE if @@ -248,7 +250,9 @@ subroutine communication_elemn(partit, mesh) !=========================================== ! com_elem2D !=========================================== - + com_elem2D =>partit%com_elem2D + com_elem2D_full=>partit%com_elem2D_full + allocate(recv_from_pe(elem2D), STAT=IERR) if (IERR /= 0) then write (*,*) 'Could not allocate arrays in communication_elemn' @@ -371,7 +375,6 @@ subroutine communication_elemn(partit, mesh) r_count = 0 eDim_elem2D=com_elem2D%rptr(com_elem2D%rPEnum+1)-1 allocate(partit%com_elem2D%rlist(eDim_elem2D)) - com_elem2D=>partit%com_elem2D !not needed? do np = 1,com_elem2D%rPEnum prank = com_elem2D%rPE(np) do el = 1, elem2D @@ -384,7 +387,6 @@ subroutine communication_elemn(partit, mesh) s_count = 0 allocate(partit%com_elem2D%slist(com_elem2D%sptr(com_elem2D%sPEnum+1)-1)) - com_elem2D=>partit%com_elem2D! not needed? do np = 1,com_elem2D%sPEnum prank = com_elem2D%sPE(np) do l = 1, el_count @@ -498,7 +500,6 @@ subroutine communication_elemn(partit, mesh) r_count = 0 allocate(partit%com_elem2D_full%rlist(com_elem2D_full%rptr(com_elem2D_full%rPEnum+1)-1)) - com_elem2D_full=>partit%com_elem2D_full !not needed? do np = 1,com_elem2D_full%rPEnum prank = com_elem2D_full%rPE(np) do el = 1, elem2D @@ -511,7 +512,6 @@ subroutine communication_elemn(partit, mesh) s_count = 0 allocate(com_elem2D_full%slist(com_elem2D_full%sptr(com_elem2D_full%sPEnum+1)-1)) - com_elem2D_full=>partit%com_elem2D_full !not needed? do np = 1,com_elem2D_full%sPEnum prank = com_elem2D_full%sPE(np) do l = 1, el_count diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 885538539..1d34ae0ef 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -931,9 +931,7 @@ subroutine do_output_callback(entry_index) entry%p_partit%mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) call write_mean(entry, entry_index) -write(*,*) 1111111, entry%p_partit%mype, entry%root_rank if(entry%p_partit%mype == entry%root_rank) call assert_nf( nf_sync(entry%ncid), __LINE__ ) ! flush the file to disk after each write -write(*,*) 2222222, entry%p_partit%mype, entry%root_rank end subroutine diff --git a/src/oce_local.F90 b/src/oce_local.F90 index 0b9d1ac21..eea0ec10b 100755 --- a/src/oce_local.F90 +++ b/src/oce_local.F90 @@ -24,8 +24,8 @@ SUBROUTINE com_global2local(partit, mesh) INTEGER, ALLOCATABLE, DIMENSION(:) :: temp #include "associate_part_def.h" -#include "associate_part_ass.h" #include "associate_mesh_ini.h" +#include "associate_part_ass.h" allocate(temp(max(nod2D, elem2D))) ! ========= @@ -143,12 +143,11 @@ SUBROUTINE save_dist_mesh(partit, mesh) integer n1, n2, flag, eledges(4) #include "associate_part_def.h" -#include "associate_mesh_def.h" +#include "associate_mesh_ini.h" +#include "associate_part_ass.h" -!!$ allocate(temp(nod2D)) ! serves for mapping -!!$ allocate(ncount(npes+1)) write(mype_string,'(i5.5)') mype - write(npes_string,"(I10)") npes + write(npes_string,"(I10)") npes dist_mesh_dir=trim(meshpath)//'dist_'//trim(ADJUSTL(npes_string))//'/' ! ============================== @@ -198,7 +197,8 @@ SUBROUTINE save_dist_mesh(partit, mesh) write(fileID,*) myList_elem2D(1:myDim_elem2D), com_elem2D%rlist(1:eDim_elem2D), temp(1:eXDim_elem2D) deallocate(temp) - allocate(myList_edge2D(4*myDim_elem2D)) + allocate(partit%myList_edge2D(4*myDim_elem2D)) + myList_edge2D=>partit%myList_edge2D counter = 0 do n=1, edge2D do q=1,2 @@ -230,14 +230,14 @@ SUBROUTINE save_dist_mesh(partit, mesh) write(fileID,*) myDim_edge2D write(fileID,*) eDim_edge2D write(fileID,*) myList_edge2D(1:myDim_edge2D +eDim_edge2D) - deallocate(myList_edge2D) + deallocate(partit%myList_edge2D) close(fileID) ! ========================= ! communication information ! ========================= - call com_global2local(mesh) ! Do not call this subroutine earlier, global numbering is needed! + call com_global2local(partit, mesh) ! Do not call this subroutine earlier, global numbering is needed! file_name=trim(dist_mesh_dir)//'com_info'//trim(mype_string)//'.out' fileID=103+mype !skip unit range 100--102 open(fileID, file=file_name) @@ -250,13 +250,13 @@ SUBROUTINE save_dist_mesh(partit, mesh) write(fileID,*) com_nod2D%sPE(1:com_nod2D%sPEnum) write(fileID,*) com_nod2D%sptr(1:com_nod2D%sPEnum+1) write(fileID,*) com_nod2D%slist - deallocate(myList_nod2D) + deallocate(partit%myList_nod2D) !!$ deallocate(com_nod2D%rPE) !!$ deallocate(com_nod2D%rptr) - deallocate(com_nod2D%rlist) + deallocate(partit%com_nod2D%rlist) !!$ deallocate(com_nod2D%sPE) !!$ deallocate(com_nod2D%sptr) - deallocate(com_nod2D%slist) + deallocate(partit%com_nod2D%slist) write(fileID,*) com_elem2D%rPEnum write(fileID,*) com_elem2D%rPE(1:com_elem2D%rPEnum) @@ -266,13 +266,13 @@ SUBROUTINE save_dist_mesh(partit, mesh) write(fileID,*) com_elem2D%sPE(1:com_elem2D%sPEnum) write(fileID,*) com_elem2D%sptr(1:com_elem2D%sPEnum+1) write(fileID,*) com_elem2D%slist - deallocate(myList_elem2D) + deallocate(partit%myList_elem2D) !!$ deallocate(com_elem2D%rPE) !!$ deallocate(com_elem2D%rptr) - deallocate(com_elem2D%rlist) + deallocate(partit%com_elem2D%rlist) !!$ deallocate(com_elem2D%sPE) !!$ deallocate(com_elem2D%sptr) - deallocate(com_elem2D%slist) + deallocate(partit%com_elem2D%slist) write(fileID,*) com_elem2D_full%rPEnum write(fileID,*) com_elem2D_full%rPE(1:com_elem2D_full%rPEnum) @@ -284,10 +284,10 @@ SUBROUTINE save_dist_mesh(partit, mesh) write(fileID,*) com_elem2D_full%slist !!$ deallocate(com_elem2D_full%rPE) !!$ deallocate(com_elem2D_full%rptr) - deallocate(com_elem2D_full%rlist) + deallocate(partit%com_elem2D_full%rlist) !!$ deallocate(com_elem2D_full%sPE) !!$ deallocate(com_elem2D_full%sptr) - deallocate(com_elem2D_full%slist) + deallocate(partit%com_elem2D_full%slist) close(fileID) ! ================================ ! mapping ( PE contiguous 2D numbering) From 492313b2875a6c6f2b0e9aaf960d93ad16a179ee Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 11 Oct 2021 17:12:12 +0200 Subject: [PATCH 397/909] icepack adopted to the modifications made in the new version of the code --- src/fvom_main.F90 | 8 +- src/gen_events.F90 | 2 +- src/gen_halo_exchange.F90 | 14 +-- src/gen_ic3d.F90 | 4 +- src/gen_interpolation.F90 | 4 +- src/gen_model_setup.F90 | 4 +- src/gen_modules_cvmix_idemix.F90 | 4 +- src/gen_modules_cvmix_kpp.F90 | 2 +- src/gen_modules_cvmix_tidal.F90 | 2 +- src/gen_modules_partitioning.F90 | 49 +++++----- src/gen_modules_read_NetCDF.F90 | 6 +- src/gen_surface_forcing.F90 | 10 +- src/ice_setup_step.F90 | 2 +- src/ice_thermo_cpl.F90 | 67 ++++---------- src/ice_thermo_oce.F90 | 15 +-- src/{ => icepack_drivers}/associate_mesh.h | 58 ++++-------- src/icepack_drivers/icedrv_advection.F90 | 51 ++++------- src/icepack_drivers/icedrv_io.F90 | 102 ++++++++++----------- src/icepack_drivers/icedrv_main.F90 | 9 +- src/icepack_drivers/icedrv_set.F90 | 23 +++-- src/icepack_drivers/icedrv_step.F90 | 3 +- src/icepack_drivers/icedrv_system.F90 | 19 +++- src/icepack_drivers/icedrv_transfer.F90 | 5 +- src/io_blowup.F90 | 4 +- src/io_meandata.F90 | 8 +- src/io_restart.F90 | 18 ++-- src/oce_adv_tra_driver.F90 | 4 +- src/oce_ale.F90 | 4 +- src/oce_ale_pressure_bv.F90 | 38 ++++---- src/oce_ale_tracer.F90 | 2 +- src/oce_ale_vel_rhs.F90 | 2 +- src/oce_dyn.F90 | 2 +- src/oce_mesh.F90 | 22 ++--- src/oce_setup_step.F90 | 6 +- src/write_step_info.F90 | 2 +- 35 files changed, 261 insertions(+), 314 deletions(-) rename src/{ => icepack_drivers}/associate_mesh.h (80%) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 8ead2c9bc..a9cb7142f 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -150,9 +150,9 @@ program main ! Setup icepack !===================== if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' - call set_icepack + call set_icepack(partit) call alloc_icepack - call init_icepack(tracers%data(1), partit, mesh) + call init_icepack(tracers%data(1), mesh) if (mype==0) write(*,*) 'Icepack: setup complete' #endif call clock_newyear ! check if it is a new year @@ -218,7 +218,7 @@ program main ! read (mype+300) tracers_copy ! close (mype+300) -!call par_ex(partit) +!call par_ex(partit%MPI_COMM_FESOM, partit%mype) !stop ! ! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value @@ -381,6 +381,6 @@ program main write(*,*) end if ! call clock_finish - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end program main diff --git a/src/gen_events.F90 b/src/gen_events.F90 index 52d5decea..8a7f0e318 100644 --- a/src/gen_events.F90 +++ b/src/gen_events.F90 @@ -100,7 +100,7 @@ subroutine handle_err(errcode, partit) integer :: errcode write(*,*) 'Error: ', nf_strerror(errcode) - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) stop end subroutine handle_err ! diff --git a/src/gen_halo_exchange.F90 b/src/gen_halo_exchange.F90 index ee3bfb9f4..c1dbd3eac 100755 --- a/src/gen_halo_exchange.F90 +++ b/src/gen_halo_exchange.F90 @@ -354,7 +354,7 @@ subroutine exchange_nod3D_begin(nod_array3D, partit) print *,'Subroutine exchange_nod3D not implemented for',nl1,'layers.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif ! Check MPI point-to-point communication for consistency @@ -422,7 +422,7 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) print *,'Subroutine exchange_nod3D not implemented for',nl1,'layers.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif nl2 = ubound(nod2_array3D,1) @@ -431,7 +431,7 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) print *,'Subroutine exchange_nod3D not implemented for',nl2,'layers.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif #ifdef DEBUG @@ -503,7 +503,7 @@ subroutine exchange_nod3D_n_begin(nod_array3D, partit) print *,nl1,'layers and / or ',n_val,'values per element.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif endif sn=com_nod2D%sPEnum @@ -647,7 +647,7 @@ subroutine exchange_elem3D_begin(elem_array3D, partit) END DO else if (mype==0) print *,'Sorry, no MPI datatype prepared for',nl1,'values per element (exchange_elem3D)' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif com_elem2D%nreq = rn+sn @@ -700,7 +700,7 @@ subroutine exchange_elem3D_begin(elem_array3D, partit) END DO else if (mype==0) print *,'Sorry, no MPI datatype prepared for',nl1,'values per element (exchange_elem3D)' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif com_elem2D_full%nreq = rn+sn @@ -759,7 +759,7 @@ subroutine exchange_elem3D_n_begin(elem_array3D, partit) print *,nl1,'layers and / or ',n_val,'values per element.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif endif diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 0e0c9f9b8..405f0fa4c 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -501,7 +501,7 @@ SUBROUTINE do_ic3d(tracers, partit, mesh) elseif (current_tracer==tracers%num_tracers) then if (partit%mype==0) write(*,*) "idlist contains tracer which is not listed in tracer_id!" if (partit%mype==0) write(*,*) "check your namelists!" - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if END DO @@ -575,7 +575,7 @@ SUBROUTINE check_nferr(iost,fname, partit) integer, intent(in) :: iost if (iost .ne. NF_NOERR) then write(*,*) 'ERROR: I/O status= "',trim(nf_strerror(iost)),'";',iost,' file= ', trim(fname) - call par_ex (partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif END SUBROUTINE diff --git a/src/gen_interpolation.F90 b/src/gen_interpolation.F90 index b4db77324..e2adbd26e 100755 --- a/src/gen_interpolation.F90 +++ b/src/gen_interpolation.F90 @@ -49,7 +49,7 @@ subroutine interp_2d_field_v2(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_r if(lon_reg(1)<0.0 .or. lon_reg(num_lon_reg)>360.) then write(*,*) 'Error in 2D interpolation!' write(*,*) 'The regular grid is not in the proper longitude range.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) stop end if @@ -185,7 +185,7 @@ subroutine interp_2d_field(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, if(lon_reg(1)<0.0_WP .or. lon_reg(num_lon_reg)>360._WP) then write(*,*) 'Error in 2D interpolation!' write(*,*) 'The regular grid is not in the proper longitude range.' - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index b7ded86ba..fd4d3ebc5 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -103,7 +103,7 @@ subroutine setup_model(partit) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) endif @@ -154,7 +154,7 @@ subroutine get_run_steps(nsteps, partit) else write(*,*) 'Run length unit ', run_length_unit, ' is not defined.' write(*,*) 'Please check and update the code.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) stop end if diff --git a/src/gen_modules_cvmix_idemix.F90 b/src/gen_modules_cvmix_idemix.F90 index a949cd2d0..a49b77585 100644 --- a/src/gen_modules_cvmix_idemix.F90 +++ b/src/gen_modules_cvmix_idemix.F90 @@ -237,7 +237,7 @@ subroutine init_cvmix_idemix(partit, mesh) write(*,*) ' idemix_botforc_file' write(*,*) '____________________________________________________________________' end if - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if !_______________________________________________________________________ @@ -260,7 +260,7 @@ subroutine init_cvmix_idemix(partit, mesh) write(*,*) ' idemix_botforc_file' write(*,*) '____________________________________________________________________' end if - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if !_______________________________________________________________________ diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index c0e0dc6c6..81c35cfdd 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -559,7 +559,7 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) else write(*,*) " --> Error: this kpp_internalmix scheme is not supported" write(*,*) " for the mixing below the OBL, either KPP or PP !" - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end if !___________________________________________________________________ diff --git a/src/gen_modules_cvmix_tidal.F90 b/src/gen_modules_cvmix_tidal.F90 index 8ee7937d8..162ea6ec4 100644 --- a/src/gen_modules_cvmix_tidal.F90 +++ b/src/gen_modules_cvmix_tidal.F90 @@ -153,7 +153,7 @@ subroutine init_cvmix_tidal(partit, mesh) write(*,*) ' --> check your namelist.cvmix, tidal_botforc_file & ' write(*,*) '____________________________________________________________________' end if - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if !_______________________________________________________________________ diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 3740ad0fa..d99a1e667 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -1,10 +1,11 @@ module mod_parsup interface - subroutine par_ex(partit, abort) + subroutine par_ex(COMM, mype, abort) USE MOD_PARTIT implicit none - type(t_partit), intent(inout), target :: partit - integer,optional :: abort + integer, intent(in) :: COMM + integer, intent(in) :: mype + integer, optional, intent(in) :: abort end subroutine end interface end module mod_parsup @@ -75,7 +76,7 @@ subroutine par_init(partit) ! initializes MPI end if end subroutine par_init !================================================================= -subroutine par_ex(partit, abort) ! finalizes MPI +subroutine par_ex(COMM, mype, abort) ! finalizes MPI USE MOD_PARTIT #ifndef __oifs !For standalone and coupled ECHAM runs @@ -83,41 +84,43 @@ subroutine par_ex(partit, abort) ! finalizes MPI use mod_prism #endif implicit none - type(t_partit), intent(inout), target :: partit - integer,optional :: abort + integer, intent(in) :: COMM + integer, intent(in) :: mype + integer, optional, intent(in) :: abort + integer :: error #ifndef __oasis if (present(abort)) then - if (partit%mype==0) write(*,*) 'Run finished unexpectedly!' - call MPI_ABORT(partit%MPI_COMM_FESOM, 1 ) + if (mype==0) write(*,*) 'Run finished unexpectedly!' + call MPI_ABORT(COMM, 1 ) else - call MPI_Barrier(partit%MPI_COMM_FESOM,partit%MPIerr) - call MPI_Finalize(partit%MPIerr) + call MPI_Barrier(COMM, error) + call MPI_Finalize(error) endif #else if (.not. present(abort)) then - if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' - call MPI_Barrier(MPI_COMM_WORLD, partit%MPIerr) + if (mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' + call MPI_Barrier(MPI_COMM_WORLD, error) end if - call prism_terminate_proto(MPIerr) - if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' - call MPI_Barrier(MPI_COMM_WORLD, partit%MPIerr) + call prism_terminate_proto(error) + if (mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' + call MPI_Barrier(MPI_COMM_WORLD, error) - if (partit%mype==0) print *, 'FESOM calls MPI_Finalize' - call MPI_Finalize(MPIerr) + if (mype==0) print *, 'FESOM calls MPI_Finalize' + call MPI_Finalize(error) #endif - if (partit%mype==0) print *, 'fesom should stop with exit status = 0' + if (mype==0) print *, 'fesom should stop with exit status = 0' #endif #if defined (__oifs) !OIFS coupling doesnt call prism_terminate_proto and uses MPI_COMM_FESOM implicit none integer,optional :: abort if (present(abort)) then - if (partit%mype==0) write(*,*) 'Run finished unexpectedly!' - call MPI_ABORT( partit%MPI_COMM_FESOM, 1 ) + if (mype==0) write(*,*) 'Run finished unexpectedly!' + call MPI_ABORT(COMM, 1 ) else - call MPI_Barrier(partit%MPI_COMM_FESOM,partit%MPIerr) - call MPI_Finalize(partit%MPIerr) + call MPI_Barrier(COMM, error) + call MPI_Finalize(error) endif #endif @@ -516,7 +519,7 @@ subroutine status_check(partit) call MPI_Allreduce (partit%pe_status, res, 1, MPI_INTEGER, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) if (res /= 0 ) then if (partit%mype==0) write(*,*) 'Something Broke. Flushing and stopping...' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif end subroutine status_check diff --git a/src/gen_modules_read_NetCDF.F90 b/src/gen_modules_read_NetCDF.F90 index 40491f84a..3213e808d 100755 --- a/src/gen_modules_read_NetCDF.F90 +++ b/src/gen_modules_read_NetCDF.F90 @@ -53,7 +53,7 @@ subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, part if (status.ne.nf_noerr)then print*,'ERROR: CANNOT READ runoff FILE CORRECTLY !!!!!' print*,'Error in opening netcdf file'//file - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif @@ -202,7 +202,7 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, partit if (status.ne.nf_noerr)then print*,'ERROR: CANNOT READ runoff FILE CORRECTLY !!!!!' print*,'Error in opening netcdf file'//file - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif @@ -319,7 +319,7 @@ subroutine read_2ddata_on_grid_NetCDF(file, vari, itime, model_2Darray, partit, if (status.ne.nf_noerr)then print*,'ERROR: CANNOT READ runoff FILE CORRECTLY !!!!!' print*,'Error in opening netcdf file'//file - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index 55570ee37..508524526 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -386,7 +386,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) write(*,*) ' message block in gen_surface_forcing.F90.' write(*,*) '____________________________________________________________' print *, achar(27)//'[0m' - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if elseif ((trim(flf%calendar).eq.'julian') .or. & (trim(flf%calendar).eq.'gregorian') .or. & @@ -407,7 +407,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) write(*,*) ' gen_surface_forcing.F90' write(*,*) '____________________________________________________________' print *, achar(27)//'[0m' - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if else print *, achar(27)//'[31m' @@ -426,7 +426,7 @@ SUBROUTINE nc_readTimeGrid(flf, partit) write(*,*) ' example with ncdump -h forcing_file.nc ' write(*,*) '____________________________________________________________' print *, achar(27)//'[0m' - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if end if @@ -921,7 +921,7 @@ SUBROUTINE sbc_ini(partit, mesh) if (mype==0) WRITE(*,*) ' file : ', 'namelist_bc.nml',' open ok' else if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist_bc.nml',' ; iostat=',iost - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif READ( nm_sbc_unit, nml=nam_sbc, iostat=iost ) @@ -1268,7 +1268,7 @@ SUBROUTINE check_nferr(iost,fname, partit) if (iost .ne. NF_NOERR) then write(*,*) 'ERROR: I/O status= "',trim(nf_strerror(iost)),'";',iost,' file= ',fname - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif END SUBROUTINE check_nferr diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index e6bc76fc4..fb0bddf10 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -242,7 +242,7 @@ subroutine ice_timestep(step, partit, mesh) call EVPdynamics_a(partit, mesh) CASE DEFAULT if (mype==0) write(*,*) 'a non existing EVP scheme specified!' - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop END SELECT diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 87b77bde0..2c5fc650e 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -1,5 +1,5 @@ #if defined (__oasis) -subroutine thermodynamics(mesh) +subroutine thermodynamics(partit, mesh) !=================================================================== ! @@ -14,53 +14,17 @@ subroutine thermodynamics(mesh) ! Wolfgang Dorn (AWI), Oct-2012 (h0min adapted) ! !=================================================================== - !---- variables from oce_modules.F90 -#if 0 - use o_param, only: ref_sss, ref_sss_local -#ifdef use_fullfreesurf - use o_array, only: real_salt_flux -#endif - use g_parsup, only: myDim_nod2D, eDim_nod2D -#ifdef use_cavity -#else -#endif - - !---- variables from ice_modules.F90 - use i_dyn_parms, only: Cd_oce_ice - use i_therm_parms, only: rhowat, rhoice, rhosno, cc, cl, con, consn, Sice -#if defined (__oifs) - use i_array, only: a_ice, m_ice, m_snow, u_ice, v_ice, u_w, v_w & - , fresh_wa_flux, net_heat_flux, oce_heat_flux, ice_heat_flux, enthalpyoffuse, S_oc_array, T_oc_array -#else - use i_array, only: a_ice, m_ice, m_snow, u_ice, v_ice, u_w, v_w & - , fresh_wa_flux, net_heat_flux, oce_heat_flux, ice_heat_flux, S_oc_array, T_oc_array -#endif - - !---- variables from gen_modules_config.F90 - use g_config, only: dt - - !---- variables from gen_modules_forcing.F90 -#if defined (__oifs) - use g_forcing_arrays, only: shortwave, evap_no_ifrac, sublimation & - , prec_rain, prec_snow, runoff, evaporation, thdgr, thdgrsn, flice & - , enthalpyoffuse -#else - use g_forcing_arrays, only: shortwave, evap_no_ifrac, sublimation & - , prec_rain, prec_snow, runoff, evaporation, thdgr, thdgrsn, flice -#endif - !---- variables from gen_modules_rotate_grid.F90 - use g_rotate_grid, only: r2g -#endif use o_param - use mod_mesh + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use i_therm_param use i_param use i_arrays use g_config use g_forcing_param use g_forcing_arrays - use g_parsup use g_comm_auto use g_rotate_grid implicit none @@ -85,11 +49,20 @@ subroutine thermodynamics(mesh) real(kind=WP) :: geolon, geolat !---- minimum and maximum of the lead closing parameter real(kind=WP) :: h0min = 0.5, h0max = 1.5 - type(t_mesh), intent(in) , target :: mesh real(kind=WP), parameter :: Aimin = 0.001, himin = 0.005 -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + + integer, pointer :: myDim_nod2D, eDim_nod2D + integer, dimension(:), pointer :: ulevels_nod2D + real(kind=WP), dimension(:,:),pointer :: geo_coord_nod2D + + myDim_nod2d=>partit%myDim_nod2D + eDim_nod2D =>partit%eDim_nod2D + ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D + geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D rsss = ref_sss @@ -132,14 +105,14 @@ subroutine thermodynamics(mesh) #if defined (__oifs) !---- different lead closing parameter for NH and SH - call r2g(geolon, geolat, coord_nod2d(1,inod), coord_nod2d(2,inod)) - if (geolat.lt.0.) then - h0min = 1.0 - h0max = 1.0 - else + if (geo_coord_nod2D(2, inod)>0) then h0min = 0.3 h0max = 0.3 + else + h0min = 1.0 + h0max = 1.0 endif + !---- For AWI-CM3 we calculate ice surface temp and albedo in fesom, ! then send those to OpenIFS where they are used to calucate the ! energy fluxes ---! diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 5a4b82043..b2f196770 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -110,11 +110,14 @@ subroutine thermodynamics(partit, mesh) real(kind=WP), allocatable :: ustar_aux(:) real(kind=WP) lid_clo -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - + integer, pointer :: myDim_nod2D, eDim_nod2D + integer, dimension(:), pointer :: ulevels_nod2D + real(kind=WP), dimension(:,:),pointer :: geo_coord_nod2D + + myDim_nod2d=>partit%myDim_nod2D + eDim_nod2D =>partit%eDim_nod2D + ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D + geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D rsss=ref_sss ! u_ice and v_ice are at nodes @@ -132,7 +135,7 @@ subroutine thermodynamics(partit, mesh) (v_ice(i)-v_w(i))**2) ustar_aux(i)=sqrt(ustar*Cd_oce_ice) END DO - call exchange_nod(ustar_aux, partit) !TODO Why do we need it? + call exchange_nod(ustar_aux, partit) ! ================ ! end: friction velocity ! ================ diff --git a/src/associate_mesh.h b/src/icepack_drivers/associate_mesh.h similarity index 80% rename from src/associate_mesh.h rename to src/icepack_drivers/associate_mesh.h index 9edc60383..3d1b2edb8 100644 --- a/src/associate_mesh.h +++ b/src/icepack_drivers/associate_mesh.h @@ -1,11 +1,14 @@ -integer , pointer :: nod2D -integer , pointer :: elem2D -integer , pointer :: edge2D +integer , pointer :: nod2D, myDim_nod2D, eDim_nod2D +integer , pointer :: elem2D, myDim_elem2D, eDim_elem2D, eXDim_elem2D +integer , pointer :: edge2D, myDim_edge2D, eDim_edge2D integer , pointer :: edge2D_in real(kind=WP) , pointer :: ocean_area real(kind=WP) , pointer :: ocean_areawithcav integer , pointer :: nl integer , pointer :: nn_size + + + real(kind=WP), dimension(:,:), pointer :: coord_nod2D, geo_coord_nod2D integer, dimension(:,:) , pointer :: elem2D_nodes integer, dimension(:,:) , pointer :: edges @@ -60,45 +63,16 @@ ocean_area => mesh%ocean_area ocean_areawithcav => mesh%ocean_areawithcav nl => mesh%nl nn_size => mesh%nn_size -!!$coord_nod2D => mesh%coord_nod2D -!!$geo_coord_nod2D => mesh%geo_coord_nod2D -!!$elem2D_nodes => mesh%elem2D_nodes -!!$edges => mesh%edges -!!$edge_tri => mesh%edge_tri -!!$elem_edges => mesh%elem_edges -!!$elem_area => mesh%elem_area -!!$node_area => mesh%node_area -!!$edge_dxdy => mesh%edge_dxdy -!!$edge_cross_dxdy => mesh%edge_cross_dxdy -!!$elem_cos => mesh%elem_cos -!!$metric_factor => mesh%metric_factor -!!$elem_neighbors => mesh%elem_neighbors -!!$nod_in_elem2D => mesh%nod_in_elem2D -!!$x_corners => mesh%x_corners -!!$y_corners => mesh%y_corners -!!$nod_in_elem2D_num => mesh%nod_in_elem2D_num -!!$depth => mesh%depth -!!$gradient_vec => mesh%gradient_vec -!!$gradient_sca => mesh%gradient_sca -!!$bc_index_nod2D => mesh%bc_index_nod2D -!!$zbar => mesh%zbar -!!$Z => mesh%Z -!!$elem_depth => mesh%elem_depth -!!$nlevels => mesh%nlevels -!!$nlevels_nod2D => mesh%nlevels_nod2D -!!$nlevels_nod2D_min => mesh%nlevels_nod2D_min -!!$area => mesh%area -!!$area2 => mesh%area2 -!!$area_inv => mesh%area_inv -!!$mesh_resolution => mesh%mesh_resolution -!!$ssh_stiff => mesh%ssh_stiff -!!$cavity_flag => mesh%cavity_flag -!!$cavity_lev_nod2D => mesh%cavity_lev_nod2D -!!$cavity_lev_elem2D => mesh%cavity_lev_elem2D -!!$cavity_depth => mesh%cavity_depth -!!$ulevels => mesh%ulevels -!!$ulevels_nod2D => mesh%ulevels_nod2D -!!$ulevels_nod2D_max => mesh%ulevels_nod2D_max + + +myDim_nod2D => p_partit%myDim_nod2D +eDim_nod2D => p_partit%eDim_nod2D +myDim_elem2D => p_partit%myDim_elem2D +eDim_elem2D => p_partit%eDim_elem2D +eXDim_elem2D => p_partit%eXDim_elem2D +myDim_edge2D => p_partit%myDim_edge2D +eDim_edge2D => p_partit%eDim_edge2D + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D diff --git a/src/icepack_drivers/icedrv_advection.F90 b/src/icepack_drivers/icedrv_advection.F90 index 35d4a8adf..554b0c51e 100644 --- a/src/icepack_drivers/icedrv_advection.F90 +++ b/src/icepack_drivers/icedrv_advection.F90 @@ -39,7 +39,6 @@ subroutine tg_rhs_icepack(mesh, trc) use mod_mesh use i_param - use g_parsup use o_param use g_config @@ -57,7 +56,7 @@ subroutine tg_rhs_icepack(mesh, trc) integer(kind=int_kind) :: n, q, row, & elem, elnodes(3) -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Taylor-Galerkin (Lax-Wendroff) rhs @@ -100,14 +99,13 @@ end subroutine tg_rhs_icepack module subroutine init_advection_icepack(mesh) use o_param - use g_parsup use mod_mesh implicit none type(t_mesh), intent(in), target :: mesh -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Initialization of arrays necessary to implement FCT algorithm allocate(trl(nx)) ! low-order solutions @@ -141,7 +139,6 @@ subroutine fill_mass_matrix_icepack(mesh) use mod_mesh use i_param - use g_parsup implicit none @@ -152,7 +149,7 @@ subroutine fill_mass_matrix_icepack(mesh) integer(kind=int_kind) :: flag=0 ,iflag=0 type(t_mesh), intent(in), target :: mesh -#include "../associate_mesh.h" +#include "associate_mesh.h" allocate(col_pos(nx)) @@ -215,9 +212,7 @@ subroutine solve_low_order_icepack(mesh, trc) ! mass matrix on the lhs is replaced with the lumped one. use mod_mesh - use i_param - use g_parsup - + use i_param implicit none @@ -226,7 +221,7 @@ subroutine solve_low_order_icepack(mesh, trc) type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc -#include "../associate_mesh.h" +#include "associate_mesh.h" gamma = ice_gamma_fct ! Added diffusivity parameter ! Adjust it to ensure posivity of solution @@ -241,7 +236,7 @@ subroutine solve_low_order_icepack(mesh, trc) (1.0_WP-gamma) * trc(row) enddo - call exchange_nod(trl) + call exchange_nod(trl, p_partit) ! Low-order solution must be known to neighbours @@ -252,9 +247,7 @@ end subroutine solve_low_order_icepack subroutine solve_high_order_icepack(mesh, trc) use mod_mesh - use i_param - use g_parsup - + use i_param implicit none @@ -264,7 +257,7 @@ subroutine solve_high_order_icepack(mesh, trc) type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Taylor-Galerkin solution @@ -273,7 +266,7 @@ subroutine solve_high_order_icepack(mesh, trc) d_tr(row) = rhs_tr(row) / area(1,row) end do - call exchange_nod(d_tr) + call exchange_nod(d_tr, p_partit) ! Iterate do n = 1, num_iter_solve - 1 @@ -288,7 +281,7 @@ subroutine solve_high_order_icepack(mesh, trc) do row = 1, nx_nh d_tr(row) = trl(row) enddo - call exchange_nod(d_tr) + call exchange_nod(d_tr, p_partit) enddo end subroutine solve_high_order_icepack @@ -308,9 +301,7 @@ subroutine fem_fct_icepack(mesh, trc) use mod_mesh use o_param - use i_param - use g_parsup - + use i_param integer(kind=int_kind) :: icoef(3,3), n, q, elem, elnodes(3), row real (kind=dbl_kind), allocatable, dimension(:) :: tmax, tmin @@ -318,7 +309,7 @@ subroutine fem_fct_icepack(mesh, trc) type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc -#include "../associate_mesh.h" +#include "associate_mesh.h" gamma = ice_gamma_fct ! It should coinside with gamma in ! ts_solve_low_order @@ -412,7 +403,7 @@ subroutine fem_fct_icepack(mesh, trc) enddo ! pminus and pplus are to be known to neighbouting PE - call exchange_nod(icepminus, icepplus) + call exchange_nod(icepminus, icepplus, p_partit) !======================== ! Limiting @@ -446,7 +437,7 @@ subroutine fem_fct_icepack(mesh, trc) enddo enddo - call exchange_nod(trc) + call exchange_nod(trc, p_partit) deallocate(tmin, tmax) @@ -459,8 +450,6 @@ subroutine tg_rhs_div_icepack(mesh, trc) use mod_mesh use o_param use i_param - use g_parsup - implicit none @@ -470,7 +459,7 @@ subroutine tg_rhs_div_icepack(mesh, trc) type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Computes the rhs in a Taylor-Galerkin way (with urrayspwind ! type of correction for the advection operator). @@ -526,9 +515,7 @@ subroutine update_for_div_icepack(mesh, trc) use mod_mesh use o_param - use i_param - use g_parsup - + use i_param implicit none @@ -539,7 +526,7 @@ subroutine update_for_div_icepack(mesh, trc) type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Computes Taylor-Galerkin solution ! first approximation @@ -548,7 +535,7 @@ subroutine update_for_div_icepack(mesh, trc) d_tr(row) = rhs_trdiv(row) / area(1,row) enddo - call exchange_nod(d_tr) + call exchange_nod(d_tr, p_partit) ! Iterate @@ -564,7 +551,7 @@ subroutine update_for_div_icepack(mesh, trc) do row = 1, nx_nh d_tr(row) = trl(row) enddo - call exchange_nod(d_tr) + call exchange_nod(d_tr, p_partit) enddo trc = trc + d_tr diff --git a/src/icepack_drivers/icedrv_io.F90 b/src/icepack_drivers/icedrv_io.F90 index cb07079f4..7d8bb7c4d 100644 --- a/src/icepack_drivers/icedrv_io.F90 +++ b/src/icepack_drivers/icedrv_io.F90 @@ -21,7 +21,6 @@ module subroutine init_io_icepack(mesh) use mod_mesh - use g_parsup use io_meandata, only: def_stream3D, def_stream2D implicit none @@ -64,7 +63,7 @@ module subroutine init_io_icepack(mesh) namelist /nml_listsize / io_listsize namelist /nml_list_icepack / io_list_icepack -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Get the tracers information from icepack call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & @@ -95,7 +94,7 @@ module subroutine init_io_icepack(mesh) if (mype==0) write(*,*) ' file : ', 'namelist.io',' open ok' else if (mype==0) write(*,*) 'ERROR: --> bad opening file : ','namelist.io',' ; iostat=',iost - call par_ex + call par_ex(p_partit%MPI_COMM_FESOM, p_partit%mype) stop end if open( unit=nm_icepack_unit, file='namelist.icepack', form='formatted', access='sequential', status='old', iostat=iost ) @@ -103,7 +102,7 @@ module subroutine init_io_icepack(mesh) if (mype==0) write(*,*) ' file : ', 'namelist.icepack',' open ok' else if (mype==0) write(*,*) 'ERROR: --> bad opening file : ','namelist.icepack',' ; iostat=',iost - call par_ex + call par_ex(p_partit%MPI_COMM_FESOM, p_partit%mype) stop end if @@ -124,147 +123,147 @@ module subroutine init_io_icepack(mesh) do i=1, io_listsize select case (trim(io_list_icepack(i)%id)) case ('aice0 ') - call def_stream2D(nod2D, nx_nh, 'aice0', 'open water fraction', 'none', aice0(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'aice0', 'open water fraction', 'none', aice0(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('aicen ') - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'aicen', 'sea ice concentration', 'none', aicen(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'aicen', 'sea ice concentration', 'none', aicen(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) case ('vicen ') - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vicen', 'volume per unit area of ice', 'm', vicen(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vicen', 'volume per unit area of ice', 'm', vicen(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) case ('vsnon ') - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vsnon', 'volume per unit area of snow', 'm', vsnon(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vsnon', 'volume per unit area of snow', 'm', vsnon(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) case ('aice ') - call def_stream2D(nod2D, nx_nh, 'aice', 'sea ice concentration', 'none', aice(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'aice', 'sea ice concentration', 'none', aice(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('vice ') - call def_stream2D(nod2D, nx_nh, 'vice', 'volume per unit area of ice', 'm', vice(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'vice', 'volume per unit area of ice', 'm', vice(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('vsno ') - call def_stream2D(nod2D, nx_nh, 'vsno', 'volume per unit area of snow', 'm', vsno(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'vsno', 'volume per unit area of snow', 'm', vsno(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) ! Sea ice velocity components case ('uvel ') - call def_stream2D(nod2D, nx_nh, 'uvel', 'x-component of ice velocity', 'm/s', uvel(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'uvel', 'x-component of ice velocity', 'm/s', uvel(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('vvel ') - call def_stream2D(nod2D, nx_nh, 'vvel', 'y-component of ice velocity', 'm/s', vvel(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'vvel', 'y-component of ice velocity', 'm/s', vvel(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) ! Sea ice or snow surface temperature case ('Tsfc ') - call def_stream2D(nod2D, nx_nh, 'Tsfc', 'sea ice surf. temperature', 'degC', trcr(:,nt_Tsfc), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'Tsfc', 'sea ice surf. temperature', 'degC', trcr(:,nt_Tsfc), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('Tsfcn ') - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'Tsfcn', 'sea ice surf. temperature', 'degC', trcrn(:,nt_Tsfc,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'Tsfcn', 'sea ice surf. temperature', 'degC', trcrn(:,nt_Tsfc,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('strength ') - call def_stream2D(nod2D, nx_nh, 'strength', 'sea ice strength', 'N', strength(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'strength', 'sea ice strength', 'N', strength(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) ! If the following tracers are not defined they will not be outputed case ('iagen ') if (tr_iage) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'iage', 'sea ice age', 's', trcrn(:,nt_iage,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'iage', 'sea ice age', 's', trcrn(:,nt_iage,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('FYn ') if (tr_FY) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'FY', 'first year ice', 'none', trcrn(:,nt_FY,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'FY', 'first year ice', 'none', trcrn(:,nt_FY,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('lvln ') if (tr_lvl) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'alvl', 'ridged sea ice area', 'none', trcrn(:,nt_alvl,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vlvl', 'ridged sea ice volume', 'm', trcrn(:,nt_vlvl,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'alvl', 'ridged sea ice area', 'none', trcrn(:,nt_alvl,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vlvl', 'ridged sea ice volume', 'm', trcrn(:,nt_vlvl,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('pond_cesmn') if (tr_pond_cesm) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('pond_topon') if (tr_pond_topo) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'ipnd', 'melt pond refrozen lid thickness', 'm', trcrn(:,nt_ipnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'ipnd', 'melt pond refrozen lid thickness', 'm', trcrn(:,nt_ipnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('pond_lvln ') if (tr_pond_lvl) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'ipnd', 'melt pond refrozen lid thickness', 'm', trcrn(:,nt_ipnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'ipnd', 'melt pond refrozen lid thickness', 'm', trcrn(:,nt_ipnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('brinen ') if (tr_brine) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'fbri', 'volume fraction of ice with dynamic salt', 'none', trcrn(:,nt_fbri,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'fbri', 'volume fraction of ice with dynamic salt', 'none', trcrn(:,nt_fbri,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('qicen ') do k = 1,nilyr ! Separate variable for each sea ice layer write(trname,'(A6,i1)') 'qicen_', k write(longname,'(A22,i1)') 'sea ice enthalpy lyr: ', k units='J/m3' - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_qice+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_qice+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end do case ('sicen ') do k = 1,nilyr ! Separate variable for each sea ice layer write(trname,'(A6,i1)') 'sicen_', k write(longname,'(A22,i1)') 'sea ice salinity lyr: ', k units='psu' - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_sice+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_sice+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end do case ('qsnon ') do k = 1,nslyr ! Separate variable for each snow layer write(trname,'(A6,i1)') 'qsnon_', k write(longname,'(A19,i1)') 'snow enthalpy lyr: ', k units='J/m3' - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_qsno+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_qsno+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end do ! Average over categories case ('iage ') if (tr_iage) then - call def_stream2D(nod2D, nx_nh, 'iage', 'sea ice age', 's', trcr(:,nt_iage), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'iage', 'sea ice age', 's', trcr(:,nt_iage), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('FY ') if (tr_FY) then - call def_stream2D(nod2D, nx_nh, 'FY', 'first year ice', 'none', trcr(:,nt_FY), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'FY', 'first year ice', 'none', trcr(:,nt_FY), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('lvl ') if (tr_lvl) then - call def_stream2D(nod2D, nx_nh, 'alvl', 'ridged sea ice area', 'none', trcr(:,nt_alvl), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - call def_stream2D(nod2D, nx_nh, 'vlvl', 'ridged sea ice volume', 'm', trcr(:,nt_vlvl), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'alvl', 'ridged sea ice area', 'none', trcr(:,nt_alvl), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + call def_stream2D(nod2D, nx_nh, 'vlvl', 'ridged sea ice volume', 'm', trcr(:,nt_vlvl), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('pond_cesm ') if (tr_pond_cesm) then - call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('pond_topo ') if (tr_pond_topo) then - call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - call def_stream2D(nod2D, nx_nh, 'ipnd', 'melt pond refrozen lid thickness', 'm', trcr(:,nt_ipnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + call def_stream2D(nod2D, nx_nh, 'ipnd', 'melt pond refrozen lid thickness', 'm', trcr(:,nt_ipnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('pond_lvl ') if (tr_pond_lvl) then - call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - !call def_stream2D(nod2D, nx_nh, 'ipnd', 'melt pond refrozen lid thickness', 'm', trcr(:,nt_ipnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + !call def_stream2D(nod2D, nx_nh, 'ipnd', 'melt pond refrozen lid thickness', 'm', trcr(:,nt_ipnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('brine ') if (tr_brine) then - call def_stream2D(nod2D, nx_nh, 'fbri', 'volume fraction of ice with dynamic salt', 'none', trcr(:,nt_fbri), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'fbri', 'volume fraction of ice with dynamic salt', 'none', trcr(:,nt_fbri), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('qice ') do k = 1,nilyr ! Separate variable for each sea ice layer write(trname,'(A6,i1)') 'qicen_', k write(longname,'(A22,i1)') 'sea ice enthalpy lyr: ', k units='J/m3' - call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_qice+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_qice+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end do case ('sice ') do k = 1,nilyr ! Separate variable for each sea ice layer write(trname,'(A6,i1)') 'sicen_', k write(longname,'(A22,i1)') 'sea ice salinity lyr: ', k units='psu' - call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_sice+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_sice+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end do case ('qsno ') do k = 1,nslyr ! Separate variable for each snow layer write(trname,'(A6,i1)') 'qsnon_', k write(longname,'(A19,i1)') 'snow enthalpy lyr: ', k units='J/m3' - call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_qsno+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_qsno+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end do case ('rdg_conv ') - call def_stream2D(nod2D, nx_nh, 'rdg_conv', 'Convergence term for ridging', '1/s', rdg_conv(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'rdg_conv', 'Convergence term for ridging', '1/s', rdg_conv(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('rdg_shear ') - call def_stream2D(nod2D, nx_nh, 'rdg_shear', 'Shear term for ridging', '1/s', rdg_shear(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'rdg_shear', 'Shear term for ridging', '1/s', rdg_shear(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case default if (mype==0) write(*,*) 'stream ', io_list_icepack(i)%id, ' is not defined !' end select @@ -279,7 +278,6 @@ end subroutine init_io_icepack module subroutine init_restart_icepack(year, mesh) use mod_mesh - use g_parsup use g_config, only: runid, ResultPath use io_restart, only: ip_id, def_variable_2d, def_dim @@ -311,7 +309,7 @@ module subroutine init_restart_icepack(year, mesh) tr_zaero, tr_bgc_Fe, & tr_bgc_hum -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Get the tracers information from icepack call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & diff --git a/src/icepack_drivers/icedrv_main.F90 b/src/icepack_drivers/icedrv_main.F90 index 7dfa9700b..e6fc705f6 100644 --- a/src/icepack_drivers/icedrv_main.F90 +++ b/src/icepack_drivers/icedrv_main.F90 @@ -10,7 +10,7 @@ module icedrv_main use icedrv_kinds use icedrv_constants - use g_parsup, only: mype + use mod_partit implicit none @@ -64,6 +64,8 @@ module icedrv_main integer (kind=int_kind), save :: max_ntrcr ! number of tracers in total integer (kind=int_kind), save :: nfreq ! number of wave frequencies ! HARDWIRED FOR NOW integer (kind=int_kind), save :: ndtd ! dynamic time steps per thermodynamic time step + type(t_partit), pointer, save :: p_partit ! a pointer to the mesh partitioning (has been accessed via "use g_parsup" in the previous versions) + integer (kind=int_kind), save :: mype ! a copy of a mype which has been accessed via "use g_parsup" in the previous versions !======================================================================= ! 2. State variabels for icepack @@ -751,8 +753,10 @@ module icedrv_main interface ! Read icepack namelists, setup the model parameter and write diagnostics - module subroutine set_icepack() + module subroutine set_icepack(partit) + use mod_partit implicit none + type(t_partit), intent(inout), target :: partit end subroutine set_icepack ! Set up hemispheric masks @@ -860,7 +864,6 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) use mod_mesh use g_config, only: dt use i_PARAM, only: whichEVP - use g_parsup use icepack_intfc, only: icepack_ice_strength implicit none real (kind=dbl_kind), intent(out) :: & diff --git a/src/icepack_drivers/icedrv_set.F90 b/src/icepack_drivers/icedrv_set.F90 index 4638073f3..bc7af715f 100644 --- a/src/icepack_drivers/icedrv_set.F90 +++ b/src/icepack_drivers/icedrv_set.F90 @@ -18,15 +18,12 @@ use icepack_intfc, only: icepack_query_tracer_indices use icepack_intfc, only: icepack_warnings_flush use icepack_intfc, only: icepack_warnings_aborted - use icedrv_system, only: icedrv_system_abort + use icedrv_system, only: icedrv_system_abort, icedrv_system_init contains - module subroutine set_icepack() + module subroutine set_icepack(partit) - use g_parsup, only: myDim_nod2D, eDim_nod2D, & - myDim_elem2D, eDim_elem2D, & - mpi_comm_fesom use i_param, only: whichEVP use i_param, only: cd_oce_ice, Pstar, c_pressure use i_therm_param, only: albw @@ -34,6 +31,7 @@ module subroutine set_icepack() implicit none ! local variables + type(t_partit), intent(inout), target :: partit character(len=char_len) :: nml_filename, diag_filename character(len=*), parameter :: subname = '(set_icepack)' @@ -256,12 +254,13 @@ module subroutine set_icepack() !----------------------------------------------------------------- ! Derived quantities used by the icepack model !----------------------------------------------------------------- - - nx = myDim_nod2D + eDim_nod2D - nx_elem = myDim_elem2D + eDim_elem2D - nx_nh = myDim_nod2D - nx_elem_nh = myDim_elem2D - + call icedrv_system_init(partit) + p_partit => partit + nx = p_partit%myDim_nod2D + p_partit%eDim_nod2D + nx_elem = p_partit%myDim_elem2D + p_partit%eDim_elem2D + nx_nh = p_partit%myDim_nod2D + nx_elem_nh = p_partit%myDim_elem2D + mype = p_partit%mype ncat = nicecat ! number of categories nfsd = nfsdcat ! number of floe size categories nilyr = nicelyr ! number of ice layers per category @@ -870,7 +869,7 @@ module subroutine set_icepack() if (icepack_warnings_aborted()) call icedrv_system_abort(string=subname, & file=__FILE__,line= __LINE__) - call mpi_barrier(mpi_comm_fesom,mpi_error) + call mpi_barrier(p_partit%mpi_comm_fesom, mpi_error) end subroutine set_icepack diff --git a/src/icepack_drivers/icedrv_step.F90 b/src/icepack_drivers/icedrv_step.F90 index ed5d047eb..b97f1e917 100644 --- a/src/icepack_drivers/icedrv_step.F90 +++ b/src/icepack_drivers/icedrv_step.F90 @@ -1121,7 +1121,6 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) use icepack_intfc, only: icepack_ice_strength use g_config, only: dt use i_PARAM, only: whichEVP - use g_parsup use mod_mesh implicit none @@ -1247,7 +1246,7 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) call EVPdynamics_a(mesh) case default if (mype==0) write(*,*) 'A non existing EVP scheme specified!' - call par_ex + call par_ex(p_partit%MPI_COMM_FESOM, p_partit%mype) stop end select diff --git a/src/icepack_drivers/icedrv_system.F90 b/src/icepack_drivers/icedrv_system.F90 index 8a130bbd7..40ce82251 100644 --- a/src/icepack_drivers/icedrv_system.F90 +++ b/src/icepack_drivers/icedrv_system.F90 @@ -7,16 +7,16 @@ module icedrv_system use icedrv_kinds - use g_parsup, only: par_ex + use mod_parsup, only: par_ex use icedrv_constants, only: ice_stderr use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted - + use mod_partit implicit none - public :: icedrv_system_abort + public :: icedrv_system_abort, icedrv_system_init private - + type(t_partit), save, pointer :: p_partit ! a pointer to the mesh partitioning (has been accessed via "use g_parsup" in the original code) !======================================================================= contains @@ -49,12 +49,21 @@ subroutine icedrv_system_abort(icell, istep, string, file, line) ! Stop FESOM2 - call par_ex(1) + call par_ex(p_partit%MPI_COMM_FESOM, p_partit%mype, 1) stop end subroutine icedrv_system_abort !======================================================================= + subroutine icedrv_system_init(partit) + implicit none + type(t_partit), intent(inout), target :: partit + + p_partit => partit + end subroutine icedrv_system_init + +!======================================================================= + end module icedrv_system diff --git a/src/icepack_drivers/icedrv_transfer.F90 b/src/icepack_drivers/icedrv_transfer.F90 index 80e51d97a..32e916823 100644 --- a/src/icepack_drivers/icedrv_transfer.F90 +++ b/src/icepack_drivers/icedrv_transfer.F90 @@ -32,7 +32,6 @@ module subroutine fesom_to_icepack(mesh) use o_param, only: mstep use mod_mesh use mod_tracer - use g_parsup use g_clock implicit none @@ -62,7 +61,7 @@ module subroutine fesom_to_icepack(mesh) type(t_mesh), target, intent(in) :: mesh -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Ice @@ -145,7 +144,7 @@ module subroutine fesom_to_icepack(mesh) rdg_shear(i) = ty / tvol enddo - call exchange_nod(rdg_conv, rdg_shear) + call exchange_nod(rdg_conv, rdg_shear, p_partit) ! Clock variables diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index db12a33ce..a4bbae11d 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -401,7 +401,7 @@ subroutine write_blowup(id, istep, partit, mesh) if (mype==0) deallocate(aux2) else if (mype==0) write(*,*) 'not supported shape of array in restart file' - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if end do @@ -492,7 +492,7 @@ subroutine was_error(id, partit) if (status .ne. nf_noerr) then if (partit%mype==0) write(*,*) 'error counter=', k if (partit%mype==0) call handle_err(status, partit) - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if end do diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 1d34ae0ef..e70b48982 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -128,7 +128,7 @@ subroutine ini_mean_io(tracers, partit, mesh) if (mype==0) WRITE(*,*) ' file : ', 'namelist.io',' open ok' else if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.io',' ; iostat=',iost - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif READ(nm_io_unit, nml=nml_listsize, iostat=iost ) @@ -832,7 +832,7 @@ subroutine output(istep, tracers, partit, mesh) call ini_mean_io(tracers, partit, mesh) call init_io_gather(partit) #if defined (__icepack) - call init_io_icepack(partit, mesh) + call init_io_icepack(mesh) !icapack has its copy of p_partit => partit #endif call init_io_gather(partit) end if @@ -862,7 +862,7 @@ subroutine output(istep, tracers, partit, mesh) else write(*,*) 'You did not specify a supported outputflag.' write(*,*) 'The program will stop to give you opportunity to do it.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) stop endif @@ -1120,7 +1120,7 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, allocate(data_strategy_nf_float_type :: entry%data_strategy) else if (partit%mype==0) write(*,*) 'not supported output accuracy:',accuracy,'for',trim(name) - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif ! accuracy diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 18a684ab3..a9d2aac22 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -230,13 +230,13 @@ subroutine restart(istep, l_write, l_read, tracers, partit, mesh) call ini_ocean_io(yearnew, tracers, partit, mesh) if (use_ice) call ini_ice_io (yearnew, partit, mesh) #if defined(__icepack) - if (use_ice) call init_restart_icepack(yearnew, partit, mesh) + if (use_ice) call init_restart_icepack(yearnew, mesh) !icapack has its copy of p_partit => partit #endif else call ini_ocean_io(yearold, tracers, partit, mesh) if (use_ice) call ini_ice_io (yearold, partit, mesh) #if defined(__icepack) - if (use_ice) call init_restart_icepack(yearold, partit, mesh) + if (use_ice) call init_restart_icepack(yearold, mesh) !icapack has its copy of p_partit => partit #endif end if @@ -271,7 +271,7 @@ subroutine restart(istep, l_write, l_read, tracers, partit, mesh) else write(*,*) 'You did not specify a supported outputflag.' write(*,*) 'The program will stop to give you opportunity to do it.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) stop endif @@ -529,7 +529,7 @@ subroutine write_restart(id, istep, partit, mesh) order=2 else if (mype==0) write(*,*) 'the shape of the array in the restart file and the grid size are different' - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if if (mype==0) allocate(aux (size_gen)) @@ -555,7 +555,7 @@ subroutine write_restart(id, istep, partit, mesh) if (mype==0) deallocate(aux) else if (mype==0) write(*,*) 'not supported shape of array in restart file' - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if call was_error(id, partit); c=1 @@ -607,7 +607,7 @@ subroutine read_restart(id, partit, mesh, arg) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end if if (.not. present(arg)) then @@ -664,7 +664,7 @@ subroutine read_restart(id, partit, mesh, arg) order=2 else if (mype==0) write(*,*) 'the shape of the array in the restart file and the grid size are different' - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if if (mype==0) allocate(aux (size_gen)) @@ -691,7 +691,7 @@ subroutine read_restart(id, partit, mesh, arg) if (mype==0) deallocate(aux) else if (mype==0) write(*,*) 'not supported shape of array in restart file when reading restart' - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if call was_error(id, partit); c=1 @@ -785,7 +785,7 @@ subroutine was_error(id, partit) if (status .ne. nf_noerr) then if (partit%mype==0) write(*,*) 'error counter=', k if (partit%mype==0) call handle_err(status, partit) - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if end do diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 875f43052..54d9603c7 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -169,7 +169,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) call adv_tra_hor_upw1(vel, ttfAB, partit, mesh, adv_flux_hor, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracers%data(tr_num)%tra_adv_hor), '! Check your namelists!' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) END SELECT if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then pwvel=>w @@ -190,7 +190,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) call adv_tra_ver_upw1 ( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) ! --> be aware the vertical implicite part in case without FCT is done in ! oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, partit, mesh) ! for do_wimpl=.true. diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 2af4fb229..fb4ee6336 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -878,7 +878,7 @@ subroutine init_thickness_ale(partit, mesh) write(*,*) write(*,*) '____________________________________________________________' write(*,*) 'The vertical ALE discretisation ', which_ale,' is currently not supported!!!' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end if endif @@ -2267,7 +2267,7 @@ subroutine vert_vel_ale(partit, mesh) write(*,*) end if end do -!!PS call par_ex(partit, 1) +!!PS call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif !___________________________________________________________________________ diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 29042954e..5f3284cdf 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -288,7 +288,7 @@ subroutine pressure_bv(tracers, partit, mesh) call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select end do @@ -352,7 +352,7 @@ subroutine pressure_bv(tracers, partit, mesh) call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select !_______________________________________________________________ rho(nz)= bulk_0(nz) + Z_3d_n(nz,node)*(bulk_pz(nz) + Z_3d_n(nz,node)*bulk_pz2(nz)) @@ -510,7 +510,7 @@ subroutine pressure_force_4_linfs(tracers, partit, mesh) write(*,*) ' see in namelist.oce --> which_pgf = sergey, ' write(*,*) ' shchepetkin, easypgf ' write(*,*) '________________________________________________________' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end if !___________________________________________________________________________ @@ -531,7 +531,7 @@ subroutine pressure_force_4_linfs(tracers, partit, mesh) write(*,*) ' see in namelist.oce --> which_pgf = nemo, ' write(*,*) ' shchepetkin, cubicspline ' write(*,*) '________________________________________________________' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end if end if end subroutine pressure_force_4_linfs @@ -727,7 +727,7 @@ subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) call densityJM_components(interp_n_temp, interp_n_salt, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select interp_n_dens(ni) = bulk_0 + Z_n(nle)*(bulk_pz + Z_n(nle)*bulk_pz2) !!PS interp_n_dens(ni) = interp_n_dens(ni)*rhopot/(interp_n_dens(ni)+0.1_WP*Z_n(nle))*real(state_equation))-density_0 @@ -1086,7 +1086,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select end if @@ -1176,7 +1176,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1215,7 +1215,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1318,7 +1318,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1357,7 +1357,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1830,7 +1830,7 @@ subroutine pressure_force_4_zxxxx(tracers, partit, mesh) write(*,*) ' see in namelist.oce --> which_pgf = ' write(*,*) ' shchepetkin, cubicspline, easypgf ' write(*,*) '________________________________________________________' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end if end subroutine pressure_force_4_zxxxx ! @@ -2333,7 +2333,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select end if @@ -2412,7 +2412,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2461,7 +2461,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2475,7 +2475,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) ! --> this is not wanted !!! write(*,*) ' --> would do second order surface boundary density extrapolation' write(*,*) ' This is not wanted, model stops here' - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if end do !_______________________________________________________________________ @@ -2545,7 +2545,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(3), salt_at_Zn(3), bulk_0(3), bulk_pz(3), bulk_pz2(3), rhopot(3), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn = bulk_0 + Z_n(nlz)*(bulk_pz + Z_n(nlz)*bulk_pz2) rho_at_Zn = rho_at_Zn*rhopot/(rho_at_Zn+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2626,7 +2626,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2675,7 +2675,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2692,7 +2692,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) write(*,*) ' idx = ', idx write(*,*) ' nle = ', nle write(*,*) ' nln = ', nlevels_nod2D(elnodes)-1 - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if end do !_______________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 699a85c67..ed5145ec2 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1265,7 +1265,7 @@ FUNCTION bc_surface(n, id, sval, partit) if (partit%mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified in boundary conditions' if (partit%mype==0) write(*,*) 'the model will stop!' end if - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop END SELECT RETURN diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 1ca0d1d93..98c730732 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -119,7 +119,7 @@ subroutine compute_vel_rhs(partit, mesh) ! advection if (mom_adv==1) then if (mype==0) write(*,*) 'in moment not adapted mom_adv advection typ for ALE, check your namelist' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) elseif (mom_adv==2) then call momentum_adv_scalar(partit, mesh) end if diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 355ab9734..36b9f6d04 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -271,7 +271,7 @@ subroutine viscosity_filter(option, partit, mesh) call visc_filt_dbcksc(partit, mesh) CASE DEFAULT if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop END SELECT end subroutine viscosity_filter diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index e24343e6d..0bb876ae7 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -252,7 +252,7 @@ SUBROUTINE read_mesh(partit, mesh) if (error_status/=0) then write(*,*) n write(*,*) 'error: NPES does not coincide with that of the mesh' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) STOP end if ! broadcasting partitioning vector to the other procs @@ -313,7 +313,7 @@ SUBROUTINE read_mesh(partit, mesh) if (error_status/=0) then write(*,*) n write(*,*) 'error: nod2D/=part(npes+1)-1' - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) STOP end if @@ -412,7 +412,7 @@ SUBROUTINE read_mesh(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) !___________________________________________________________________________ ! check if rotation needs to be applied to an unrotated mesh elseif ((mype==0) .and. (.not. force_rotation) .and. (flag_checkmustrot==1) .and. (.not. toy_ocean)) then @@ -433,7 +433,7 @@ SUBROUTINE read_mesh(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if @@ -525,7 +525,7 @@ SUBROUTINE read_mesh(partit, mesh) call MPI_BCast(mesh%nl, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) if (mesh%nl < 3) then write(*,*) '!!!Number of levels is less than 3, model will stop!!!' - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if allocate(mesh%zbar(mesh%nl)) ! allocate the array for storing the standard depths @@ -596,7 +596,7 @@ SUBROUTINE read_mesh(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(partit, 0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if ! ============================== @@ -997,7 +997,7 @@ subroutine find_levels_cavity(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end if end if @@ -1085,7 +1085,7 @@ subroutine find_levels_cavity(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end if end if @@ -1244,7 +1244,7 @@ subroutine find_levels_cavity(partit, mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end if end if @@ -1818,7 +1818,7 @@ SUBROUTINE find_neighbors(partit, mesh) END DO if (elem1<2) then write(*,*) 'Insufficient number of neighbors ', myList_elem2D(elem) - call par_ex(partit, 1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) STOP end if END DO @@ -2562,7 +2562,7 @@ SUBROUTINE check_mesh_consistency(partit, mesh) write(*,*) '***end level area_test***' end if -!call par_ex(partit) +!call par_ex(partit%MPI_COMM_FESOM, partit%mype) !stop END SUBROUTINE check_mesh_consistency ! diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 171fec405..2842b69f0 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -118,7 +118,7 @@ subroutine ocean_setup(tracers, partit, mesh) case ('cvmix_TKE+cvmix_IDEMIX') ; mix_scheme_nmb = 56 case default stop "!not existing mixing scheme!" - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end select ! initialise fesom1.4 like KPP @@ -252,7 +252,7 @@ SUBROUTINE tracer_init(tracers, partit, mesh) if (mype==0) WRITE(*,*) ' file : ', 'namelist.tra',' open ok' else if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.tra',' ; iostat=',iost - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if @@ -726,7 +726,7 @@ SUBROUTINE oce_initial_state(tracers, partit, mesh) if (mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified for '//trim(i_string)//' th tracer!!!' if (mype==0) write(*,*) 'the model will stop!' end if - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop END SELECT END DO diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 7c158c281..ac4d1d73f 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -531,6 +531,6 @@ subroutine check_blowup(istep, tracers, partit, mesh) end if call blowup(istep, tracers, partit, mesh) if (mype==0) write(*,*) ' --> finished writing blow up file' - call par_ex(partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) endif end subroutine From 02974a565264a7b353febcb8fc3e69eb93a96405 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 11 Oct 2021 17:14:27 +0200 Subject: [PATCH 398/909] init_io_gather was called twice in the output subroutine --- src/io_meandata.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index e70b48982..59c6a6fbf 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -830,7 +830,6 @@ subroutine output(istep, tracers, partit, mesh) ctime=timeold+(dayold-1.)*86400 if (lfirst) then call ini_mean_io(tracers, partit, mesh) - call init_io_gather(partit) #if defined (__icepack) call init_io_icepack(mesh) !icapack has its copy of p_partit => partit #endif From 92e068f12abad2edcad8627c670d24db8a94751b Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 11 Oct 2021 20:50:24 +0200 Subject: [PATCH 399/909] partit was forgotten when calling EVP rheology --- src/icepack_drivers/icedrv_step.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/icepack_drivers/icedrv_step.F90 b/src/icepack_drivers/icedrv_step.F90 index b97f1e917..38937781e 100644 --- a/src/icepack_drivers/icedrv_step.F90 +++ b/src/icepack_drivers/icedrv_step.F90 @@ -1239,11 +1239,11 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) select case (whichEVP) case (0) - call EVPdynamics(mesh) + call EVPdynamics (p_partit, mesh) case (1) - call EVPdynamics_m(mesh) + call EVPdynamics_m(p_partit, mesh) case (2) - call EVPdynamics_a(mesh) + call EVPdynamics_a(p_partit, mesh) case default if (mype==0) write(*,*) 'A non existing EVP scheme specified!' call par_ex(p_partit%MPI_COMM_FESOM, p_partit%mype) From 156c4518ce47086aab876e9e56716b06744ac228 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 12 Oct 2021 10:00:08 +0200 Subject: [PATCH 400/909] no need to use g_config in src/gen_modules_partitioning.F90 --- src/gen_modules_partitioning.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index d99a1e667..1c74cc724 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -509,7 +509,6 @@ subroutine init_gatherLists(partit) end subroutine init_gatherLists !=================================================================== subroutine status_check(partit) -use g_config USE MOD_PARTIT USE MOD_PARSUP implicit none From a934996d08dc6e64aac508e3685b6bedcbe3065e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 1 Nov 2021 14:41:34 +0100 Subject: [PATCH 401/909] be able to write transposed 3D output files via preprocessor definition (as in 3bcd313, but intentionally without a switch to turn it off) --- src/io_meandata.F90 | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 59c6a6fbf..75395e0a6 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -637,8 +637,7 @@ subroutine create_new_file(entry, partit, mesh) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'axis', len_trim('T'), trim('T')), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'stored_direction', len_trim('increasing'), trim('increasing')), __LINE__) - call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, & - (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) + call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(entry%ndim:1:-1), entry%recID/), entry%varID), __LINE__) !CHUNKING stuff (netcdf libraries not always compited with it) !if (entry%ndim==2) then ! call assert_nf( nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)), __LINE__); @@ -748,7 +747,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r8, 1), __LINE__) + call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r8, 1), __LINE__) end if end if end do @@ -768,7 +767,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r4, 1), __LINE__) + call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__) end if end if end do From 5dfc060859cdd103977f5e95fe4cac5ef97e92d3 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 1 Nov 2021 22:10:20 +0100 Subject: [PATCH 402/909] add module file for derived type of dynamics --- src/MOD_DYN.F90 | 176 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 176 insertions(+) create mode 100644 src/MOD_DYN.F90 diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 new file mode 100644 index 000000000..920a77b4c --- /dev/null +++ b/src/MOD_DYN.F90 @@ -0,0 +1,176 @@ +!========================================================== +MODULE MOD_DYN +USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE + +! +! +!_______________________________________________________________________________ +TYPE T_solverinfo + integer :: maxiter=2000 + integer :: restarts=15 + integer :: fillin=3 + integer :: lutype=2 + integer :: nrhs=1 + real(kind=WP) :: droptol=1.e-7 + real(kind=WP) :: soltol =1e-10 !1.e-10 + logical :: lfirst=.true. +END TYPE T_solverinfo + +! +! +!_______________________________________________________________________________ +! set main structure for dynamicss, contains viscosity options and parameters + +! option for momentum advection +TYPE T_DYN + ! instant zonal merdional velocity & Adams-Bashfort rhs + real(kind=WP), allocatable, dimension(:,:,:):: uv, uv_rhs, uv_rhsAB + + ! instant vertical velm explicite+implicite part + real(kind=WP), allocatable, dimension(:,:) :: w, w_e, w_i, cfl_z + + real(kind=WP), allocatable, dimension(:,:,:):: uvnode, uvnode_rhs + + real(kind=WP), allocatable, dimension(:) :: eta_n, d_eta, ssh_rhs, ssh_rhs_old + + ! summarizes solver input parameter + type(t_solverinfo) :: solverinfo + + + ! visc_option=... + ! 1=Harmonic Leith parameterization; + ! 2=Laplacian+Leith+biharmonic background + ! 3=Biharmonic Leith parameterization + ! 4=Biharmonic flow aware + ! 5=Kinematic (easy) Backscatter + ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) + ! 7=Biharmonic flow aware (viscosity depends on velocity differences) + ! 8=Dynamic Backscatter + integer :: visc_opt = 5 + + ! gamma0 [m/s], backgroung viscosity= gamma0*len, it should be as small + ! as possible (keep it < 0.01 m/s). + ! gamma1 [nodim], for computation of the flow aware viscosity + ! gamma2 [s/m], is only used in easy backscatter option + real(kind=WP) :: gamma0_visc = 0.03 + real(kind=WP) :: gamma1_visc = 0.1 + real(kind=WP) :: gamma2_visc = 0.285 + + ! div_c the strength of the modified Leith viscosity, nondimensional, 0.3 -- 1.0 + ! leith the strength of the Leith viscosity + real(kind=WP) :: div_c_visc = 0.5 + real(kind=WP) :: leith_c_visc = 0.05 + + ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 + ! (easy backscatter) + real(kind=WP) :: easy_bs_return= 1.5 + + logical :: use_ivertvisc = .true. + integer :: momadv_opt = 2 + + ! Switch on free slip + logical :: use_freeslip = .false. + + ! do implicite, explicite spliting of vertical velocity + logical :: use_wsplit = .false. + ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) + ! in older FESOM it used to be w_exp_max=1.e-3 + real(kind=WP) :: wsplit_maxcfl= 1.0 + + !___________________________________________________________________________ + contains + procedure WRITE_T_DYN + procedure READ_T_DYN + generic :: write(unformatted) => WRITE_T_DYN + generic :: read(unformatted) => READ_T_DYN +END TYPE T_DYN + +contains + +! +! +!_______________________________________________________________________________ +! set unformatted writing and reading for T_DYN +subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN), intent(in) :: dynamics + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + !___________________________________________________________________________ + call write_bin_array(dynamics%uv , unit, iostat, iomsg) + call write_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) + call write_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) + call write_bin_array(dynamics%uvnode , unit, iostat, iomsg) + call write_bin_array(dynamics%uvnode_rhs, unit, iostat, iomsg) + + call write_bin_array(dynamics%w , unit, iostat, iomsg) + call write_bin_array(dynamics%w_e , unit, iostat, iomsg) + call write_bin_array(dynamics%w_i , unit, iostat, iomsg) + + call write_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt + write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%div_c_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%leith_c_visc + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%momadv_opt + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_freeslip + write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_wsplit + write(unit, iostat=iostat, iomsg=iomsg) dynamics%wsplit_maxcfl + +end subroutine WRITE_T_DYN + +subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN), intent(inout) :: dynamics + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + !___________________________________________________________________________ + call read_bin_array(dynamics%uv , unit, iostat, iomsg) + call read_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) + call read_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) + call read_bin_array(dynamics%uvnode , unit, iostat, iomsg) + call read_bin_array(dynamics%uvnode_rhs, unit, iostat, iomsg) + + call read_bin_array(dynamics%w , unit, iostat, iomsg) + call read_bin_array(dynamics%w_e , unit, iostat, iomsg) + call read_bin_array(dynamics%w_i , unit, iostat, iomsg) + + call read_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt + read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%div_c_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%leith_c_visc + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%momadv_opt + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_freeslip + read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_wsplit + read(unit, iostat=iostat, iomsg=iomsg) dynamics%wsplit_maxcfl + +end subroutine READ_T_DYN + +END MODULE MOD_DYN \ No newline at end of file From a885f156f80562f5f1999e5cc1fbfd257fc22738 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 1 Nov 2021 22:10:57 +0100 Subject: [PATCH 403/909] add initialisation for dynamics derived type --- src/oce_setup_step.F90 | 138 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 128 insertions(+), 10 deletions(-) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 2842b69f0..6469eb99f 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -11,6 +11,7 @@ subroutine oce_initial_state(tracers, partit, mesh) end subroutine end interface end module + module tracer_init_interface interface subroutine tracer_init(tracers, partit, mesh) @@ -24,6 +25,21 @@ subroutine tracer_init(tracers, partit, mesh) end subroutine end interface end module + +module dynamics_init_interface + interface + subroutine dynamics_init(dynamics, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + end subroutine + end interface +end module + module ocean_setup_interface interface subroutine ocean_setup(tracers, partit, mesh) @@ -37,6 +53,7 @@ subroutine ocean_setup(tracers, partit, mesh) end subroutine end interface end module + module before_oce_step_interface interface subroutine before_oce_step(tracers, partit, mesh) @@ -51,11 +68,12 @@ subroutine before_oce_step(tracers, partit, mesh) end interface end module !_______________________________________________________________________________ -subroutine ocean_setup(tracers, partit, mesh) +subroutine ocean_setup(dynamics, tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER +USE MOD_DYN USE o_PARAM USE o_ARRAYS USE g_config @@ -69,9 +87,10 @@ subroutine ocean_setup(tracers, partit, mesh) use oce_initial_state_interface use oce_adv_tra_fct_interfaces IMPLICIT NONE -type(t_mesh), intent(inout), target :: mesh -type(t_partit), intent(inout), target :: partit +type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers +type(t_partit), intent(inout), target :: partit +type(t_mesh) , intent(inout), target :: mesh integer :: n !___setup virt_salt_flux____________________________________________________ ! if the ale thinkness remain unchanged (like in 'linfs' case) the vitrual @@ -181,9 +200,9 @@ subroutine ocean_setup(tracers, partit, mesh) SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase if (mod(mstep, soufflet_forc_update)==0) then - call initial_state_soufflet(tracers, partit, mesh) + call initial_state_soufflet(dynamics, tracers, partit, mesh) call compute_zonal_mean_ini(partit, mesh) - call compute_zonal_mean(tracers, partit, mesh) + call compute_zonal_mean(dynamics, tracers, partit, mesh) end if END SELECT else @@ -313,6 +332,103 @@ END SUBROUTINE tracer_init ! ! !_______________________________________________________________________________ +SUBROUTINE dynamics_init(dynamics, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + IMPLICIT NONE + integer :: elem_size, node_size + integer, save :: nm_unit = 104 ! unit to open namelist file, skip 100-102 for cray + integer :: iost + + integer :: visc_opt + real(kind=WP) :: gamma0_visc, gamma1_visc, gamma2_visc + real(kind=WP) :: div_c_visc, leith_c_visc, easybackscat_return + logical :: use_ivertvisc + integer :: momadv_opt + logical :: use_freeslip + logical :: use_wsplit + real(kind=WP) :: wsplit_maxcfl + + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + + ! define dynamics namelist parameter + namelist /dynamics_visc / visc_opt, gamma0_visc, gamma1_visc, gamma2_visc, & + div_c_visc, leith_c_visc, use_ivertvisc, easy_bs_return + namelist /dynamics_general / momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + ! open and read namelist for I/O + open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) + if (iost == 0) then + if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' + else + if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + read(nm_unit, nml=dynamics_visc , iostat=iost) + read(nm_unit, nml=dynamics_general, iostat=iost) + close(nm_unit) + + ! define local vertice & elem array size + elem_size=myDim_elem2D+eDim_elem2D + node_size=myDim_nod2D+eDim_nod2D + + ! allocate data arrays in derived type + allocate(dynamics%uv( 2, nl-1, elem_size)) + allocate(dynamics%uv_rhs( 2, nl-1, elem_size)) + allocate(dynamics%uv_rhsAB( 2, nl-1, elem_size)) + allocate(dynamics%uvnode( 2, nl-1, node_size)) + allocate(dynamics%uvnode_rhs(2, nl-1, node_size)) + dynamics%uv = 0.0_WP + dynamics%uv_rhs = 0.0_WP + dynamics%uv_rhsAB = 0.0_WP + dynamics%uvnode = 0.0_WP + dynamics%uvnode_rhs = 0.0_WP + + allocate(dynamics%w( nl, node_size)) + allocate(dynamics%w_e( nl, node_size)) + allocate(dynamics%w_i( nl, node_size)) + allocate(dynamics%cfl_z( nl, node_size)) + dynamics%w = 0.0_WP + dynamics%w_e = 0.0_WP + dynamics%w_i = 0.0_WP + dynamics%cfl_z = 0.0_WP + + allocate(dynamics%eta_n( node_size)) + allocate(dynamics%d_eta( node_size)) + allocate(dynamics%ssh_rhs( node_size)) + allocate(dynamics%ssh_rhs_old(node_size)) + dynamics%eta_n = 0.0_WP + dynamics%d_eta = 0.0_WP + dynamics%ssh_rhs = 0.0_WP + dynamics%ssh_rhs_old= 0.0_WP + + ! set parameters in derived type + dynamics%visc_opt = visc_opt + dynamics%gamma0_visc = gamma0_visc + dynamics%gamma1_visc = gamma1_visc + dynamics%gamma2_visc = gamma2_visc + dynamics%div_c_visc = div_c_visc + dynamics%leith_c_visc = leith_c_visc + dynamics%use_ivertvisc = use_ivertvisc + dynamics%momadv_opt = momadv_opt + dynamics%use_freeslip = use_freeslip + dynamics%use_wsplit = use_wsplit + dynamics%wsplit_maxcfl = wsplit_maxcfl + +END SUBROUTINE dynamics_init +! +! +!_______________________________________________________________________________ SUBROUTINE arrays_init(num_tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -345,7 +461,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Velocities ! ================ !allocate(stress_diag(2, elem_size))!delete me -allocate(UV(2, nl-1, elem_size)) +!!PS allocate(UV(2, nl-1, elem_size)) allocate(UV_rhs(2,nl-1, elem_size)) allocate(UV_rhsAB(2,nl-1, elem_size)) allocate(Visc(nl-1, elem_size)) @@ -494,7 +610,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Initialize with zeros ! ================= - UV=0.0_WP +!!PS UV=0.0_WP UV_rhs=0.0_WP UV_rhsAB=0.0_WP ! @@ -735,20 +851,22 @@ end subroutine oce_initial_state ! !========================================================================== ! Here we do things (if applicable) before the ocean timestep will be made -SUBROUTINE before_oce_step(tracers, partit, mesh) +SUBROUTINE before_oce_step(dynamics, tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER + use MOD_DYN USE o_ARRAYS USE g_config USE Toy_Channel_Soufflet implicit none integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -759,7 +877,7 @@ SUBROUTINE before_oce_step(tracers, partit, mesh) SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase if (mod(mstep, soufflet_forc_update)==0) then - call compute_zonal_mean(tracers, partit, mesh) + call compute_zonal_mean(dynamics, tracers, partit, mesh) end if END SELECT end if From 268706f65528af54385ef5eb0a1c0b390775f618 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 1 Nov 2021 22:12:20 +0100 Subject: [PATCH 404/909] exchange UV with dynamics derived type dynamics%uv --- src/cavity_param.F90 | 12 +- src/fvom_main.F90 | 26 ++-- src/gen_modules_diag.F90 | 50 ++++---- src/ice_oce_coupling.F90 | 41 ++++--- src/io_blowup.F90 | 33 ++--- src/io_meandata.F90 | 26 ++-- src/io_restart.F90 | 23 ++-- src/oce_ale.F90 | 70 +++++++---- src/oce_ale_tracer.F90 | 34 ++++-- src/oce_ale_vel_rhs.F90 | 31 +++-- src/oce_dyn.F90 | 226 +++++++++++++++++++++++------------ src/oce_modules.F90 | 2 +- src/oce_vel_rhs_vinv.F90 | 38 ++++-- src/toy_channel_soufflet.F90 | 31 +++-- src/write_step_info.F90 | 66 ++++++---- 15 files changed, 455 insertions(+), 254 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index eb8591754..3c3236c39 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -382,25 +382,29 @@ end subroutine cavity_heat_water_fluxes_2eq !_______________________________________________________________________________ ! Compute the momentum fluxes under ice cavity ! Moved to this separated routine by Qiang, 20.1.2012 -subroutine cavity_momentum_fluxes(partit, mesh) +subroutine cavity_momentum_fluxes(dynamics, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use o_PARAM , only: density_0, C_d, WP - use o_ARRAYS, only: UV, Unode, stress_surf, stress_node_surf + use o_ARRAYS, only: Unode, stress_surf, stress_node_surf use i_ARRAYS, only: u_w, v_w implicit none !___________________________________________________________________________ + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh integer :: elem, elnodes(3), nzmin, node real(kind=WP) :: aux - + real(kind=WP), dimension(:,:,:), pointer :: UV + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) !___________________________________________________________________________ do elem=1,myDim_elem2D diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index a9cb7142f..e9f4cb699 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -8,9 +8,10 @@ program main USE MOD_MESH -USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP +USE MOD_TRACER +USE MOD_DYN USE o_ARRAYS USE o_PARAM USE i_PARAM @@ -56,10 +57,10 @@ program main real(kind=real32) :: runtime_alltimesteps -type(t_mesh), target, save :: mesh -type(t_tracer), target, save :: tracers -type(t_partit), target, save :: partit - +type(t_mesh) , target, save :: mesh +type(t_partit), target, save :: partit +type(t_tracer), target, save :: tracers +type(t_dyn) , target, save :: dynamics character(LEN=256) :: dump_dir, dump_filename logical :: L_EXISTS @@ -122,6 +123,7 @@ program main if (mype==0) t2=MPI_Wtime() call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") + call dynamics_init(dynamics, partit, mesh) ! allocate array of ocean dynamics (derived type "t_tracer") call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) call ocean_setup(tracers, partit, mesh) @@ -139,7 +141,7 @@ program main if (mype==0) write(*,*) 'EVP scheme option=', whichEVP endif if (mype==0) t5=MPI_Wtime() - call compute_diagnostics(0, tracers, partit, mesh) ! allocate arrays for diagnostic + call compute_diagnostics(0, dynamics, tracers, partit, mesh) ! allocate arrays for diagnostic #if defined (__oasis) call cpl_oasis3mct_define_unstr(partit, mesh) if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv @@ -164,7 +166,7 @@ program main ! if l_write is TRUE the restart will be forced ! if l_read the restart will be read ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, tracers, partit, mesh) ! istep, l_write, l_read + call restart(0, .false., r_restart, dynamics, tracers, partit, mesh) ! istep, l_write, l_read if (mype==0) t7=MPI_Wtime() ! store grid information into netcdf file if (.not. r_restart) call write_mesh_info(partit, mesh) @@ -272,7 +274,7 @@ program main if(use_ice) then !___compute fluxes from ocean to ice________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(tracers, partit, mesh) + call ocean2ice(dynamics, tracers, partit, mesh) !___compute update of atmospheric forcing____________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' @@ -291,7 +293,7 @@ program main if (ice_update) call ice_timestep(n, partit, mesh) !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(partit, mesh) ! momentum only + call oce_fluxes_mom(dynamics, partit, mesh) ! momentum only call oce_fluxes(tracers, partit, mesh) end if call before_oce_step(tracers, partit, mesh) ! prepare the things if required @@ -304,15 +306,15 @@ program main t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, tracers, partit, mesh) + call compute_diagnostics(1, dynamics, tracers, partit, mesh) t4 = MPI_Wtime() !___prepare output______________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, tracers, partit, mesh) + call output (n, dynamics, tracers, partit, mesh) t5 = MPI_Wtime() - call restart(n, .false., .false., tracers, partit, mesh) + call restart(n, .false., .false., dynamics, tracers, partit, mesh) t6 = MPI_Wtime() rtime_fullice = rtime_fullice + t2 - t1 diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 231345f2d..5a0e47f90 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -4,7 +4,8 @@ module diagnostics use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer + use MOD_TRACER + use MOD_DYN use g_clock use g_comm_auto use o_ARRAYS @@ -149,19 +150,21 @@ subroutine diag_curl_stress_surf(mode, partit, mesh) end subroutine diag_curl_stress_surf ! ============================================================== !3D curl(velocity) -subroutine diag_curl_vel3(mode, partit, mesh) +subroutine diag_curl_vel3(mode, dynamics, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh integer, intent(in) :: mode logical, save :: firstcall=.true. integer :: enodes(2), el(2), ed, n, nz, nl1, nl2, nl12, nu1, nu2, nu12 real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) !===================== if (firstcall) then !allocate the stuff at the first call @@ -229,21 +232,24 @@ subroutine diag_curl_vel3(mode, partit, mesh) end subroutine diag_curl_vel3 ! ============================================================== !energy budget -subroutine diag_energy(mode, partit, mesh) +subroutine diag_energy(mode, dynamics, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh integer, intent(in) :: mode logical, save :: firstcall=.true. integer :: n, nz, k, i, elem, nzmax, nzmin, elnodes(3) integer :: iup, ilo real(kind=WP) :: ux, vx, uy, vy, tvol, rval(2) real(kind=WP) :: geo_grad_x(3), geo_grad_y(3), geo_u(3), geo_v(3) - + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + !===================== if (firstcall) then !allocate the stuff at the first call allocate(wrhof(nl, myDim_nod2D), rhof(nl, myDim_nod2D)) @@ -401,12 +407,13 @@ subroutine diag_energy(mode, partit, mesh) END DO end subroutine diag_energy ! ============================================================== -subroutine diag_densMOC(mode, tracers, partit, mesh) +subroutine diag_densMOC(mode, dynamics, tracers, partit, mesh) implicit none integer, intent(in) :: mode - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics integer :: nz, snz, elem, nzmax, nzmin, elnodes(3), is, ie, pos integer :: e, edge, enodes(2), eelems(2) real(kind=WP) :: div, deltaX, deltaY, locz @@ -417,10 +424,12 @@ subroutine diag_densMOC(mode, tracers, partit, mesh) real(kind=WP), save, allocatable :: std_dens_w(:,:), std_dens_VOL1(:,:), std_dens_VOL2(:,:) logical, save :: firstcall_s=.true., firstcall_e=.true. real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) @@ -657,20 +666,21 @@ subroutine diag_densMOC(mode, tracers, partit, mesh) end subroutine diag_densMOC ! ============================================================== -subroutine compute_diagnostics(mode, tracers, partit, mesh) +subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) real(kind=WP) :: val !1. solver diagnostic if (ldiag_solver) call diag_solver(mode, partit, mesh) !2. compute curl(stress_surf) if (lcurt_stress_surf) call diag_curl_stress_surf(mode, partit, mesh) !3. compute curl(velocity) - if (ldiag_curl_vel3) call diag_curl_vel3(mode, partit, mesh) + if (ldiag_curl_vel3) call diag_curl_vel3(mode, dynamics, partit, mesh) !4. compute energy budget - if (ldiag_energy) call diag_energy(mode, partit, mesh) + if (ldiag_energy) call diag_energy(mode, dynamics, partit, mesh) !5. print integrated temperature if (ldiag_salt3d) then if (mod(mstep,logfile_outfreq)==0) then @@ -681,7 +691,7 @@ subroutine compute_diagnostics(mode, tracers, partit, mesh) end if end if !6. MOC in density coordinate - if (ldiag_dMOC) call diag_densMOC(mode, tracers, partit, mesh) + if (ldiag_dMOC) call diag_densMOC(mode, dynamics, tracers, partit, mesh) end subroutine compute_diagnostics diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 30dedc505..a0aa70ceb 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -1,13 +1,16 @@ module ocean2ice_interface interface - subroutine ocean2ice(tracers, partit, mesh) + subroutine ocean2ice(dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in) , target :: tracers type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module @@ -20,8 +23,8 @@ subroutine oce_fluxes(tracers, partit, mesh) USE MOD_PARSUP use mod_tracer type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_tracer), intent(in) , target :: tracers end subroutine end interface end module @@ -29,7 +32,7 @@ subroutine oce_fluxes(tracers, partit, mesh) ! ! !_______________________________________________________________________________ -subroutine oce_fluxes_mom(partit, mesh) +subroutine oce_fluxes_mom(dynamics, partit, mesh) ! transmits the relevant fields from the ice to the ocean model ! use o_PARAM @@ -37,6 +40,7 @@ subroutine oce_fluxes_mom(partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use i_ARRAYS use i_PARAM USE g_CONFIG @@ -50,8 +54,9 @@ subroutine oce_fluxes_mom(partit, mesh) integer :: n, elem, elnodes(3),n1 real(kind=WP) :: aux, aux1 + type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -105,39 +110,43 @@ subroutine oce_fluxes_mom(partit, mesh) END DO !___________________________________________________________________________ - if (use_cavity) call cavity_momentum_fluxes(partit, mesh) + if (use_cavity) call cavity_momentum_fluxes(dynamics, partit, mesh) end subroutine oce_fluxes_mom ! ! !_______________________________________________________________________________ -subroutine ocean2ice(tracers, partit, mesh) +subroutine ocean2ice(dynamics, tracers, partit, mesh) ! transmits the relevant fields from the ocean to the ice model use o_PARAM - use o_ARRAYS use i_ARRAYS use MOD_MESH + use MOD_DYN use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP USE g_CONFIG use g_comm_auto implicit none - + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in) , target :: tracers type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + + integer :: n, elem, k real(kind=WP) :: uw, vw, vol - real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP), dimension(:,:) , pointer :: temp, salt + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - temp=>tracers%data(1)%values(:,:) - salt=>tracers%data(2)%values(:,:) + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + UV => dynamics%uv(:,:,:) ! the arrays in the ice model are renamed diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index a4bbae11d..9ad2146c8 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -2,10 +2,11 @@ MODULE io_BLOWUP use g_config use g_clock use g_comm_auto - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_TRACER + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE MOD_DYN use o_arrays use i_arrays implicit none @@ -64,12 +65,13 @@ MODULE io_BLOWUP !_______________________________________________________________________________ ! ini_ocean_io initializes bid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! - subroutine ini_blowup_io(year, tracers, partit, mesh) + subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) implicit none integer, intent(in) :: year - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics integer :: ncid, j integer :: varid character(500) :: longname @@ -108,8 +110,8 @@ subroutine ini_blowup_io(year, tracers, partit, mesh) !___Define the netCDF variables for 3D fields_______________________________ call def_variable(bid, 'hnode' , (/nl-1, nod2D/) , 'ALE stuff', '?', hnode); call def_variable(bid, 'helem' , (/nl-1, elem2D/) , 'Element layer thickness', 'm/s', helem(:,:)); - call def_variable(bid, 'u' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV(1,:,:)); - call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV(2,:,:)); + call def_variable(bid, 'u' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); + call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV_rhs(1,:,:)); call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV_rhs(2,:,:)); call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); @@ -173,15 +175,16 @@ end subroutine ini_blowup_io ! ! !_______________________________________________________________________________ - subroutine blowup(istep, tracers, partit, mesh) + subroutine blowup(istep, dynamics, tracers, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics integer :: istep ctime=timeold+(dayold-1.)*86400 - call ini_blowup_io(yearnew, tracers, partit, mesh) + call ini_blowup_io(yearnew, dynamics, tracers, partit, mesh) if(partit%mype==0) write(*,*)'Do output (netCDF, blowup) ...' if(partit%mype==0) write(*,*)' --> call assoc_ids(bid)' call assoc_ids(bid, partit) ; call was_error(bid, partit) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 59c6a6fbf..969bbb69a 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -93,11 +93,12 @@ subroutine destructor(this) end subroutine -subroutine ini_mean_io(tracers, partit, mesh) +subroutine ini_mean_io(dynamics, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_cvmix_tke use g_cvmix_idemix use g_cvmix_kpp @@ -111,9 +112,10 @@ subroutine ini_mean_io(tracers, partit, mesh) integer,dimension(15) :: sel_forcvar=0 character(len=10) :: id_string - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics namelist /nml_listsize/ io_listsize namelist /nml_list / io_list @@ -315,9 +317,9 @@ subroutine ini_mean_io(tracers, partit, mesh) CASE ('Kv ') call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', dynamics.uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('v ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics.uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('w ') call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Av ') @@ -429,8 +431,8 @@ subroutine ini_mean_io(tracers, partit, mesh) call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', Unode(1,:,:), 1, 'm', i_real8, partit, mesh) call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', Unode(2,:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', uv(1,:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', uv(2,:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', Wvel(:,:), 1, 'm', i_real8, partit, mesh) call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) @@ -805,11 +807,12 @@ subroutine update_means ! !-------------------------------------------------------------------------------------------- ! -subroutine output(istep, tracers, partit, mesh) +subroutine output(istep, dynamics, tracers, partit, mesh) use g_clock use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + use MOD_DYN use mod_tracer use io_gather_module #if defined (__icepack) @@ -821,15 +824,16 @@ subroutine output(istep, tracers, partit, mesh) integer :: n, k logical :: do_output type(Meandata), pointer :: entry - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics character(:), allocatable :: filepath real(real64) :: rtime !timestamp of the record ctime=timeold+(dayold-1.)*86400 if (lfirst) then - call ini_mean_io(tracers, partit, mesh) + call ini_mean_io(dynamics, tracers, partit, mesh) #if defined (__icepack) call init_io_icepack(mesh) !icapack has its copy of p_partit => partit #endif diff --git a/src/io_restart.F90 b/src/io_restart.F90 index a9d2aac22..4d8a4d722 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -6,6 +6,7 @@ MODULE io_RESTART USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN use o_arrays use i_arrays use g_cvmix_tke @@ -79,7 +80,7 @@ MODULE io_RESTART !-------------------------------------------------------------------------------------------- ! ini_ocean_io initializes oid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ocean_io(year, tracers, partit, mesh) +subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) implicit none integer, intent(in) :: year @@ -89,9 +90,10 @@ subroutine ini_ocean_io(year, tracers, partit, mesh) character(500) :: filename character(500) :: trname, units character(4) :: cyear - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -120,8 +122,8 @@ subroutine ini_ocean_io(year, tracers, partit, mesh) call def_variable(oid, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); !___Define the netCDF variables for 3D fields_______________________________ - call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', UV(1,:,:)); - call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', UV(2,:,:)); + call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', dynamics.uv(1,:,:)); + call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', dynamics.uv(2,:,:)); call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); @@ -207,7 +209,7 @@ end subroutine ini_ice_io ! !-------------------------------------------------------------------------------------------- ! -subroutine restart(istep, l_write, l_read, tracers, partit, mesh) +subroutine restart(istep, l_write, l_read, dynamics, tracers, partit, mesh) #if defined(__icepack) use icedrv_main, only: init_restart_icepack @@ -222,18 +224,19 @@ subroutine restart(istep, l_write, l_read, tracers, partit, mesh) logical :: l_write, l_read logical :: is_restart integer :: mpierr - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then - call ini_ocean_io(yearnew, tracers, partit, mesh) + call ini_ocean_io(yearnew, dynamics, tracers, partit, mesh) if (use_ice) call ini_ice_io (yearnew, partit, mesh) #if defined(__icepack) if (use_ice) call init_restart_icepack(yearnew, mesh) !icapack has its copy of p_partit => partit #endif else - call ini_ocean_io(yearold, tracers, partit, mesh) + call ini_ocean_io(yearold, dynamics, tracers, partit, mesh) if (use_ice) call ini_ice_io (yearold, partit, mesh) #if defined(__icepack) if (use_ice) call init_restart_icepack(yearold, mesh) !icapack has its copy of p_partit => partit diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index fb4ee6336..7caab848a 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -64,20 +64,25 @@ subroutine solve_ssh_ale(partit, mesh) type(t_partit), intent(inout), target :: partit end subroutine - subroutine compute_hbar_ale(partit, mesh) + subroutine compute_hbar_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine - subroutine vert_vel_ale(partit, mesh) + subroutine vert_vel_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh end subroutine subroutine update_thickness_ale(partit, mesh) @@ -1599,13 +1604,14 @@ end subroutine update_stiff_mat_ale !"FESOM2: from finite elements to finite volumes" ! ! ssh_rhs = alpha * grad[ int_hbot^hbar(n+0.5)( u^n+deltau)dz + W(n+0.5) ] -subroutine compute_ssh_rhs_ale(partit, mesh) +subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use g_config,only: which_ALE,dt use MOD_MESH - use o_ARRAYS + use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, UV_rhs, water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_comm_auto implicit none @@ -1615,14 +1621,16 @@ subroutine compute_ssh_rhs_ale(partit, mesh) integer :: ed, el(2), enodes(2), nz, n, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 real(kind=WP) :: dumc1_1, dumc1_2, dumc2_1, dumc2_2 !!PS - type(t_mesh), intent(inout), target :: mesh + type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) ssh_rhs=0.0_WP !___________________________________________________________________________ @@ -1712,13 +1720,14 @@ end subroutine compute_ssh_rhs_ale ! hbar(n+0.5) = hbar(n-0.5) - tau*ssh_rhs_old ! ! in S. Danilov et al.: "FESOM2: from finite elements to finite volumes" -subroutine compute_hbar_ale(partit, mesh) +subroutine compute_hbar_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, use_cavity use MOD_MESH - use o_ARRAYS + use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_comm_auto implicit none @@ -1730,13 +1739,16 @@ subroutine compute_hbar_ale(partit, mesh) integer :: ed, el(2), enodes(2), nz,n, elnodes(3), elem, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 - type(t_mesh), intent(inout), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh) , intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) !___________________________________________________________________________ ! compute the rhs @@ -1824,13 +1836,15 @@ end subroutine compute_hbar_ale ! > for zlevel: dh_k/dt_k=1 != 0 ! > for zstar : dh_k/dt_k=1...kbot-1 != 0 ! -subroutine vert_vel_ale(partit, mesh) +subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS + use o_ARRAYS, only: Wvel, fer_Wvel, fer_UV, CFL_z, water_flux, ssh_rhs, & + ssh_rhs_old, eta_n, d_eta, Wvel_e, Wvel_i use o_PARAM USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_comm_auto use io_RESTART !!PS use i_arrays !!PS @@ -1845,13 +1859,16 @@ subroutine vert_vel_ale(partit, mesh) real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int !PS real(kind=WP), dimension(:), allocatable :: max_dhbar2distr,cumsum_maxdhbar,distrib_dhbar integer , dimension(:), allocatable :: idx - type(t_mesh), intent(inout), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) !___________________________________________________________________________ ! Contributions from levels in divergence @@ -2495,27 +2512,30 @@ end subroutine solve_ssh_ale ! ! !=============================================================================== -subroutine impl_vert_visc_ale(partit, mesh) +subroutine impl_vert_visc_ale(dynamics, partit, mesh) USE MOD_MESH USE o_PARAM USE o_ARRAYS USE MOD_PARTIT USE MOD_PARSUP +USE MOD_DYN USE g_CONFIG,only: dt IMPLICIT NONE -type(t_mesh), intent(inout), target :: mesh +type(t_mesh) , intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit +type(t_dyn) , intent(inout), target :: dynamics real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) integer :: nz, elem, nzmax, nzmin, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd - +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +UV=>dynamics%uv(:,:,:) DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -2674,10 +2694,11 @@ end subroutine impl_vert_visc_ale ! ! !=============================================================================== -subroutine oce_timestep_ale(n, tracers, partit, mesh) +subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use g_config use MOD_MESH use MOD_TRACER + use MOD_DYN use o_ARRAYS use o_PARAM USE MOD_PARTIT @@ -2700,9 +2721,10 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) use write_step_info_interface use check_blowup_interface IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: n, node @@ -2861,7 +2883,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) t30=MPI_Wtime() call solve_ssh_ale(partit, mesh) - if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(partit, mesh) + if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(dynamics, partit, mesh) t3=MPI_Wtime() ! estimate new horizontal velocity u^(n+1) @@ -2874,7 +2896,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) ! Update to hbar(n+3/2) and compute dhe to be used on the next step if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_hbar_ale'//achar(27)//'[0m' - call compute_hbar_ale(partit, mesh) + call compute_hbar_ale(dynamics, partit, mesh) !___________________________________________________________________________ ! - Current dynamic elevation alpha*hbar(n+1/2)+(1-alpha)*hbar(n-1/2) @@ -2909,7 +2931,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) ! The main step of ALE procedure --> this is were the magic happens --> here ! is decided how change in hbar is distributed over the vertical layers if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call vert_vel_ale'//achar(27)//'[0m' - call vert_vel_ale(partit, mesh) + call vert_vel_ale(dynamics, partit, mesh) t7=MPI_Wtime() !___________________________________________________________________________ @@ -2925,11 +2947,11 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) t9=MPI_Wtime() !___________________________________________________________________________ ! write out global fields for debugging - call write_step_info(n,logfile_outfreq, tracers, partit, mesh) + call write_step_info(n,logfile_outfreq, dynamics, tracers, partit, mesh) ! check model for blowup --> ! write_step_info and check_blowup require ! togeather around 2.5% of model runtime - call check_blowup(n, tracers, partit, mesh) + call check_blowup(n, dynamics, tracers, partit, mesh) t10=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index ed5145ec2..336817503 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -127,13 +127,14 @@ subroutine solve_tracers_ale(tracers, partit, mesh) ! !=============================================================================== ! Driving routine Here with ALE changes!!! -subroutine solve_tracers_ale(tracers, partit, mesh) +subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use g_config use o_PARAM, only: SPP, Fer_GM - use o_arrays + use o_arrays, only: Wvel, Wvel_e, fer_Wvel, fer_UV use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use mod_tracer use g_comm_auto use o_tracers @@ -142,15 +143,19 @@ subroutine solve_tracers_ale(tracers, partit, mesh) use diff_tracers_ale_interface implicit none + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit integer :: tr_num, node, nzmax, nzmin - + real(kind=WP), dimension(:,:,:), pointer :: UV + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + !___________________________________________________________________________ if (SPP) call cal_rejected_salt(partit, mesh) if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) @@ -218,12 +223,13 @@ end subroutine solve_tracers_ale ! ! !=============================================================================== -subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) +subroutine adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) use g_config, only: flag_debug use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer + use MOD_TRACER + use MOD_DYN use o_arrays use diagnostics, only: ldiag_DVD, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd @@ -234,9 +240,10 @@ subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) real(kind=WP), intent(in), target :: dt integer :: node, nz integer, intent(in) :: tr_num - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics ! del_ttf ... initialised and setted to zero in call init_tracers_AB(tr_num) ! --> del_ttf ... equivalent to R_T^n in Danilov etal FESOM2: "from finite element ! to finite volume". At the end R_T^n should contain all advection therms and @@ -256,7 +263,7 @@ subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) + call do_oce_adv_tra(dt, dynamics%uv, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -1146,29 +1153,34 @@ end subroutine diff_part_hor_redi ! ! !=============================================================================== -SUBROUTINE diff_part_bh(tr_num, tracers, partit, mesh) - use o_ARRAYS +SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) + use o_ARRAYS, only: use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_DYN use o_param use g_config use g_comm_auto IMPLICIT NONE integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP) :: u1, v1, len, vi, tt, ww integer :: nz, ed, el(2), en(2), k, elem, nl1, ul1 real(kind=WP), allocatable :: temporary_ttf(:,:) real(kind=WP), pointer :: ttf(:,:) + real(kind=WP), dimension(:,:,:), pointer :: UV + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ttf => tracers%data(tr_num)%values ed=myDim_nod2D+eDim_nod2D diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 98c730732..66a6cdbfb 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -1,11 +1,14 @@ module momentum_adv_scalar_interface interface - subroutine momentum_adv_scalar(partit, mesh) + subroutine momentum_adv_scalar(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module @@ -13,11 +16,12 @@ subroutine momentum_adv_scalar(partit, mesh) ! ! !_______________________________________________________________________________ -subroutine compute_vel_rhs(partit, mesh) +subroutine compute_vel_rhs(dynamics, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - use o_ARRAYS + USE MOD_DYN + use o_ARRAYS, only: UV_rhs, UV_rhsAB, eta_n, coriolis, ssh_gp, pgf_x, pgf_y use i_ARRAYS use i_therm_param use o_PARAM @@ -29,8 +33,9 @@ subroutine compute_vel_rhs(partit, mesh) use momentum_adv_scalar_interface implicit none - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh integer :: elem, elnodes(3), nz, nzmax, nzmin real(kind=WP) :: ff, mm real(kind=WP) :: Fx, Fy, pre(3) @@ -38,10 +43,13 @@ subroutine compute_vel_rhs(partit, mesh) real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice + real(kind=WP), dimension(:,:,:), pointer :: UV + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) t1=MPI_Wtime() use_pice=0 @@ -121,7 +129,7 @@ subroutine compute_vel_rhs(partit, mesh) if (mype==0) write(*,*) 'in moment not adapted mom_adv advection typ for ALE, check your namelist' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) elseif (mom_adv==2) then - call momentum_adv_scalar(partit, mesh) + call momentum_adv_scalar(dynamics, partit, mesh) end if t3=MPI_Wtime() @@ -158,27 +166,32 @@ END SUBROUTINE compute_vel_rhs ! Momentum advection on scalar control volumes with ALE adaption--> exchange zinv(nz) ! against hnode(nz,node) !_______________________________________________________________________________ -subroutine momentum_adv_scalar(partit, mesh) +subroutine momentum_adv_scalar(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP -USE o_ARRAYS +use MOD_DYN +USE o_ARRAYS, only: Wvel_e, UV_rhsAB USE o_PARAM use g_comm_auto IMPLICIT NONE -type(t_mesh), intent(in), target :: mesh +type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit +type(t_mesh) , intent(in) , target :: mesh + integer :: n, nz, el1, el2 integer :: nl1, nl2, ul1, ul2, nod(2), el, ed, k, nle, ule real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) real(kind=WP) :: Unode_rhs(2,mesh%nl-1,partit%myDim_nod2d+partit%eDim_nod2D) +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 36b9f6d04..2a2ea25d8 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -13,112 +13,142 @@ ! 5. Leith_c=? (need to be adjusted) module h_viscosity_leith_interface interface - subroutine h_viscosity_leith(partit, mesh) + subroutine h_viscosity_leith(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_harmon_interface interface - subroutine visc_filt_harmon(partit, mesh) + subroutine visc_filt_harmon(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_hbhmix_interface interface - subroutine visc_filt_hbhmix(partit, mesh) + subroutine visc_filt_hbhmix(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_biharm_interface interface - subroutine visc_filt_biharm(option, partit, mesh) + subroutine visc_filt_biharm(option, dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN integer :: option - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_bcksct_interface interface - subroutine visc_filt_bcksct(partit, mesh) + subroutine visc_filt_bcksct(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_bilapl_interface interface - subroutine visc_filt_bilapl(partit, mesh) + subroutine visc_filt_bilapl(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_bidiff_interface interface - subroutine visc_filt_bidiff(partit, mesh) + subroutine visc_filt_bidiff(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_dbcksc_interface interface - subroutine visc_filt_dbcksc(partit, mesh) + subroutine visc_filt_dbcksc(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module backscatter_coef_interface interface - subroutine backscatter_coef(partit, mesh) + subroutine backscatter_coef(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module uke_update_interface interface - subroutine uke_update(partit, mesh) + subroutine uke_update(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module @@ -128,11 +158,13 @@ subroutine uke_update(partit, mesh) ! Contains routines needed for computations of dynamics. ! includes: update_vel, compute_vel_nodes ! =================================================================== -SUBROUTINE update_vel(partit, mesh) +SUBROUTINE update_vel(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + USE MOD_DYN + + USE o_ARRAYS, only: d_eta, eta_n, UV_rhs USE o_PARAM USE g_CONFIG use g_comm_auto @@ -140,13 +172,16 @@ SUBROUTINE update_vel(partit, mesh) integer :: elem, elnodes(3), nz, m, nzmax, nzmin real(kind=WP) :: eta(3) real(kind=WP) :: Fx, Fy - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -165,23 +200,27 @@ SUBROUTINE update_vel(partit, mesh) call exchange_elem(UV, partit) end subroutine update_vel !========================================================================== -subroutine compute_vel_nodes(partit, mesh) +subroutine compute_vel_nodes(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN USE o_PARAM USE o_ARRAYS use g_comm_auto IMPLICIT NONE integer :: n, nz, k, elem, nln, uln, nle, ule real(kind=WP) :: tx, ty, tvol - type(t_mesh), intent(in), target :: mesh + + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) DO n=1, myDim_nod2D uln = ulevels_nod2D(n) @@ -208,11 +247,12 @@ subroutine compute_vel_nodes(partit, mesh) call exchange_nod(Unode, partit) end subroutine compute_vel_nodes !=========================================================================== -subroutine viscosity_filter(option, partit, mesh) +subroutine viscosity_filter(option, dynamics, partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP +use MOD_DYN use h_viscosity_leith_interface use visc_filt_harmon_interface use visc_filt_hbhmix_interface @@ -224,7 +264,8 @@ subroutine viscosity_filter(option, partit, mesh) use backscatter_coef_interface IMPLICIT NONE integer :: option -type(t_mesh), intent(in), target :: mesh +type(t_dyn) , intent(inout), target :: dynamics +type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit ! Driving routine @@ -241,34 +282,34 @@ subroutine viscosity_filter(option, partit, mesh) ! ==== ! Harmonic Leith parameterization ! ==== - call h_viscosity_leith(partit, mesh) - call visc_filt_harmon(partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) + call visc_filt_harmon(dynamics, partit, mesh) CASE (2) ! === ! Laplacian+Leith+biharmonic background ! === - call h_viscosity_leith(partit, mesh) - call visc_filt_hbhmix(partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) + call visc_filt_hbhmix(dynamics, partit, mesh) CASE (3) ! === ! Biharmonic Leith parameterization ! === - call h_viscosity_leith(partit, mesh) - call visc_filt_biharm(2, partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) + call visc_filt_biharm(2, dynamics, partit, mesh) CASE (4) ! === ! Biharmonic+upwind-type ! === - call visc_filt_biharm(1, partit, mesh) + call visc_filt_biharm(1, dynamics, partit, mesh) CASE (5) - call visc_filt_bcksct(partit, mesh) + call visc_filt_bcksct(dynamics, partit, mesh) CASE (6) - call visc_filt_bilapl(partit, mesh) + call visc_filt_bilapl(dynamics, partit, mesh) CASE (7) - call visc_filt_bidiff(partit, mesh) + call visc_filt_bidiff(dynamics, partit, mesh) CASE (8) - call backscatter_coef(partit, mesh) - call visc_filt_dbcksc(partit, mesh) + call backscatter_coef(dynamics, partit, mesh) + call visc_filt_dbcksc(dynamics, partit, mesh) CASE DEFAULT if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' call par_ex(partit%MPI_COMM_FESOM, partit%mype) @@ -276,24 +317,27 @@ subroutine viscosity_filter(option, partit, mesh) END SELECT end subroutine viscosity_filter ! =================================================================== -SUBROUTINE visc_filt_harmon(partit, mesh) +SUBROUTINE visc_filt_harmon(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP -USE o_ARRAYS +USE MOD_DYN +USE o_ARRAYS, only: Visc, UV_rhs USE o_PARAM USE g_CONFIG IMPLICIT NONE real(kind=WP) :: u1, v1, le(2), len, vi integer :: nz, ed, el(2) , nzmin,nzmax -type(t_mesh), intent(in), target :: mesh +type(t_dyn) , intent(inout), target :: dynamics +type(t_mesh) , intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +UV => dynamics%uv(:,:,:) ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area @@ -320,11 +364,12 @@ SUBROUTINE visc_filt_harmon(partit, mesh) END DO end subroutine visc_filt_harmon ! =================================================================== -SUBROUTINE visc_filt_biharm(option, partit, mesh) +SUBROUTINE visc_filt_biharm(option, dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + use MOD_DYN + USE o_ARRAYS, only: Visc, UV_rhs USE o_PARAM USE g_CONFIG use g_comm_auto @@ -335,13 +380,15 @@ SUBROUTINE visc_filt_biharm(option, partit, mesh) real(kind=WP) :: u1, v1, vi, len integer :: ed, el(2), nz, option, nzmin, nzmax real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ! Filter is applied twice. ed=myDim_elem2D+eDim_elem2D @@ -426,11 +473,12 @@ SUBROUTINE visc_filt_biharm(option, partit, mesh) end subroutine visc_filt_biharm ! =================================================================== -SUBROUTINE visc_filt_hbhmix(partit, mesh) +SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + use MOD_DYN + USE o_ARRAYS, only: Visc, UV_rhs USE o_PARAM USE g_CONFIG use g_comm_auto @@ -443,13 +491,15 @@ SUBROUTINE visc_filt_hbhmix(partit, mesh) real(kind=WP) :: u1, v1, vi, len, crosslen, le(2) integer :: ed, el(2), nz, nzmin, nzmax real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn), intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh), intent(in), target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ! Filter is applied twice. ed=myDim_elem2D+eDim_elem2D @@ -516,12 +566,13 @@ SUBROUTINE visc_filt_hbhmix(partit, mesh) end subroutine visc_filt_hbhmix ! =================================================================== -SUBROUTINE h_viscosity_leith(partit, mesh) +SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) ! ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + use MOD_DYN USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -531,9 +582,10 @@ SUBROUTINE h_viscosity_leith(partit, mesh) integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 real(kind=WP) :: leithx, leithy real(kind=WP), allocatable :: aux(:,:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -622,11 +674,12 @@ SUBROUTINE h_viscosity_leith(partit, mesh) deallocate(aux) END subroutine h_viscosity_leith ! ======================================================================= -SUBROUTINE visc_filt_bcksct(partit, mesh) +SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + use MOD_DYN + USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -635,13 +688,15 @@ SUBROUTINE visc_filt_bcksct(partit, mesh) real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax real(kind=8), allocatable :: U_b(:,:), V_b(:,:), U_c(:,:), V_c(:,:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. @@ -722,11 +777,12 @@ end subroutine visc_filt_bcksct ! \nu=|3u_c-u_n1-u_n2-u_n3|*sqrt(S_c)/100. There is an additional term ! in viscosity that is proportional to the velocity amplitude squared. ! The coefficient has to be selected experimentally. -SUBROUTINE visc_filt_bilapl(partit, mesh) +SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + use MOD_DYN + USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -734,13 +790,17 @@ SUBROUTINE visc_filt_bilapl(partit, mesh) real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax real(kind=8), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in), target :: mesh + + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -804,11 +864,12 @@ end subroutine visc_filt_bilapl ! On each edge, \nu=sqrt(|u_c1-u_c2|*sqrt(S_c1+S_c2)/100) ! The effect is \nu^2 ! Quadratic in velocity term can be introduced if needed. -SUBROUTINE visc_filt_bidiff(partit, mesh) +SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + use MOD_DYN + USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -816,13 +877,16 @@ SUBROUTINE visc_filt_bidiff(partit, mesh) real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax real(kind=8), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ! ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -880,11 +944,13 @@ end subroutine visc_filt_bidiff ! =================================================================== -SUBROUTINE visc_filt_dbcksc(partit, mesh) +SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP -USE o_ARRAYS +use MOD_DYN +USE o_ARRAYS, only: UV_rhs, v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & + uke, uke_dif USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -896,12 +962,15 @@ SUBROUTINE visc_filt_dbcksc(partit, mesh) integer :: nz, ed, el(2) real(kind=8), allocatable :: U_c(:,:), V_c(:,:), UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) real(kind=8), allocatable :: uuu(:) -type(t_mesh), intent(in), target :: mesh +type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit +type(t_mesh) , intent(in) , target :: mesh +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +UV => dynamics%uv(:,:,:) ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area @@ -1028,7 +1097,7 @@ SUBROUTINE visc_filt_dbcksc(partit, mesh) UV_back_tend=UV_back uke_dif=uke_d - call uke_update(partit, mesh) + call uke_update(dynamics, partit, mesh) deallocate(V_c,U_c) deallocate(UV_dis,UV_back) deallocate(uke_d) @@ -1073,11 +1142,13 @@ SUBROUTINE backscatter_coef(partit, mesh) end subroutine backscatter_coef !=========================================================================== -SUBROUTINE uke_update(partit, mesh) +SUBROUTINE uke_update(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP -USE o_ARRAYS +use MOD_DYN +USE o_ARRAYS, only: uke_rhs, uke_dif, uke_back, uke_dis, uke, UV_dis_tend, uv_back_tend, uke_rhs_old, & + bvfreq, coriolis_node USE o_PARAM USE g_CONFIG use g_comm_auto @@ -1089,18 +1160,22 @@ SUBROUTINE uke_update(partit, mesh) !Why is it necessary to implement the length of the array? It doesn't work without! !integer, intent(in) :: t_levels -type(t_mesh), intent(in), target :: mesh +type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit +type(t_mesh) , intent(in) , target :: mesh + real(kind=8) :: hall, h1_eta, hnz, vol integer :: elnodes(3), nz, ed, edi, node, j, elem, q real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) integer :: kk, nzmax, el real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" +UV => dynamics%uv(:,:,:) !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered !rossby_num=2 @@ -1241,3 +1316,4 @@ SUBROUTINE uke_update(partit, mesh) end subroutine uke_update ! =================================================================== + diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 3576ef01f..e34b07cf4 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -183,7 +183,7 @@ MODULE o_ARRAYS IMPLICIT NONE ! Arrays are described in subroutine array_setup real(kind=WP), allocatable, target :: Wvel(:,:), Wvel_e(:,:), Wvel_i(:,:) -real(kind=WP), allocatable :: UV(:,:,:) +!!PS real(kind=WP), allocatable :: UV(:,:,:) real(kind=WP), allocatable :: UV_rhs(:,:,:), UV_rhsAB(:,:,:) real(kind=WP), allocatable :: uke(:,:), v_back(:,:), uke_back(:,:), uke_dis(:,:), uke_dif(:,:) real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index b81ccf727..f7bf16720 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -1,11 +1,14 @@ module relative_vorticity_interface interface - subroutine relative_vorticity(partit, mesh) + subroutine relative_vorticity(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module @@ -14,21 +17,26 @@ subroutine relative_vorticity(partit, mesh) ! (curl u+f)\times u+grad(u^2/2)+w du/dz ! ! =================================================================== -subroutine relative_vorticity(partit, mesh) - USE o_ARRAYS +subroutine relative_vorticity(dynamics, partit, mesh) + USE o_ARRAYS, only: vorticity USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_comm_auto IMPLICIT NONE integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - type(t_mesh), intent(in), target :: mesh + + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) !!PS DO n=1,myDim_nod2D !!PS nl1 = nlevels_nod2D(n)-1 @@ -108,18 +116,23 @@ subroutine relative_vorticity(partit, mesh) ! Now it the relative vorticity known on neighbors too end subroutine relative_vorticity ! ========================================================================== -subroutine compute_vel_rhs_vinv(partit, mesh) !vector invariant +subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant USE o_PARAM - USE o_ARRAYS + USE o_ARRAYS, only: UV_rhs, UV_rhsAB, eta_n, coriolis_node, hpressure, vorticity + USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + use MOD_DYN USE g_CONFIG use g_comm_auto use relative_vorticity_interface IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh + + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + integer :: n, n1, nz, elem, elnodes(3), nl1, j, nzmin,nzmax real(kind=WP) :: a, b, c, da, db, dc, dg, ff(3), gg, eta(3), pre(3), Fx, Fy,w real(kind=WP) :: uvert(mesh%nl,2), umean, vmean, friction @@ -127,11 +140,12 @@ subroutine compute_vel_rhs_vinv(partit, mesh) !vector invariant real(kind=WP) :: KE_node(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv real(kind=WP) :: density0_inv = 1./density_0 - + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) w = 0.0_WP @@ -195,7 +209,7 @@ subroutine compute_vel_rhs_vinv(partit, mesh) !vector invariant END DO END DO - call relative_vorticity(partit, mesh) + call relative_vorticity(dynamics, partit, mesh) ! ==================== ! Sea level and pressure contribution -\nabla(g\eta +hpressure/rho_0+V^2/2) ! and the Coriolis force (elemental part) diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index bf355e527..cf34e60ee 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -3,6 +3,7 @@ MODULE Toy_Channel_Soufflet USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER + USE MOD_DYN USE o_ARRAYS USE o_PARAM USE g_config @@ -44,12 +45,15 @@ MODULE Toy_Channel_Soufflet ! !-------------------------------------------------------------------------------------------- ! -subroutine relax_zonal_vel(partit, mesh) +subroutine relax_zonal_vel(dynamics, partit, mesh) implicit none integer :: elem, nz, nn, nn1 real(kind=WP) :: a, yy, uzon - type(t_mesh), intent(in), target :: mesh + + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -171,17 +175,20 @@ subroutine compute_zonal_mean_ini(partit, mesh) ! no division by 0 is occurring end subroutine compute_zonal_mean_ini !========================================================================== -subroutine compute_zonal_mean(tracers, partit, mesh) +subroutine compute_zonal_mean(dynamics, tracers, partit, mesh) implicit none integer :: elem, nz, m, elnodes(3) real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ztem=0. zvel=0. @@ -235,22 +242,25 @@ subroutine compute_zonal_mean(tracers, partit, mesh) end subroutine compute_zonal_mean ! ==================================================================================== -subroutine initial_state_soufflet(tracers, partit, mesh) +subroutine initial_state_soufflet(dynamics, tracers, partit, mesh) ! Profiles Soufflet 2016 (OM) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics integer :: n, nz, elnodes(3) real(kind=8) :: dst, yn, Fy, Lx ! real(kind=8) :: Ljet,rhomax,Sb, drho_No, drho_So ! real(kind=8) :: z_No, z_So,dz_No,dz_So, drhosurf_No, drhosurf_So, zsurf real(kind=8) :: d_No(mesh%nl-1), d_So(mesh%nl-1), rho_No(mesh%nl-1), rho_So(mesh%nl-1) + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) dy=ysize/nybins/r_earth @@ -355,18 +365,21 @@ subroutine initial_state_soufflet(tracers, partit, mesh) write(*,*) mype, 'Vel', maxval(UV(1,:,:)), minval(UV(1,:,:)) END subroutine initial_state_soufflet ! =============================================================================== -subroutine energy_out_soufflet(partit, mesh) +subroutine energy_out_soufflet(dynamics, partit, mesh) implicit none real(kind=8) :: tke(2), aux(2), ww, wwaux integer :: elem, nz, m, elnodes(3), nybins real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +UV => dynamics%uv(:,:,:) nybins=100 diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index ac4d1d73f..c83e5ec8f 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,42 +1,48 @@ module write_step_info_interface interface - subroutine write_step_info(istep,outfreq,tracers,partit,mesh) + subroutine write_step_info(istep,outfreq,dynamics, tracers,partit,mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_DYN integer :: istep,outfreq - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics end subroutine end interface end module module check_blowup_interface interface - subroutine check_blowup(istep, tracers,partit,mesh) + subroutine check_blowup(istep, dynamics, tracers,partit,mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_DYN integer :: istep type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers + type(t_dyn) , intent(in) , target :: dynamics end subroutine end interface end module ! ! !=============================================================================== -subroutine write_step_info(istep, outfreq, tracers, partit, mesh) +subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use g_config, only: dt, use_ice use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_DYN use o_PARAM - use o_ARRAYS + use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, Wvel, Unode, CFL_z, & + pgf_x, pgf_y, Av, Kv use i_ARRAYS use g_comm_auto implicit none @@ -52,13 +58,17 @@ subroutine write_step_info(istep, outfreq, tracers, partit, mesh) max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av real(kind=WP) :: int_deta , int_dhbar real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + if (mod(istep,outfreq)==0) then !_______________________________________________________________________ @@ -242,14 +252,16 @@ end subroutine write_step_info ! ! !=============================================================================== -subroutine check_blowup(istep, tracers, partit, mesh) +subroutine check_blowup(istep, dynamics, tracers, partit, mesh) use g_config, only: logfile_outfreq, which_ALE use MOD_MESH - use MOD_TRACER - USE MOD_PARTIT - USE MOD_PARSUP + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN use o_PARAM - use o_ARRAYS + use o_ARRAYS, only: eta_n, d_eta, ssh_rhs, ssh_rhs_old, water_flux, stress_surf, & + Wvel, CFL_z, heat_flux, Kv, Av use i_ARRAYS use g_comm_auto use io_BLOWUP @@ -259,14 +271,18 @@ subroutine check_blowup(istep, tracers, partit, mesh) implicit none integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 - integer :: el, elidx - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + integer :: el, elidx + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then ! ! if (mype==0) then @@ -509,7 +525,7 @@ subroutine check_blowup(istep, tracers, partit, mesh) ! moment only over CPU mype==0 call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) if (found_blowup==1) then - call write_step_info(istep,1,tracers,partit,mesh) + call write_step_info(istep, 1, dynamics, tracers,partit,mesh) if (mype==0) then call sleep(1) write(*,*) @@ -529,7 +545,7 @@ subroutine check_blowup(istep, tracers, partit, mesh) write(*,*) ' _____.,-#%&$@%#&#~,._____' write(*,*) end if - call blowup(istep, tracers, partit, mesh) + call blowup(istep, dynamics, tracers, partit, mesh) if (mype==0) write(*,*) ' --> finished writing blow up file' call par_ex(partit%MPI_COMM_FESOM, partit%mype) endif From 52c7ccdb56de4ff4709ebee38f2fca07d4686980 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 1 Nov 2021 22:19:06 +0100 Subject: [PATCH 405/909] fix bug --- src/io_restart.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 4d8a4d722..e198507ab 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -122,8 +122,8 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) call def_variable(oid, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); !___Define the netCDF variables for 3D fields_______________________________ - call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', dynamics.uv(1,:,:)); - call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', dynamics.uv(2,:,:)); + call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); + call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); From 5f58dfedc9ab41deabce926cb33db4cf9f40a7f6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 1 Nov 2021 22:23:27 +0100 Subject: [PATCH 406/909] fix bug --- src/io_meandata.F90 | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 0d91521fa..4acbd17eb 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -317,9 +317,9 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('Kv ') call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', dynamics.uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', dynamics%uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('v ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics.uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics%uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('w ') call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Av ') @@ -639,7 +639,8 @@ subroutine create_new_file(entry, partit, mesh) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'axis', len_trim('T'), trim('T')), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'stored_direction', len_trim('increasing'), trim('increasing')), __LINE__) - call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(entry%ndim:1:-1), entry%recID/), entry%varID), __LINE__) + call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, & + (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) !CHUNKING stuff (netcdf libraries not always compited with it) !if (entry%ndim==2) then ! call assert_nf( nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)), __LINE__); @@ -749,7 +750,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r8, 1), __LINE__) + call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r8, 1), __LINE__) end if end if end do @@ -769,7 +770,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__) + call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r4, 1), __LINE__) end if end if end do From 251353e6fb8ce1cd7e77f17628aff87281587645 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 1 Nov 2021 18:15:24 +0100 Subject: [PATCH 407/909] skeleton for FESOM main in three parts (as required for IFS coupling) --- src/fvom.F90 | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100755 src/fvom.F90 diff --git a/src/fvom.F90 b/src/fvom.F90 new file mode 100755 index 000000000..82affbbf4 --- /dev/null +++ b/src/fvom.F90 @@ -0,0 +1,26 @@ +module fvom_module + implicit none + public fesom_init, fesom_runloop, fesom_finalize + private + +contains + + subroutine fesom_init(nsteps) + integer, intent(out) :: nsteps + ! EO parameters + + end subroutine + + + subroutine fesom_runloop(nsteps) + integer, intent(in) :: nsteps + ! EO parameters + + end subroutine + + + subroutine fesom_finalize() + + end subroutine + +end module From f5ce2a0b6274f98617e7c04d8126da3d6bf33f34 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 2 Nov 2021 10:54:03 +0100 Subject: [PATCH 408/909] add modue to save state (e.g. derived types) between calls to fesom_runloop --- src/fvom.F90 | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/fvom.F90 b/src/fvom.F90 index 82affbbf4..7d3faa7f0 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -1,3 +1,12 @@ +! synopsis: save any derived types we initialize +! so they can be reused after fesom_init +module fvom_types_storage_module + +end module + +! synopsis: main FESOM program split into 3 parts +! this way FESOM can e.g. be used as a library with an external time loop driver +! used with IFS-FESOM module fvom_module implicit none public fesom_init, fesom_runloop, fesom_finalize From 866db5a8c135401135344e12723a38ba08019e95 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 2 Nov 2021 11:09:03 +0100 Subject: [PATCH 409/909] move FESOM initialization to separate subroutine --- src/fvom.F90 | 217 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 217 insertions(+) diff --git a/src/fvom.F90 b/src/fvom.F90 index 7d3faa7f0..34310a41f 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -15,8 +15,225 @@ module fvom_module contains subroutine fesom_init(nsteps) + USE MOD_MESH + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE o_ARRAYS + USE o_PARAM + USE i_PARAM + use i_ARRAYS + use g_clock + use g_config + use g_comm_auto + use g_forcing_arrays + use io_RESTART + use io_MEANDATA + use io_mesh_info + use diagnostics + use mo_tidal + use tracer_init_interface + use ocean_setup_interface + use ice_setup_interface + use ocean2ice_interface + use oce_fluxes_interface + use update_atm_forcing_interface + use before_oce_step_interface + use oce_timestep_ale_interface + use read_mesh_interface + use fesom_version_info_module + use command_line_options_module + ! Define icepack module +#if defined (__icepack) + use icedrv_main, only: set_icepack, init_icepack, alloc_icepack +#endif + +#if defined (__oasis) + use cpl_driver +#endif + + implicit none + integer, intent(out) :: nsteps ! EO parameters + + integer :: n, offset, row, i, provided + integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM + real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc + real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing + real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing + real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart + real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) + real(kind=real32) :: runtime_alltimesteps + + + type(t_mesh), target, save :: mesh + type(t_tracer), target, save :: tracers + type(t_partit), target, save :: partit + + + character(LEN=256) :: dump_dir, dump_filename + logical :: L_EXISTS + type(t_mesh), target, save :: mesh_copy + type(t_tracer), target, save :: tracers_copy + + character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt + integer mpi_version_len + + + if(command_argument_count() > 0) then + call command_line_options%parse() + stop + end if + +#ifndef __oifs + !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) + !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS + call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, i) +#endif + + +#if defined (__oasis) + call cpl_oasis3mct_init(partit%MPI_COMM_FESOM) +#endif + t1 = MPI_Wtime() + + call par_init(partit) + + mype =>partit%mype + MPIerr =>partit%MPIerr + MPI_COMM_FESOM=>partit%MPI_COMM_FESOM + npes =>partit%npes + if(mype==0) then + write(*,*) + print *,"FESOM2 git SHA: "//fesom_git_sha() + call MPI_Get_library_version(mpi_version_txt, mpi_version_len, MPIERR) + print *,"MPI library version: "//trim(mpi_version_txt) + print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;32m'//' --> FESOM BUILDS UP MODEL CONFIGURATION '//achar(27)//'[0m' + end if + !===================== + ! Read configuration data, + ! load the mesh and fill in + ! auxiliary mesh arrays + !===================== + call setup_model(partit) ! Read Namelists, always before clock_init + call clock_init(partit) ! read the clock file + call get_run_steps(nsteps, partit) + call mesh_setup(partit, mesh) + + if (mype==0) write(*,*) 'FESOM mesh_setup... complete' + + !===================== + ! Allocate field variables + ! and additional arrays needed for + ! fancy advection etc. + !===================== + call check_mesh_consistency(partit, mesh) + if (mype==0) t2=MPI_Wtime() + + call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") + call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) + call ocean_setup(tracers, partit, mesh) + + if (mype==0) then + write(*,*) 'FESOM ocean_setup... complete' + t3=MPI_Wtime() + endif + call forcing_setup(partit, mesh) + + if (mype==0) t4=MPI_Wtime() + if (use_ice) then + call ice_setup(tracers, partit, mesh) + ice_steps_since_upd = ice_ave_steps-1 + ice_update=.true. + if (mype==0) write(*,*) 'EVP scheme option=', whichEVP + endif + if (mype==0) t5=MPI_Wtime() + call compute_diagnostics(0, tracers, partit, mesh) ! allocate arrays for diagnostic +#if defined (__oasis) + call cpl_oasis3mct_define_unstr(partit, mesh) + if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv +#endif + +#if defined (__icepack) + !===================== + ! Setup icepack + !===================== + if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' + call set_icepack(partit) + call alloc_icepack + call init_icepack(tracers%data(1), mesh) + if (mype==0) write(*,*) 'Icepack: setup complete' +#endif + call clock_newyear ! check if it is a new year + if (mype==0) t6=MPI_Wtime() + !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ + ! The interface to the restart module is made via call restart ! + ! The inputs are: istep, l_write, l_create + ! if istep is not zero it will be decided whether restart shall be written + ! if l_write is TRUE the restart will be forced + ! if l_read the restart will be read + ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) + call restart(0, .false., r_restart, tracers, partit, mesh) ! istep, l_write, l_read + if (mype==0) t7=MPI_Wtime() + ! store grid information into netcdf file + if (.not. r_restart) call write_mesh_info(partit, mesh) + + !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ + !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ + if (r_restart) then + call restart_thickness_ale(partit, mesh) + end if + if (mype==0) then + t8=MPI_Wtime() + + rtime_setup_mesh = real( t2 - t1 ,real32) + rtime_setup_ocean = real( t3 - t2 ,real32) + rtime_setup_forcing = real( t4 - t3 ,real32) + rtime_setup_ice = real( t5 - t4 ,real32) + rtime_setup_restart = real( t7 - t6 ,real32) + rtime_setup_other = real((t8 - t7) + (t6 - t5) ,real32) + + write(*,*) '==========================================' + write(*,*) 'MODEL SETUP took on mype=0 [seconds] ' + write(*,*) 'runtime setup total ',real(t8-t1,real32) + write(*,*) ' > runtime setup mesh ',rtime_setup_mesh + write(*,*) ' > runtime setup ocean ',rtime_setup_ocean + write(*,*) ' > runtime setup forcing ',rtime_setup_forcing + write(*,*) ' > runtime setup ice ',rtime_setup_ice + write(*,*) ' > runtime setup restart ',rtime_setup_restart + write(*,*) ' > runtime setup other ',rtime_setup_other + write(*,*) '============================================' + endif + + DUMP_DIR='DUMP/' + INQUIRE(file=trim(dump_dir), EXIST=L_EXISTS) + if (.not. L_EXISTS) call system('mkdir '//trim(dump_dir)) + + write (dump_filename, "(A7,I7.7)") "t_mesh.", mype + open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") + write (mype+300) mesh + close (mype+300) + + ! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") + ! read (mype+300) mesh_copy + ! close (mype+300) + + write (dump_filename, "(A9,I7.7)") "t_tracer.", mype + open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") + write (mype+300) tracers + close (mype+300) + + ! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") + ! read (mype+300) tracers_copy + ! close (mype+300) + + !call par_ex(partit%MPI_COMM_FESOM, partit%mype) + !stop + ! + ! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value + end subroutine From ce5024ca9b5ee482faf0f96e00e643c3c3d5a256 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 11:30:07 +0100 Subject: [PATCH 410/909] fix remaining compiler error for the derived dyamics%uv --- src/fvom_main.F90 | 8 +++-- src/oce_ale.F90 | 35 ++++++++++-------- src/oce_ale_tracer.F90 | 36 ++++++++++++------- src/oce_setup_step.F90 | 82 +++++++++++++++++++++++++----------------- 4 files changed, 97 insertions(+), 64 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index e9f4cb699..46ecfa650 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -125,7 +125,7 @@ program main call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") call dynamics_init(dynamics, partit, mesh) ! allocate array of ocean dynamics (derived type "t_tracer") call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) - call ocean_setup(tracers, partit, mesh) + call ocean_setup(dynamics, tracers, partit, mesh) if (mype==0) then write(*,*) 'FESOM ocean_setup... complete' @@ -294,14 +294,16 @@ program main !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' call oce_fluxes_mom(dynamics, partit, mesh) ! momentum only + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes...'//achar(27)//'[0m' call oce_fluxes(tracers, partit, mesh) end if - call before_oce_step(tracers, partit, mesh) ! prepare the things if required + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call before_oce_step...'//achar(27)//'[0m' + call before_oce_step(dynamics, tracers, partit, mesh) ! prepare the things if required t2 = MPI_Wtime() !___model ocean step____________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - call oce_timestep_ale(n, tracers, partit, mesh) + call oce_timestep_ale(n, dynamics, tracers, partit, mesh) t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 7caab848a..48388545c 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -32,12 +32,14 @@ subroutine init_surface_node_depth(partit, mesh) type(t_partit), intent(inout), target :: partit end subroutine - subroutine impl_vert_visc_ale(partit, mesh) + subroutine impl_vert_visc_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics end subroutine subroutine update_stiff_mat_ale(partit, mesh) @@ -48,12 +50,14 @@ subroutine update_stiff_mat_ale(partit, mesh) type(t_partit), intent(inout), target :: partit end subroutine - subroutine compute_ssh_rhs_ale(partit, mesh) + subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics end subroutine subroutine solve_ssh_ale(partit, mesh) @@ -97,15 +101,17 @@ subroutine update_thickness_ale(partit, mesh) module oce_timestep_ale_interface interface - subroutine oce_timestep_ale(n, tracers, partit, mesh) + subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer - integer, intent(in) :: n - type(t_mesh), intent(in), target :: mesh + use MOD_TRACER + use MOD_DYN + integer, intent(in) :: n + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics end subroutine end interface end module @@ -1625,7 +1631,6 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -2855,17 +2860,17 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !!PS if (any(abs(Wvel_e)>1.0e20)) write(*,*) n, mype,' --> found Inf Wvel_e before compute_vel_rhs' if(mom_adv/=3) then - call compute_vel_rhs(partit, mesh) + call compute_vel_rhs(dynamics, partit, mesh) else call compute_vel_rhs_vinv(partit, mesh) end if !___________________________________________________________________________ - call viscosity_filter(visc_option, partit, mesh) + call viscosity_filter(visc_option, dynamics, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call impl_vert_visc_ale'//achar(27)//'[0m' - if(i_vert_visc) call impl_vert_visc_ale(partit, mesh) + if(i_vert_visc) call impl_vert_visc_ale(dynamics, partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ @@ -2877,7 +2882,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_ssh_rhs_ale'//achar(27)//'[0m' ! ssh_rhs=-alpha*\nabla\int(U_n+U_rhs)dz-(1-alpha)*... ! see "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (18) rhs - call compute_ssh_rhs_ale(partit, mesh) + call compute_ssh_rhs_ale(dynamics, partit, mesh) ! Take updated ssh matrix and solve --> new ssh! t30=MPI_Wtime() @@ -2889,7 +2894,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! estimate new horizontal velocity u^(n+1) ! u^(n+1) = u* + [-g * tau * theta * grad(eta^(n+1)-eta^(n)) ] if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call update_vel'//achar(27)//'[0m' - call update_vel(partit, mesh) + call update_vel(partit, dynamics, mesh) ! --> eta_(n) --> eta_(n+1) = eta_(n) + deta = eta_(n) + (eta_(n+1) + eta_(n)) t4=MPI_Wtime() @@ -2937,7 +2942,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! solve tracer equation if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call solve_tracers_ale'//achar(27)//'[0m' - call solve_tracers_ale(tracers, partit, mesh) + call solve_tracers_ale(dynamics, tracers, partit, mesh) t8=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 336817503..c962697bb 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -14,15 +14,17 @@ subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) end module module adv_tracers_ale_interface interface - subroutine adv_tracers_ale(dt, tr_num, tracer, partit, mesh) + subroutine adv_tracers_ale(dt, tr_num, dynamics, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN real(kind=WP), intent(in), target :: dt integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface @@ -71,14 +73,16 @@ subroutine diff_ver_part_impl_ale(tr_num, tracer, partit, mesh) end module module diff_tracers_ale_interface interface - subroutine diff_tracers_ale(tr_num, tracer, partit, mesh) + subroutine diff_tracers_ale(tr_num, dynamics, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface @@ -98,28 +102,32 @@ function bc_surface(n, id, sval, partit) end module module diff_part_bh_interface interface - subroutine diff_part_bh(tr_num, tracer, partit, mesh) + subroutine diff_part_bh(tr_num, dynamics, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface end module module solve_tracers_ale_interface interface - subroutine solve_tracers_ale(tracers, partit, mesh) + subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer + use mod_tracer + use MOD_DYN type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics end subroutine end interface end module @@ -177,10 +185,10 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) call init_tracers_AB(tr_num, tracers, partit, mesh) ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' - call adv_tracers_ale(dt, tr_num, tracers, partit, mesh) + call adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' - call diff_tracers_ale(tr_num, tracers, partit, mesh) + call diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' ! if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then @@ -280,11 +288,12 @@ end subroutine adv_tracers_ale ! ! !=============================================================================== -subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) +subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN use o_arrays use o_tracers use diff_part_hor_redi_interface @@ -296,6 +305,7 @@ subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) integer :: n, nzmax, nzmin integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -357,7 +367,7 @@ subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) !init_tracers will set it to zero for the next timestep !init_tracers will set it to zero for the next timestep if (tracers%smooth_bh_tra) then - call diff_part_bh(tr_num, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) + call diff_part_bh(tr_num, dynamics, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) end if end subroutine diff_tracers_ale ! diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 6469eb99f..c54297598 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -42,28 +42,32 @@ subroutine dynamics_init(dynamics, partit, mesh) module ocean_setup_interface interface - subroutine ocean_setup(tracers, partit, mesh) + subroutine ocean_setup(dynamics, tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - type(t_mesh), intent(in), target :: mesh + use MOD_DYN + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics end subroutine end interface end module module before_oce_step_interface interface - subroutine before_oce_step(tracers, partit, mesh) + subroutine before_oce_step(dynamics, tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - type(t_mesh), intent(in), target :: mesh + use MOD_DYN + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics end subroutine end interface end module @@ -354,29 +358,28 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics - - ! define dynamics namelist parameter - namelist /dynamics_visc / visc_opt, gamma0_visc, gamma1_visc, gamma2_visc, & - div_c_visc, leith_c_visc, use_ivertvisc, easy_bs_return - namelist /dynamics_general / momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - ! open and read namelist for I/O - open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) - if (iost == 0) then - if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' - else - if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - stop - end if - read(nm_unit, nml=dynamics_visc , iostat=iost) - read(nm_unit, nml=dynamics_general, iostat=iost) - close(nm_unit) +!!PS ! define dynamics namelist parameter +!!PS namelist /dynamics_visc / visc_opt, gamma0_visc, gamma1_visc, gamma2_visc, & +!!PS div_c_visc, leith_c_visc, use_ivertvisc, easy_bs_return +!!PS namelist /dynamics_general / momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl +!!PS +!!PS ! open and read namelist for I/O +!!PS open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) +!!PS if (iost == 0) then +!!PS if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' +!!PS else +!!PS if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost +!!PS call par_ex(partit%MPI_COMM_FESOM, partit%mype) +!!PS stop +!!PS end if +!!PS read(nm_unit, nml=dynamics_visc , iostat=iost) +!!PS read(nm_unit, nml=dynamics_general, iostat=iost) +!!PS close(nm_unit) ! define local vertice & elem array size elem_size=myDim_elem2D+eDim_elem2D @@ -413,17 +416,30 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%ssh_rhs_old= 0.0_WP ! set parameters in derived type - dynamics%visc_opt = visc_opt - dynamics%gamma0_visc = gamma0_visc - dynamics%gamma1_visc = gamma1_visc - dynamics%gamma2_visc = gamma2_visc - dynamics%div_c_visc = div_c_visc - dynamics%leith_c_visc = leith_c_visc - dynamics%use_ivertvisc = use_ivertvisc - dynamics%momadv_opt = momadv_opt - dynamics%use_freeslip = use_freeslip - dynamics%use_wsplit = use_wsplit - dynamics%wsplit_maxcfl = wsplit_maxcfl +!!PS dynamics%visc_opt = visc_opt +!!PS dynamics%gamma0_visc = gamma0_visc +!!PS dynamics%gamma1_visc = gamma1_visc +!!PS dynamics%gamma2_visc = gamma2_visc +!!PS dynamics%div_c_visc = div_c_visc +!!PS dynamics%leith_c_visc = leith_c_visc +!!PS dynamics%use_ivertvisc = use_ivertvisc +!!PS dynamics%momadv_opt = momadv_opt +!!PS dynamics%use_freeslip = use_freeslip +!!PS dynamics%use_wsplit = use_wsplit +!!PS dynamics%wsplit_maxcfl = wsplit_maxcfl + + dynamics%visc_opt = visc_option + dynamics%gamma0_visc = gamma0 + dynamics%gamma1_visc = gamma1 + dynamics%gamma2_visc = gamma2 + dynamics%div_c_visc = Div_c + dynamics%leith_c_visc = Leith_c + dynamics%use_ivertvisc = i_vert_visc + dynamics%momadv_opt = mom_adv + dynamics%use_freeslip = free_slip + dynamics%use_wsplit = w_split + dynamics%wsplit_maxcfl = w_max_cfl + END SUBROUTINE dynamics_init ! From 13299903961ce5b1c937ba6db959c2925b81acea Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 11:57:57 +0100 Subject: [PATCH 411/909] set flag_debug=.true. --- src/gen_modules_config.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index f265ea898..b9d3d1807 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -107,7 +107,7 @@ module g_config real(kind=WP) :: cavity_partial_cell_thresh=0.0_WP ! same as partial_cell_tresh but for surface logical :: toy_ocean=.false. ! Ersatz forcing has to be supplied character(100) :: which_toy="soufflet" - logical :: flag_debug=.false. ! prints name of actual subroutine he is in + logical :: flag_debug=.true. ! prints name of actual subroutine he is in logical :: flag_warn_cflz=.true. ! switches off cflz warning namelist /run_config/ use_ice,use_floatice, use_sw_pene, use_cavity, & use_cavity_partial_cell, cavity_partial_cell_thresh, toy_ocean, which_toy, flag_debug, flag_warn_cflz From 385a9ebc36460fbfbb95a46fc6a205104bf346f6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 2 Nov 2021 12:29:24 +0100 Subject: [PATCH 412/909] store all variables from the FESOM main program in a separate type which we could reuse across multiple calls to fesom_runloop --- src/fvom.F90 | 264 ++++++++++++++++++++++++++------------------------- 1 file changed, 135 insertions(+), 129 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 34310a41f..6d5ebfab6 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -1,63 +1,48 @@ ! synopsis: save any derived types we initialize ! so they can be reused after fesom_init -module fvom_types_storage_module - -end module - -! synopsis: main FESOM program split into 3 parts -! this way FESOM can e.g. be used as a library with an external time loop driver -! used with IFS-FESOM -module fvom_module - implicit none - public fesom_init, fesom_runloop, fesom_finalize - private - -contains - - subroutine fesom_init(nsteps) - USE MOD_MESH - USE MOD_TRACER - USE MOD_PARTIT - USE MOD_PARSUP - USE o_ARRAYS - USE o_PARAM - USE i_PARAM - use i_ARRAYS - use g_clock - use g_config - use g_comm_auto - use g_forcing_arrays - use io_RESTART - use io_MEANDATA - use io_mesh_info - use diagnostics - use mo_tidal - use tracer_init_interface - use ocean_setup_interface - use ice_setup_interface - use ocean2ice_interface - use oce_fluxes_interface - use update_atm_forcing_interface - use before_oce_step_interface - use oce_timestep_ale_interface - use read_mesh_interface - use fesom_version_info_module - use command_line_options_module - ! Define icepack module +module fesom_main_storage_module + USE MOD_MESH + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE o_ARRAYS + USE o_PARAM + USE i_PARAM + use i_ARRAYS + use g_clock + use g_config + use g_comm_auto + use g_forcing_arrays + use io_RESTART + use io_MEANDATA + use io_mesh_info + use diagnostics + use mo_tidal + use tracer_init_interface + use ocean_setup_interface + use ice_setup_interface + use ocean2ice_interface + use oce_fluxes_interface + use update_atm_forcing_interface + use before_oce_step_interface + use oce_timestep_ale_interface + use read_mesh_interface + use fesom_version_info_module + use command_line_options_module + ! Define icepack module #if defined (__icepack) - use icedrv_main, only: set_icepack, init_icepack, alloc_icepack + use icedrv_main, only: set_icepack, init_icepack, alloc_icepack #endif #if defined (__oasis) - use cpl_driver + use cpl_driver #endif - implicit none - - integer, intent(out) :: nsteps - ! EO parameters + implicit none + + type :: fesom_main_storage_type - integer :: n, offset, row, i, provided + integer :: n, nsteps, offset, row, i, provided integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing @@ -67,20 +52,40 @@ subroutine fesom_init(nsteps) real(kind=real32) :: runtime_alltimesteps - type(t_mesh), target, save :: mesh - type(t_tracer), target, save :: tracers - type(t_partit), target, save :: partit + type(t_mesh) mesh + type(t_tracer) tracers + type(t_partit) partit character(LEN=256) :: dump_dir, dump_filename logical :: L_EXISTS - type(t_mesh), target, save :: mesh_copy - type(t_tracer), target, save :: tracers_copy + type(t_mesh) mesh_copy + type(t_tracer) tracers_copy character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt integer mpi_version_len + + end type + type(fesom_main_storage_type), save, target :: f + +end module +! synopsis: main FESOM program split into 3 parts +! this way FESOM can e.g. be used as a library with an external time loop driver +! used with IFS-FESOM +module fvom_module + implicit none + public fesom_init, fesom_runloop, fesom_finalize + private + +contains + + subroutine fesom_init(fesom_used_nsteps) + use fesom_main_storage_module + integer, intent(out) :: fesom_used_nsteps + ! EO parameters + if(command_argument_count() > 0) then call command_line_options%parse() stop @@ -89,26 +94,26 @@ subroutine fesom_init(nsteps) #ifndef __oifs !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS - call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, i) + call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, f%provided, f%i) #endif #if defined (__oasis) call cpl_oasis3mct_init(partit%MPI_COMM_FESOM) #endif - t1 = MPI_Wtime() + f%t1 = MPI_Wtime() - call par_init(partit) + call par_init(f%partit) - mype =>partit%mype - MPIerr =>partit%MPIerr - MPI_COMM_FESOM=>partit%MPI_COMM_FESOM - npes =>partit%npes - if(mype==0) then + f%mype =>f%partit%mype + f%MPIerr =>f%partit%MPIerr + f%MPI_COMM_FESOM=>f%partit%MPI_COMM_FESOM + f%npes =>f%partit%npes + if(f%mype==0) then write(*,*) print *,"FESOM2 git SHA: "//fesom_git_sha() - call MPI_Get_library_version(mpi_version_txt, mpi_version_len, MPIERR) - print *,"MPI library version: "//trim(mpi_version_txt) + call MPI_Get_library_version(f%mpi_version_txt, f%mpi_version_len, f%MPIERR) + print *,"MPI library version: "//trim(f%mpi_version_txt) print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[7;32m'//' --> FESOM BUILDS UP MODEL CONFIGURATION '//achar(27)//'[0m' end if @@ -117,57 +122,57 @@ subroutine fesom_init(nsteps) ! load the mesh and fill in ! auxiliary mesh arrays !===================== - call setup_model(partit) ! Read Namelists, always before clock_init - call clock_init(partit) ! read the clock file - call get_run_steps(nsteps, partit) - call mesh_setup(partit, mesh) + call setup_model(f%partit) ! Read Namelists, always before clock_init + call clock_init(f%partit) ! read the clock file + call get_run_steps(f%nsteps, f%partit) + call mesh_setup(f%partit, f%mesh) - if (mype==0) write(*,*) 'FESOM mesh_setup... complete' + if (f%mype==0) write(*,*) 'FESOM mesh_setup... complete' !===================== ! Allocate field variables ! and additional arrays needed for ! fancy advection etc. !===================== - call check_mesh_consistency(partit, mesh) - if (mype==0) t2=MPI_Wtime() + call check_mesh_consistency(f%partit, f%mesh) + if (f%mype==0) f%t2=MPI_Wtime() - call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") - call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) - call ocean_setup(tracers, partit, mesh) + call tracer_init(f%tracers, f%partit, f%mesh) ! allocate array of ocean tracers (derived type "t_tracer") + call arrays_init(f%tracers%num_tracers, f%partit, f%mesh) ! allocate other arrays (to be refactured same as tracers in the future) + call ocean_setup(f%tracers, f%partit, f%mesh) - if (mype==0) then + if (f%mype==0) then write(*,*) 'FESOM ocean_setup... complete' - t3=MPI_Wtime() + f%t3=MPI_Wtime() endif - call forcing_setup(partit, mesh) + call forcing_setup(f%partit, f%mesh) - if (mype==0) t4=MPI_Wtime() + if (f%mype==0) f%t4=MPI_Wtime() if (use_ice) then - call ice_setup(tracers, partit, mesh) + call ice_setup(f%tracers, f%partit, f%mesh) ice_steps_since_upd = ice_ave_steps-1 ice_update=.true. - if (mype==0) write(*,*) 'EVP scheme option=', whichEVP + if (f%mype==0) write(*,*) 'EVP scheme option=', whichEVP endif - if (mype==0) t5=MPI_Wtime() - call compute_diagnostics(0, tracers, partit, mesh) ! allocate arrays for diagnostic + if (f%mype==0) f%t5=MPI_Wtime() + call compute_diagnostics(0, f%tracers, f%partit, f%mesh) ! allocate arrays for diagnostic #if defined (__oasis) - call cpl_oasis3mct_define_unstr(partit, mesh) - if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv + call cpl_oasis3mct_define_unstr(f%partit, f%mesh) + if(f%mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv #endif #if defined (__icepack) !===================== ! Setup icepack !===================== - if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' - call set_icepack(partit) + if (f%mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' + call set_icepack(f%partit) call alloc_icepack - call init_icepack(tracers%data(1), mesh) - if (mype==0) write(*,*) 'Icepack: setup complete' + call init_icepack(f%tracers%data(1), f%mesh) + if (f%mype==0) write(*,*) 'Icepack: setup complete' #endif call clock_newyear ! check if it is a new year - if (mype==0) t6=MPI_Wtime() + if (f%mype==0) f%t6=MPI_Wtime() !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ ! The interface to the restart module is made via call restart ! ! The inputs are: istep, l_write, l_create @@ -175,66 +180,67 @@ subroutine fesom_init(nsteps) ! if l_write is TRUE the restart will be forced ! if l_read the restart will be read ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, tracers, partit, mesh) ! istep, l_write, l_read - if (mype==0) t7=MPI_Wtime() + call restart(0, .false., r_restart, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read + if (f%mype==0) f%t7=MPI_Wtime() ! store grid information into netcdf file - if (.not. r_restart) call write_mesh_info(partit, mesh) + if (.not. r_restart) call write_mesh_info(f%partit, f%mesh) !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ if (r_restart) then - call restart_thickness_ale(partit, mesh) + call restart_thickness_ale(f%partit, f%mesh) end if - if (mype==0) then - t8=MPI_Wtime() + if (f%mype==0) then + f%t8=MPI_Wtime() - rtime_setup_mesh = real( t2 - t1 ,real32) - rtime_setup_ocean = real( t3 - t2 ,real32) - rtime_setup_forcing = real( t4 - t3 ,real32) - rtime_setup_ice = real( t5 - t4 ,real32) - rtime_setup_restart = real( t7 - t6 ,real32) - rtime_setup_other = real((t8 - t7) + (t6 - t5) ,real32) + f%rtime_setup_mesh = real( f%t2 - f%t1 ,real32) + f%rtime_setup_ocean = real( f%t3 - f%t2 ,real32) + f%rtime_setup_forcing = real( f%t4 - f%t3 ,real32) + f%rtime_setup_ice = real( f%t5 - f%t4 ,real32) + f%rtime_setup_restart = real( f%t7 - f%t6 ,real32) + f%rtime_setup_other = real((f%t8 - f%t7) + (f%t6 - f%t5) ,real32) write(*,*) '==========================================' write(*,*) 'MODEL SETUP took on mype=0 [seconds] ' - write(*,*) 'runtime setup total ',real(t8-t1,real32) - write(*,*) ' > runtime setup mesh ',rtime_setup_mesh - write(*,*) ' > runtime setup ocean ',rtime_setup_ocean - write(*,*) ' > runtime setup forcing ',rtime_setup_forcing - write(*,*) ' > runtime setup ice ',rtime_setup_ice - write(*,*) ' > runtime setup restart ',rtime_setup_restart - write(*,*) ' > runtime setup other ',rtime_setup_other + write(*,*) 'runtime setup total ',real(f%t8-f%t1,real32) + write(*,*) ' > runtime setup mesh ',f%rtime_setup_mesh + write(*,*) ' > runtime setup ocean ',f%rtime_setup_ocean + write(*,*) ' > runtime setup forcing ',f%rtime_setup_forcing + write(*,*) ' > runtime setup ice ',f%rtime_setup_ice + write(*,*) ' > runtime setup restart ',f%rtime_setup_restart + write(*,*) ' > runtime setup other ',f%rtime_setup_other write(*,*) '============================================' endif - DUMP_DIR='DUMP/' - INQUIRE(file=trim(dump_dir), EXIST=L_EXISTS) - if (.not. L_EXISTS) call system('mkdir '//trim(dump_dir)) + f%dump_dir='DUMP/' + INQUIRE(file=trim(f%dump_dir), EXIST=f%L_EXISTS) + if (.not. f%L_EXISTS) call system('mkdir '//trim(f%dump_dir)) - write (dump_filename, "(A7,I7.7)") "t_mesh.", mype - open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") - write (mype+300) mesh - close (mype+300) + write (f%dump_filename, "(A7,I7.7)") "t_mesh.", f%mype + open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + write (f%mype+300) f%mesh + close (f%mype+300) - ! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") - ! read (mype+300) mesh_copy - ! close (mype+300) + ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") + ! read (f%mype+300) f%mesh_copy + ! close (f%mype+300) - write (dump_filename, "(A9,I7.7)") "t_tracer.", mype - open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") - write (mype+300) tracers - close (mype+300) + write (f%dump_filename, "(A9,I7.7)") "t_tracer.", f%mype + open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + write (f%mype+300) f%tracers + close (f%mype+300) - ! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") - ! read (mype+300) tracers_copy - ! close (mype+300) + ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") + ! read (f%mype+300) f%tracers_copy + ! close (f%mype+300) - !call par_ex(partit%MPI_COMM_FESOM, partit%mype) + !call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) !stop ! - ! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value + ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value + fesom_used_nsteps = f%nsteps end subroutine From 7120218065530a6b19ef42c7580319a2cf238dc5 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 2 Nov 2021 12:41:27 +0100 Subject: [PATCH 413/909] - rename total fesom timsteps paramerter - remove timing variables which are lokal to the runloop --- src/fvom.F90 | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 6d5ebfab6..6230ea7b5 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -45,7 +45,6 @@ module fesom_main_storage_module integer :: n, nsteps, offset, row, i, provided integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc - real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) @@ -81,9 +80,9 @@ module fvom_module contains - subroutine fesom_init(fesom_used_nsteps) + subroutine fesom_init(fesom_total_nsteps) use fesom_main_storage_module - integer, intent(out) :: fesom_used_nsteps + integer, intent(out) :: fesom_total_nsteps ! EO parameters if(command_argument_count() > 0) then @@ -240,7 +239,7 @@ subroutine fesom_init(fesom_used_nsteps) ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value - fesom_used_nsteps = f%nsteps + fesom_total_nsteps = f%nsteps end subroutine From e1904a95a6d079b43d26d33b796f8a96ab5354c9 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 17:14:44 +0100 Subject: [PATCH 414/909] fix bug in fvom_main.F90 and oce_dyn.F90 --- src/fvom_main.F90 | 3 ++- src/oce_dyn.F90 | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 46ecfa650..020860ff4 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -25,6 +25,7 @@ program main use io_mesh_info use diagnostics use mo_tidal +use dynamics_init_interface use tracer_init_interface use ocean_setup_interface use ice_setup_interface @@ -268,7 +269,7 @@ program main #endif call clock !___compute horizontal velocity on nodes (originaly on elements)________ - call compute_vel_nodes(partit, mesh) + call compute_vel_nodes(partit, dynamics, mesh) !___model sea-ice step__________________________________________________ t1 = MPI_Wtime() if(use_ice) then diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 2a2ea25d8..403239a12 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -206,7 +206,7 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) USE MOD_PARSUP USE MOD_DYN USE o_PARAM - USE o_ARRAYS + USE o_ARRAYS, only: Unode use g_comm_auto IMPLICIT NONE integer :: n, nz, k, elem, nln, uln, nle, ule From 8a0d9ca11b030971e548d81ab5788fa9a13c82d4 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 20:33:52 +0100 Subject: [PATCH 415/909] fix bug in fvom_main.F90 --- src/fvom_main.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 020860ff4..9c618fc3a 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -269,7 +269,7 @@ program main #endif call clock !___compute horizontal velocity on nodes (originaly on elements)________ - call compute_vel_nodes(partit, dynamics, mesh) + call compute_vel_nodes(dynamics, partit, mesh) !___model sea-ice step__________________________________________________ t1 = MPI_Wtime() if(use_ice) then From 6517a25cc209309cdef9810ea30f2f7ac9f3d12e Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 20:46:36 +0100 Subject: [PATCH 416/909] fix bug oce_ale.F90 --- src/fvom_main.F90 | 1 + src/oce_ale.F90 | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 9c618fc3a..a5c2c16de 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -270,6 +270,7 @@ program main call clock !___compute horizontal velocity on nodes (originaly on elements)________ call compute_vel_nodes(dynamics, partit, mesh) + !___model sea-ice step__________________________________________________ t1 = MPI_Wtime() if(use_ice) then diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 48388545c..3b1486925 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2894,7 +2894,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! estimate new horizontal velocity u^(n+1) ! u^(n+1) = u* + [-g * tau * theta * grad(eta^(n+1)-eta^(n)) ] if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call update_vel'//achar(27)//'[0m' - call update_vel(partit, dynamics, mesh) + call update_vel(dynamics, partit, mesh) ! --> eta_(n) --> eta_(n+1) = eta_(n) + deta = eta_(n) + (eta_(n+1) + eta_(n)) t4=MPI_Wtime() From 80f443d7735a9ce577fb1af40e4d17ceb0b38673 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 21:03:17 +0100 Subject: [PATCH 417/909] fix bug in src/oce_ale_vel_rhs.F90 src/oce_ale.F90 --- src/oce_ale.F90 | 2 ++ src/oce_ale_vel_rhs.F90 | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 3b1486925..a356ad406 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2723,6 +2723,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use pressure_force_4_linfs_interface use pressure_force_4_zxxxx_interface use solve_tracers_ale_interface + use compute_vel_rhs_interface use write_step_info_interface use check_blowup_interface IMPLICIT NONE @@ -2866,6 +2867,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) end if !___________________________________________________________________________ + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call viscosity_filter'//achar(27)//'[0m' call viscosity_filter(visc_option, dynamics, partit, mesh) !___________________________________________________________________________ diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 66a6cdbfb..b9e9385fe 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -13,6 +13,20 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) end interface end module +module compute_vel_rhs_interface + interface + subroutine compute_vel_rhs(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + + end subroutine + end interface +end module ! ! !_______________________________________________________________________________ From 4f751f9ca187f2b69391d00fe7752f852e93558e Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 2 Nov 2021 22:15:52 +0100 Subject: [PATCH 418/909] fix bug and compiler issue --- src/fvom_main.F90 | 16 ++++++------- src/oce_ale.F90 | 52 ++++++++++++++++++++--------------------- src/oce_ale_vel_rhs.F90 | 1 + src/oce_dyn.F90 | 13 ++++------- src/oce_setup_step.F90 | 23 ++++++++---------- src/write_step_info.F90 | 4 ++-- 6 files changed, 51 insertions(+), 58 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index a5c2c16de..c23cb5f76 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -8,9 +8,9 @@ program main USE MOD_MESH +USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP -USE MOD_TRACER USE MOD_DYN USE o_ARRAYS USE o_PARAM @@ -58,10 +58,11 @@ program main real(kind=real32) :: runtime_alltimesteps -type(t_mesh) , target, save :: mesh -type(t_partit), target, save :: partit -type(t_tracer), target, save :: tracers -type(t_dyn) , target, save :: dynamics +type(t_mesh) , target, save :: mesh +type(t_tracer), target, save :: tracers +type(t_partit), target, save :: partit +type(t_dyn) , target, save :: dynamics + character(LEN=256) :: dump_dir, dump_filename logical :: L_EXISTS @@ -123,8 +124,8 @@ program main call check_mesh_consistency(partit, mesh) if (mype==0) t2=MPI_Wtime() + call dynamics_init(dynamics, partit, mesh) call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") - call dynamics_init(dynamics, partit, mesh) ! allocate array of ocean dynamics (derived type "t_tracer") call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) call ocean_setup(dynamics, tracers, partit, mesh) @@ -269,6 +270,7 @@ program main #endif call clock !___compute horizontal velocity on nodes (originaly on elements)________ + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_vel_nodes'//achar(27)//'[0m' call compute_vel_nodes(dynamics, partit, mesh) !___model sea-ice step__________________________________________________ @@ -296,10 +298,8 @@ program main !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' call oce_fluxes_mom(dynamics, partit, mesh) ! momentum only - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes...'//achar(27)//'[0m' call oce_fluxes(tracers, partit, mesh) end if - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call before_oce_step...'//achar(27)//'[0m' call before_oce_step(dynamics, tracers, partit, mesh) ! prepare the things if required t2 = MPI_Wtime() !___model ocean step____________________________________________________ diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index a356ad406..171fbdd49 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -37,9 +37,9 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine update_stiff_mat_ale(partit, mesh) @@ -54,10 +54,10 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN - type(t_mesh) , intent(in) , target :: mesh + use MOD_DYN + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine solve_ssh_ale(partit, mesh) @@ -73,10 +73,9 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - + type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine vert_vel_ale(dynamics, partit, mesh) @@ -84,9 +83,9 @@ subroutine vert_vel_ale(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - type(t_dyn) , intent(in) , target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh + type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine update_thickness_ale(partit, mesh) @@ -105,13 +104,13 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use MOD_TRACER + use mod_tracer use MOD_DYN - integer, intent(in) :: n - type(t_mesh) , intent(in) , target :: mesh + integer, intent(in) :: n + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics end subroutine end interface end module @@ -1627,9 +1626,9 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) integer :: ed, el(2), enodes(2), nz, n, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 real(kind=WP) :: dumc1_1, dumc1_2, dumc2_1, dumc2_2 !!PS - type(t_dyn) , intent(in) , target :: dynamics + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh + type(t_dyn), intent(inout), target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -1744,9 +1743,9 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) integer :: ed, el(2), enodes(2), nz,n, elnodes(3), elem, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 - type(t_dyn) , intent(inout), target :: dynamics - type(t_mesh) , intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" @@ -1865,10 +1864,9 @@ subroutine vert_vel_ale(dynamics, partit, mesh) real(kind=WP), dimension(:), allocatable :: max_dhbar2distr,cumsum_maxdhbar,distrib_dhbar integer , dimension(:), allocatable :: idx type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(inout), target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -2520,14 +2518,14 @@ end subroutine solve_ssh_ale subroutine impl_vert_visc_ale(dynamics, partit, mesh) USE MOD_MESH USE o_PARAM -USE o_ARRAYS +USE o_ARRAYS, only: UV_rhs, Av, Wvel_i, stress_surf USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN USE g_CONFIG,only: dt IMPLICIT NONE -type(t_mesh) , intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics @@ -2699,7 +2697,7 @@ end subroutine impl_vert_visc_ale ! ! !=============================================================================== -subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) +subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use g_config use MOD_MESH use MOD_TRACER @@ -2722,15 +2720,15 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use pressure_bv_interface use pressure_force_4_linfs_interface use pressure_force_4_zxxxx_interface - use solve_tracers_ale_interface use compute_vel_rhs_interface + use solve_tracers_ale_interface use write_step_info_interface use check_blowup_interface IMPLICIT NONE - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: n, node @@ -2872,7 +2870,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call impl_vert_visc_ale'//achar(27)//'[0m' - if(i_vert_visc) call impl_vert_visc_ale(dynamics, partit, mesh) + if(i_vert_visc) call impl_vert_visc_ale(dynamics,partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index b9e9385fe..b2b5d0c38 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -27,6 +27,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) end subroutine end interface end module + ! ! !_______________________________________________________________________________ diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 403239a12..784cc9e62 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -13,12 +13,10 @@ ! 5. Leith_c=? (need to be adjusted) module h_viscosity_leith_interface interface - subroutine h_viscosity_leith(dynamics, partit, mesh) + subroutine h_viscosity_leith(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh @@ -282,19 +280,19 @@ subroutine viscosity_filter(option, dynamics, partit, mesh) ! ==== ! Harmonic Leith parameterization ! ==== - call h_viscosity_leith(dynamics, partit, mesh) + call h_viscosity_leith(partit, mesh) call visc_filt_harmon(dynamics, partit, mesh) CASE (2) ! === ! Laplacian+Leith+biharmonic background ! === - call h_viscosity_leith(dynamics, partit, mesh) + call h_viscosity_leith(partit, mesh) call visc_filt_hbhmix(dynamics, partit, mesh) CASE (3) ! === ! Biharmonic Leith parameterization ! === - call h_viscosity_leith(dynamics, partit, mesh) + call h_viscosity_leith(partit, mesh) call visc_filt_biharm(2, dynamics, partit, mesh) CASE (4) ! === @@ -566,7 +564,7 @@ SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) end subroutine visc_filt_hbhmix ! =================================================================== -SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) +SUBROUTINE h_viscosity_leith(partit, mesh) ! ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) USE MOD_MESH @@ -582,7 +580,6 @@ SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 real(kind=WP) :: leithx, leithy real(kind=WP), allocatable :: aux(:,:) - type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index c54297598..544b5db87 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -48,14 +48,13 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) USE MOD_PARSUP use mod_tracer use MOD_DYN - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics end subroutine end interface end module - module before_oce_step_interface interface subroutine before_oce_step(dynamics, tracers, partit, mesh) @@ -64,10 +63,10 @@ subroutine before_oce_step(dynamics, tracers, partit, mesh) USE MOD_PARSUP use mod_tracer use MOD_DYN - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics end subroutine end interface end module @@ -91,10 +90,10 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) use oce_initial_state_interface use oce_adv_tra_fct_interfaces IMPLICIT NONE -type(t_dyn) , intent(inout), target :: dynamics -type(t_tracer), intent(inout), target :: tracers +type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit -type(t_mesh) , intent(inout), target :: mesh +type(t_tracer), intent(inout), target :: tracers +type(t_dyn), intent(inout), target :: dynamics integer :: n !___setup virt_salt_flux____________________________________________________ ! if the ale thinkness remain unchanged (like in 'linfs' case) the vitrual @@ -439,8 +438,6 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%use_freeslip = free_slip dynamics%use_wsplit = w_split dynamics%wsplit_maxcfl = w_max_cfl - - END SUBROUTINE dynamics_init ! ! @@ -872,17 +869,17 @@ SUBROUTINE before_oce_step(dynamics, tracers, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER - use MOD_DYN + USE MOD_DYN USE o_ARRAYS USE g_config USE Toy_Channel_Soufflet implicit none integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics #include "associate_part_def.h" #include "associate_mesh_def.h" diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index c83e5ec8f..5c68ff9af 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,6 +1,6 @@ module write_step_info_interface interface - subroutine write_step_info(istep,outfreq,dynamics, tracers,partit,mesh) + subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -16,7 +16,7 @@ subroutine write_step_info(istep,outfreq,dynamics, tracers,partit,mesh) end module module check_blowup_interface interface - subroutine check_blowup(istep, dynamics, tracers,partit,mesh) + subroutine check_blowup(istep, dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP From 132295ca3b2b921e6e4a38baf5d49e31c69998b7 Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 2 Nov 2021 22:40:19 +0100 Subject: [PATCH 419/909] exchange UV_rhsAB against derived type dynamics%uv_rhsAB --- src/io_blowup.F90 | 4 ++-- src/io_restart.F90 | 4 ++-- src/oce_ale_vel_rhs.F90 | 24 ++++++++++++++---------- src/oce_modules.F90 | 3 ++- src/oce_setup_step.F90 | 4 ++-- 5 files changed, 22 insertions(+), 17 deletions(-) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 9ad2146c8..ea0e063db 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -114,8 +114,8 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV_rhs(1,:,:)); call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV_rhs(2,:,:)); - call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); - call def_variable(bid, 'vrhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); + call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:)); + call def_variable(bid, 'vrhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:)); call def_variable(bid, 'zbar_n_bot' , (/nod2D/) , 'node bottom depth', 'm', zbar_n_bot); call def_variable(bid, 'zbar_e_bot' , (/elem2d/) , 'elem bottom depth', 'm', zbar_e_bot); call def_variable(bid, 'bottom_node_thickness' , (/nod2D/) , 'node bottom thickness', 'm', bottom_node_thickness); diff --git a/src/io_restart.F90 b/src/io_restart.F90 index e198507ab..fdd5512ca 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -124,8 +124,8 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) !___Define the netCDF variables for 3D fields_______________________________ call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); - call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); - call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); + call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:)); + call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:)); !___Save restart variables for TKE and IDEMIX_________________________________ if (trim(mix_scheme)=='cvmix_TKE' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index b2b5d0c38..fd3faa7bd 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -1,6 +1,7 @@ -module momentum_adv_scalar_interface + +module compute_vel_rhs_interface interface - subroutine momentum_adv_scalar(dynamics, partit, mesh) + subroutine compute_vel_rhs(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP @@ -13,9 +14,9 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) end interface end module -module compute_vel_rhs_interface +module momentum_adv_scalar_interface interface - subroutine compute_vel_rhs(dynamics, partit, mesh) + subroutine momentum_adv_scalar(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP @@ -28,6 +29,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) end interface end module + ! ! !_______________________________________________________________________________ @@ -36,7 +38,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - use o_ARRAYS, only: UV_rhs, UV_rhsAB, eta_n, coriolis, ssh_gp, pgf_x, pgf_y + use o_ARRAYS, only: UV_rhs, eta_n, coriolis, ssh_gp, pgf_x, pgf_y use i_ARRAYS use i_therm_param use o_PARAM @@ -58,13 +60,14 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) + UV =>dynamics%uv(:,:,:) + UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) t1=MPI_Wtime() use_pice=0 @@ -186,7 +189,7 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN -USE o_ARRAYS, only: Wvel_e, UV_rhsAB +USE o_ARRAYS, only: Wvel_e USE o_PARAM use g_comm_auto IMPLICIT NONE @@ -200,13 +203,14 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) real(kind=WP) :: Unode_rhs(2,mesh%nl-1,partit%myDim_nod2d+partit%eDim_nod2D) -real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) + UV =>dynamics%uv(:,:,:) + UV_rhsAB=>dynamics%uv_rhsAB(:,:,:) !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index e34b07cf4..7d791d5a4 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -184,7 +184,8 @@ MODULE o_ARRAYS ! Arrays are described in subroutine array_setup real(kind=WP), allocatable, target :: Wvel(:,:), Wvel_e(:,:), Wvel_i(:,:) !!PS real(kind=WP), allocatable :: UV(:,:,:) -real(kind=WP), allocatable :: UV_rhs(:,:,:), UV_rhsAB(:,:,:) +real(kind=WP), allocatable :: UV_rhs(:,:,:) +!!PS real(kind=WP), allocatable :: UV_rhsAB(:,:,:) real(kind=WP), allocatable :: uke(:,:), v_back(:,:), uke_back(:,:), uke_dis(:,:), uke_dif(:,:) real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 544b5db87..b0445da47 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -476,7 +476,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) !allocate(stress_diag(2, elem_size))!delete me !!PS allocate(UV(2, nl-1, elem_size)) allocate(UV_rhs(2,nl-1, elem_size)) -allocate(UV_rhsAB(2,nl-1, elem_size)) +!!PS allocate(UV_rhsAB(2,nl-1, elem_size)) allocate(Visc(nl-1, elem_size)) ! ================ ! elevation and its rhs @@ -625,7 +625,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) !!PS UV=0.0_WP UV_rhs=0.0_WP - UV_rhsAB=0.0_WP +!!PS UV_rhsAB=0.0_WP ! eta_n=0.0_WP d_eta=0.0_WP From 5a9a4f4bfb1d143893e548f02410e92963e01732 Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 2 Nov 2021 22:47:26 +0100 Subject: [PATCH 420/909] fix bug in ../src/oce_vel_rhs_vinv.F90 --- src/oce_vel_rhs_vinv.F90 | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index f7bf16720..d9cf24c99 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -118,7 +118,7 @@ end subroutine relative_vorticity ! ========================================================================== subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant USE o_PARAM - USE o_ARRAYS, only: UV_rhs, UV_rhsAB, eta_n, coriolis_node, hpressure, vorticity + USE o_ARRAYS, only: UV_rhs, eta_n, coriolis_node, hpressure, vorticity USE MOD_MESH USE MOD_PARTIT @@ -140,12 +140,13 @@ subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant real(kind=WP) :: KE_node(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv real(kind=WP) :: density0_inv = 1./density_0 - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) + UV_rhsAB => dynamics%uv_rhsAB(:,:,:) w = 0.0_WP From 24c1b67235f6a07837a3c299bfd7fe207b89b1b7 Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 2 Nov 2021 23:32:44 +0100 Subject: [PATCH 421/909] exchange Wvel, Wvel_e, Wvel_i with coresponding derived types --- src/gen_modules_diag.F90 | 4 +++- src/io_blowup.F90 | 6 +++--- src/io_meandata.F90 | 6 +++--- src/io_restart.F90 | 6 +++--- src/oce_ale.F90 | 18 +++++++++++++----- src/oce_ale_tracer.F90 | 27 ++++++++++++++++++--------- src/oce_ale_vel_rhs.F90 | 3 ++- src/oce_dyn.F90 | 18 +++++++++++------- src/oce_setup_step.F90 | 8 +------- src/toy_channel_soufflet.F90 | 2 ++ src/write_step_info.F90 | 11 ++++++++--- 11 files changed, 67 insertions(+), 42 deletions(-) diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 5a0e47f90..0870a7052 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -244,11 +244,13 @@ subroutine diag_energy(mode, dynamics, partit, mesh) real(kind=WP) :: ux, vx, uy, vy, tvol, rval(2) real(kind=WP) :: geo_grad_x(3), geo_grad_y(3), geo_u(3), geo_v(3) real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) !===================== if (firstcall) then !allocate the stuff at the first call diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index ea0e063db..c88e367c1 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -143,9 +143,9 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) !!PS longname=trim(longname)//', Adams–Bashforth' !!PS call def_variable(bid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%valuesAB(:,:)(:,:)); end do - call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel); - call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel_e); - call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel_i); + call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w); + call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_e); + call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_i); call def_variable(bid, 'cfl_z' , (/nl-1, nod2D/) , 'vertical CFL criteria', '', CFL_z); !_____________________________________________________________________________ diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 4acbd17eb..5c6e0636f 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -157,7 +157,7 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('ssh ') call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('vve_5 ') - call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', Wvel(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', dynamics%w(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh_rhs ') call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) @@ -321,7 +321,7 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('v ') call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics%uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('w ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', dynamics%w(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Av ') call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u_dis_tend') @@ -433,7 +433,7 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', Wvel(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', dynamics%w(:,:), 1, 'm', i_real8, partit, mesh) call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) call def_stream(elem2D, myDim_elem2D, 'utau_bott', '(u, tau) at the bottom', 'N/(m s)', utau_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index fdd5512ca..dd2f5307d 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -158,9 +158,9 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) longname=trim(longname)//', Adams–Bashforth' call def_variable(oid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%valuesAB(:,:)); end do - call def_variable(oid, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel); - call def_variable(oid, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e); - call def_variable(oid, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_i); + call def_variable(oid, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', dynamics%w); + call def_variable(oid, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', dynamics%w_e); + call def_variable(oid, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', dynamics%w_i); end subroutine ini_ocean_io ! !-------------------------------------------------------------------------------------------- diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 171fbdd49..0d20a8f43 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1843,8 +1843,8 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: Wvel, fer_Wvel, fer_UV, CFL_z, water_flux, ssh_rhs, & - ssh_rhs_old, eta_n, d_eta, Wvel_e, Wvel_i + use o_ARRAYS, only: fer_Wvel, fer_UV, CFL_z, water_flux, ssh_rhs, & + ssh_rhs_old, eta_n, d_eta use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1867,11 +1867,15 @@ subroutine vert_vel_ale(dynamics, partit, mesh) type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) + UV =>dynamics%uv(:,:,:) + Wvel =>dynamics%w(:,:) + Wvel_e=>dynamics%w_e(:,:) + Wvel_i=>dynamics%w_i(:,:) !___________________________________________________________________________ ! Contributions from levels in divergence @@ -2518,7 +2522,7 @@ end subroutine solve_ssh_ale subroutine impl_vert_visc_ale(dynamics, partit, mesh) USE MOD_MESH USE o_PARAM -USE o_ARRAYS, only: UV_rhs, Av, Wvel_i, stress_surf +USE o_ARRAYS, only: UV_rhs, Av, stress_surf USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN @@ -2534,11 +2538,13 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) integer :: nz, elem, nzmax, nzmin, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:) , pointer :: Wvel_i #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -UV=>dynamics%uv(:,:,:) +UV =>dynamics%uv(:,:,:) +Wvel_i=>dynamics%w_i(:,:) DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -2952,10 +2958,12 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) t9=MPI_Wtime() !___________________________________________________________________________ ! write out global fields for debugging + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call write_step_info'//achar(27)//'[0m' call write_step_info(n,logfile_outfreq, dynamics, tracers, partit, mesh) ! check model for blowup --> ! write_step_info and check_blowup require ! togeather around 2.5% of model runtime + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call check_blowup'//achar(27)//'[0m' call check_blowup(n, dynamics, tracers, partit, mesh) t10=MPI_Wtime() diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index c962697bb..5335528d6 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -59,12 +59,14 @@ subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) end module module diff_ver_part_impl_ale_interface interface - subroutine diff_ver_part_impl_ale(tr_num, tracer, partit, mesh) + subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN integer, intent(in), target :: tr_num + type(t_dyn), intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -138,7 +140,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use g_config use o_PARAM, only: SPP, Fer_GM - use o_arrays, only: Wvel, Wvel_e, fer_Wvel, fer_UV + use o_arrays, only: fer_Wvel, fer_UV use mod_mesh USE MOD_PARTIT USE MOD_PARSUP @@ -157,12 +159,15 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit integer :: tr_num, node, nzmax, nzmin real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) + Wvel_e => dynamics%w_e(:,:) !___________________________________________________________________________ if (SPP) call cal_rejected_salt(partit, mesh) @@ -271,7 +276,7 @@ subroutine adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(dt, dynamics%uv, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) + call do_oce_adv_tra(dt, dynamics%uv, dynamics%w, dynamics%w_i, dynamics%w_e, tr_num, tracers, partit, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -360,7 +365,7 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (tracers%i_vert_diff) then ! do vertical diffusion: implicite - call diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) + call diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) end if !We DO not set del_ttf to zero because it will not be used in this timestep anymore @@ -454,13 +459,14 @@ end subroutine diff_ver_part_expl_ale ! !=============================================================================== ! vertical diffusivity augmented with Redi contribution [vertical flux of K(3,3)*d_zT] -subroutine diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) +subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_DYN use o_PARAM - use o_ARRAYS + use o_ARRAYS, only: Ki, Kv, heat_flux, water_flux, slope_tapered use i_ARRAYS USE MOD_PARTIT USE MOD_PARSUP @@ -472,8 +478,9 @@ subroutine diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) implicit none integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) @@ -485,12 +492,14 @@ subroutine diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) logical :: do_wimpl=.true. real(kind=WP), dimension(:,:), pointer :: trarr + real(kind=WP), dimension(:,:), pointer :: Wvel_i #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - trarr=>tracers%data(tr_num)%values(:,:) + trarr => tracers%data(tr_num)%values(:,:) + Wvel_i => dynamics%w_i(:,:) !___________________________________________________________________________ if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index fd3faa7bd..926dda43a 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -189,7 +189,6 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN -USE o_ARRAYS, only: Wvel_e USE o_PARAM use g_comm_auto IMPLICIT NONE @@ -204,6 +203,7 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) real(kind=WP) :: Unode_rhs(2,mesh%nl-1,partit%myDim_nod2d+partit%eDim_nod2D) real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB +real(kind=WP), dimension(:,:), pointer :: Wvel_e #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -211,6 +211,7 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) #include "associate_mesh_ass.h" UV =>dynamics%uv(:,:,:) UV_rhsAB=>dynamics%uv_rhsAB(:,:,:) + Wvel_e =>dynamics%w_e(:,:) !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 784cc9e62..65dcf69a1 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -13,10 +13,12 @@ ! 5. Leith_c=? (need to be adjusted) module h_viscosity_leith_interface interface - subroutine h_viscosity_leith(partit, mesh) + subroutine h_viscosity_leith(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN + type(t_dyn), intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh @@ -280,19 +282,19 @@ subroutine viscosity_filter(option, dynamics, partit, mesh) ! ==== ! Harmonic Leith parameterization ! ==== - call h_viscosity_leith(partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) call visc_filt_harmon(dynamics, partit, mesh) CASE (2) ! === ! Laplacian+Leith+biharmonic background ! === - call h_viscosity_leith(partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) call visc_filt_hbhmix(dynamics, partit, mesh) CASE (3) ! === ! Biharmonic Leith parameterization ! === - call h_viscosity_leith(partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) call visc_filt_biharm(2, dynamics, partit, mesh) CASE (4) ! === @@ -564,14 +566,14 @@ SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) end subroutine visc_filt_hbhmix ! =================================================================== -SUBROUTINE h_viscosity_leith(partit, mesh) +SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) ! ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS + USE o_ARRAYS, only: Visc, vorticity USE o_PARAM USE g_CONFIG use g_comm_auto @@ -580,13 +582,15 @@ SUBROUTINE h_viscosity_leith(partit, mesh) integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 real(kind=WP) :: leithx, leithy real(kind=WP), allocatable :: aux(:,:) + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - + real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + Wvel =>dynamics%w(:,:) ! if(mom_adv<4) call relative_vorticity(partit, mesh) !!! vorticity array should be allocated ! Fill in viscosity: diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index b0445da47..aead52a12 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -474,9 +474,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Velocities ! ================ !allocate(stress_diag(2, elem_size))!delete me -!!PS allocate(UV(2, nl-1, elem_size)) allocate(UV_rhs(2,nl-1, elem_size)) -!!PS allocate(UV_rhsAB(2,nl-1, elem_size)) allocate(Visc(nl-1, elem_size)) ! ================ ! elevation and its rhs @@ -491,8 +489,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! ================ ! Vertical velocity and pressure ! ================ -allocate(Wvel(nl, node_size), hpressure(nl,node_size)) -allocate(Wvel_e(nl, node_size), Wvel_i(nl, node_size)) +allocate( hpressure(nl,node_size)) allocate(CFL_z(nl, node_size)) ! vertical CFL criteria allocate(bvfreq(nl,node_size),mixlay_dep(node_size),bv_ref(node_size)) ! ================ @@ -630,9 +627,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) eta_n=0.0_WP d_eta=0.0_WP ssh_rhs=0.0_WP - Wvel=0.0_WP - Wvel_e =0.0_WP - Wvel_i =0.0_WP CFL_z =0.0_WP hpressure=0.0_WP ! diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index cf34e60ee..8d05a0e73 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -375,11 +375,13 @@ subroutine energy_out_soufflet(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) +Wvel => dynamics%w(:,:) nybins=100 diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 5c68ff9af..369e3aec1 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -41,7 +41,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use MOD_TRACER use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, Wvel, Unode, CFL_z, & + use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, Unode, CFL_z, & pgf_x, pgf_y, Av, Kv use i_ARRAYS use g_comm_auto @@ -63,11 +63,13 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) if (mod(istep,outfreq)==0) then @@ -261,7 +263,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) use MOD_DYN use o_PARAM use o_ARRAYS, only: eta_n, d_eta, ssh_rhs, ssh_rhs_old, water_flux, stress_surf, & - Wvel, CFL_z, heat_flux, Kv, Av + CFL_z, heat_flux, Kv, Av use i_ARRAYS use g_comm_auto use io_BLOWUP @@ -277,11 +279,13 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then @@ -550,3 +554,4 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) call par_ex(partit%MPI_COMM_FESOM, partit%mype) endif end subroutine + From b603fb5be00ddc4e1c9c515f7d1b75594d4ea0b7 Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 2 Nov 2021 23:55:07 +0100 Subject: [PATCH 422/909] exchange UV_rhs against derived type --- src/io_blowup.F90 | 4 ++-- src/oce_ale.F90 | 14 +++++++------ src/oce_ale_vel_rhs.F90 | 5 +++-- src/oce_dyn.F90 | 38 +++++++++++++++++++++--------------- src/oce_modules.F90 | 4 ---- src/oce_setup_step.F90 | 5 ----- src/oce_vel_rhs_vinv.F90 | 5 +++-- src/toy_channel_soufflet.F90 | 4 +++- 8 files changed, 41 insertions(+), 38 deletions(-) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index c88e367c1..39eda3107 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -112,8 +112,8 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) call def_variable(bid, 'helem' , (/nl-1, elem2D/) , 'Element layer thickness', 'm/s', helem(:,:)); call def_variable(bid, 'u' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); - call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV_rhs(1,:,:)); - call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV_rhs(2,:,:)); + call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', dynamics%uv_rhs(1,:,:)); + call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv_rhs(2,:,:)); call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:)); call def_variable(bid, 'vrhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:)); call def_variable(bid, 'zbar_n_bot' , (/nod2D/) , 'node bottom depth', 'm', zbar_n_bot); diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 0d20a8f43..025827b07 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1612,7 +1612,7 @@ end subroutine update_stiff_mat_ale subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use g_config,only: which_ALE,dt use MOD_MESH - use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, UV_rhs, water_flux + use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1629,12 +1629,13 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn), intent(inout), target :: dynamics - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) + UV_rhs=>dynamics%uv_rhs(:,:,:) ssh_rhs=0.0_WP !___________________________________________________________________________ @@ -2522,7 +2523,7 @@ end subroutine solve_ssh_ale subroutine impl_vert_visc_ale(dynamics, partit, mesh) USE MOD_MESH USE o_PARAM -USE o_ARRAYS, only: UV_rhs, Av, stress_surf +USE o_ARRAYS, only: Av, stress_surf USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN @@ -2537,14 +2538,15 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) integer :: nz, elem, nzmax, nzmin, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd -real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs real(kind=WP), dimension(:,:) , pointer :: Wvel_i #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -UV =>dynamics%uv(:,:,:) -Wvel_i=>dynamics%w_i(:,:) +UV =>dynamics%uv(:,:,:) +UV_rhs =>dynamics%uv_rhs(:,:,:) +Wvel_i =>dynamics%w_i(:,:) DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 926dda43a..799094bdf 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -38,7 +38,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - use o_ARRAYS, only: UV_rhs, eta_n, coriolis, ssh_gp, pgf_x, pgf_y + use o_ARRAYS, only: eta_n, coriolis, ssh_gp, pgf_x, pgf_y use i_ARRAYS use i_therm_param use o_PARAM @@ -60,13 +60,14 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV =>dynamics%uv(:,:,:) + UV_rhs =>dynamics%uv_rhs(:,:,:) UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) t1=MPI_Wtime() diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 65dcf69a1..be2a1de72 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -164,7 +164,7 @@ SUBROUTINE update_vel(dynamics, partit, mesh) USE MOD_PARSUP USE MOD_DYN - USE o_ARRAYS, only: d_eta, eta_n, UV_rhs + USE o_ARRAYS, only: d_eta, eta_n USE o_PARAM USE g_CONFIG use g_comm_auto @@ -175,13 +175,14 @@ SUBROUTINE update_vel(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) + UV_rhs=>dynamics%uv_rhs(:,:,:) DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -322,7 +323,7 @@ SUBROUTINE visc_filt_harmon(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN -USE o_ARRAYS, only: Visc, UV_rhs +USE o_ARRAYS, only: Visc USE o_PARAM USE g_CONFIG IMPLICIT NONE @@ -332,12 +333,13 @@ SUBROUTINE visc_filt_harmon(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_mesh) , intent(in), target :: mesh type(t_partit), intent(inout), target :: partit -real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) +UV_rhs => dynamics%uv_rhs(:,:,:) ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area @@ -369,7 +371,7 @@ SUBROUTINE visc_filt_biharm(option, dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: Visc, UV_rhs + USE o_ARRAYS, only: Visc USE o_PARAM USE g_CONFIG use g_comm_auto @@ -383,12 +385,13 @@ SUBROUTINE visc_filt_biharm(option, dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) ! Filter is applied twice. ed=myDim_elem2D+eDim_elem2D @@ -478,7 +481,7 @@ SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: Visc, UV_rhs + USE o_ARRAYS, only: Visc USE o_PARAM USE g_CONFIG use g_comm_auto @@ -494,12 +497,13 @@ SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) type(t_dyn), intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) ! Filter is applied twice. ed=myDim_elem2D+eDim_elem2D @@ -680,7 +684,6 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -692,12 +695,13 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. @@ -783,7 +787,6 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -796,12 +799,13 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -870,7 +874,6 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -882,12 +885,13 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) ! ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -950,7 +954,7 @@ SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN -USE o_ARRAYS, only: UV_rhs, v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & +USE o_ARRAYS, only: v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & uke, uke_dif USE o_PARAM USE g_CONFIG @@ -966,12 +970,13 @@ SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh -real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) +UV_rhs => dynamics%uv_rhs(:,:,:) ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area @@ -1318,3 +1323,4 @@ end subroutine uke_update ! =================================================================== + diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 7d791d5a4..52abf3e95 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -182,10 +182,6 @@ MODULE o_ARRAYS USE o_PARAM IMPLICIT NONE ! Arrays are described in subroutine array_setup -real(kind=WP), allocatable, target :: Wvel(:,:), Wvel_e(:,:), Wvel_i(:,:) -!!PS real(kind=WP), allocatable :: UV(:,:,:) -real(kind=WP), allocatable :: UV_rhs(:,:,:) -!!PS real(kind=WP), allocatable :: UV_rhsAB(:,:,:) real(kind=WP), allocatable :: uke(:,:), v_back(:,:), uke_back(:,:), uke_dis(:,:), uke_dif(:,:) real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index aead52a12..26311d642 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -474,7 +474,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Velocities ! ================ !allocate(stress_diag(2, elem_size))!delete me -allocate(UV_rhs(2,nl-1, elem_size)) allocate(Visc(nl-1, elem_size)) ! ================ ! elevation and its rhs @@ -620,10 +619,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Initialize with zeros ! ================= -!!PS UV=0.0_WP - UV_rhs=0.0_WP -!!PS UV_rhsAB=0.0_WP -! eta_n=0.0_WP d_eta=0.0_WP ssh_rhs=0.0_WP diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index d9cf24c99..f1b87d68f 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -118,7 +118,7 @@ end subroutine relative_vorticity ! ========================================================================== subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant USE o_PARAM - USE o_ARRAYS, only: UV_rhs, eta_n, coriolis_node, hpressure, vorticity + USE o_ARRAYS, only: eta_n, coriolis_node, hpressure, vorticity USE MOD_MESH USE MOD_PARTIT @@ -140,12 +140,13 @@ subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant real(kind=WP) :: KE_node(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv real(kind=WP) :: density0_inv = 1./density_0 - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs, UV_rhsAB #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) UV_rhsAB => dynamics%uv_rhsAB(:,:,:) w = 0.0_WP diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index 8d05a0e73..53052ccc5 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -53,12 +53,14 @@ subroutine relax_zonal_vel(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + UV_rhs=>dynamics%uv_rhs(:,:,:) + DO elem=1, myDim_elem2D ! ======== ! Interpolation From 57dc2758c2bef5ef014f48a6137ae8a41e55fc02 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 00:26:29 +0100 Subject: [PATCH 423/909] exchange Unode against derived type dynamics%uvnode --- src/cavity_param.F90 | 22 +++++++++++++--------- src/gen_modules_cvmix_kpp.F90 | 31 ++++++++++++++++++------------- src/gen_modules_cvmix_pp.F90 | 12 ++++++++---- src/gen_modules_cvmix_tke.F90 | 16 ++++++++++------ src/gen_modules_diag.F90 | 21 +++++++++++---------- src/io_meandata.F90 | 4 ++-- src/oce_ale.F90 | 10 +++++----- src/oce_ale_mixing_kpp.F90 | 27 ++++++++++++++++----------- src/oce_ale_mixing_pp.F90 | 13 +++++++++---- src/oce_dyn.F90 | 10 +++++----- src/oce_modules.F90 | 2 +- src/oce_setup_step.F90 | 2 +- src/write_step_info.F90 | 21 +++++++++++---------- 13 files changed, 110 insertions(+), 81 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 3c3236c39..35ed3bdf5 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -137,19 +137,21 @@ end subroutine compute_nrst_pnt2cavline ! adjusted for use in FESOM by Ralph Timmermann, 16.02.2011 ! Reviewed by ? ! adapted by P. SCholz for FESOM2.0 -subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) +subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_DYN use o_PARAM , only: density_0, WP - use o_ARRAYS, only: heat_flux, water_flux, Unode, density_m_rho0,density_ref + use o_ARRAYS, only: heat_flux, water_flux, density_m_rho0, density_ref use i_ARRAYS, only: net_heat_flux, fresh_wa_flux implicit none !___________________________________________________________________________ type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(in), target :: tracers + type(t_dyn), intent(in), target :: dynamics real (kind=WP) :: temp,sal,tin,zice real (kind=WP) :: rhow, rhor, rho real (kind=WP) :: gats1, gats2, gas, gat @@ -187,11 +189,12 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) ! hemw= 4.02*14. ! oomw= -30. ! oofw= -2.5 - + real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) !___________________________________________________________________________ do node=1,myDim_nod2D !+eDim_nod2D @@ -216,7 +219,7 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) ! if(vt1.eq.0.) vt1=0.001 !rt re = Hz_r(i,j,N)*ds/un !Reynolds number - vt1 = sqrt(Unode(1,nzmin,node)*Unode(1,nzmin,node)+Unode(2,nzmin,node)*Unode(2,nzmin,node)) + vt1 = sqrt(UVnode(1,nzmin,node)*UVnode(1,nzmin,node)+UVnode(2,nzmin,node)*UVnode(2,nzmin,node)) vt1 = max(vt1,0.001_WP) !vt1 = max(vt1,0.005) ! CW re = 10._WP/un !vt1*re (=velocity times length scale over kinematic viscosity) is the Reynolds number @@ -388,7 +391,7 @@ subroutine cavity_momentum_fluxes(dynamics, partit, mesh) USE MOD_PARSUP USE MOD_DYN use o_PARAM , only: density_0, C_d, WP - use o_ARRAYS, only: Unode, stress_surf, stress_node_surf + use o_ARRAYS, only: stress_surf, stress_node_surf use i_ARRAYS, only: u_w, v_w implicit none @@ -398,13 +401,14 @@ subroutine cavity_momentum_fluxes(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh integer :: elem, elnodes(3), nzmin, node real(kind=WP) :: aux - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) + UVnode=>dynamics%uvnode(:,:,:) !___________________________________________________________________________ do elem=1,myDim_elem2D @@ -428,9 +432,9 @@ subroutine cavity_momentum_fluxes(dynamics, partit, mesh) ! momentum stress: ! need to check the sensitivity to the drag coefficient ! here I use the bottom stress coefficient, which is 3e-3, for this FO2 work. - aux=sqrt(Unode(1,nzmin,node)**2+Unode(2,nzmin,node)**2)*density_0*C_d - stress_node_surf(1,node)=-aux*Unode(1,nzmin,node) - stress_node_surf(2,node)=-aux*Unode(2,nzmin,node) + aux=sqrt(UVnode(1,nzmin,node)**2+UVnode(2,nzmin,node)**2)*density_0*C_d + stress_node_surf(1,node)=-aux*UVnode(1,nzmin,node) + stress_node_surf(2,node)=-aux*UVnode(2,nzmin,node) end do end subroutine cavity_momentum_fluxes ! diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 81c35cfdd..33c587016 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -26,6 +26,7 @@ module g_cvmix_kpp USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN use o_arrays use g_comm_auto use i_arrays @@ -347,10 +348,11 @@ end subroutine init_cvmix_kpp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_kpp(tracers, partit, mesh) + subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers + type(t_dyn) , intent(in), target :: dynamics integer :: node, elem, nz, nln, nun, elnodes(3), aux_nz real(kind=WP) :: vshear2, dz2, aux, aux_wm(mesh%nl), aux_ws(mesh%nl) real(kind=WP) :: aux_coeff, sigma, stable @@ -361,12 +363,15 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 real(kind=WP) :: sfc_rhopot, sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2 real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) + UVnode=>dynamics%uvnode(:,:,:) + !_______________________________________________________________________ kpp_Av = 0.0_WP kpp_Kv = 0.0_WP @@ -402,15 +407,15 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) !___________________________________________________________ ! calculate squared velocity shear referenced to the surface ! --> cvmix wants to have it with respect to the midlevel rather than full levels - !!PS kpp_dvsurf2(nz) = ((Unode(1,nz-1,node)+Unode(1,nz,node))*0.5 - Unode( 1,1,node) )**2 + & - !!PS ((Unode(2,nz-1,node)+Unode(2,nz,node))*0.5 - Unode( 2,1,node) )**2 - kpp_dvsurf2(nz) = ((Unode(1,nz-1,node)+Unode(1,nz,node))*0.5 - Unode( 1,nun,node) )**2 + & - ((Unode(2,nz-1,node)+Unode(2,nz,node))*0.5 - Unode( 2,nun,node) )**2 + !!PS kpp_dvsurf2(nz) = ((UVnode(1,nz-1,node)+UVnode(1,nz,node))*0.5 - UVnode( 1,1,node) )**2 + & + !!PS ((UVnode(2,nz-1,node)+UVnode(2,nz,node))*0.5 - UVnode( 2,1,node) )**2 + kpp_dvsurf2(nz) = ((UVnode(1,nz-1,node)+UVnode(1,nz,node))*0.5 - UVnode( 1,nun,node) )**2 + & + ((UVnode(2,nz-1,node)+UVnode(2,nz,node))*0.5 - UVnode( 2,nun,node) )**2 !___________________________________________________________ ! calculate shear Richardson number Ri = N^2/(du/dz)^2 dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 + & - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 + & + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 vshear2 = vshear2/dz2 kpp_shearRi(nz) = max(bvfreq(nz,node),0.0_WP)/(vshear2+kpp_epsln) @@ -457,8 +462,8 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) htot = htot+delh sfc_temp = sfc_temp + temp(nztmp,node)*delh sfc_salt = sfc_salt + salt(nztmp,node)*delh - sfc_u = sfc_u + Unode(1,nztmp,node) *delh - sfc_v = sfc_v + Unode(2,nztmp,node) *delh + sfc_u = sfc_u + UVnode(1,nztmp,node) *delh + sfc_v = sfc_v + UVnode(2,nztmp,node) *delh end do sfc_temp = sfc_temp/htot sfc_salt = sfc_salt/htot @@ -468,8 +473,8 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) !___________________________________________________________ ! calculate vertical shear between present layer and surface ! averaged sfc_u and sfc_v - kpp_dvsurf2(nz) = (Unode(1,nz,node)-sfc_u)**2 + & - (Unode(2,nz,node)-sfc_v)**2 + kpp_dvsurf2(nz) = (UVnode(1,nz,node)-sfc_u)**2 + & + (UVnode(2,nz,node)-sfc_v)**2 !___________________________________________________________ ! calculate buoyancy difference between the surface averaged @@ -492,8 +497,8 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) ! calculate shear Richardson number Ri = N^2/(du/dz)^2 for ! mixing parameterisation below ocean boundary layer dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 + & - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 + & + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 vshear2 = vshear2/dz2 kpp_shearRi(nz) = max(bvfreq(nz,node),0.0_WP)/(vshear2+kpp_epsln) end do ! --> do nz=1, nln diff --git a/src/gen_modules_cvmix_pp.F90 b/src/gen_modules_cvmix_pp.F90 index 39dfa5673..58e9f2104 100644 --- a/src/gen_modules_cvmix_pp.F90 +++ b/src/gen_modules_cvmix_pp.F90 @@ -27,6 +27,7 @@ module g_cvmix_pp use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use o_arrays use g_comm_auto use i_arrays @@ -66,7 +67,6 @@ module g_cvmix_pp ! allocate and initialize CVMIX PP variables --> call initialisation ! routine from cvmix library subroutine init_cvmix_pp(partit, mesh) - use MOD_MESH implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -162,17 +162,21 @@ end subroutine init_cvmix_pp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_pp(partit, mesh) + subroutine calc_cvmix_pp(dynamics, partit, mesh) use MOD_MESH + implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn), intent(inout), target :: dynamics integer :: node, elem, nz, nln, nun, elnodes(3), windnl=2, node_size real(kind=WP) :: vshear2, dz2, Kvb + real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) node_size = myDim_nod2D !_______________________________________________________________________ do node = 1,node_size @@ -186,8 +190,8 @@ subroutine calc_cvmix_pp(partit, mesh) !!PS do nz=2,nln do nz=nun+1,nln dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 +& - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 +& + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 vshear2 = vshear2/dz2 ! WIKIPEDIA: The Richardson number is always ! considered positive. A negative value of N² (i.e. complex N) diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index c286cf5f4..aa1deae21 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -28,6 +28,7 @@ module g_cvmix_tke use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use o_arrays use g_comm_auto implicit none @@ -249,20 +250,23 @@ end subroutine init_cvmix_tke ! !=========================================================================== ! calculate TKE vertical mixing coefficients from CVMIX library - subroutine calc_cvmix_tke(partit, mesh) + subroutine calc_cvmix_tke(dynamics, partit, mesh) implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn), intent(inout), target :: dynamics integer :: node, elem, nelem, nz, nln, nun, elnodes(3), node_size real(kind=WP) :: tvol real(kind=WP) :: dz_trr(mesh%nl), bvfreq2(mesh%nl), vshear2(mesh%nl) real(kind=WP) :: tke_Av_old(mesh%nl), tke_Kv_old(mesh%nl), tke_old(mesh%nl) - + real(kind=WP), dimension(:,:,:), pointer :: UVnode + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + UVnode=>dynamics%uvnode(:,:,:) + node_size = myDim_nod2D !_______________________________________________________________________ ! calculate all neccessary forcing for TKE @@ -297,8 +301,8 @@ subroutine calc_cvmix_tke(partit, mesh) ! calculate for TKE 3D vertical velocity shear vshear2=0.0_WP do nz=nun+1,nln - vshear2(nz)=(( Unode(1, nz-1, node) - Unode(1, nz, node))**2 + & - ( Unode(2, nz-1, node) - Unode(2, nz, node))**2)/ & + vshear2(nz)=(( UVnode(1, nz-1, node) - UVnode(1, nz, node))**2 + & + ( UVnode(2, nz-1, node) - UVnode(2, nz, node))**2)/ & ((Z_3d_n(nz-1,node)-Z_3d_n(nz,node))**2) end do @@ -399,4 +403,4 @@ subroutine calc_cvmix_tke(partit, mesh) end do end do end subroutine calc_cvmix_tke -end module g_cvmix_tke +end module g_cvmix_tke \ No newline at end of file diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 0870a7052..5015da263 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -243,14 +243,15 @@ subroutine diag_energy(mode, dynamics, partit, mesh) integer :: iup, ilo real(kind=WP) :: ux, vx, uy, vy, tvol, rval(2) real(kind=WP) :: geo_grad_x(3), geo_grad_y(3), geo_u(3), geo_v(3) - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - Wvel => dynamics%w(:,:) + UV => dynamics%uv(:,:,:) + UVnode=> dynamics%uvnode(:,:,:) + Wvel => dynamics%w(:,:) !===================== if (firstcall) then !allocate the stuff at the first call @@ -291,9 +292,9 @@ subroutine diag_energy(mode, dynamics, partit, mesh) if (mode==0) return end if - u_x_u=Unode(1,1:nl-1,1:myDim_nod2D)*Unode(1,1:nl-1,1:myDim_nod2D) - u_x_v=Unode(1,1:nl-1,1:myDim_nod2D)*Unode(2,1:nl-1,1:myDim_nod2D) - v_x_v=Unode(2,1:nl-1,1:myDim_nod2D)*Unode(2,1:nl-1,1:myDim_nod2D) + u_x_u=UVnode(1,1:nl-1,1:myDim_nod2D)*UVnode(1,1:nl-1,1:myDim_nod2D) + u_x_v=UVnode(1,1:nl-1,1:myDim_nod2D)*UVnode(2,1:nl-1,1:myDim_nod2D) + v_x_v=UVnode(2,1:nl-1,1:myDim_nod2D)*UVnode(2,1:nl-1,1:myDim_nod2D) ! this loop might be very expensive DO n=1, myDim_elem2D nzmax = nlevels(n) @@ -396,10 +397,10 @@ subroutine diag_energy(mode, dynamics, partit, mesh) if (nlevels(elem)-1 < nz) cycle elnodes=elem2D_nodes(:, elem) tvol=tvol+elem_area(elem) - ux=ux+sum(gradient_sca(1:3,elem)*Unode(1,nz,elnodes))*elem_area(elem) !accumulate tensor of velocity derivatives - vx=vx+sum(gradient_sca(1:3,elem)*Unode(2,nz,elnodes))*elem_area(elem) - uy=uy+sum(gradient_sca(4:6,elem)*Unode(1,nz,elnodes))*elem_area(elem) - vy=vy+sum(gradient_sca(4:6,elem)*Unode(2,nz,elnodes))*elem_area(elem) + ux=ux+sum(gradient_sca(1:3,elem)*UVnode(1,nz,elnodes))*elem_area(elem) !accumulate tensor of velocity derivatives + vx=vx+sum(gradient_sca(1:3,elem)*UVnode(2,nz,elnodes))*elem_area(elem) + uy=uy+sum(gradient_sca(4:6,elem)*UVnode(1,nz,elnodes))*elem_area(elem) + vy=vy+sum(gradient_sca(4:6,elem)*UVnode(2,nz,elnodes))*elem_area(elem) END DO dudx(nz,n)=ux/tvol!/area(nz, n)/3. dvdx(nz,n)=vx/tvol diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 5c6e0636f..4cf2f09cd 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -428,8 +428,8 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dvdz', 'int(Av * dv/dz)', 'm3/s2', av_dvdz(:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz_sq', 'Av * (du/dz)^2', 'm^2/s^3', av_dudz_sq(:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'Vertical mixing A', 'm2/s', Av(:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', Unode(1,:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', Unode(2,:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', dynamics%uvnode(1,:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', dynamics%uvnode(2,:,:), 1, 'm', i_real8, partit, mesh) call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 025827b07..d30938e10 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2808,7 +2808,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! use FESOM2.0 tuned k-profile parameterization for vertical mixing if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_KPP'//achar(27)//'[0m' - call oce_mixing_KPP(Av, Kv_double, tracers, partit, mesh) + call oce_mixing_KPP(Av, Kv_double, dynamics, tracers, partit, mesh) Kv=Kv_double(:,:,1) call mo_convect(partit, mesh) @@ -2816,13 +2816,13 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! mixing else if(mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_PP'//achar(27)//'[0m' - call oce_mixing_PP(partit, mesh) + call oce_mixing_PP(dynamics, partit, mesh) call mo_convect(partit, mesh) ! use CVMIX KPP (Large at al. 1994) else if(mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_kpp'//achar(27)//'[0m' - call calc_cvmix_kpp(tracers, partit, mesh) + call calc_cvmix_kpp(dynamics, tracers, partit, mesh) call mo_convect(partit, mesh) ! use CVMIX PP (Pacanowski and Philander 1981) parameterisation for mixing @@ -2830,7 +2830,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! N^2 and vertical horizontal velocity shear dui/dz else if(mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_pp'//achar(27)//'[0m' - call calc_cvmix_pp(partit, mesh) + call calc_cvmix_pp(dynamics, partit, mesh) call mo_convect(partit, mesh) ! use CVMIX TKE (turbulent kinetic energy closure) parameterisation for @@ -2839,7 +2839,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! Model for the diapycnal diffusivity induced by internal gravity waves" else if(mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_tke'//achar(27)//'[0m' - call calc_cvmix_tke(partit, mesh) + call calc_cvmix_tke(dynamics, partit, mesh) call mo_convect(partit, mesh) end if diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 5c62871e1..cc2a54890 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -11,6 +11,7 @@ MODULE o_mixing_KPP_mod USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER + USE MOD_DYN USE o_ARRAYS USE g_config USE i_arrays @@ -242,7 +243,7 @@ end subroutine oce_mixing_kpp_init ! diffK = diffusion coefficient (m^2/s) ! !--------------------------------------------------------------- - subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) + subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) IMPLICIT NONE @@ -253,6 +254,7 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers + type(t_dyn) , intent(in), target :: dynamics integer :: node, kn, elem, elnodes(3) integer :: nz, ns, j, q, lay, lay_mi, nzmin, nzmax real(KIND=WP) :: smftu, smftv, aux, vol @@ -265,11 +267,12 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) real(KIND=WP), dimension(mesh%nl, partit%myDim_elem2D+partit%eDim_elem2D), intent(inout) :: viscAE!for momentum (elements) real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D +partit%eDim_nod2D) :: viscA !for momentum (nodes) real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D +partit%eDim_nod2D, tracers%num_tracers), intent(inout) :: diffK !for T and S - + real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) ViscA=0.0_WP DO node=1, myDim_nod2D !+eDim_nod2D @@ -299,15 +302,15 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) dbsfc(nzmin,node) = 0.0_WP ! Surface velocity - usurf = Unode(1,nzmin,node) - vsurf = Unode(2,nzmin,node) + usurf = UVnode(1,nzmin,node) + vsurf = UVnode(2,nzmin,node) !!PS DO nz=2, nlevels_nod2d(node)-1 DO nz=nzmin+1, nzmax-1 ! Squared velocity shear referenced to surface (@ Z) - u_loc = 0.5_WP * ( Unode(1,nz-1,node) + Unode(1,nz,node) ) - v_loc = 0.5_WP * ( Unode(2,nz-1,node) + Unode(2,nz,node) ) + u_loc = 0.5_WP * ( UVnode(1,nz-1,node) + UVnode(1,nz,node) ) + v_loc = 0.5_WP * ( UVnode(2,nz-1,node) + UVnode(2,nz,node) ) dVsq(nz,node) = ( usurf - u_loc )**2 + ( vsurf - v_loc )**2 @@ -347,7 +350,7 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) ! compute interior mixing coefficients everywhere, due to constant ! internal wave activity, static instability, and local shear ! instability. - CALL ri_iwmix(viscA, diffK, tracers, partit, mesh) + CALL ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) ! add double diffusion IF (double_diffusion) then CALL ddmix(diffK, tracers, partit, mesh) @@ -729,11 +732,12 @@ END SUBROUTINE wscale ! visc = viscosity coefficient (m**2/s) ! diff = diffusion coefficient (m**2/s) ! - subroutine ri_iwmix(viscA, diffK, tracers, partit, mesh) + subroutine ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) IMPLICIT NONE type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers + type(t_dyn), intent(in), target :: dynamics integer :: node, nz, mr, nzmin, nzmax real(KIND=WP) , parameter :: Riinfty = 0.8_WP ! local Richardson Number limit for shear instability (LMD 1994 uses 0.7) real(KIND=WP) :: ri_prev, tmp @@ -746,11 +750,12 @@ subroutine ri_iwmix(viscA, diffK, tracers, partit, mesh) ! Put them under the namelist.oce logical :: smooth_richardson_number = .false. integer :: num_smoothings = 1 ! for vertical smoothing of Richardson number - + real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) ! Compute Richardson number and store it as diffK to save memory DO node=1, myDim_nod2D! +eDim_nod2D @@ -759,8 +764,8 @@ subroutine ri_iwmix(viscA, diffK, tracers, partit, mesh) !!PS DO nz=2,nlevels_nod2d(node)-1 DO nz=nzmin+1,nzmax-1 dz_inv = 1.0_WP / (Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) ! > 0 - shear = ( Unode(1, nz-1, node) - Unode(1, nz, node) )**2 + & - ( Unode(2, nz-1, node) - Unode(2, nz, node) )**2 + shear = ( UVnode(1, nz-1, node) - UVnode(1, nz, node) )**2 + & + ( UVnode(2, nz-1, node) - UVnode(2, nz, node) )**2 shear = shear * dz_inv * dz_inv diffK(nz,node,1) = MAX( bvfreq(nz,node), 0.0_WP ) / (shear + epsln) ! To avoid NaNs at start END DO ! minimum Richardson number is 0 diff --git a/src/oce_ale_mixing_pp.F90 b/src/oce_ale_mixing_pp.F90 index b4c7958d2..36cf7d519 100644 --- a/src/oce_ale_mixing_pp.F90 +++ b/src/oce_ale_mixing_pp.F90 @@ -1,5 +1,5 @@ !======================================================================= -subroutine oce_mixing_pp(partit, mesh) +subroutine oce_mixing_pp(dynamics, partit, mesh) ! Compute Richardson number dependent Av and Kv following ! Pacanowski and Philander, 1981 ! Av = Avmax * factor**2 + Av0, @@ -18,6 +18,7 @@ subroutine oce_mixing_pp(partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP +USE MOD_DYN USE o_PARAM USE o_ARRAYS USE g_config @@ -26,13 +27,17 @@ subroutine oce_mixing_pp(partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit +type(t_dyn), intent(inout), target :: dynamics real(kind=WP) :: dz_inv, bv, shear, a, rho_up, rho_dn, t, s, Kv0_b integer :: node, nz, nzmax, nzmin, elem, elnodes(3), i - +real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +UVnode=>dynamics%uvnode(:,:,:) + + !___________________________________________________________________________ do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2d(node) @@ -43,8 +48,8 @@ subroutine oce_mixing_pp(partit, mesh) !!PS do nz=2,nlevels_nod2d(node)-1 do nz=nzmin+1,nzmax-1 dz_inv=1.0_WP/(Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) - shear = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 +& - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + shear = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 +& + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 shear = shear*dz_inv*dz_inv Kv(nz,node) = shear/(shear+5._WP*max(bvfreq(nz,node),0.0_WP)+1.0e-14) ! To avoid NaNs at start end do diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index be2a1de72..0bff6401b 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -207,7 +207,6 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) USE MOD_PARSUP USE MOD_DYN USE o_PARAM - USE o_ARRAYS, only: Unode use g_comm_auto IMPLICIT NONE integer :: n, nz, k, elem, nln, uln, nle, ule @@ -216,12 +215,13 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) + UVnode=>dynamics%uvnode(:,:,:) DO n=1, myDim_nod2D uln = ulevels_nod2D(n) @@ -241,11 +241,11 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) tx=tx+UV(1,nz,elem)*elem_area(elem) ty=ty+UV(2,nz,elem)*elem_area(elem) END DO - Unode(1,nz,n)=tx/tvol - Unode(2,nz,n)=ty/tvol + UVnode(1,nz,n)=tx/tvol + UVnode(2,nz,n)=ty/tvol END DO END DO - call exchange_nod(Unode, partit) + call exchange_nod(UVnode, partit) end subroutine compute_vel_nodes !=========================================================================== subroutine viscosity_filter(option, dynamics, partit, mesh) diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 52abf3e95..9045b44e3 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -220,7 +220,7 @@ MODULE o_ARRAYS real(kind=WP), allocatable,dimension(:,:,:) :: Kv_double real(kind=WP), allocatable,dimension(:) :: Kv0 !Velocities interpolated to nodes -real(kind=WP), allocatable,dimension(:,:,:) :: Unode +!!PS real(kind=WP), allocatable,dimension(:,:,:) :: Unode ! Auxiliary arrays to store Redi-GM fields real(kind=WP), allocatable,dimension(:,:,:) :: neutral_slope diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 26311d642..4c6951f9a 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -562,7 +562,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) end if !Velocities at nodes -allocate(Unode(2,nl-1,node_size)) +!!PS allocate(Unode(2,nl-1,node_size)) ! tracer gradients & RHS allocate(ttrhs(nl-1,node_size)) diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 369e3aec1..d4ad1dbba 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -41,7 +41,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use MOD_TRACER use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, Unode, CFL_z, & + use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, CFL_z, & pgf_x, pgf_y, Av, Kv use i_ARRAYS use g_comm_auto @@ -62,13 +62,14 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UVnode => dynamics%uvnode(:,:,:) Wvel => dynamics%w(:,:) if (mod(istep,outfreq)==0) then @@ -147,13 +148,13 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc = minval(Wvel(2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(1,1,1:myDim_nod2D)) + loc = minval(UVnode(1,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(1,2,1:myDim_nod2D)) + loc = minval(UVnode(1,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(2,1,1:myDim_nod2D)) + loc = minval(UVnode(2,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(2,2,1:myDim_nod2D)) + loc = minval(UVnode(2,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc = minval(d_eta(1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_deta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) @@ -179,13 +180,13 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc = maxval(Wvel(2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(1,1,1:myDim_nod2D)) + loc = maxval(UVnode(1,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(1,2,1:myDim_nod2D)) + loc = maxval(UVnode(1,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(2,1,1:myDim_nod2D)) + loc = maxval(UVnode(2,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(2,2,1:myDim_nod2D)) + loc = maxval(UVnode(2,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc = maxval(d_eta(1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_deta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) From ad0904a23490f1de3666b935994612cd42373fe5 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 00:35:19 +0100 Subject: [PATCH 424/909] exchange CFL_z with dynamics derived type --- src/io_blowup.F90 | 2 +- src/oce_ale.F90 | 5 +++-- src/oce_modules.F90 | 1 - src/oce_setup_step.F90 | 2 -- src/write_step_info.F90 | 10 ++++++---- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 39eda3107..d5cccd6e2 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -146,7 +146,7 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w); call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_e); call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_i); - call def_variable(bid, 'cfl_z' , (/nl-1, nod2D/) , 'vertical CFL criteria', '', CFL_z); + call def_variable(bid, 'cfl_z' , (/nl-1, nod2D/) , 'vertical CFL criteria', '', dynamics%cfl_z); !_____________________________________________________________________________ ! write snapshot ice variables to blowup file diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index d30938e10..e577e4d4a 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1844,7 +1844,7 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: fer_Wvel, fer_UV, CFL_z, water_flux, ssh_rhs, & + use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux, ssh_rhs, & ssh_rhs_old, eta_n, d_eta use o_PARAM USE MOD_PARTIT @@ -1868,7 +1868,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1877,6 +1877,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) Wvel =>dynamics%w(:,:) Wvel_e=>dynamics%w_e(:,:) Wvel_i=>dynamics%w_i(:,:) + CFL_z =>dynamics%cfl_z(:,:) !___________________________________________________________________________ ! Contributions from levels in divergence diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 9045b44e3..992dcd78f 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -188,7 +188,6 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: UV_dis_posdef_b2(:,:), UV_dis_posdef(:,:), UV_back_posdef(:,:) real(kind=WP), allocatable :: eta_n(:), d_eta(:) real(kind=WP), allocatable :: ssh_rhs(:), hpressure(:,:) -real(kind=WP), allocatable :: CFL_z(:,:) real(kind=WP), allocatable :: stress_surf(:,:) real(kind=WP), allocatable :: stress_node_surf(:,:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_x(:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 4c6951f9a..f122880c4 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -489,7 +489,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Vertical velocity and pressure ! ================ allocate( hpressure(nl,node_size)) -allocate(CFL_z(nl, node_size)) ! vertical CFL criteria allocate(bvfreq(nl,node_size),mixlay_dep(node_size),bv_ref(node_size)) ! ================ ! Ocean forcing arrays @@ -622,7 +621,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) eta_n=0.0_WP d_eta=0.0_WP ssh_rhs=0.0_WP - CFL_z =0.0_WP hpressure=0.0_WP ! heat_flux=0.0_WP diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index d4ad1dbba..a989d61da 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -41,7 +41,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use MOD_TRACER use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, CFL_z, & + use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, & pgf_x, pgf_y, Av, Kv use i_ARRAYS use g_comm_auto @@ -63,7 +63,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode - real(kind=WP), dimension(:,:), pointer :: Wvel + real(kind=WP), dimension(:,:), pointer :: Wvel, CFL_z #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -71,6 +71,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) UV => dynamics%uv(:,:,:) UVnode => dynamics%uvnode(:,:,:) Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) if (mod(istep,outfreq)==0) then @@ -264,7 +265,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) use MOD_DYN use o_PARAM use o_ARRAYS, only: eta_n, d_eta, ssh_rhs, ssh_rhs_old, water_flux, stress_surf, & - CFL_z, heat_flux, Kv, Av + heat_flux, Kv, Av use i_ARRAYS use g_comm_auto use io_BLOWUP @@ -280,13 +281,14 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:), pointer :: Wvel + real(kind=WP), dimension(:,:), pointer :: Wvel, CFL_z #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then From 61a6b632ba662828db9801e60d019b8e775f0c43 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 09:45:12 +0100 Subject: [PATCH 425/909] - move FESOM runloop to separate subroutine - run for a number of given steps - store number of done steps --- src/fvom.F90 | 119 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 111 insertions(+), 8 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 6230ea7b5..eb576089a 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -42,7 +42,7 @@ module fesom_main_storage_module type :: fesom_main_storage_type - integer :: n, nsteps, offset, row, i, provided + integer :: n, from_nstep, offset, row, i, provided integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing @@ -123,7 +123,7 @@ subroutine fesom_init(fesom_total_nsteps) !===================== call setup_model(f%partit) ! Read Namelists, always before clock_init call clock_init(f%partit) ! read the clock file - call get_run_steps(f%nsteps, f%partit) + call get_run_steps(fesom_total_nsteps, f%partit) call mesh_setup(f%partit, f%mesh) if (f%mype==0) write(*,*) 'FESOM mesh_setup... complete' @@ -236,17 +236,120 @@ subroutine fesom_init(fesom_total_nsteps) !call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) !stop ! - ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value - - - fesom_total_nsteps = f%nsteps + ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value + + f%from_nstep = 1 end subroutine - subroutine fesom_runloop(nsteps) - integer, intent(in) :: nsteps + subroutine fesom_runloop(current_nsteps) + use fesom_main_storage_module + integer, intent(in) :: current_nsteps ! EO parameters + integer n + real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing + + !===================== + ! Time stepping + !===================== + +! Initialize timers + rtime_fullice = 0._WP + rtime_write_restart = 0._WP + rtime_write_means = 0._WP + rtime_compute_diag = 0._WP + rtime_read_forcing = 0._WP + + if (f%mype==0) write(*,*) 'FESOM start iteration before the barrier...' + call MPI_Barrier(f%MPI_COMM_FESOM, f%MPIERR) + + if (f%mype==0) then + write(*,*) 'FESOM start iteration after the barrier...' + f%t0 = MPI_Wtime() + endif + if(f%mype==0) then + write(*,*) + print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;32m'//' --> FESOM STARTS TIME LOOP '//achar(27)//'[0m' + end if + !___MODEL TIME STEPPING LOOP________________________________________________ + if (use_global_tides) then + call foreph_ini(yearnew, month, f%partit) + end if + do n=f%from_nstep, f%from_nstep-1+current_nsteps + if (use_global_tides) then + call foreph(f%partit, f%mesh) + end if + mstep = n + if (mod(n,logfile_outfreq)==0 .and. f%mype==0) then + write(*,*) 'FESOM =======================================================' +! write(*,*) 'FESOM step:',n,' day:', n*dt/24./3600., + write(*,*) 'FESOM step:',n,' day:', daynew,' year:',yearnew + write(*,*) + end if +#if defined (__oifs) || defined (__oasis) + seconds_til_now=INT(dt)*(n-1) +#endif + call clock + !___compute horizontal velocity on nodes (originaly on elements)________ + call compute_vel_nodes(f%partit, f%mesh) + !___model sea-ice step__________________________________________________ + f%t1 = MPI_Wtime() + if(use_ice) then + !___compute fluxes from ocean to ice________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' + call ocean2ice(f%tracers, f%partit, f%mesh) + + !___compute update of atmospheric forcing____________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' + f%t0_frc = MPI_Wtime() + call update_atm_forcing(n, f%tracers, f%partit, f%mesh) + f%t1_frc = MPI_Wtime() + !___compute ice step________________________________________________ + if (ice_steps_since_upd>=ice_ave_steps-1) then + ice_update=.true. + ice_steps_since_upd = 0 + else + ice_update=.false. + ice_steps_since_upd=ice_steps_since_upd+1 + endif + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ice_timestep(n)'//achar(27)//'[0m' + if (ice_update) call ice_timestep(n, f%partit, f%mesh) + !___compute fluxes to the ocean: heat, freshwater, momentum_________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' + call oce_fluxes_mom(f%partit, f%mesh) ! momentum only + call oce_fluxes(f%tracers, f%partit, f%mesh) + end if + call before_oce_step(f%tracers, f%partit, f%mesh) ! prepare the things if required + f%t2 = MPI_Wtime() + !___model ocean step____________________________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' + + call oce_timestep_ale(n, f%tracers, f%partit, f%mesh) + + f%t3 = MPI_Wtime() + !___compute energy diagnostics..._______________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' + call compute_diagnostics(1, f%tracers, f%partit, f%mesh) + + f%t4 = MPI_Wtime() + !___prepare output______________________________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' + call output (n, f%tracers, f%partit, f%mesh) + + f%t5 = MPI_Wtime() + call restart(n, .false., .false., f%tracers, f%partit, f%mesh) + f%t6 = MPI_Wtime() + + rtime_fullice = rtime_fullice + f%t2 - f%t1 + rtime_compute_diag = rtime_compute_diag + f%t4 - f%t3 + rtime_write_means = rtime_write_means + f%t5 - f%t4 + rtime_write_restart = rtime_write_restart + f%t6 - f%t5 + rtime_read_forcing = rtime_read_forcing + f%t1_frc - f%t0_frc + end do + + f%from_nstep = f%from_nstep+current_nsteps end subroutine From 421786f27da2c236339680d281eaf7f188d75940 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 10:22:09 +0100 Subject: [PATCH 426/909] move back some variables from the runloop which are needed in the global storage for the finalize subroutine --- src/fvom.F90 | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index eb576089a..02fd57700 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -45,6 +45,7 @@ module fesom_main_storage_module integer :: n, from_nstep, offset, row, i, provided integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc + real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) @@ -238,6 +239,13 @@ subroutine fesom_init(fesom_total_nsteps) ! ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value + ! Initialize timers + f%rtime_fullice = 0._WP + f%rtime_write_restart = 0._WP + f%rtime_write_means = 0._WP + f%rtime_compute_diag = 0._WP + f%rtime_read_forcing = 0._WP + f%from_nstep = 1 end subroutine @@ -248,19 +256,11 @@ subroutine fesom_runloop(current_nsteps) ! EO parameters integer n - real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing !===================== ! Time stepping !===================== -! Initialize timers - rtime_fullice = 0._WP - rtime_write_restart = 0._WP - rtime_write_means = 0._WP - rtime_compute_diag = 0._WP - rtime_read_forcing = 0._WP - if (f%mype==0) write(*,*) 'FESOM start iteration before the barrier...' call MPI_Barrier(f%MPI_COMM_FESOM, f%MPIERR) @@ -342,11 +342,11 @@ subroutine fesom_runloop(current_nsteps) call restart(n, .false., .false., f%tracers, f%partit, f%mesh) f%t6 = MPI_Wtime() - rtime_fullice = rtime_fullice + f%t2 - f%t1 - rtime_compute_diag = rtime_compute_diag + f%t4 - f%t3 - rtime_write_means = rtime_write_means + f%t5 - f%t4 - rtime_write_restart = rtime_write_restart + f%t6 - f%t5 - rtime_read_forcing = rtime_read_forcing + f%t1_frc - f%t0_frc + f%rtime_fullice = f%rtime_fullice + f%t2 - f%t1 + f%rtime_compute_diag = f%rtime_compute_diag + f%t4 - f%t3 + f%rtime_write_means = f%rtime_write_means + f%t5 - f%t4 + f%rtime_write_restart = f%rtime_write_restart + f%t6 - f%t5 + f%rtime_read_forcing = f%rtime_read_forcing + f%t1_frc - f%t0_frc end do f%from_nstep = f%from_nstep+current_nsteps From 731692762642e89140593fbe3a84d31c8395ab26 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 14:35:21 +0100 Subject: [PATCH 427/909] move FESOM finalization code to separate subroutine --- src/fvom.F90 | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 02fd57700..4ab323df9 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -48,7 +48,6 @@ module fesom_main_storage_module real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart - real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) real(kind=real32) :: runtime_alltimesteps @@ -354,7 +353,70 @@ subroutine fesom_runloop(current_nsteps) subroutine fesom_finalize() + use fesom_main_storage_module + ! EO parameters + real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) + + call finalize_output() + + !___FINISH MODEL RUN________________________________________________________ + + call MPI_Barrier(f%MPI_COMM_FESOM, f%MPIERR) + if (f%mype==0) then + f%t1 = MPI_Wtime() + f%runtime_alltimesteps = real(f%t1-f%t0,real32) + write(*,*) 'FESOM Run is finished, updating clock' + endif + + mean_rtime(1) = rtime_oce + mean_rtime(2) = rtime_oce_mixpres + mean_rtime(3) = rtime_oce_dyn + mean_rtime(4) = rtime_oce_dynssh + mean_rtime(5) = rtime_oce_solvessh + mean_rtime(6) = rtime_oce_GMRedi + mean_rtime(7) = rtime_oce_solvetra + mean_rtime(8) = rtime_ice + mean_rtime(9) = rtime_tot + mean_rtime(10) = f%rtime_fullice - f%rtime_read_forcing + mean_rtime(11) = f%rtime_compute_diag + mean_rtime(12) = f%rtime_write_means + mean_rtime(13) = f%rtime_write_restart + mean_rtime(14) = f%rtime_read_forcing + + max_rtime(1:14) = mean_rtime(1:14) + min_rtime(1:14) = mean_rtime(1:14) + call MPI_AllREDUCE(MPI_IN_PLACE, mean_rtime, 14, MPI_REAL, MPI_SUM, f%MPI_COMM_FESOM, f%MPIerr) + mean_rtime(1:14) = mean_rtime(1:14) / real(f%npes,real32) + call MPI_AllREDUCE(MPI_IN_PLACE, max_rtime, 14, MPI_REAL, MPI_MAX, f%MPI_COMM_FESOM, f%MPIerr) + call MPI_AllREDUCE(MPI_IN_PLACE, min_rtime, 14, MPI_REAL, MPI_MIN, f%MPI_COMM_FESOM, f%MPIerr) + + if (f%mype==0) then + write(*,*) '___MODEL RUNTIME mean, min, max per task [seconds]________________________' + write(*,*) ' runtime ocean:',mean_rtime(1), min_rtime(1), max_rtime(1) + write(*,*) ' > runtime oce. mix,pres. :',mean_rtime(2), min_rtime(2), max_rtime(2) + write(*,*) ' > runtime oce. dyn. u,v,w:',mean_rtime(3), min_rtime(3), max_rtime(3) + write(*,*) ' > runtime oce. dyn. ssh :',mean_rtime(4), min_rtime(4), max_rtime(4) + write(*,*) ' > runtime oce. solve ssh:',mean_rtime(5), min_rtime(5), max_rtime(5) + write(*,*) ' > runtime oce. GM/Redi :',mean_rtime(6), min_rtime(6), max_rtime(6) + write(*,*) ' > runtime oce. tracer :',mean_rtime(7), min_rtime(7), max_rtime(7) + write(*,*) ' runtime ice :',mean_rtime(10), min_rtime(10), max_rtime(10) + write(*,*) ' > runtime ice step :',mean_rtime(8), min_rtime(8), max_rtime(8) + write(*,*) ' runtime diag: ', mean_rtime(11), min_rtime(11), max_rtime(11) + write(*,*) ' runtime output: ', mean_rtime(12), min_rtime(12), max_rtime(12) + write(*,*) ' runtime restart:', mean_rtime(13), min_rtime(13), max_rtime(13) + write(*,*) ' runtime forcing:', mean_rtime(14), min_rtime(14), max_rtime(14) + write(*,*) ' runtime total (ice+oce):',mean_rtime(9), min_rtime(9), max_rtime(9) + write(*,*) + write(*,*) '============================================' + write(*,*) '=========== BENCHMARK RUNTIME ==============' + write(*,*) ' Number of cores : ',f%npes + write(*,*) ' Runtime for all timesteps : ',f%runtime_alltimesteps,' sec' + write(*,*) '============================================' + write(*,*) + end if +! call clock_finish + call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) end subroutine end module From 5d01c3d1cb3510c3a0943cbe15f5ba93d39c342b Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 14:49:17 +0100 Subject: [PATCH 428/909] exchange ssh_rhs and ssh_rhs_old with dynamics derived type --- src/fvom_main.F90 | 5 +++ src/io_blowup.F90 | 4 +- src/io_meandata.F90 | 4 +- src/oce_ale.F90 | 83 ++++++++++++++++++++++++++++++++--------- src/oce_modules.F90 | 2 +- src/oce_setup_step.F90 | 25 +++++++++---- src/write_step_info.F90 | 8 +++- 7 files changed, 100 insertions(+), 31 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index c23cb5f76..1dc4a6abb 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -112,6 +112,7 @@ program main call setup_model(partit) ! Read Namelists, always before clock_init call clock_init(partit) ! read the clock file call get_run_steps(nsteps, partit) + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call mesh_setup'//achar(27)//'[0m' call mesh_setup(partit, mesh) if (mype==0) write(*,*) 'FESOM mesh_setup... complete' @@ -121,12 +122,16 @@ program main ! and additional arrays needed for ! fancy advection etc. !===================== + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call check_mesh_consistency'//achar(27)//'[0m' call check_mesh_consistency(partit, mesh) if (mype==0) t2=MPI_Wtime() + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call xxxx_init'//achar(27)//'[0m' call dynamics_init(dynamics, partit, mesh) call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) + + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean_setup'//achar(27)//'[0m' call ocean_setup(dynamics, tracers, partit, mesh) if (mype==0) then diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index d5cccd6e2..4b594499b 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -105,8 +105,8 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) !___ALE related fields______________________________________________________ call def_variable(bid, 'hbar' , (/nod2D/) , 'ALE surface elevation hbar_n+0.5', 'm', hbar); !!PS call def_variable(bid, 'hbar_old' , (/nod2D/) , 'ALE surface elevation hbar_n-0.5', 'm', hbar_old); - call def_variable(bid, 'ssh_rhs' , (/nod2D/) , 'RHS for the elevation', '?', ssh_rhs); - call def_variable(bid, 'ssh_rhs_old', (/nod2D/) , 'RHS for the elevation', '?', ssh_rhs_old); + call def_variable(bid, 'ssh_rhs' , (/nod2D/) , 'RHS for the elevation', '?', dynamics%ssh_rhs); + call def_variable(bid, 'ssh_rhs_old', (/nod2D/) , 'RHS for the elevation', '?', dynamics%ssh_rhs_old); !___Define the netCDF variables for 3D fields_______________________________ call def_variable(bid, 'hnode' , (/nl-1, nod2D/) , 'ALE stuff', '?', hnode); call def_variable(bid, 'helem' , (/nl-1, elem2D/) , 'Element layer thickness', 'm/s', helem(:,:)); diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 4cf2f09cd..5b499e624 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -160,9 +160,9 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', dynamics%w(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh_rhs ') - call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', dynamics%ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh_rhs_old ') - call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', dynamics%ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) !___________________________________________________________________________________________________________________________________ ! output sea ice diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index e577e4d4a..bdd1d3b34 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -60,12 +60,14 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) type(t_dyn), intent(inout), target :: dynamics end subroutine - subroutine solve_ssh_ale(partit, mesh) + subroutine solve_ssh_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine compute_hbar_ale(dynamics, partit, mesh) @@ -98,6 +100,34 @@ subroutine update_thickness_ale(partit, mesh) end interface end module +module init_ale_interface + interface + subroutine init_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + end subroutine + end interface +end module + +module init_thickness_ale_interface + interface + subroutine init_thickness_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + end subroutine + end interface +end module + module oce_timestep_ale_interface interface subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) @@ -136,11 +166,12 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! !=============================================================================== ! allocate & initialise arrays for Arbitrary-Langrangian-Eularian (ALE) method -subroutine init_ale(partit, mesh) +subroutine init_ale(dynamics, partit, mesh) USE o_PARAM USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN USE o_ARRAYS USE g_config, only: which_ale, use_cavity, use_partial_cell USE g_forcing_param, only: use_virt_salt @@ -150,6 +181,7 @@ subroutine init_ale(partit, mesh) integer :: n, nzmax, nzmin, elnodes(3), elem type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -160,7 +192,8 @@ subroutine init_ale(partit, mesh) allocate(mesh%hnode_new(1:nl-1, myDim_nod2D+eDim_nod2D)) ! ssh_rhs_old: auxiliary array to store an intermediate part of the rhs computations. - allocate(ssh_rhs_old(myDim_nod2D+eDim_nod2D)) + allocate(dynamics%ssh_rhs_old(myDim_nod2D+eDim_nod2D)) + dynamics%ssh_rhs_old = 0.0_WP ! hbar, hbar_old: correspond to the elevation, but on semi-integer time steps. allocate(mesh%hbar(myDim_nod2D+eDim_nod2D)) @@ -647,7 +680,7 @@ end subroutine init_surface_node_depth ! !=============================================================================== ! initialize thickness arrays based on the current hbar -subroutine init_thickness_ale(partit, mesh) +subroutine init_thickness_ale(dynamics, partit, mesh) ! For z-star case: we stretch scalar thicknesses (nodal) ! through nlevels_nod2D_min -2 layers. Layer nlevels_nod2D_min-1 ! should not be touched if partial cell is implemented (it is). @@ -658,17 +691,20 @@ subroutine init_thickness_ale(partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - use o_ARRAYS + USE MOD_DYN + use o_ARRAYS, only: eta_n implicit none integer :: n, nz, elem, elnodes(3), nzmin, nzmax real(kind=WP) :: dd type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - + type(t_dyn), intent(inout), target :: dynamics + real(kind=WP), dimension(:), pointer :: ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + ssh_rhs_old=>dynamics%ssh_rhs_old(:) if(mype==0) then write(*,*) '____________________________________________________________' @@ -1612,7 +1648,7 @@ end subroutine update_stiff_mat_ale subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use g_config,only: which_ALE,dt use MOD_MESH - use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, water_flux + use o_ARRAYS, only: water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1630,12 +1666,15 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_dyn), intent(inout), target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:), pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) UV_rhs=>dynamics%uv_rhs(:,:,:) + ssh_rhs=>dynamics%ssh_rhs(:) + ssh_rhs_old=>dynamics%ssh_rhs_old(:) ssh_rhs=0.0_WP !___________________________________________________________________________ @@ -1728,7 +1767,7 @@ end subroutine compute_ssh_rhs_ale subroutine compute_hbar_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, use_cavity use MOD_MESH - use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, water_flux + use o_ARRAYS, only: water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1748,12 +1787,15 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:), pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) + ssh_rhs=>dynamics%ssh_rhs(:) + ssh_rhs_old=>dynamics%ssh_rhs_old(:) !___________________________________________________________________________ ! compute the rhs @@ -1844,8 +1886,8 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux, ssh_rhs, & - ssh_rhs_old, eta_n, d_eta + use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux, & + eta_n, d_eta use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1869,6 +1911,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit real(kind=WP), dimension(:,:,:), pointer :: UV real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1878,6 +1921,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) Wvel_e=>dynamics%w_e(:,:) Wvel_i=>dynamics%w_i(:,:) CFL_z =>dynamics%cfl_z(:,:) + ssh_rhs =>dynamics%ssh_rhs(:) + ssh_rhs_old =>dynamics%ssh_rhs_old(:) !___________________________________________________________________________ ! Contributions from levels in divergence @@ -2372,12 +2417,13 @@ end subroutine vert_vel_ale !=============================================================================== ! solve eq.18 in S. Danilov et al. : FESOM2: from finite elements to finite volumes. ! for (eta^(n+1)-eta^n) = d_eta -subroutine solve_ssh_ale(partit, mesh) +subroutine solve_ssh_ale(dynamics, partit, mesh) use o_PARAM use MOD_MESH use o_ARRAYS USE MOD_PARTIT USE MOD_PARSUP +USE MOD_DYN use g_comm_auto use g_config, only: which_ale ! @@ -2401,14 +2447,16 @@ subroutine solve_ssh_ale(partit, mesh) real(kind=WP), allocatable :: arr_nod2D(:),arr_nod2D2(:,:),arr_nod2D3(:) real(kind=WP) :: cssh1,cssh2,crhs integer :: i -type(t_mesh), intent(inout), target :: mesh +type(t_mesh) , intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - +type(t_dyn) , intent(inout), target :: dynamics +!!PS real(kind=WP), dimension(:), pointer :: ssh_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +!!PS ssh_rhs=>dynamics%ssh_rhs(:) Pmode = PET_BLOCKP+PET_SOLVE + PET_BICGSTAB +PET_REPORT + PET_QUIET+ PET_RCM+PET_PCBJ if (lfirst) then @@ -2422,7 +2470,7 @@ subroutine solve_ssh_ale(partit, mesh) droptol, & soltol, & part, ssh_stiff%rowptr, ssh_stiff%colind, ssh_stiff%values, & - ssh_rhs, d_eta, & + dynamics%ssh_rhs, d_eta, & rinfo, MPI_COMM_FESOM, mesh) ! ! @@ -2438,8 +2486,9 @@ subroutine solve_ssh_ale(partit, mesh) integer(kind=C_INT) :: maxiter, restart, lutype, fillin real(kind=C_DOUBLE) :: droptol, soltol integer :: n -type(t_mesh), intent(inout), target :: mesh +type(t_mesh) , intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit +type(t_dyn) , intent(inout), target :: dynamics interface @@ -2509,7 +2558,7 @@ end subroutine psolve ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) lfirst=.false. end if - call psolve(ident, ssh_rhs, ssh_stiff%values, d_eta, new_values) + call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, d_eta, new_values) #endif ! @@ -2895,7 +2944,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! Take updated ssh matrix and solve --> new ssh! t30=MPI_Wtime() - call solve_ssh_ale(partit, mesh) + call solve_ssh_ale(dynamics, partit, mesh) if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(dynamics, partit, mesh) t3=MPI_Wtime() diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 992dcd78f..b4a261c15 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -187,7 +187,7 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) real(kind=WP), allocatable :: UV_dis_posdef_b2(:,:), UV_dis_posdef(:,:), UV_back_posdef(:,:) real(kind=WP), allocatable :: eta_n(:), d_eta(:) -real(kind=WP), allocatable :: ssh_rhs(:), hpressure(:,:) +real(kind=WP), allocatable :: hpressure(:,:) real(kind=WP), allocatable :: stress_surf(:,:) real(kind=WP), allocatable :: stress_node_surf(:,:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_x(:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index f122880c4..f6e80d60b 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -89,6 +89,8 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) use Toy_Channel_Soufflet use oce_initial_state_interface use oce_adv_tra_fct_interfaces +use init_ale_interface +use init_thickness_ale_interface IMPLICIT NONE type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit @@ -117,7 +119,10 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) write(*,*) ' --> initialise ALE arrays + sparse SSH stiff matrix' write(*,*) end if - call init_ale(partit, mesh) + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_ale'//achar(27)//'[0m' + call init_ale(dynamics, partit, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_stiff_mat_ale'//achar(27)//'[0m' call init_stiff_mat_ale(partit, mesh) !!PS test !___________________________________________________________________________ @@ -145,20 +150,24 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) ! initialise fesom1.4 like KPP if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_kpp_init'//achar(27)//'[0m' call oce_mixing_kpp_init(partit, mesh) ! initialise fesom1.4 like PP elseif (mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then ! initialise cvmix_KPP elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_kpp'//achar(27)//'[0m' call init_cvmix_kpp(partit, mesh) ! initialise cvmix_PP elseif (mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_pp'//achar(27)//'[0m' call init_cvmix_pp(partit, mesh) ! initialise cvmix_TKE elseif (mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_tke'//achar(27)//'[0m' call init_cvmix_tke(partit, mesh) endif @@ -166,12 +175,14 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) ! initialise additional mixing cvmix_IDEMIX --> only in combination with ! cvmix_TKE+cvmix_IDEMIX or stand alone for debbuging as cvmix_TKE if (mod(mix_scheme_nmb,10)==6) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_idemix'//achar(27)//'[0m' call init_cvmix_idemix(partit, mesh) ! initialise additional mixing cvmix_TIDAL --> only in combination with ! KPP+cvmix_TIDAL, PP+cvmix_TIDAL, cvmix_KPP+cvmix_TIDAL, cvmix_PP+cvmix_TIDAL ! or stand alone for debbuging as cvmix_TIDAL elseif (mod(mix_scheme_nmb,10)==7) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_tidal'//achar(27)//'[0m' call init_cvmix_tidal(partit, mesh) end if @@ -192,7 +203,7 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) if(partit%mype==0) write(*,*) 'Arrays are set' !if(open_boundary) call set_open_boundary !TODO - + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call oce_adv_tra_fct_init'//achar(27)//'[0m' call oce_adv_tra_fct_init(tracers%work, partit, mesh) call muscl_adv_init(tracers%work, partit, mesh) !!PS test !===================== @@ -202,6 +213,7 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) if (toy_ocean) then SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call toy_channel'//achar(27)//'[0m' if (mod(mstep, soufflet_forc_update)==0) then call initial_state_soufflet(dynamics, tracers, partit, mesh) call compute_zonal_mean_ini(partit, mesh) @@ -225,7 +237,8 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) write(*,*) ' --> call init_thickness_ale' write(*,*) end if - call init_thickness_ale(partit, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_thickness_ale'//achar(27)//'[0m' + call init_thickness_ale(dynamics, partit, mesh) !___________________________________________________________________________ if(partit%mype==0) write(*,*) 'Initial state' @@ -408,11 +421,11 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) allocate(dynamics%eta_n( node_size)) allocate(dynamics%d_eta( node_size)) allocate(dynamics%ssh_rhs( node_size)) - allocate(dynamics%ssh_rhs_old(node_size)) + !!PS allocate(dynamics%ssh_rhs_old(node_size)) dynamics%eta_n = 0.0_WP dynamics%d_eta = 0.0_WP dynamics%ssh_rhs = 0.0_WP - dynamics%ssh_rhs_old= 0.0_WP +!!PS dynamics%ssh_rhs_old= 0.0_WP ! set parameters in derived type !!PS dynamics%visc_opt = visc_opt @@ -479,7 +492,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! elevation and its rhs ! ================ allocate(eta_n(node_size), d_eta(node_size)) -allocate(ssh_rhs(node_size)) ! ================ ! Monin-Obukhov ! ================ @@ -620,7 +632,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) eta_n=0.0_WP d_eta=0.0_WP - ssh_rhs=0.0_WP hpressure=0.0_WP ! heat_flux=0.0_WP diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index a989d61da..e03c5c475 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -264,7 +264,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) USE MOD_PARSUP use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, ssh_rhs, ssh_rhs_old, water_flux, stress_surf, & + use o_ARRAYS, only: eta_n, d_eta, water_flux, stress_surf, & heat_flux, Kv, Av use i_ARRAYS use g_comm_auto @@ -281,7 +281,8 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:), pointer :: Wvel, CFL_z + real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -289,6 +290,8 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) UV => dynamics%uv(:,:,:) Wvel => dynamics%w(:,:) CFL_z => dynamics%cfl_z(:,:) + ssh_rhs => dynamics%ssh_rhs(:) + ssh_rhs_old => dynamics%ssh_rhs_old(:) !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then @@ -558,3 +561,4 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) endif end subroutine + From 33c8c45f1c3e2d8fd7a7f01e1fc8fc5ca77eb5c8 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 3 Nov 2021 14:54:54 +0100 Subject: [PATCH 429/909] OpenMP implementation in the tracer advection part as it was suggested by Natalja --- src/oce_adv_tra_driver.F90 | 9 +- src/oce_adv_tra_fct.F90 | 233 +++++++++++++++++-------------------- src/oce_adv_tra_hor.F90 | 17 ++- src/oce_adv_tra_ver.F90 | 45 ++++--- 4 files changed, 159 insertions(+), 145 deletions(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 54d9603c7..a82a6b99b 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -233,8 +233,10 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, !___________________________________________________________________________ ! c. Update the solution ! Vertical +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, k, elem, enodes, num, el, nu12, nl12, nu1, nu2, nl1, nl2, edge) if (present(use_lo)) then if (use_lo) then +!$OMP DO do n=1, myDim_nod2d nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -243,9 +245,10 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, dttf_v(nz,n)=dttf_v(nz,n)-ttf(nz,n)*hnode(nz,n)+LO(nz,n)*hnode_new(nz,n) end do end do +!$OMP END DO end if end if - +!$OMP DO do n=1, myDim_nod2d nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -253,8 +256,8 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, dttf_v(nz,n)=dttf_v(nz,n) + (flux_v(nz,n)-flux_v(nz+1,n))*dt/areasvol(nz,n) end do end do - - +!$OMP END DO +!$OMP END PARALLEL ! Horizontal do edge=1, myDim_edge2D enodes(1:2)=edges(:,edge) diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index d76c3aebc..78b45899b 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -23,7 +23,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, real(kind=WP), intent(in) :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, partit%myDim_edge2D) real(kind=WP), intent(inout) :: adf_v(mesh%nl, partit%myDim_nod2D) - real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, partit%myDim_nod2D) real(kind=WP), intent(inout) :: fct_minus(mesh%nl, partit%myDim_nod2D) real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array end subroutine @@ -40,7 +40,7 @@ subroutine oce_adv_tra_fct_init(twork, partit, mesh) implicit none integer :: my_size type(t_mesh), intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit + type(t_partit), intent(inout), target :: partit type(t_tracer_work), intent(inout), target :: twork #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -81,6 +81,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, USE MOD_PARTIT USE MOD_PARSUP use g_comm_auto + use omp_lib implicit none real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh @@ -99,12 +100,25 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, real(kind=WP) :: flux_eps=1e-16 real(kind=WP) :: bignumber=1e3 integer :: vlimit=1 - + integer(omp_lock_kind), allocatable, save :: plock(:) + integer(omp_lock_kind) :: mlock(partit%myDim_nod2D) + logical, save :: l_first=.true. #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, k, elem, enodes, num, el, nl1, nl2, nu1, nu2, nl12, nu12, edge, & +!$OMP flux, ae,tvert_max, tvert_min) +!$OMP MASTER + if (l_first) then + allocate(plock(partit%myDim_nod2D+partit%eDim_nod2D)) + do n=1, myDim_nod2D+partit%eDim_nod2D + call omp_init_lock_with_hint(plock(n),omp_sync_hint_speculative+omp_sync_hint_uncontended) + enddo + l_first = .false. + endif +!$OMP END MASTER ! -------------------------------------------------------------------------- ! ttf is the tracer field on step n ! del_ttf is the increment @@ -112,6 +126,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, ! -------------------------------------------------------------------------- !___________________________________________________________________________ ! a1. max, min between old solution and updated low-order solution per node +!$OMP DO do n=1,myDim_nod2D + edim_nod2d nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -119,12 +134,13 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_ttf_max(nz,n)=max(LO(nz,n), ttf(nz,n)) fct_ttf_min(nz,n)=min(LO(nz,n), ttf(nz,n)) end do - end do - + end do +!$OMP END DO !___________________________________________________________________________ ! a2. Admissible increments on elements ! (only layers below the first and above the last layer) ! look for max, min bounds for each element --> AUX here auxilary array +!$OMP DO do elem=1, myDim_elem2D enodes=elem2D_nodes(:,elem) nu1 = ulevels(elem) @@ -140,98 +156,51 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, end do endif end do ! --> do elem=1, myDim_elem2D - +!$OMP END DO !___________________________________________________________________________ ! a3. Bounds on clusters and admissible increments ! Vertical1: In this version we look at the bounds on the clusters ! above and below, which leaves wide bounds because typically ! vertical gradients are larger. - if(vlimit==1) then !Horizontal - do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - - !___________________________________________________________________ - do nz=nu1,nl1-1 - ! max,min horizontal bound in cluster around node n in every - ! vertical layer - ! nod_in_elem2D --> elem indices of which node n is surrounded - ! nod_in_elem2D_num --> max number of surrounded elem - tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - end do - - !___________________________________________________________________ - ! calc max,min increment of surface layer with respect to low order - ! solution - fct_ttf_max(nu1,n)=tvert_max(nu1)-LO(nu1,n) - fct_ttf_min(nu1,n)=tvert_min(nu1)-LO(nu1,n) - - ! calc max,min increment from nz-1:nz+1 with respect to low order - ! solution at layer nz - do nz=nu1+1,nl1-2 - fct_ttf_max(nz,n)=maxval(tvert_max(nz-1:nz+1))-LO(nz,n) - fct_ttf_min(nz,n)=minval(tvert_min(nz-1:nz+1))-LO(nz,n) - end do - ! calc max,min increment of bottom layer -1 with respect to low order - ! solution - nz=nl1-1 - fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) - fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) - end do - end if - - !___________________________________________________________________________ - ! Vertical2: Similar to the version above, but the vertical bounds are more - ! local - if(vlimit==2) then - do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - do nz=nu1,nl1-1 - tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - end do - do nz=nu1+1, nl1-2 - tvert_max(nz)=max(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) - tvert_min(nz)=min(tvert_min(nz),minval(fct_ttf_max(nz-1:nz+1,n))) - end do - do nz=nu1,nl1-1 - fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) - fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) - end do - end do - end if - - !___________________________________________________________________________ - ! Vertical3: Vertical bounds are taken into account only if they are narrower than the - ! horizontal ones - if(vlimit==3) then - do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - do nz=nu1, nl1-1 - tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - end do - do nz=nu1+1, nl1-2 - tvert_max(nz)=min(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) - tvert_min(nz)=max(tvert_min(nz),minval(fct_ttf_max(nz-1:nz+1,n))) - end do - do nz=nu1, nl1-1 - fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) - fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) - end do - end do - end if - +!$OMP DO + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !___________________________________________________________________ + do nz=nu1,nl1-1 + ! max,min horizontal bound in cluster around node n in every + ! vertical layer + ! nod_in_elem2D --> elem indices of which node n is surrounded + ! nod_in_elem2D_num --> max number of surrounded elem + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + end do + !___________________________________________________________________ + ! calc max,min increment of surface layer with respect to low order + ! solution + fct_ttf_max(nu1,n)=tvert_max(nu1)-LO(nu1,n) + fct_ttf_min(nu1,n)=tvert_min(nu1)-LO(nu1,n) + ! calc max,min increment from nz-1:nz+1 with respect to low order + ! solution at layer nz + do nz=nu1+1,nl1-2 + fct_ttf_max(nz,n)=maxval(tvert_max(nz-1:nz+1))-LO(nz,n) + fct_ttf_min(nz,n)=minval(tvert_min(nz-1:nz+1))-LO(nz,n) + end do + ! calc max,min increment of bottom layer -1 with respect to low order + ! solution + nz=nl1-1 + fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) + fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) + end do +!$OMP END DO !___________________________________________________________________________ ! b1. Split positive and negative antidiffusive contributions ! --> sum all positive (fct_plus), negative (fct_minus) antidiffusive ! horizontal element and vertical node contribution to node n and layer nz ! see. R. Löhner et al. "finite element flux corrected transport (FEM-FCT) ! for the euler and navier stoke equation +!$OMP DO do n=1, myDim_nod2D nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -240,50 +209,54 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_minus(nz,n)=0._WP end do end do - +!$OMP END DO !Vertical +!$OMP DO do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - do nz=nu1,nl1-1 -! fct_plus(nz,n)=fct_plus(nz,n)+ & -! (max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) & -! /hnode(nz,n) -! fct_minus(nz,n)=fct_minus(nz,n)+ & -! (min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) & -! /hnode(nz,n) - fct_plus(nz,n) =fct_plus(nz,n) +(max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) - fct_minus(nz,n)=fct_minus(nz,n)+(min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) - end do + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 + fct_plus(nz,n) =fct_plus(nz,n) +(max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) + fct_minus(nz,n)=fct_minus(nz,n)+(min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) + end do end do - +!$OMP END DO + +!$OMP DO !Horizontal do edge=1, myDim_edge2D - enodes(1:2)=edges(:,edge) - el=edge_tri(:,edge) - nl1=nlevels(el(1))-1 - nu1=ulevels(el(1)) - nl2=0 - nu2=0 - if(el(2)>0) then - nl2=nlevels(el(2))-1 - nu2=ulevels(el(2)) - end if + enodes(1:2)=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + nu1=ulevels(el(1)) + nl2=0 + nu2=0 + if (el(2)>0) then + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + end if - nl12 = max(nl1,nl2) - nu12 = nu1 - if (nu2>0) nu12 = min(nu1,nu2) - - do nz=nu12, nl12 - fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) - fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) - fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) - fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) - end do - end do - + nl12 = max(nl1,nl2) + nu12 = nu1 + if (nu2>0) nu12 = min(nu1,nu2) + call omp_set_lock(plock(enodes(1))) + do nz=nu12, nl12 + fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) + fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) + end do + call omp_unset_lock(plock(enodes(1))) + + call omp_set_lock(plock(enodes(2))) + do nz=nu12, nl12 + fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) + fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) + end do + call omp_unset_lock(plock(enodes(2))) + end do +!$OMP END DO !___________________________________________________________________________ ! b2. Limiting factors +!$OMP DO do n=1,myDim_nod2D nu1=ulevels_nod2D(n) nl1=nlevels_nod2D(n) @@ -294,13 +267,16 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_minus(nz,n)=min(1.0_WP,fct_ttf_min(nz,n)/flux) end do end do - +!$OMP END DO ! fct_minus and fct_plus must be known to neighbouring PE +!$OMP MASTER call exchange_nod(fct_plus, fct_minus, partit) - +!$OMP END MASTER +!!$OMP BARRIER !___________________________________________________________________________ ! b3. Limiting !Vertical +!$OMP DO do n=1, myDim_nod2D nu1=ulevels_nod2D(n) nl1=nlevels_nod2D(n) @@ -331,10 +307,13 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, end do ! the bottom flux is always zero end do - - call exchange_nod_end(partit) ! fct_plus, fct_minus - +!$OMP END DO +!!$OMP MASTER +! call exchange_nod_end(partit) ! fct_plus, fct_minus +!!$OMP END MASTER +!!$OMP BARRIER !Horizontal +!$OMP DO do edge=1, myDim_edge2D enodes(1:2)=edges(:,edge) el=edge_tri(:,edge) @@ -366,4 +345,6 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, adf_h(nz,edge)=ae*adf_h(nz,edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine oce_tra_adv_fct diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 441372ba8..9214a277d 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -92,6 +92,9 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) ! The result is the low-order solution horizontal fluxes ! They are put into flux !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, & +!$OMP a, vflux, el, enodes, nz, nu12, nl12, nl1, nl2, nu1, nu2) +!$OMP DO do edge=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,edge) @@ -214,6 +217,8 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_hor_upw1 ! ! @@ -255,6 +260,9 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g ! The result is the low-order solution horizontal fluxes ! They are put into flux !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, Tmean1, Tmean2, cHO, & +!$OMP c_lo, a, vflux, el, enodes, nz, nu12, nl12, nl1, nl2, nu1, nu2) +!$OMP DO do edge=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,edge) @@ -487,6 +495,8 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_hor_muscl ! ! @@ -498,7 +508,7 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr USE MOD_PARSUP use g_comm_auto implicit none - type(t_partit),intent(in), target :: partit + type(t_partit),intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) @@ -526,6 +536,9 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr ! The result is the low-order solution horizontal fluxes ! They are put into flux !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, Tmean1, Tmean2, cHO, & +!$OMP a, vflux, el, enodes, nz, nu12, nl12, nl1, nl2, nu1, nu2) +!$OMP DO do edge=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,edge) @@ -741,5 +754,7 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_hor_mfct diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 84ee55173..03a7cb4e8 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -24,10 +24,10 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) USE MOD_PARSUP type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero end subroutine !=============================================================================== ! QR (4th order centerd) @@ -103,7 +103,7 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) - integer :: nz, n, nzmax, nzmin, tr_num + integer :: nz, n, nzmax, nzmin real(kind=WP) :: m, zinv, dt_inv, dz real(kind=WP) :: c1, v_adv @@ -113,7 +113,8 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) #include "associate_mesh_ass.h" dt_inv=1.0_WP/dt - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(a, b, c, tr, cp, tp, n, nz, nzmax, nzmin, m, zinv, dz, c1, v_adv) +!$OMP DO !___________________________________________________________________________ ! loop over local nodes do n=1,myDim_nod2D @@ -233,6 +234,8 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) ttf(nz,n)=ttf(nz,n)+tr(nz) end do end do ! --> do n=1,myDim_nod2D +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_vert_impl ! ! @@ -263,7 +266,8 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) else flux=0.0_WP end if - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert, n, nz, nzmax, nzmin) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n) @@ -291,6 +295,8 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) ttf(nz-1,n)*(W(nz,n)-abs(W(nz,n))))*area(nz,n)-flux(nz,n) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_ver_upw1 ! ! @@ -324,7 +330,8 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) else flux=0.0_WP end if - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert,n, nz, nzmax, nzmin, Tmean, Tmean1, Tmean2, qc, qu,qd) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n) @@ -364,10 +371,11 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) Tmean1=ttf(nz ,n)+(2*qc+qu)*(zbar_3d_n(nz,n)-Z_3d_n(nz ,n))/3.0_WP Tmean2=ttf(nz-1,n)+(2*qc+qd)*(zbar_3d_n(nz,n)-Z_3d_n(nz-1,n))/3.0_WP Tmean =(W(nz,n)+abs(W(nz,n)))*Tmean1+(W(nz,n)-abs(W(nz,n)))*Tmean2 - ! flux(nz,n)=-0.5_WP*(num_ord*(Tmean1+Tmean2)*W(nz,n)+(1.0_WP-num_ord)*Tmean)*area(nz,n)-flux(nz,n) flux(nz,n)=(-0.5_WP*(1.0_WP-num_ord)*Tmean - num_ord*(0.5_WP*(Tmean1+Tmean2))*W(nz,n))*area(nz,n)-flux(nz,n) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_ver_qr4c ! ! @@ -389,7 +397,7 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) real(kind=WP) :: tvert(mesh%nl), tv(mesh%nl), aL, aR, aj, x real(kind=WP) :: dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1 integer :: n, nz, nzmax, nzmin - integer :: overshoot_counter, counter +! integer :: overshoot_counter, counter #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -410,8 +418,10 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) ! non-uniformity into account, but this is more cumbersome. This is the version for AB ! time stepping ! -------------------------------------------------------------------------- - overshoot_counter=0 - counter =0 +! overshoot_counter=0 +! counter =0 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert, tv, aL, aR, aj, x, dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1, n, nz, nzmax, nzmin) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ !Interpolate to zbar...depth levels --> all quantities (tracer ...) are @@ -510,12 +520,12 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) ! loop over layers (segments) do nz=nzmin, nzmax-1 if ((W(nz,n)<=0._WP) .AND. (W(nz+1,n)>=0._WP)) CYCLE - counter=counter+1 + !counter=counter+1 aL=tv(nz) aR=tv(nz+1) if ((aR-ttf(nz, n))*(ttf(nz, n)-aL)<=0._WP) then ! write(*,*) aL, ttf(nz, n), aR - overshoot_counter=overshoot_counter+1 + ! overshoot_counter=overshoot_counter+1 aL =ttf(nz, n) aR =ttf(nz, n) end if @@ -552,6 +562,8 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) end do ! --> do n=1, myDim_nod2D ! if (mype==0) write(*,*) 'PPM overshoot statistics:', real(overshoot_counter)/real(counter) +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_vert_ppm ! ! @@ -581,7 +593,8 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) else flux=0.0_WP end if - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, tv, tvert) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n)-1 @@ -605,4 +618,6 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) !_______________________________________________________________________ flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) end do ! --> do n=1, myDim_nod2D +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_ver_cdiff From d115c848badfdecf450df30e57b1bd3c213714d4 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 3 Nov 2021 15:00:36 +0100 Subject: [PATCH 430/909] added -qopenmp into CMake (always compiled with OpenMP) --- src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index fd13d5d2e..01892778c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -67,7 +67,7 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) - target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) # target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) From 49380e625a1bb8c785979a563f90bb5d7da8cc65 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Wed, 3 Nov 2021 15:04:56 +0100 Subject: [PATCH 431/909] More tests and local testing with containers (#194) * rename workflows * add more tests and run_tests scripts * change test path * add simplest test, plus documentation * Update fesom2.1.yml * Update fesom2_icepack.yml --- .github/workflows/fesom2.1.yml | 4 +- .github/workflows/fesom2_icepack.yml | 4 +- setups/paths.yml | 6 +-- setups/test_pi_floatice/setup.yml | 80 ++++++++++++++++++++++++++++ setups/test_pi_linfs/setup.yml | 80 ++++++++++++++++++++++++++++ setups/test_pi_partial/setup.yml | 80 ++++++++++++++++++++++++++++ setups/test_pi_visc7/setup.yml | 80 ++++++++++++++++++++++++++++ setups/test_pi_zstar/setup.yml | 80 ++++++++++++++++++++++++++++ test.sh | 32 +++++++++++ test/run_tests.sh | 21 ++++++++ 10 files changed, 458 insertions(+), 9 deletions(-) create mode 100644 setups/test_pi_floatice/setup.yml create mode 100644 setups/test_pi_linfs/setup.yml create mode 100644 setups/test_pi_partial/setup.yml create mode 100644 setups/test_pi_visc7/setup.yml create mode 100644 setups/test_pi_zstar/setup.yml create mode 100755 test.sh create mode 100755 test/run_tests.sh diff --git a/.github/workflows/fesom2.1.yml b/.github/workflows/fesom2.1.yml index 53cf575ca..733eaf55d 100644 --- a/.github/workflows/fesom2.1.yml +++ b/.github/workflows/fesom2.1.yml @@ -8,11 +8,11 @@ on: [push, pull_request] # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: - gfortran_ubuntu: + general_test: # Containers must run in Linux based operating systems runs-on: ubuntu-latest # Docker Hub image that `container-job` executes in - container: koldunovn/fesom2_test:f2.1_tracers + container: koldunovn/fesom2_test:refactoring # Service containers to run with `gfortran_ubuntu` steps: diff --git a/.github/workflows/fesom2_icepack.yml b/.github/workflows/fesom2_icepack.yml index df4fcd2bd..06d84ba64 100644 --- a/.github/workflows/fesom2_icepack.yml +++ b/.github/workflows/fesom2_icepack.yml @@ -8,11 +8,11 @@ on: [push, pull_request] # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: - gfortran_ubuntu: + icepack_test: # Containers must run in Linux based operating systems runs-on: ubuntu-latest # Docker Hub image that `container-job` executes in - container: koldunovn/fesom2_test:f2.1_tracers + container: koldunovn/fesom2_test:refactoring # Service containers to run with `gfortran_ubuntu` steps: diff --git a/setups/paths.yml b/setups/paths.yml index e74aa8cb6..963785b8c 100644 --- a/setups/paths.yml +++ b/setups/paths.yml @@ -54,18 +54,14 @@ docker: lnodename: - ' ' meshes: - pi: /fesom/pi/ test_souf: ./test/meshes/soufflet/ test_global: ./test/meshes/pi/ forcing: - CORE2: /fesom/dCORE2/ - JRA55: /fesom/dJRA55/ test_global: ./test/input/global/ clim: - phc: /fesom/phc3/ test_global: ./test/input/global/ opath: - opath: ../results/ + opath: ./test/ juwels: lnodename: diff --git a/setups/test_pi_floatice/setup.yml b/setups/test_pi_floatice/setup.yml new file mode 100644 index 000000000..6f99efd5a --- /dev/null +++ b/setups/test_pi_floatice/setup.yml @@ -0,0 +1,80 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + run_config: + use_floatice: True + +namelist.oce: + oce_dyn: + Div_c: 0.5 + Leith_c: 0.05 + w_split: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.26880359680085886 + salt: 23.943630158896298 + temp: 1.7010247885672327 + sst: 8.509590362118958 + u: -0.005721019451264724 + v: 0.00047682952470964814 + + + + + + + + + diff --git a/setups/test_pi_linfs/setup.yml b/setups/test_pi_linfs/setup.yml new file mode 100644 index 000000000..bc604dccc --- /dev/null +++ b/setups/test_pi_linfs/setup.yml @@ -0,0 +1,80 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + ale_def: + which_ALE: "linfs" + +namelist.oce: + oce_dyn: + Div_c: 0.5 + Leith_c: 0.05 + w_split: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.2685778327298968 + salt: 23.944511945072648 + temp: 1.7011044195264193 + sst: 8.51781304844356 + u: -0.0013090250570688075 + v: 0.00013164013131872999 + + + + + + + + + diff --git a/setups/test_pi_partial/setup.yml b/setups/test_pi_partial/setup.yml new file mode 100644 index 000000000..bae697a6a --- /dev/null +++ b/setups/test_pi_partial/setup.yml @@ -0,0 +1,80 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + ale_def: + use_partial_cell: False + +namelist.oce: + oce_dyn: + Div_c: 0.5 + Leith_c: 0.05 + w_split: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.2691270793874835 + salt: 23.944032641762846 + temp: 1.7014629411995628 + sst: 8.531605186060785 + u: -0.0014154276919262456 + v: 0.00013994193864008374 + + + + + + + + + diff --git a/setups/test_pi_visc7/setup.yml b/setups/test_pi_visc7/setup.yml new file mode 100644 index 000000000..c4d616619 --- /dev/null +++ b/setups/test_pi_visc7/setup.yml @@ -0,0 +1,80 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + +namelist.oce: + oce_dyn: + Div_c: 0.5 + Leith_c: 0.05 + w_split: True + visc_option: 7 + + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.2691276109603212 + salt: 23.944024690144552 + temp: 1.7017686482560304 + sst: 8.531529100200583 + u: -0.0014071010764418097 + v: 0.00014173175700137738 + + + + + + + + + diff --git a/setups/test_pi_zstar/setup.yml b/setups/test_pi_zstar/setup.yml new file mode 100644 index 000000000..e487659ae --- /dev/null +++ b/setups/test_pi_zstar/setup.yml @@ -0,0 +1,80 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + ale_def: + which_ALE: "zstar" + +namelist.oce: + oce_dyn: + Div_c: 0.5 + Leith_c: 0.05 + w_split: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.2691276443855294 + salt: 23.944024712806094 + temp: 1.701768707848739 + sst: 8.531522995932146 + u: -0.001407225233294229 + v: 0.00014182969591235959 + + + + + + + + + diff --git a/test.sh b/test.sh new file mode 100755 index 000000000..81fe8ec34 --- /dev/null +++ b/test.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Run simples FESOM2 test in a container. +# +# With singularity on ollie +# +# module load singularity/3.5.1 +# cd fesom2 +# singularity exec /home/ollie/nkolduno/SINGULARITY/fesom_refactoring.sif ./test.sh +# +# With docker on Linux/Mac +# docker run -it -v "$(pwd)"/fesom2:/fesom/fesom2 koldunovn/fesom2_test:refactoring /bin/bash +# cd fesom2 +# ./test.sh +# + +set -e + +machine="docker" +tests="test_pi" + +for test in $tests; do + + ./configure.sh ubuntu + echo $test + mkrun pi $test -m $machine + cd work_pi + chmod +x job_docker_new + ./job_docker_new + fcheck . + cd ../ + +done diff --git a/test/run_tests.sh b/test/run_tests.sh new file mode 100755 index 000000000..f19bdfe50 --- /dev/null +++ b/test/run_tests.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -e +cd ../ + +machine="docker" +tests="test_pi test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7 test_pi_zstar" + +for test in $tests; do + +./configure.sh ubuntu +echo $test + mkrun pi $test -m $machine + pwd + cd work_pi + chmod +x job_docker_new + ./job_docker_new + fcheck . + cd ../ + +done + From 4ec072c6500b4c37065721bc81555ac816e55dde Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 3 Nov 2021 15:16:58 +0100 Subject: [PATCH 432/909] old GNU compiler nows little about OMP locks :(. OpenMP has been commented out per default! --- src/CMakeLists.txt | 2 +- src/oce_adv_tra_fct.F90 | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 01892778c..fd13d5d2e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -67,7 +67,7 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) - target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) # target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 78b45899b..c68facd23 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -80,8 +80,10 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - use g_comm_auto - use omp_lib + USE g_comm_auto +#if defined(_OPENMP) + USE OMP_LIB +#endif implicit none real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh @@ -100,8 +102,10 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, real(kind=WP) :: flux_eps=1e-16 real(kind=WP) :: bignumber=1e3 integer :: vlimit=1 +#if defined(_OPENMP) integer(omp_lock_kind), allocatable, save :: plock(:) integer(omp_lock_kind) :: mlock(partit%myDim_nod2D) +#endif logical, save :: l_first=.true. #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -110,7 +114,8 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, k, elem, enodes, num, el, nl1, nl2, nu1, nu2, nl12, nu12, edge, & !$OMP flux, ae,tvert_max, tvert_min) -!$OMP MASTER +!$OMP MASTER +#if defined(_OPENMP) if (l_first) then allocate(plock(partit%myDim_nod2D+partit%eDim_nod2D)) do n=1, myDim_nod2D+partit%eDim_nod2D @@ -118,6 +123,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, enddo l_first = .false. endif +#endif !$OMP END MASTER ! -------------------------------------------------------------------------- ! ttf is the tracer field on step n @@ -239,19 +245,24 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, nl12 = max(nl1,nl2) nu12 = nu1 if (nu2>0) nu12 = min(nu1,nu2) +#if defined(_OPENMP) call omp_set_lock(plock(enodes(1))) +#endif do nz=nu12, nl12 fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) end do +#if defined(_OPENMP) call omp_unset_lock(plock(enodes(1))) - call omp_set_lock(plock(enodes(2))) +#endif do nz=nu12, nl12 fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) end do +#if defined(_OPENMP) call omp_unset_lock(plock(enodes(2))) +#endif end do !$OMP END DO !___________________________________________________________________________ From 2766326e52bf73e8df6aa1b6d116686083670c9d Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 15:17:08 +0100 Subject: [PATCH 433/909] exchange eta_n and d_eta with dynamics derived type --- src/gen_modules_diag.F90 | 9 ++++++--- src/io_blowup.F90 | 4 ++-- src/io_meandata.F90 | 2 +- src/io_restart.F90 | 4 ++-- src/oce_ale.F90 | 20 ++++++++++++-------- src/oce_ale_vel_rhs.F90 | 4 +++- src/oce_dyn.F90 | 7 ++++--- src/oce_modules.F90 | 1 - src/oce_setup_step.F90 | 4 +--- src/oce_vel_rhs_vinv.F90 | 4 +++- src/write_step_info.F90 | 26 ++++++++++++++++---------- 11 files changed, 50 insertions(+), 35 deletions(-) diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 5015da263..a2a7fed17 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -76,17 +76,20 @@ module diagnostics ! ============================================================== !rhs_diag=ssh_rhs? -subroutine diag_solver(mode, partit, mesh) +subroutine diag_solver(mode, dynamics, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics integer, intent(in) :: mode integer :: n, is, ie logical, save :: firstcall=.true. + real(kind=WP), dimension(:) , pointer :: d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + d_eta =>dynamics%d_eta(:) !===================== if (firstcall) then !allocate the stuff at the first call @@ -677,7 +680,7 @@ subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) real(kind=WP) :: val !1. solver diagnostic - if (ldiag_solver) call diag_solver(mode, partit, mesh) + if (ldiag_solver) call diag_solver(mode, dynamics, partit, mesh) !2. compute curl(stress_surf) if (lcurt_stress_surf) call diag_curl_stress_surf(mode, partit, mesh) !3. compute curl(velocity) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 4b594499b..52b83d251 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -100,8 +100,8 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(bid, 'eta_n' , (/nod2D/) , 'sea surface elevation', 'm', eta_n); - call def_variable(bid, 'd_eta' , (/nod2D/) , 'change in ssh from solver', 'm', d_eta); + call def_variable(bid, 'eta_n' , (/nod2D/) , 'sea surface elevation', 'm', dynamics%eta_n); + call def_variable(bid, 'd_eta' , (/nod2D/) , 'change in ssh from solver', 'm', dynamics%d_eta); !___ALE related fields______________________________________________________ call def_variable(bid, 'hbar' , (/nod2D/) , 'ALE surface elevation hbar_n+0.5', 'm', hbar); !!PS call def_variable(bid, 'hbar_old' , (/nod2D/) , 'ALE surface elevation hbar_n-0.5', 'm', hbar_old); diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 5b499e624..9e53f6f1f 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -155,7 +155,7 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('sss ') call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tracers%data(2)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh ') - call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', dynamics%eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('vve_5 ') call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', dynamics%w(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index dd2f5307d..e5a3f6b5b 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -114,11 +114,11 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(oid, 'ssh', (/nod2D/), 'sea surface elevation', 'm', eta_n); + call def_variable(oid, 'ssh', (/nod2D/), 'sea surface elevation', 'm', dynamics%eta_n); !___ALE related fields______________________________________________________ call def_variable(oid, 'hbar', (/nod2D/), 'ALE surface elevation', 'm', hbar); !!PS call def_variable(oid, 'ssh_rhs', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs); - call def_variable(oid, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs_old); + call def_variable(oid, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', dynamics%ssh_rhs_old); call def_variable(oid, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); !___Define the netCDF variables for 3D fields_______________________________ diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index bdd1d3b34..9e6f0edeb 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -692,19 +692,19 @@ subroutine init_thickness_ale(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - use o_ARRAYS, only: eta_n implicit none integer :: n, nz, elem, elnodes(3), nzmin, nzmax real(kind=WP) :: dd type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn), intent(inout), target :: dynamics - real(kind=WP), dimension(:), pointer :: ssh_rhs_old + real(kind=WP), dimension(:), pointer :: ssh_rhs_old, eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" ssh_rhs_old=>dynamics%ssh_rhs_old(:) + eta_n =>dynamics%eta_n(:) if(mype==0) then write(*,*) '____________________________________________________________' @@ -1886,8 +1886,7 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux, & - eta_n, d_eta + use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1912,6 +1911,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) real(kind=WP), dimension(:,:,:), pointer :: UV real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1923,6 +1923,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) CFL_z =>dynamics%cfl_z(:,:) ssh_rhs =>dynamics%ssh_rhs(:) ssh_rhs_old =>dynamics%ssh_rhs_old(:) + eta_n =>dynamics%eta_n(:) + d_eta =>dynamics%d_eta(:) !___________________________________________________________________________ ! Contributions from levels in divergence @@ -2558,13 +2560,13 @@ end subroutine psolve ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) lfirst=.false. end if - call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, d_eta, new_values) + call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, dynamics%d_eta, new_values) #endif ! ! !___________________________________________________________________________ -call exchange_nod(d_eta, partit) !is this required after calling psolve ? +call exchange_nod(dynamics%d_eta, partit) !is this required after calling psolve ? end subroutine solve_ssh_ale ! @@ -2790,12 +2792,13 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: n, node - + real(kind=WP), dimension(:), pointer :: eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + eta_n => dynamics%eta_n(:) + t0=MPI_Wtime() ! water_flux = 0.0_WP @@ -3049,3 +3052,4 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) write(*,*) end if end subroutine oce_timestep_ale + diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 799094bdf..cabaa7fe0 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -38,7 +38,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - use o_ARRAYS, only: eta_n, coriolis, ssh_gp, pgf_x, pgf_y + use o_ARRAYS, only: coriolis, ssh_gp, pgf_x, pgf_y use i_ARRAYS use i_therm_param use o_PARAM @@ -61,6 +61,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UV_rhs + real(kind=WP), dimension(:) , pointer :: eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -69,6 +70,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) UV =>dynamics%uv(:,:,:) UV_rhs =>dynamics%uv_rhs(:,:,:) UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) + eta_n =>dynamics%eta_n(:) t1=MPI_Wtime() use_pice=0 diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 0bff6401b..8fce89659 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -163,8 +163,6 @@ SUBROUTINE update_vel(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - - USE o_ARRAYS, only: d_eta, eta_n USE o_PARAM USE g_CONFIG use g_comm_auto @@ -176,6 +174,7 @@ SUBROUTINE update_vel(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:), pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -183,7 +182,9 @@ SUBROUTINE update_vel(dynamics, partit, mesh) #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) UV_rhs=>dynamics%uv_rhs(:,:,:) - + eta_n=>dynamics%eta_n(:) + d_eta=>dynamics%d_eta(:) + DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) eta=-g*theta*dt*d_eta(elnodes) diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index b4a261c15..2173ce1d0 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -186,7 +186,6 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) real(kind=WP), allocatable :: UV_dis_posdef_b2(:,:), UV_dis_posdef(:,:), UV_back_posdef(:,:) -real(kind=WP), allocatable :: eta_n(:), d_eta(:) real(kind=WP), allocatable :: hpressure(:,:) real(kind=WP), allocatable :: stress_surf(:,:) real(kind=WP), allocatable :: stress_node_surf(:,:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index f6e80d60b..0116baf5f 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -491,7 +491,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! ================ ! elevation and its rhs ! ================ -allocate(eta_n(node_size), d_eta(node_size)) + ! ================ ! Monin-Obukhov ! ================ @@ -630,8 +630,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Initialize with zeros ! ================= - eta_n=0.0_WP - d_eta=0.0_WP hpressure=0.0_WP ! heat_flux=0.0_WP diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index f1b87d68f..1ba0a34ee 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -118,7 +118,7 @@ end subroutine relative_vorticity ! ========================================================================== subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant USE o_PARAM - USE o_ARRAYS, only: eta_n, coriolis_node, hpressure, vorticity + USE o_ARRAYS, only: coriolis_node, hpressure, vorticity USE MOD_MESH USE MOD_PARTIT @@ -141,6 +141,7 @@ subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv real(kind=WP) :: density0_inv = 1./density_0 real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs, UV_rhsAB + real(kind=WP), dimension(:) , pointer :: eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -148,6 +149,7 @@ subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) UV_rhsAB => dynamics%uv_rhsAB(:,:,:) + eta_n =>dynamics%eta_n(:) w = 0.0_WP diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index e03c5c475..1b84011ab 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -41,7 +41,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use MOD_TRACER use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, & + use o_ARRAYS, only: water_flux, heat_flux, & pgf_x, pgf_y, Av, Kv use i_ARRAYS use g_comm_auto @@ -63,15 +63,18 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode - real(kind=WP), dimension(:,:), pointer :: Wvel, CFL_z + real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) UVnode => dynamics%uvnode(:,:,:) - Wvel => dynamics%w(:,:) - CFL_z => dynamics%cfl_z(:,:) + Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) if (mod(istep,outfreq)==0) then @@ -264,7 +267,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) USE MOD_PARSUP use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, water_flux, stress_surf, & + use o_ARRAYS, only: water_flux, stress_surf, & heat_flux, Kv, Av use i_ARRAYS use g_comm_auto @@ -283,15 +286,18 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) real(kind=WP), dimension(:,:,:), pointer :: UV real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - Wvel => dynamics%w(:,:) - CFL_z => dynamics%cfl_z(:,:) - ssh_rhs => dynamics%ssh_rhs(:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) + ssh_rhs => dynamics%ssh_rhs(:) ssh_rhs_old => dynamics%ssh_rhs_old(:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then From 7b487a44a0310e403e8553c47014505f7766aca6 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 16:16:55 +0100 Subject: [PATCH 434/909] exchange fer_UV and fer_Wvel with dynamics derived type --- src/MOD_DYN.F90 | 22 ++++++++++++++----- src/gen_modules_diag.F90 | 10 ++++----- src/io_meandata.F90 | 6 ++--- src/oce_ale.F90 | 14 +++++++----- src/oce_ale_tracer.F90 | 9 +++++--- src/oce_fer_gm.F90 | 47 +++++++++++++++++++++++++++++++++++----- src/oce_modules.F90 | 1 - src/oce_setup_step.F90 | 12 +++++++--- 8 files changed, 90 insertions(+), 31 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index 920a77b4c..ddaad66a8 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -28,13 +28,15 @@ MODULE MOD_DYN ! option for momentum advection TYPE T_DYN ! instant zonal merdional velocity & Adams-Bashfort rhs - real(kind=WP), allocatable, dimension(:,:,:):: uv, uv_rhs, uv_rhsAB + real(kind=WP), allocatable, dimension(:,:,:):: uv, uv_rhs, uv_rhsAB, fer_uv - ! instant vertical velm explicite+implicite part - real(kind=WP), allocatable, dimension(:,:) :: w, w_e, w_i, cfl_z - + ! horizontal velocities at nodes real(kind=WP), allocatable, dimension(:,:,:):: uvnode, uvnode_rhs + ! instant vertical vel arrays + real(kind=WP), allocatable, dimension(:,:) :: w, w_e, w_i, cfl_z, fer_w + + ! sea surface height arrays real(kind=WP), allocatable, dimension(:) :: eta_n, d_eta, ssh_rhs, ssh_rhs_old ! summarizes solver input parameter @@ -112,9 +114,13 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) call write_bin_array(dynamics%w , unit, iostat, iomsg) call write_bin_array(dynamics%w_e , unit, iostat, iomsg) call write_bin_array(dynamics%w_i , unit, iostat, iomsg) - call write_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + if (Fer_GM) then + call write_bin_array(dynamics%fer_w , unit, iostat, iomsg) + call write_bin_array(dynamics%fer_uv , unit, iostat, iomsg) + end if + !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc @@ -151,9 +157,13 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) call read_bin_array(dynamics%w , unit, iostat, iomsg) call read_bin_array(dynamics%w_e , unit, iostat, iomsg) call read_bin_array(dynamics%w_i , unit, iostat, iomsg) - call read_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + if (Fer_GM) then + call read_bin_array(dynamics%fer_w , unit, iostat, iomsg) + call read_bin_array(dynamics%fer_uv , unit, iostat, iomsg) + end if + !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index a2a7fed17..ddb845731 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -430,15 +430,15 @@ subroutine diag_densMOC(mode, dynamics, tracers, partit, mesh) real(kind=WP), save, allocatable :: std_dens_w(:,:), std_dens_VOL1(:,:), std_dens_VOL2(:,:) logical, save :: firstcall_s=.true., firstcall_e=.true. real(kind=WP), dimension(:,:), pointer :: temp, salt - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - - temp=>tracers%data(1)%values(:,:) - salt=>tracers%data(2)%values(:,:) + UV => dynamics%uv(:,:,:) + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + fer_UV => dynamics%fer_uv(:,:,:) if (firstcall_s) then !allocate the stuff at the first call allocate(std_dens_UVDZ(2,std_dens_N, myDim_elem2D)) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 9e53f6f1f..0b1e3c7ee 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -352,15 +352,15 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) ! output Ferrari/GM parameterisation CASE ('bolus_u ') if (Fer_GM) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_u', 'GM bolus velocity U','m/s', fer_uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_u', 'GM bolus velocity U','m/s', dynamics%fer_uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('bolus_v ') if (Fer_GM) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_v', 'GM bolus velocity V','m/s', fer_uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_v', 'GM bolus velocity V','m/s', dynamics%fer_uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('bolus_w ') if (Fer_GM) then - call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'bolus_w', 'GM bolus velocity W','m/s', fer_Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'bolus_w', 'GM bolus velocity W','m/s', dynamics%fer_w(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('fer_K ') if (Fer_GM) then diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 9e6f0edeb..b97726074 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1886,7 +1886,7 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux + use o_ARRAYS, only: water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1908,8 +1908,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z + real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z, fer_Wvel real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old real(kind=WP), dimension(:) , pointer :: eta_n, d_eta #include "associate_part_def.h" @@ -1925,7 +1925,10 @@ subroutine vert_vel_ale(dynamics, partit, mesh) ssh_rhs_old =>dynamics%ssh_rhs_old(:) eta_n =>dynamics%eta_n(:) d_eta =>dynamics%d_eta(:) - + if (Fer_GM) then + fer_UV =>dynamics%fer_uv(:,:,:) + fer_Wvel =>dynamics%fer_w(:,:) + end if !___________________________________________________________________________ ! Contributions from levels in divergence Wvel=0.0_WP @@ -2784,6 +2787,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use solve_tracers_ale_interface use write_step_info_interface use check_blowup_interface + use fer_solve_interface IMPLICIT NONE type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -2990,7 +2994,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) if (Fer_GM) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call fer_solve_Gamma'//achar(27)//'[0m' call fer_solve_Gamma(partit, mesh) - call fer_gamma2vel(partit, mesh) + call fer_gamma2vel(dynamics, partit, mesh) end if t6=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 5335528d6..78a8e5e94 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -140,7 +140,6 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use g_config use o_PARAM, only: SPP, Fer_GM - use o_arrays, only: fer_Wvel, fer_UV use mod_mesh USE MOD_PARTIT USE MOD_PARSUP @@ -158,8 +157,8 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit integer :: tr_num, node, nzmax, nzmin - real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e + real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, fer_Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -168,6 +167,10 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) UV => dynamics%uv(:,:,:) Wvel => dynamics%w(:,:) Wvel_e => dynamics%w_e(:,:) + if (Fer_GM) then + fer_UV => dynamics%fer_uv(:,:,:) + fer_Wvel => dynamics%fer_w(:,:) + end if !___________________________________________________________________________ if (SPP) call cal_rejected_salt(partit, mesh) diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index ab12e49ae..7db79c91e 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -1,3 +1,35 @@ +module fer_solve_interface + interface + subroutine fer_solve_Gamma(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + + subroutine fer_gamma2vel(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + end subroutine + + subroutine init_Redi_GM(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module + + + !--------------------------------------------------------------------------- !Implementation of Gent & McWiliams parameterization after R. Ferrari et al., 2010 !Contains: @@ -127,12 +159,13 @@ END subroutine fer_solve_Gamma ! ! !==================================================================== -subroutine fer_gamma2vel(partit, mesh) +subroutine fer_gamma2vel(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN USE o_PARAM - USE o_ARRAYS, ONLY: fer_gamma, fer_uv + USE o_ARRAYS, ONLY: fer_gamma USE g_CONFIG use g_comm_auto IMPLICIT NONE @@ -140,14 +173,18 @@ subroutine fer_gamma2vel(partit, mesh) integer :: nz, nzmax, el, elnod(3), nzmin real(kind=WP) :: zinv real(kind=WP) :: onethird=1._WP/3._WP + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - + type(t_mesh) , intent(in), target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: fer_UV + real(kind=WP), dimension(:,:) , pointer :: fer_Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + fer_UV =>dynamics%fer_uv(:,:,:) + fer_Wvel =>dynamics%fer_w(:,:) + DO el=1, myDim_elem2D elnod=elem2D_nodes(:,el) ! max. number of levels at element el diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 2173ce1d0..5f613cb82 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -254,7 +254,6 @@ MODULE o_ARRAYS !GM_stuff real(kind=WP),allocatable :: bvfreq(:,:),mixlay_dep(:),bv_ref(:) -real(kind=WP), allocatable :: fer_UV(:,:,:), fer_wvel(:,:) real(kind=WP), target, allocatable :: fer_c(:), fer_scal(:), fer_K(:,:), fer_gamma(:,:,:) real(kind=WP), allocatable :: ice_rejected_salt(:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 0116baf5f..6a0aa8e05 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -353,6 +353,7 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN + USE o_param IMPLICIT NONE integer :: elem_size, node_size integer, save :: nm_unit = 104 ! unit to open namelist file, skip 100-102 for cray @@ -425,6 +426,14 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%eta_n = 0.0_WP dynamics%d_eta = 0.0_WP dynamics%ssh_rhs = 0.0_WP + + if (Fer_GM) then + allocate(dynamics%fer_uv(2, nl-1, elem_size)) + allocate(dynamics%fer_w( nl, node_size)) + dynamics%fer_uv = 0.0_WP + dynamics%fer_w = 0.0_WP + end if + !!PS dynamics%ssh_rhs_old= 0.0_WP ! set parameters in derived type @@ -612,10 +621,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) if (Fer_GM) then allocate(fer_c(node_size),fer_scal(node_size), fer_gamma(2, nl, node_size), fer_K(nl, node_size)) - allocate(fer_wvel(nl, node_size), fer_UV(2, nl-1, elem_size)) fer_gamma=0.0_WP - fer_uv=0.0_WP - fer_wvel=0.0_WP fer_K=500._WP fer_c=1._WP fer_scal = 0.0_WP From 7388062d705e2c995a807374395b2af21a37a32d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 16:17:17 +0100 Subject: [PATCH 435/909] use 3-part FESOM main program --- src/fvom_main.F90 | 380 +--------------------------------------------- 1 file changed, 6 insertions(+), 374 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index a9cb7142f..a48953ed2 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -7,380 +7,12 @@ !=============================================================================! program main -USE MOD_MESH -USE MOD_TRACER -USE MOD_PARTIT -USE MOD_PARSUP -USE o_ARRAYS -USE o_PARAM -USE i_PARAM -use i_ARRAYS -use g_clock -use g_config -use g_comm_auto -use g_forcing_arrays -use io_RESTART -use io_MEANDATA -use io_mesh_info -use diagnostics -use mo_tidal -use tracer_init_interface -use ocean_setup_interface -use ice_setup_interface -use ocean2ice_interface -use oce_fluxes_interface -use update_atm_forcing_interface -use before_oce_step_interface -use oce_timestep_ale_interface -use read_mesh_interface -use fesom_version_info_module -use command_line_options_module -! Define icepack module -#if defined (__icepack) -use icedrv_main, only: set_icepack, init_icepack, alloc_icepack -#endif + use fvom_module -#if defined (__oasis) -use cpl_driver -#endif + integer nsteps -IMPLICIT NONE + call fesom_init(nsteps) + call fesom_runloop(nsteps) + call fesom_finalize -integer :: n, nsteps, offset, row, i, provided -integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM -real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc -real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing -real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing -real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart -real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) -real(kind=real32) :: runtime_alltimesteps - - -type(t_mesh), target, save :: mesh -type(t_tracer), target, save :: tracers -type(t_partit), target, save :: partit - - -character(LEN=256) :: dump_dir, dump_filename -logical :: L_EXISTS -type(t_mesh), target, save :: mesh_copy -type(t_tracer), target, save :: tracers_copy - -character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt -integer mpi_version_len - - - if(command_argument_count() > 0) then - call command_line_options%parse() - stop - end if - -#ifndef __oifs - !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) - !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS - call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, i) -#endif - - -#if defined (__oasis) - call cpl_oasis3mct_init(partit%MPI_COMM_FESOM) -#endif - t1 = MPI_Wtime() - - call par_init(partit) - - mype =>partit%mype - MPIerr =>partit%MPIerr - MPI_COMM_FESOM=>partit%MPI_COMM_FESOM - npes =>partit%npes - if(mype==0) then - write(*,*) - print *,"FESOM2 git SHA: "//fesom_git_sha() - call MPI_Get_library_version(mpi_version_txt, mpi_version_len, MPIERR) - print *,"MPI library version: "//trim(mpi_version_txt) - print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;32m'//' --> FESOM BUILDS UP MODEL CONFIGURATION '//achar(27)//'[0m' - end if - !===================== - ! Read configuration data, - ! load the mesh and fill in - ! auxiliary mesh arrays - !===================== - call setup_model(partit) ! Read Namelists, always before clock_init - call clock_init(partit) ! read the clock file - call get_run_steps(nsteps, partit) - call mesh_setup(partit, mesh) - - if (mype==0) write(*,*) 'FESOM mesh_setup... complete' - - !===================== - ! Allocate field variables - ! and additional arrays needed for - ! fancy advection etc. - !===================== - call check_mesh_consistency(partit, mesh) - if (mype==0) t2=MPI_Wtime() - - call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") - call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) - call ocean_setup(tracers, partit, mesh) - - if (mype==0) then - write(*,*) 'FESOM ocean_setup... complete' - t3=MPI_Wtime() - endif - call forcing_setup(partit, mesh) - - if (mype==0) t4=MPI_Wtime() - if (use_ice) then - call ice_setup(tracers, partit, mesh) - ice_steps_since_upd = ice_ave_steps-1 - ice_update=.true. - if (mype==0) write(*,*) 'EVP scheme option=', whichEVP - endif - if (mype==0) t5=MPI_Wtime() - call compute_diagnostics(0, tracers, partit, mesh) ! allocate arrays for diagnostic -#if defined (__oasis) - call cpl_oasis3mct_define_unstr(partit, mesh) - if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv -#endif - -#if defined (__icepack) - !===================== - ! Setup icepack - !===================== - if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' - call set_icepack(partit) - call alloc_icepack - call init_icepack(tracers%data(1), mesh) - if (mype==0) write(*,*) 'Icepack: setup complete' -#endif - call clock_newyear ! check if it is a new year - if (mype==0) t6=MPI_Wtime() - !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ - ! The interface to the restart module is made via call restart ! - ! The inputs are: istep, l_write, l_create - ! if istep is not zero it will be decided whether restart shall be written - ! if l_write is TRUE the restart will be forced - ! if l_read the restart will be read - ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, tracers, partit, mesh) ! istep, l_write, l_read - if (mype==0) t7=MPI_Wtime() - ! store grid information into netcdf file - if (.not. r_restart) call write_mesh_info(partit, mesh) - - !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ - !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ - if (r_restart) then - call restart_thickness_ale(partit, mesh) - end if - if (mype==0) then - t8=MPI_Wtime() - - rtime_setup_mesh = real( t2 - t1 ,real32) - rtime_setup_ocean = real( t3 - t2 ,real32) - rtime_setup_forcing = real( t4 - t3 ,real32) - rtime_setup_ice = real( t5 - t4 ,real32) - rtime_setup_restart = real( t7 - t6 ,real32) - rtime_setup_other = real((t8 - t7) + (t6 - t5) ,real32) - - write(*,*) '==========================================' - write(*,*) 'MODEL SETUP took on mype=0 [seconds] ' - write(*,*) 'runtime setup total ',real(t8-t1,real32) - write(*,*) ' > runtime setup mesh ',rtime_setup_mesh - write(*,*) ' > runtime setup ocean ',rtime_setup_ocean - write(*,*) ' > runtime setup forcing ',rtime_setup_forcing - write(*,*) ' > runtime setup ice ',rtime_setup_ice - write(*,*) ' > runtime setup restart ',rtime_setup_restart - write(*,*) ' > runtime setup other ',rtime_setup_other - write(*,*) '============================================' - endif - - DUMP_DIR='DUMP/' - INQUIRE(file=trim(dump_dir), EXIST=L_EXISTS) - if (.not. L_EXISTS) call system('mkdir '//trim(dump_dir)) - - write (dump_filename, "(A7,I7.7)") "t_mesh.", mype - open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") - write (mype+300) mesh - close (mype+300) - -! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") -! read (mype+300) mesh_copy -! close (mype+300) - - write (dump_filename, "(A9,I7.7)") "t_tracer.", mype - open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") - write (mype+300) tracers - close (mype+300) - -! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") -! read (mype+300) tracers_copy -! close (mype+300) - -!call par_ex(partit%MPI_COMM_FESOM, partit%mype) -!stop -! -! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value - - !===================== - ! Time stepping - !===================== - -! Initialize timers - rtime_fullice = 0._WP - rtime_write_restart = 0._WP - rtime_write_means = 0._WP - rtime_compute_diag = 0._WP - rtime_read_forcing = 0._WP - - if (mype==0) write(*,*) 'FESOM start iteration before the barrier...' - call MPI_Barrier(MPI_COMM_FESOM, MPIERR) - - if (mype==0) then - write(*,*) 'FESOM start iteration after the barrier...' - t0 = MPI_Wtime() - endif - if(mype==0) then - write(*,*) - print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;32m'//' --> FESOM STARTS TIME LOOP '//achar(27)//'[0m' - end if - !___MODEL TIME STEPPING LOOP________________________________________________ - if (use_global_tides) then - call foreph_ini(yearnew, month, partit) - end if - do n=1, nsteps - if (use_global_tides) then - call foreph(partit, mesh) - end if - mstep = n - if (mod(n,logfile_outfreq)==0 .and. mype==0) then - write(*,*) 'FESOM =======================================================' -! write(*,*) 'FESOM step:',n,' day:', n*dt/24./3600., - write(*,*) 'FESOM step:',n,' day:', daynew,' year:',yearnew - write(*,*) - end if -#if defined (__oifs) || defined (__oasis) - seconds_til_now=INT(dt)*(n-1) -#endif - call clock - !___compute horizontal velocity on nodes (originaly on elements)________ - call compute_vel_nodes(partit, mesh) - !___model sea-ice step__________________________________________________ - t1 = MPI_Wtime() - if(use_ice) then - !___compute fluxes from ocean to ice________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(tracers, partit, mesh) - - !___compute update of atmospheric forcing____________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' - t0_frc = MPI_Wtime() - call update_atm_forcing(n, tracers, partit, mesh) - t1_frc = MPI_Wtime() - !___compute ice step________________________________________________ - if (ice_steps_since_upd>=ice_ave_steps-1) then - ice_update=.true. - ice_steps_since_upd = 0 - else - ice_update=.false. - ice_steps_since_upd=ice_steps_since_upd+1 - endif - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ice_timestep(n)'//achar(27)//'[0m' - if (ice_update) call ice_timestep(n, partit, mesh) - !___compute fluxes to the ocean: heat, freshwater, momentum_________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(partit, mesh) ! momentum only - call oce_fluxes(tracers, partit, mesh) - end if - call before_oce_step(tracers, partit, mesh) ! prepare the things if required - t2 = MPI_Wtime() - !___model ocean step____________________________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - - call oce_timestep_ale(n, tracers, partit, mesh) - - t3 = MPI_Wtime() - !___compute energy diagnostics..._______________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, tracers, partit, mesh) - - t4 = MPI_Wtime() - !___prepare output______________________________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, tracers, partit, mesh) - - t5 = MPI_Wtime() - call restart(n, .false., .false., tracers, partit, mesh) - t6 = MPI_Wtime() - - rtime_fullice = rtime_fullice + t2 - t1 - rtime_compute_diag = rtime_compute_diag + t4 - t3 - rtime_write_means = rtime_write_means + t5 - t4 - rtime_write_restart = rtime_write_restart + t6 - t5 - rtime_read_forcing = rtime_read_forcing + t1_frc - t0_frc - end do - - call finalize_output() - - !___FINISH MODEL RUN________________________________________________________ - - call MPI_Barrier(MPI_COMM_FESOM, MPIERR) - if (mype==0) then - t1 = MPI_Wtime() - runtime_alltimesteps = real(t1-t0,real32) - write(*,*) 'FESOM Run is finished, updating clock' - endif - - mean_rtime(1) = rtime_oce - mean_rtime(2) = rtime_oce_mixpres - mean_rtime(3) = rtime_oce_dyn - mean_rtime(4) = rtime_oce_dynssh - mean_rtime(5) = rtime_oce_solvessh - mean_rtime(6) = rtime_oce_GMRedi - mean_rtime(7) = rtime_oce_solvetra - mean_rtime(8) = rtime_ice - mean_rtime(9) = rtime_tot - mean_rtime(10) = rtime_fullice - rtime_read_forcing - mean_rtime(11) = rtime_compute_diag - mean_rtime(12) = rtime_write_means - mean_rtime(13) = rtime_write_restart - mean_rtime(14) = rtime_read_forcing - - max_rtime(1:14) = mean_rtime(1:14) - min_rtime(1:14) = mean_rtime(1:14) - - call MPI_AllREDUCE(MPI_IN_PLACE, mean_rtime, 14, MPI_REAL, MPI_SUM, MPI_COMM_FESOM, MPIerr) - mean_rtime(1:14) = mean_rtime(1:14) / real(npes,real32) - call MPI_AllREDUCE(MPI_IN_PLACE, max_rtime, 14, MPI_REAL, MPI_MAX, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(MPI_IN_PLACE, min_rtime, 14, MPI_REAL, MPI_MIN, MPI_COMM_FESOM, MPIerr) - - if (mype==0) then - write(*,*) '___MODEL RUNTIME mean, min, max per task [seconds]________________________' - write(*,*) ' runtime ocean:',mean_rtime(1), min_rtime(1), max_rtime(1) - write(*,*) ' > runtime oce. mix,pres. :',mean_rtime(2), min_rtime(2), max_rtime(2) - write(*,*) ' > runtime oce. dyn. u,v,w:',mean_rtime(3), min_rtime(3), max_rtime(3) - write(*,*) ' > runtime oce. dyn. ssh :',mean_rtime(4), min_rtime(4), max_rtime(4) - write(*,*) ' > runtime oce. solve ssh:',mean_rtime(5), min_rtime(5), max_rtime(5) - write(*,*) ' > runtime oce. GM/Redi :',mean_rtime(6), min_rtime(6), max_rtime(6) - write(*,*) ' > runtime oce. tracer :',mean_rtime(7), min_rtime(7), max_rtime(7) - write(*,*) ' runtime ice :',mean_rtime(10), min_rtime(10), max_rtime(10) - write(*,*) ' > runtime ice step :',mean_rtime(8), min_rtime(8), max_rtime(8) - write(*,*) ' runtime diag: ', mean_rtime(11), min_rtime(11), max_rtime(11) - write(*,*) ' runtime output: ', mean_rtime(12), min_rtime(12), max_rtime(12) - write(*,*) ' runtime restart:', mean_rtime(13), min_rtime(13), max_rtime(13) - write(*,*) ' runtime forcing:', mean_rtime(14), min_rtime(14), max_rtime(14) - write(*,*) ' runtime total (ice+oce):',mean_rtime(9), min_rtime(9), max_rtime(9) - write(*,*) - write(*,*) '============================================' - write(*,*) '=========== BENCHMARK RUNTIME ==============' - write(*,*) ' Number of cores : ',npes - write(*,*) ' Runtime for all timesteps : ',runtime_alltimesteps,' sec' - write(*,*) '============================================' - write(*,*) - end if -! call clock_finish - call par_ex(partit%MPI_COMM_FESOM, partit%mype) -end program main - +end program From 446b247f7f07138b506fdcf964fa18bca7caa3ae Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 16:25:30 +0100 Subject: [PATCH 436/909] exchange Unode_rhs with dynamics derived type --- src/oce_ale_vel_rhs.F90 | 50 ++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index cabaa7fe0..71e0487c2 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -204,17 +204,17 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) integer :: nl1, nl2, ul1, ul2, nod(2), el, ed, k, nle, ule real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) -real(kind=WP) :: Unode_rhs(2,mesh%nl-1,partit%myDim_nod2d+partit%eDim_nod2D) -real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB +real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UVnode_rhs real(kind=WP), dimension(:,:), pointer :: Wvel_e #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV =>dynamics%uv(:,:,:) - UV_rhsAB=>dynamics%uv_rhsAB(:,:,:) - Wvel_e =>dynamics%w_e(:,:) + UV =>dynamics%uv(:,:,:) + UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) + UVnode_rhs=>dynamics%uvnode_rhs(:,:,:) + Wvel_e =>dynamics%w_e(:,:) !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz @@ -261,15 +261,15 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) !!PS if (ul1>1) write(*,*) mype, wu(ul1:nl1) ! Here 1/3 because 1/3 of the area is related to the node --> comes from ! averaging the elemental velocities - Unode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) - Unode_rhs(2,nz,n) = - (wv(nz) - wv(nz+1) ) / (3._WP*hnode(nz,n)) + UVnode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) + UVnode_rhs(2,nz,n) = - (wv(nz) - wv(nz+1) ) / (3._WP*hnode(nz,n)) enddo !_______________________________________________________________________ ! To get a clean checksum, set the remaining values to zero - Unode_rhs(1:2,nl1+1:nl-1,n) = 0._WP - Unode_rhs(1:2,1:ul1-1 ,n) = 0._WP + UVnode_rhs(1:2,nl1+1:nl-1,n) = 0._WP + UVnode_rhs(1:2,1:ul1-1 ,n) = 0._WP end do @@ -327,8 +327,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(1) <= myDim_nod2d) then do nz=min(ul1,ul2), max(nl1,nl2) ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) - Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) + UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) + UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) end do endif @@ -336,8 +336,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(2) <= myDim_nod2d) then do nz=min(ul1,ul2), max(nl1,nl2) ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) - Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) + UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) + UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) end do endif @@ -346,8 +346,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(1) <= myDim_nod2d) then do nz=ul1, nl1 ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) - Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) end do ! --> do nz=ul1, nl1 endif @@ -356,8 +356,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) !!PS do nz=1, nl1 do nz=ul1, nl1 ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) + UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) + UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) end do ! --> do nz=ul1, nl1 endif endif ! --> if (el2>0) then @@ -368,14 +368,14 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) do n=1,myDim_nod2d nl1 = nlevels_nod2D(n)-1 ul1 = ulevels_nod2D(n) -!!PS Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity -!!PS Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity - Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) - Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) +!!PS UVnode_rhs(1,ul1:nl1,n) = UVnode_rhs(1,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity +!!PS UVnode_rhs(2,ul1:nl1,n) = UVnode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity + UVnode_rhs(1,ul1:nl1,n) = UVnode_rhs(1,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) + UVnode_rhs(2,ul1:nl1,n) = UVnode_rhs(2,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) end do !-->do n=1,myDim_nod2d !___________________________________________________________________________ - call exchange_nod(Unode_rhs, partit) + call exchange_nod(UVnode_rhs, partit) !___________________________________________________________________________ ! convert total nodal advection from vertice --> elements @@ -383,9 +383,9 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) nl1 = nlevels(el)-1 ul1 = ulevels(el) UV_rhsAB(1:2,ul1:nl1,el) = UV_rhsAB(1:2,ul1:nl1,el) & - + elem_area(el)*(Unode_rhs(1:2,ul1:nl1,elem2D_nodes(1,el)) & - + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(2,el)) & - + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP + + elem_area(el)*(UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(1,el)) & + + UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(2,el)) & + + UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP end do ! --> do el=1, myDim_elem2D end subroutine momentum_adv_scalar From 4bef7606f30055f79e8acd2000001f2e83c3b56e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 17:14:12 +0100 Subject: [PATCH 437/909] compile FESOM with oenmp support --- src/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index fd13d5d2e..9fbc8a8dc 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -4,6 +4,8 @@ project(fesom C Fortran) option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) +find_package(OpenMP REQUIRED) + # get our source files set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt if(${USE_ICEPACK}) @@ -84,6 +86,7 @@ target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARI target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) target_link_libraries(${PROJECT_NAME} async_threads_cpp) set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) +target_link_libraries(${PROJECT_NAME} OpenMP::OpenMP_Fortran) set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) From 6d89159f6274cb370231c5c84e00737ea769e999 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 17:20:13 +0100 Subject: [PATCH 438/909] be able to switch building with OpenMP on and off --- src/CMakeLists.txt | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9fbc8a8dc..9500ef1ea 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -4,7 +4,11 @@ project(fesom C Fortran) option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) -find_package(OpenMP REQUIRED) +option(ENABLE_OPENMP "build FESOM with OpenMP" OFF) +if(${ENABLE_OPENMP}) + find_package(OpenMP REQUIRED) +endif() + # get our source files set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt @@ -86,7 +90,9 @@ target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARI target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) target_link_libraries(${PROJECT_NAME} async_threads_cpp) set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) -target_link_libraries(${PROJECT_NAME} OpenMP::OpenMP_Fortran) +if(${ENABLE_OPENMP}) + target_link_libraries(${PROJECT_NAME} OpenMP::OpenMP_Fortran) +endif() set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) From 4913a463891a77e92cb4d5ef483b432e60e845c4 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 17:20:48 +0100 Subject: [PATCH 439/909] exchange flags of solver with flags from dynamics drived type --- src/MOD_DYN.F90 | 16 ++-- src/oce_ale.F90 | 215 ++++++++++++++++++------------------------------ 2 files changed, 88 insertions(+), 143 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index ddaad66a8..39bf61d0f 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -11,14 +11,14 @@ MODULE MOD_DYN ! !_______________________________________________________________________________ TYPE T_solverinfo - integer :: maxiter=2000 - integer :: restarts=15 - integer :: fillin=3 - integer :: lutype=2 - integer :: nrhs=1 - real(kind=WP) :: droptol=1.e-7 - real(kind=WP) :: soltol =1e-10 !1.e-10 - logical :: lfirst=.true. + integer :: ident = 1 + integer :: maxiter = 2000 + integer :: restart = 15 + integer :: fillin = 3 + integer :: lutype = 2 + real(kind=WP) :: droptol=1.e-8 + real(kind=WP) :: soltol =1e-10 !1.e-10 + END TYPE T_solverinfo ! diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index b97726074..88a751677 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2423,153 +2423,98 @@ end subroutine vert_vel_ale ! solve eq.18 in S. Danilov et al. : FESOM2: from finite elements to finite volumes. ! for (eta^(n+1)-eta^n) = d_eta subroutine solve_ssh_ale(dynamics, partit, mesh) -use o_PARAM -use MOD_MESH -use o_ARRAYS -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_DYN -use g_comm_auto -use g_config, only: which_ale - ! - ! - !___USE PETSC SOLVER________________________________________________________ - ! this is not longer used but is still kept in the code -#ifdef PETSC -implicit none -#include "petscf.h" -integer :: myrows -integer :: Pmode -real(kind=WP) :: rinfo(20,20) -integer :: maxiter=2000 -integer :: restarts=15 -integer :: fillin=3 -integer :: lutype=2 -integer :: nrhs=1 -real(kind=WP) :: droptol=1.e-7 -real(kind=WP) :: soltol =1e-10 !1.e-10 -logical, save :: lfirst=.true. -real(kind=WP), allocatable :: arr_nod2D(:),arr_nod2D2(:,:),arr_nod2D3(:) -real(kind=WP) :: cssh1,cssh2,crhs -integer :: i -type(t_mesh) , intent(inout), target :: mesh -type(t_partit), intent(inout), target :: partit -type(t_dyn) , intent(inout), target :: dynamics -!!PS real(kind=WP), dimension(:), pointer :: ssh_rhs - -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" -!!PS ssh_rhs=>dynamics%ssh_rhs(:) - -Pmode = PET_BLOCKP+PET_SOLVE + PET_BICGSTAB +PET_REPORT + PET_QUIET+ PET_RCM+PET_PCBJ -if (lfirst) then - Pmode = Pmode+PET_STRUCT+PET_PMVALS + PET_PCASM+PET_OVL_2 !+PET_PCBJ+PET_ILU - lfirst=.false. -end if -call PETSC_S(Pmode, 1, ssh_stiff%dim, ssh_stiff%nza, myrows, & - maxiter, & - restarts, & - fillin, & - droptol, & - soltol, & - part, ssh_stiff%rowptr, ssh_stiff%colind, ssh_stiff%values, & - dynamics%ssh_rhs, d_eta, & - rinfo, MPI_COMM_FESOM, mesh) - ! - ! - !___USE PARMS SOLVER (recommended)__________________________________________ -#elif defined(PARMS) - - use iso_c_binding, only: C_INT, C_DOUBLE - implicit none + use o_PARAM + use MOD_MESH + use o_ARRAYS + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + use g_comm_auto + use g_config, only: which_ale + use iso_c_binding, only: C_INT, C_DOUBLE + implicit none #include "fparms.h" -logical, save :: lfirst=.true. -integer(kind=C_INT) :: ident -integer(kind=C_INT) :: n3, reuse, new_values -integer(kind=C_INT) :: maxiter, restart, lutype, fillin -real(kind=C_DOUBLE) :: droptol, soltol -integer :: n -type(t_mesh) , intent(inout), target :: mesh -type(t_partit), intent(inout), target :: partit -type(t_dyn) , intent(inout), target :: dynamics - - -interface - subroutine psolver_init(ident, SOL, PCGLOB, PCLOC, lutype, & - fillin, droptol, maxiter, restart, soltol, & - part, rowptr, colind, values, reuse, MPI_COMM) bind(C) - use iso_c_binding, only: C_INT, C_DOUBLE - integer(kind=C_INT) :: ident, SOL, PCGLOB, PCLOC, lutype, & - fillin, maxiter, restart, & - part(*), rowptr(*), colind(*), reuse, MPI_COMM - real(kind=C_DOUBLE) :: droptol, soltol, values(*) - end subroutine psolver_init -end interface -interface - subroutine psolve(ident, ssh_rhs, values, d_eta, newvalues) bind(C) - - use iso_c_binding, only: C_INT, C_DOUBLE - integer(kind=C_INT) :: ident, newvalues - real(kind=C_DOUBLE) :: values(*), ssh_rhs(*), d_eta(*) - - end subroutine psolve -end interface + logical, save :: lfirst=.true. + integer(kind=C_INT) :: n3, reuse, new_values + integer :: n + type(t_mesh) , intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + real(kind=C_DOUBLE), pointer :: droptol, soltol + integer(kind=C_INT), pointer :: maxiter, restart, lutype, fillin, ident + + interface + subroutine psolver_init(ident, SOL, PCGLOB, PCLOC, lutype, & + fillin, droptol, maxiter, restart, soltol, & + part, rowptr, colind, values, reuse, MPI_COMM) bind(C) + use iso_c_binding, only: C_INT, C_DOUBLE + integer(kind=C_INT) :: ident, SOL, PCGLOB, PCLOC, lutype, & + fillin, maxiter, restart, & + part(*), rowptr(*), colind(*), reuse, MPI_COMM + real(kind=C_DOUBLE) :: droptol, soltol, values(*) + end subroutine psolver_init + end interface + interface + subroutine psolve(ident, ssh_rhs, values, d_eta, newvalues) bind(C) + use iso_c_binding, only: C_INT, C_DOUBLE + integer(kind=C_INT) :: ident, newvalues + real(kind=C_DOUBLE) :: values(*), ssh_rhs(*), d_eta(*) + end subroutine psolve + end interface #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + ident => dynamics%solverinfo%ident + maxiter => dynamics%solverinfo%maxiter + restart => dynamics%solverinfo%restart + lutype => dynamics%solverinfo%lutype + fillin => dynamics%solverinfo%fillin + droptol => dynamics%solverinfo%droptol + soltol => dynamics%solverinfo%soltol + + if (trim(which_ale)=='linfs') then + reuse=0 + new_values=0 + else + reuse=1 ! For varying coefficients, set reuse=1 + new_values=1 !PS 1 ! and new_values=1, as soon as the coefficients have changed + end if -ident=1 -maxiter=2000 -restart=15 -fillin=3 -lutype=2 -droptol=1.e-8 -soltol=1.e-10 - -if (trim(which_ale)=='linfs') then - reuse=0 - new_values=0 -else - reuse=1 ! For varying coefficients, set reuse=1 - new_values=1 !PS 1 ! and new_values=1, as soon as the coefficients have changed -end if - -! reuse=0: matrix remains static -! reuse=1: keeps a copy of the matrix structure to apply scaling of the matrix fast - -! new_values=0: matrix coefficients unchanged (compared to the last call of psolve) -! new_values=1: replaces the matrix values (keeps the structure and the preconditioner) -! new_values=2: replaces the matrix values and recomputes the preconditioner (keeps the structure) - -! new_values>0 requires reuse=1 in psolver_init! - -if (lfirst) then - ! Set SOLCG for CG solver (symmetric, positiv definit matrices only, no precond available!!) - ! SOLBICGS for BiCGstab solver (arbitrary matrices) - ! SOLBICGS_RAS for BiCGstab solver (arbitrary matrices) with integrated RAS - the global - ! preconditioner setting is ignored! It saves a 4 vector copies per iteration - ! compared to SOLBICGS + PCRAS. - ! SOLPBICGS for pipelined BiCGstab solver (arbitrary matrices) - ! Should scale better than SOLBICGS, but be careful, it is still experimental. - ! SOLPBICGS_RAS is SOLPBICGS with integrated RAS (global preconditioner setting is ignored!) - ! for even better scalability, well, in the end, it does not matter much. - call psolver_init(ident, SOLBICGS_RAS, PCRAS, PCILUK, lutype, & - fillin, droptol, maxiter, restart, soltol, & - part-1, ssh_stiff%rowptr(:)-ssh_stiff%rowptr(1), & - ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) - lfirst=.false. -end if + ! reuse=0: matrix remains static + ! reuse=1: keeps a copy of the matrix structure to apply scaling of the matrix fast + + ! new_values=0: matrix coefficients unchanged (compared to the last call of psolve) + ! new_values=1: replaces the matrix values (keeps the structure and the preconditioner) + ! new_values=2: replaces the matrix values and recomputes the preconditioner (keeps the structure) + + ! new_values>0 requires reuse=1 in psolver_init! + + if (lfirst) then + ! Set SOLCG for CG solver (symmetric, positiv definit matrices only, no precond available!!) + ! SOLBICGS for BiCGstab solver (arbitrary matrices) + ! SOLBICGS_RAS for BiCGstab solver (arbitrary matrices) with integrated RAS - the global + ! preconditioner setting is ignored! It saves a 4 vector copies per iteration + ! compared to SOLBICGS + PCRAS. + ! SOLPBICGS for pipelined BiCGstab solver (arbitrary matrices) + ! Should scale better than SOLBICGS, but be careful, it is still experimental. + ! SOLPBICGS_RAS is SOLPBICGS with integrated RAS (global preconditioner setting is ignored!) + ! for even better scalability, well, in the end, it does not matter much. + call psolver_init(ident, SOLBICGS_RAS, PCRAS, PCILUK, lutype, & + fillin, droptol, maxiter, restart, soltol, & + part-1, ssh_stiff%rowptr(:)-ssh_stiff%rowptr(1), & + ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) + lfirst=.false. + end if + call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, dynamics%d_eta, new_values) -#endif + ! ! !___________________________________________________________________________ -call exchange_nod(dynamics%d_eta, partit) !is this required after calling psolve ? + call exchange_nod(dynamics%d_eta, partit) !is this required after calling psolve ? end subroutine solve_ssh_ale ! From 454e28da8823ac158e3b299d8c15575b9bdb9eb7 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 17:30:12 +0100 Subject: [PATCH 440/909] update layout --- src/MOD_DYN.F90 | 5 ++--- src/oce_ale.F90 | 7 ++++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index 39bf61d0f..a17fd0651 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -16,9 +16,8 @@ MODULE MOD_DYN integer :: restart = 15 integer :: fillin = 3 integer :: lutype = 2 - real(kind=WP) :: droptol=1.e-8 - real(kind=WP) :: soltol =1e-10 !1.e-10 - + real(kind=WP) :: droptol = 1.e-8 + real(kind=WP) :: soltol = 1e-10 !1.e-10 END TYPE T_solverinfo ! diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 88a751677..693bc5047 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2491,6 +2491,8 @@ end subroutine psolve ! new_values>0 requires reuse=1 in psolver_init! + ! + !___________________________________________________________________________ if (lfirst) then ! Set SOLCG for CG solver (symmetric, positiv definit matrices only, no precond available!!) ! SOLBICGS for BiCGstab solver (arbitrary matrices) @@ -2507,11 +2509,10 @@ end subroutine psolve ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) lfirst=.false. end if - + ! + !___________________________________________________________________________ call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, dynamics%d_eta, new_values) - - ! ! !___________________________________________________________________________ call exchange_nod(dynamics%d_eta, partit) !is this required after calling psolve ? From 1985e527e26f48a48f2634309464128a452560de Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 17:37:54 +0100 Subject: [PATCH 441/909] move subroutine relative_vorticity to oce_dyn.F90 and delete vector invariant momentum advection../src/oce_vel_rhs_vinv.F90 --- src/oce_ale.F90 | 13 +- src/oce_dyn.F90 | 119 ++++++++++++- src/oce_vel_rhs_vinv.F90 | 353 --------------------------------------- 3 files changed, 117 insertions(+), 368 deletions(-) delete mode 100755 src/oce_vel_rhs_vinv.F90 diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 693bc5047..561d5c2b2 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2862,18 +2862,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_vel_rhs'//achar(27)//'[0m' - -!!PS if (any(UV_rhs/=UV_rhs)) write(*,*) n, mype,' --> found NaN UV_rhs before compute_vel_rhs' -!!PS if (any(UV/=UV)) write(*,*) n, mype,' --> found NaN UV before compute_vel_rhs' -!!PS if (any(ssh_rhs/=ssh_rhs)) write(*,*) n, mype,' --> found NaN ssh_rhs before compute_vel_rhs' -!!PS if (any(ssh_rhs_old/=ssh_rhs_old)) write(*,*) n, mype,' --> found NaN ssh_rhs_old before compute_vel_rhs' -!!PS if (any(abs(Wvel_e)>1.0e20)) write(*,*) n, mype,' --> found Inf Wvel_e before compute_vel_rhs' - - if(mom_adv/=3) then - call compute_vel_rhs(dynamics, partit, mesh) - else - call compute_vel_rhs_vinv(partit, mesh) - end if + call compute_vel_rhs(dynamics, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call viscosity_filter'//achar(27)//'[0m' diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 8fce89659..a02c03d4a 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -153,6 +153,20 @@ subroutine uke_update(dynamics, partit, mesh) end interface end module +module relative_vorticity_interface + interface + subroutine relative_vorticity(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + + end subroutine + end interface +end module ! =================================================================== ! Contains routines needed for computations of dynamics. @@ -582,6 +596,7 @@ SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) USE o_PARAM USE g_CONFIG use g_comm_auto + use relative_vorticity_interface IMPLICIT NONE real(kind=WP) :: dz, div_elem(3), xe, ye, vi integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 @@ -597,7 +612,7 @@ SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) #include "associate_mesh_ass.h" Wvel =>dynamics%w(:,:) ! - if(mom_adv<4) call relative_vorticity(partit, mesh) !!! vorticity array should be allocated + if(mom_adv<4) call relative_vorticity(dynamics, partit, mesh) !!! vorticity array should be allocated ! Fill in viscosity: Visc = 0.0_WP DO elem=1, myDim_elem2D !! m=1, myDim_elem2D @@ -1321,7 +1336,105 @@ SUBROUTINE uke_update(dynamics, partit, mesh) deallocate(uuu) end subroutine uke_update +! +! +!_______________________________________________________________________________ +subroutine relative_vorticity(dynamics, partit, mesh) + USE o_ARRAYS, only: vorticity + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + use g_comm_auto + IMPLICIT NONE + integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 + + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) -! =================================================================== - + !!PS DO n=1,myDim_nod2D + !!PS nl1 = nlevels_nod2D(n)-1 + !!PS ul1 = ulevels_nod2D(n) + !!PS vorticity(ul1:nl1,n)=0.0_WP + !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 + !!PS !!PS vorticity(nz,n)=0.0_WP + !!PS !!PS END DO + !!PS END DO + vorticity(:,1:myDim_nod2D) = 0.0_WP + DO edge=1,myDim_edge2D + !! edge=myList_edge2D(m) + enodes=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + ul1=ulevels(el(1)) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + nl2=0 + ul2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + nl2=nlevels(el(2))-1 + ul2=ulevels(el(2)) + end if + nl12 = min(nl1,nl2) + ul12 = max(ul1,ul2) + + DO nz=ul1,ul12-1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + if (ul2>0) then + DO nz=ul2,ul12-1 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + endif + !!PS DO nz=1,min(nl1,nl2) + DO nz=ul12,nl12 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & + deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl1 + DO nz=nl12+1,nl1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl2 + DO nz=nl12+1,nl2 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + END DO + + ! vorticity = vorticity*area at this stage + ! It is correct only on myDim nodes + DO n=1,myDim_nod2D + !! n=myList_nod2D(m) + ul1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !!PS DO nz=1,nlevels_nod2D(n)-1 + DO nz=ul1,nl1-1 + vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) + END DO + END DO + + call exchange_nod(vorticity, partit) + +! Now it the relative vorticity known on neighbors too +end subroutine relative_vorticity diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 deleted file mode 100755 index 1ba0a34ee..000000000 --- a/src/oce_vel_rhs_vinv.F90 +++ /dev/null @@ -1,353 +0,0 @@ -module relative_vorticity_interface - interface - subroutine relative_vorticity(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module - -! Vector invariant momentum advection: -! (curl u+f)\times u+grad(u^2/2)+w du/dz -! -! =================================================================== -subroutine relative_vorticity(dynamics, partit, mesh) - USE o_ARRAYS, only: vorticity - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - use g_comm_auto - IMPLICIT NONE - integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 - real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - - !!PS DO n=1,myDim_nod2D - !!PS nl1 = nlevels_nod2D(n)-1 - !!PS ul1 = ulevels_nod2D(n) - !!PS vorticity(ul1:nl1,n)=0.0_WP - !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 - !!PS !!PS vorticity(nz,n)=0.0_WP - !!PS !!PS END DO - !!PS END DO - vorticity(:,1:myDim_nod2D) = 0.0_WP - DO edge=1,myDim_edge2D - !! edge=myList_edge2D(m) - enodes=edges(:,edge) - el=edge_tri(:,edge) - nl1=nlevels(el(1))-1 - ul1=ulevels(el(1)) - deltaX1=edge_cross_dxdy(1,edge) - deltaY1=edge_cross_dxdy(2,edge) - nl2=0 - ul2=0 - if(el(2)>0) then - deltaX2=edge_cross_dxdy(3,edge) - deltaY2=edge_cross_dxdy(4,edge) - nl2=nlevels(el(2))-1 - ul2=ulevels(el(2)) - end if - nl12 = min(nl1,nl2) - ul12 = max(ul1,ul2) - - DO nz=ul1,ul12-1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - if (ul2>0) then - DO nz=ul2,ul12-1 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - endif - !!PS DO nz=1,min(nl1,nl2) - DO nz=ul12,nl12 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & - deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl1 - DO nz=nl12+1,nl1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl2 - DO nz=nl12+1,nl2 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - END DO - - ! vorticity = vorticity*area at this stage - ! It is correct only on myDim nodes - DO n=1,myDim_nod2D - !! n=myList_nod2D(m) - ul1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - !!PS DO nz=1,nlevels_nod2D(n)-1 - DO nz=ul1,nl1-1 - vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) - END DO - END DO - - call exchange_nod(vorticity, partit) - -! Now it the relative vorticity known on neighbors too -end subroutine relative_vorticity -! ========================================================================== -subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant - USE o_PARAM - USE o_ARRAYS, only: coriolis_node, hpressure, vorticity - - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - USE g_CONFIG - use g_comm_auto - use relative_vorticity_interface - IMPLICIT NONE - - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - integer :: n, n1, nz, elem, elnodes(3), nl1, j, nzmin,nzmax - real(kind=WP) :: a, b, c, da, db, dc, dg, ff(3), gg, eta(3), pre(3), Fx, Fy,w - real(kind=WP) :: uvert(mesh%nl,2), umean, vmean, friction - logical, save :: lfirst=.true. - real(kind=WP) :: KE_node(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv - real(kind=WP) :: density0_inv = 1./density_0 - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs, UV_rhsAB - real(kind=WP), dimension(:) , pointer :: eta_n -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - UV_rhsAB => dynamics%uv_rhsAB(:,:,:) - eta_n =>dynamics%eta_n(:) - - w = 0.0_WP - - uvert=0.0_WP - - ! ====================== - ! Kinetic energy at nodes: - ! ====================== - - - KE_node(:,:)=0.0_WP - - DO elem=1, myDim_elem2D - !! elem=myList_elem2D(m) - elnodes=elem2D_nodes(:,elem) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - DO j=1,3 !NR interchange loops => nz-loop vectorizes - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - KE_node(nz,elnodes(j)) = KE_node(nz,elnodes(j))+(UV(1,nz,elem)*UV(1,nz,elem) & - +UV(2,nz,elem)*UV(2,nz,elem))*elem_area(elem) !NR/6.0_WP below - END DO - END DO - END DO - - DO n=1,myDim_nod2D - !! n=myList_nod2D(m) - nzmin = ulevels_nod2D(n) - nzmax = nlevels_nod2D(n) - !!PS DO nz=1, nlevels_nod2D(n)-1 - DO nz=nzmin, nzmax-1 - !DO nz=1, nl-1 - KE_node(nz,n)=KE_node(nz,n)/(6._WP*areasvol(nz,n)) !NR divide by 6 here - END DO - END DO - - ! Set the kinetic energy to zero at lateral walls: - DO n=1,myDim_edge2D - !! n=myList_edge2D(m) - if(myList_edge2D(n) > edge2D_in) then - elnodes(1:2)=edges(:,n) - KE_node(:,elnodes(1:2))=0.0_WP - endif - end DO - - call exchange_nod(KE_node, partit) - ! Now gradients of KE will be correct on myDim_elem2D - - ! ================== - ! AB contribution from the old time step - ! ================== - Do elem=1, myDim_elem2D !! P (a) - !! elem=myList_elem2D(m) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nl-1 - DO nz=nzmin,nzmax-1 - UV_rhs(1,nz,elem)=-(0.5_WP+epsilon)*UV_rhsAB(1,nz,elem) - UV_rhs(2,nz,elem)=-(0.5_WP+epsilon)*UV_rhsAB(2,nz,elem) - END DO - END DO - - call relative_vorticity(dynamics, partit, mesh) - ! ==================== - ! Sea level and pressure contribution -\nabla(g\eta +hpressure/rho_0+V^2/2) - ! and the Coriolis force (elemental part) - ! ==================== - - !DS KE_node=0. !DS - !DS vorticity=0. !DS - DO elem=1, myDim_elem2D !! P (b) elem=1,elem2D - !! elem=myList_elem2D(m) - elnodes = elem2D_nodes(:,elem) - eta = g*eta_n(elnodes) - gg = elem_area(elem) - ff = coriolis_node(elnodes) - - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - pre = -(eta + hpressure(nz,elnodes)*density0_inv) - Fx = sum(gradient_sca(1:3,elem)*pre) - Fy = sum(gradient_sca(4:6,elem)*pre) - UV_rhs(1,nz,elem) = UV_rhs(1,nz,elem)+Fx*gg - UV_rhs(2,nz,elem) = UV_rhs(2,nz,elem)+Fy*gg - - pre = -KE_node(nz,elnodes) - Fx = sum(gradient_sca(1:3,elem)*pre) - Fy = sum(gradient_sca(4:6,elem)*pre) - - da = UV(2,nz,elem)*sum(ff+vorticity(nz,elnodes))/3.0_WP - db =-UV(1,nz,elem)*sum(ff+vorticity(nz,elnodes))/3.0_WP - - UV_rhsAB(1,nz,elem)=(da+Fx)*gg - UV_rhsAB(2,nz,elem)=(db+Fy)*gg - - END DO - END DO - ! ======================= - ! Compute w du/dz at elements: wdu/dz=d(wu)/dz-udw/dz - ! The central estimate of u in the flux term will correspond to energy - ! conservation - ! ======================= - - !NR precompute - DO nz=2,nl-1 - dZ_inv(nz) = 1.0_WP/(Z(nz-1)-Z(nz)) - ENDDO - DO nz=1,nl-1 - dzbar_inv(nz) = 1.0_WP/(zbar(nz)-zbar(nz+1)) - END DO - -!DO elem=1, myDim_elem2D -! !! elem=myList_elem2D(m) -! elnodes=elem2D_nodes(:,elem) -! nl1=nlevels(elem)-1 -! -! uvert(1,1:2)=0d0 -! uvert(nl1+1,1:2)=0d0 -! -! DO nz=2, nl1 -! w=sum(Wvel(nz,elnodes))/3.0_WP -! umean=0.5_WP*(UV(1,nz-1,elem)+UV(1,nz,elem)) -! vmean=0.5_WP*(UV(2,nz-1,elem)+UV(2,nz,elem)) -! uvert(nz,1)=-umean*w -! uvert(nz,2)=-vmean*w -! END DO -! DO nz=1,nl1 -! da=sum(Wvel(nz,elnodes)-Wvel(nz+1,elnodes))/3.0_WP -! UV_rhsAB(1,nz,elem) = UV_rhsAB(1,nz,elem) + (uvert(nz,1)-uvert(nz+1,1)+& -! da*UV(1,nz,elem))*elem_area(elem)*dzbar_inv(nz) !/(zbar(nz)-zbar(nz+1)) -! UV_rhsAB(2,nz,elem)=UV_rhsAB(2,nz,elem)+(uvert(nz,2)-uvert(nz+1,2)+& -! da*UV(2,nz,elem))*elem_area(elem)*dzbar_inv(nz) !/(zbar(nz)-zbar(nz+1)) -! -! END DO -!END DO - - - DO elem=1, myDim_elem2D - !! elem=myList_elem2D(m) - elnodes=elem2D_nodes(:,elem) - !!PS nl1=nlevels(elem)-1 - nzmax=nlevels(elem)-1 - nzmin=ulevels(elem) - - ! w=sum(Wvel(2, elnodes))/3.0_WP - ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) - uvert(1,1)=w*(UV(1,1,elem)-UV(1,2,elem))*dZ_inv(2)*0.5_WP - uvert(1,2)=w*(UV(2,1,elem)-UV(2,2,elem))*dZ_inv(2)*0.5_WP - - ! w=sum(Wvel(nl1, elnodes))/3.0_WP - ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) - !!PS uvert(nl1,1)=w*(UV(1,nl1-1,elem)-UV(1,nl1,elem))*dZ_inv(nl1)*0.5_WP - !!PS uvert(nl1,2)=w*(UV(2,nl1-1,elem)-UV(2,nl1,elem))*dZ_inv(nl1)*0.5_WP - uvert(nzmax,1)=w*(UV(1,nzmax-1,elem)-UV(1,nzmax,elem))*dZ_inv(nzmax)*0.5_WP - uvert(nzmax,2)=w*(UV(2,nzmax-1,elem)-UV(2,nzmax,elem))*dZ_inv(nzmax)*0.5_WP - - - !!PS DO nz=2, nl1-1 - DO nz=nzmin+1, nzmax-1 - ! w=sum(Wvel(nz,elnodes)+Wvel(nz+1,elnodes))/6.0_WP - ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) - if (w >= 0.0_WP) then - uvert(nz,1)=w*(UV(1,nz,elem)-UV(1,nz+1,elem))*dZ_inv(nz+1) - uvert(nz,2)=w*(UV(2,nz,elem)-UV(2,nz+1,elem))*dZ_inv(nz+1) - else - uvert(nz,1)=w*(UV(1,nz-1,elem)-UV(1,nz,elem))*dZ_inv(nz) - uvert(nz,2)=w*(UV(2,nz-1,elem)-UV(2,nz,elem))*dZ_inv(nz) - end if - END DO - !!PS UV_rhsAB(1,1:nl1,elem) = UV_rhsAB(1,1:nl1,elem) - uvert(1:nl1,1)*elem_area(elem) - !!PS UV_rhsAB(2,1:nl1,elem) = UV_rhsAB(2,1:nl1,elem) - uvert(1:nl1,2)*elem_area(elem) - UV_rhsAB(1,nzmin:nzmax,elem) = UV_rhsAB(1,nzmin:nzmax,elem) - uvert(nzmin:nzmax,1)*elem_area(elem) - UV_rhsAB(2,nzmin:nzmax,elem) = UV_rhsAB(2,nzmin:nzmax,elem) - uvert(nzmin:nzmax,2)*elem_area(elem) - - END DO - - ! ======================= - ! Update the rhs - ! ======================= - gg=(1.5_WP+epsilon) - if(lfirst.and.(.not.r_restart)) then - gg=1.0_WP - lfirst=.false. - end if - - DO elem=1, myDim_elem2D !! P(e) elem=1, elem2D - !! elem=myList_elem2D(m) - elem_area_inv = dt/elem_area(elem) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - UV_rhs(1,nz,elem)= (UV_rhs(1,nz,elem)+UV_rhsAB(1,nz,elem)*gg) *elem_area_inv - UV_rhs(2,nz,elem)= (UV_rhs(2,nz,elem)+UV_rhsAB(2,nz,elem)*gg) *elem_area_inv - END DO - END DO - ! U_rhs contains all contributions to velocity from old time steps -end subroutine compute_vel_rhs_vinv From 0645f34f987e1c04b425c975fc63b55f551576bc Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 17:59:05 +0100 Subject: [PATCH 442/909] remove viscosity_option=1,2,3,4 and associated parameters Div_c, Leith_c, Visc from oce_dyn.F90 --- src/MOD_DYN.F90 | 13 - src/oce_dyn.F90 | 556 +++++------------------------------------ src/oce_modules.F90 | 2 +- src/oce_setup_step.F90 | 6 +- 4 files changed, 62 insertions(+), 515 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index a17fd0651..18ab6f46b 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -43,10 +43,6 @@ MODULE MOD_DYN ! visc_option=... - ! 1=Harmonic Leith parameterization; - ! 2=Laplacian+Leith+biharmonic background - ! 3=Biharmonic Leith parameterization - ! 4=Biharmonic flow aware ! 5=Kinematic (easy) Backscatter ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) ! 7=Biharmonic flow aware (viscosity depends on velocity differences) @@ -61,11 +57,6 @@ MODULE MOD_DYN real(kind=WP) :: gamma1_visc = 0.1 real(kind=WP) :: gamma2_visc = 0.285 - ! div_c the strength of the modified Leith viscosity, nondimensional, 0.3 -- 1.0 - ! leith the strength of the Leith viscosity - real(kind=WP) :: div_c_visc = 0.5 - real(kind=WP) :: leith_c_visc = 0.05 - ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 ! (easy backscatter) real(kind=WP) :: easy_bs_return= 1.5 @@ -125,8 +116,6 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc - write(unit, iostat=iostat, iomsg=iomsg) dynamics%div_c_visc - write(unit, iostat=iostat, iomsg=iomsg) dynamics%leith_c_visc !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc @@ -168,8 +157,6 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc - read(unit, iostat=iostat, iomsg=iomsg) dynamics%div_c_visc - read(unit, iostat=iostat, iomsg=iomsg) dynamics%leith_c_visc !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index a02c03d4a..b045b28b2 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -11,63 +11,7 @@ ! (5) visc_filt_bcksct, (6) visc_filt_bilapl, (7) visc_filt_bidiff ! 4. Div_c =1. should be default ! 5. Leith_c=? (need to be adjusted) -module h_viscosity_leith_interface - interface - subroutine h_viscosity_leith(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn), intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module -module visc_filt_harmon_interface - interface - subroutine visc_filt_harmon(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module -module visc_filt_hbhmix_interface - interface - subroutine visc_filt_hbhmix(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module -module visc_filt_biharm_interface - interface - subroutine visc_filt_biharm(option, dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - integer :: option - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module + module visc_filt_bcksct_interface interface subroutine visc_filt_bcksct(dynamics, partit, mesh) @@ -168,10 +112,10 @@ subroutine relative_vorticity(dynamics, partit, mesh) end interface end module -! =================================================================== +! ! Contains routines needed for computations of dynamics. ! includes: update_vel, compute_vel_nodes -! =================================================================== +!_______________________________________________________________________________ SUBROUTINE update_vel(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -215,7 +159,9 @@ SUBROUTINE update_vel(dynamics, partit, mesh) eta_n=eta_n+d_eta call exchange_elem(UV, partit) end subroutine update_vel -!========================================================================== +! +! +!_______________________________________________________________________________ subroutine compute_vel_nodes(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -262,439 +208,53 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) END DO call exchange_nod(UVnode, partit) end subroutine compute_vel_nodes -!=========================================================================== +! +! +!_______________________________________________________________________________ subroutine viscosity_filter(option, dynamics, partit, mesh) -use o_PARAM -use MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -use MOD_DYN -use h_viscosity_leith_interface -use visc_filt_harmon_interface -use visc_filt_hbhmix_interface -use visc_filt_biharm_interface -use visc_filt_bcksct_interface -use visc_filt_bilapl_interface -use visc_filt_bidiff_interface -use visc_filt_dbcksc_interface -use backscatter_coef_interface -IMPLICIT NONE -integer :: option -type(t_dyn) , intent(inout), target :: dynamics -type(t_mesh) , intent(in) , target :: mesh -type(t_partit), intent(inout), target :: partit - -! Driving routine -! Background viscosity is selected in terms of Vl, where V is -! background velocity scale and l is the resolution. V is 0.005 -! or 0.01, perhaps it would be better to pass it as a parameter. - -! h_viscosity_leiht needs vorticity, so vorticity array should be -! allocated. At present, there are two rounds of smoothing in -! h_viscosity. - -SELECT CASE (option) -CASE (1) - ! ==== - ! Harmonic Leith parameterization - ! ==== - call h_viscosity_leith(dynamics, partit, mesh) - call visc_filt_harmon(dynamics, partit, mesh) -CASE (2) - ! === - ! Laplacian+Leith+biharmonic background - ! === - call h_viscosity_leith(dynamics, partit, mesh) - call visc_filt_hbhmix(dynamics, partit, mesh) -CASE (3) - ! === - ! Biharmonic Leith parameterization - ! === - call h_viscosity_leith(dynamics, partit, mesh) - call visc_filt_biharm(2, dynamics, partit, mesh) -CASE (4) - ! === - ! Biharmonic+upwind-type - ! === - call visc_filt_biharm(1, dynamics, partit, mesh) -CASE (5) - call visc_filt_bcksct(dynamics, partit, mesh) -CASE (6) - call visc_filt_bilapl(dynamics, partit, mesh) -CASE (7) - call visc_filt_bidiff(dynamics, partit, mesh) -CASE (8) - call backscatter_coef(dynamics, partit, mesh) - call visc_filt_dbcksc(dynamics, partit, mesh) -CASE DEFAULT - if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - stop -END SELECT -end subroutine viscosity_filter -! =================================================================== -SUBROUTINE visc_filt_harmon(dynamics, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_DYN -USE o_ARRAYS, only: Visc -USE o_PARAM -USE g_CONFIG -IMPLICIT NONE - -real(kind=WP) :: u1, v1, le(2), len, vi -integer :: nz, ed, el(2) , nzmin,nzmax -type(t_dyn) , intent(inout), target :: dynamics -type(t_mesh) , intent(in), target :: mesh -type(t_partit), intent(inout), target :: partit -real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" -UV => dynamics%uv(:,:,:) -UV_rhs => dynamics%uv_rhs(:,:,:) - - ! An analog of harmonic viscosity operator. - ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area - ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. - ! The contribution from boundary edges is neglected (free slip). - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - len=sqrt(sum(elem_area(el(1:2)))) - nzmax = minval(nlevels(el)) - nzmin = maxval(ulevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - vi=0.5_WP*(Visc(nz,el(1))+Visc(nz,el(2))) - vi=max(vi, gamma0*len)*dt ! limited from below by backgroung - u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi - v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi - - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO -end subroutine visc_filt_harmon -! =================================================================== -SUBROUTINE visc_filt_biharm(option, dynamics, partit, mesh) - USE MOD_MESH + use o_PARAM + use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: Visc - USE o_PARAM - USE g_CONFIG - use g_comm_auto - IMPLICIT NONE - ! An energy conserving version - ! Also, we use the Leith viscosity - ! - real(kind=WP) :: u1, v1, vi, len - integer :: ed, el(2), nz, option, nzmin, nzmax - real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) + use visc_filt_bcksct_interface + use visc_filt_bilapl_interface + use visc_filt_bidiff_interface + use visc_filt_dbcksc_interface + use backscatter_coef_interface + IMPLICIT NONE + integer :: option type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - - ! Filter is applied twice. - ed=myDim_elem2D+eDim_elem2D - allocate(U_c(nl-1,ed), V_c(nl-1, ed)) - U_c=0.0_WP - V_c=0.0_WP - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmax = minval(nlevels(el)) - nzmin = maxval(ulevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO - END DO - - if(option==1) then - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - nzmin = ulevels(ed) - nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 - Do nz=nzmin,nzmax-1 - ! vi has the sense of harmonic viscosity coefficient because of - ! the division by area in the end - ! ==== - ! Case 1 -- an analog to the third-order upwind (vi=gamma1 * |u| * l) - ! ==== - vi=max(gamma0, gamma1*sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len*dt - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - end if - - if(option==2) then - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - nzmin = ulevels(ed) - nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 - Do nz=nzmin,nzmax-1 - ! vi has the sense of harmonic viscosity coefficient because of - ! the division by area in the end - ! === - ! Case 2 -- Leith +background (do not forget to call h_viscosity_leith before using this option) - ! === - vi=max(Visc(nz,ed), gamma0*len)*dt ! limited from below by backgroung - ! - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - end if - - call exchange_elem(U_c, partit) - call exchange_elem(V_c, partit) - DO ed=1, myDim_edge2D+eDim_edge2D - ! check if its a boudnary edge - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmin = maxval(ulevels(el)) - nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO - - deallocate(V_c,U_c) - -end subroutine visc_filt_biharm -! =================================================================== -SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - USE o_ARRAYS, only: Visc - USE o_PARAM - USE g_CONFIG - use g_comm_auto - IMPLICIT NONE - - ! An energy and momentum conserving version. - ! We use the harmonic Leith viscosity + biharmonic background viscosity - ! - - real(kind=WP) :: u1, v1, vi, len, crosslen, le(2) - integer :: ed, el(2), nz, nzmin, nzmax - real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) - type(t_dyn), intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - ! Filter is applied twice. - ed=myDim_elem2D+eDim_elem2D - allocate(U_c(nl-1,ed), V_c(nl-1, ed)) - U_c=0.0_WP - V_c=0.0_WP - DO ed=1, myDim_edge2D+eDim_edge2D - ! check if its a boudnary edge - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmin = maxval(ulevels(el)) - nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - vi=dt*0.5_WP*(Visc(nz,el(1))+Visc(nz,el(2))) - ! backgroung is added later (biharmonically) - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - u1=u1*vi - v1=v1*vi - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO - - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - nzmin = ulevels(ed) - nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 - Do nz=nzmin,nzmax-1 - vi=dt*gamma0*len ! add biharmonic backgroung - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - call exchange_elem(U_c, partit) - call exchange_elem(V_c, partit) - DO ed=1, myDim_edge2D+eDim_edge2D - ! check if its a boudnary edge - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmin = maxval(ulevels(el)) - nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO - - deallocate(V_c,U_c) - -end subroutine visc_filt_hbhmix - -! =================================================================== -SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) - ! - ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - USE o_ARRAYS, only: Visc, vorticity - USE o_PARAM - USE g_CONFIG - use g_comm_auto - use relative_vorticity_interface - IMPLICIT NONE - real(kind=WP) :: dz, div_elem(3), xe, ye, vi - integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 - real(kind=WP) :: leithx, leithy - real(kind=WP), allocatable :: aux(:,:) - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:), pointer :: Wvel -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - Wvel =>dynamics%w(:,:) - ! - if(mom_adv<4) call relative_vorticity(dynamics, partit, mesh) !!! vorticity array should be allocated - ! Fill in viscosity: - Visc = 0.0_WP - DO elem=1, myDim_elem2D !! m=1, myDim_elem2D - !! elem=myList_elem2D(m) - !_______________________________________________________________________ - ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because - ! they run over elements here - nl1 =nlevels(elem)-1 - ul1 =ulevels(elem) - - zbar_n=0.0_WP - ! in case of partial cells zbar_n(nzmax) is not any more at zbar(nzmax), - ! zbar_n(nzmax) is now zbar_e_bot(elem), - zbar_n(nl1+1)=zbar_e_bot(elem) - !!PS do nz=nl1,2,-1 - do nz=nl1,ul1+1,-1 - zbar_n(nz) = zbar_n(nz+1) + helem(nz,elem) - end do - !!PS zbar_n(1) = zbar_n(2) + helem(1,elem) - zbar_n(ul1) = zbar_n(ul1+1) + helem(ul1,elem) - - !_______________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - !!PS do nz=1,nl1 - do nz=ul1,nl1 - dz=zbar_n(nz)-zbar_n(nz+1) - div_elem=(Wvel(nz,elnodes)-Wvel(nz+1,elnodes))/dz - xe=sum(gradient_sca(1:3,elem)*div_elem) - ye=sum(gradient_sca(4:6,elem)*div_elem) - div_elem=vorticity(nz,elnodes) - leithx=sum(gradient_sca(1:3,elem)*div_elem) - leithy=sum(gradient_sca(4:6,elem)*div_elem) - Visc(nz,elem)=min(gamma1*elem_area(elem)*sqrt((Div_c*(xe**2+ye**2) & - + Leith_c*(leithx**2+leithy**2))*elem_area(elem)), elem_area(elem)/dt) - end do !! 0.1 here comes from (2S)^{3/2}/pi^3 - do nz=nl1+1, nl-1 - Visc(nz, elem)=0.0_WP - end do - do nz=1,ul1-1 - Visc(nz, elem)=0.0_WP - end do - END DO - - allocate(aux(nl-1,myDim_nod2D+eDim_nod2D)) - aux = 0.0_WP - DO nt=1,2 - DO n=1, myDim_nod2D - nl1 = nlevels_nod2D(n) - ul1 = ulevels_nod2D(n) - !!PS DO nz=1, nlevels_nod2D(n)-1 - DO nz=ul1, nl1-1 - dz=0.0_WP - vi=0.0_WP - DO k=1, nod_in_elem2D_num(n) - elem=nod_in_elem2D(k,n) - dz=dz+elem_area(elem) - vi=vi+Visc(nz,elem)*elem_area(elem) - END DO - aux(nz,n)=vi/dz - END DO - END DO - call exchange_nod(aux, partit) - do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - nl1=nlevels(elem)-1 - ul1=ulevels(elem) - !!!PS Do nz=1, nl1 - Do nz=ul1, nl1 - Visc(nz,elem)=sum(aux(nz,elnodes))/3.0_WP - END DO - DO nz=nl1+1, nl-1 - Visc(nz,elem)=0.0_WP - END Do - DO nz=1, ul1-1 - Visc(nz,elem)=0.0_WP - END Do - end do - end do - call exchange_elem(Visc, partit) - deallocate(aux) -END subroutine h_viscosity_leith -! ======================================================================= + ! Driving routine + ! Background viscosity is selected in terms of Vl, where V is + ! background velocity scale and l is the resolution. V is 0.005 + ! or 0.01, perhaps it would be better to pass it as a parameter. + + ! h_viscosity_leiht needs vorticity, so vorticity array should be + ! allocated. At present, there are two rounds of smoothing in + ! h_viscosity. + SELECT CASE (option) + CASE (5) + call visc_filt_bcksct(dynamics, partit, mesh) + CASE (6) + call visc_filt_bilapl(dynamics, partit, mesh) + CASE (7) + call visc_filt_bidiff(dynamics, partit, mesh) + CASE (8) + call backscatter_coef(dynamics, partit, mesh) + call visc_filt_dbcksc(dynamics, partit, mesh) + CASE DEFAULT + if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + END SELECT +end subroutine viscosity_filter +! +! +!_______________________________________________________________________________ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -790,8 +350,9 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) end do deallocate(V_c,U_c,V_b,U_b) end subroutine visc_filt_bcksct - -! =================================================================== +! +! +!_______________________________________________________________________________ ! Strictly energy dissipative and momentum conserving version ! Viscosity depends on velocity Laplacian, i.e., on an analog of ! the Leith viscosity (Lapl==second derivatives) @@ -878,7 +439,9 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) deallocate(V_c,U_c) end subroutine visc_filt_bilapl -! =================================================================== +! +! +!_______________________________________________________________________________ ! Strictly energy dissipative and momentum conserving version ! Viscosity depends on velocity differences, and is introduced symmetrically ! into both stages of biharmonic operator @@ -961,10 +524,9 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) deallocate(V_c, U_c) end subroutine visc_filt_bidiff -! =================================================================== - - -! =================================================================== +! +! +!_______________________________________________________________________________ SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -1126,8 +688,9 @@ SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) deallocate(uuu) end subroutine visc_filt_dbcksc -!=========================================================================== - +! +! +!_______________________________________________________________________________ SUBROUTINE backscatter_coef(partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -1162,8 +725,9 @@ SUBROUTINE backscatter_coef(partit, mesh) call exchange_elem(v_back, partit) end subroutine backscatter_coef -!=========================================================================== - +! +! +!_______________________________________________________________________________ SUBROUTINE uke_update(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 5f613cb82..013495860 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -196,7 +196,7 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: water_flux(:), Ssurf(:) real(kind=WP), allocatable :: virtual_salt(:), relax_salt(:) real(kind=WP), allocatable :: Tclim(:,:), Sclim(:,:) -real(kind=WP), allocatable :: Visc(:,:) +!!PS real(kind=WP), allocatable :: Visc(:,:) real(kind=WP), allocatable :: Tsurf_t(:,:), Ssurf_t(:,:) real(kind=WP), allocatable :: tau_x_t(:,:), tau_y_t(:,:) real(kind=WP), allocatable :: heat_flux_t(:,:), heat_rel_t(:,:), heat_rel(:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 6a0aa8e05..4a2bfbc60 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -441,8 +441,6 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) !!PS dynamics%gamma0_visc = gamma0_visc !!PS dynamics%gamma1_visc = gamma1_visc !!PS dynamics%gamma2_visc = gamma2_visc -!!PS dynamics%div_c_visc = div_c_visc -!!PS dynamics%leith_c_visc = leith_c_visc !!PS dynamics%use_ivertvisc = use_ivertvisc !!PS dynamics%momadv_opt = momadv_opt !!PS dynamics%use_freeslip = use_freeslip @@ -453,8 +451,6 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%gamma0_visc = gamma0 dynamics%gamma1_visc = gamma1 dynamics%gamma2_visc = gamma2 - dynamics%div_c_visc = Div_c - dynamics%leith_c_visc = Leith_c dynamics%use_ivertvisc = i_vert_visc dynamics%momadv_opt = mom_adv dynamics%use_freeslip = free_slip @@ -496,7 +492,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Velocities ! ================ !allocate(stress_diag(2, elem_size))!delete me -allocate(Visc(nl-1, elem_size)) +!!PS allocate(Visc(nl-1, elem_size)) ! ================ ! elevation and its rhs ! ================ From ce24b1bce48a7e5773ec3a9e84e334bc26b282cc Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 3 Nov 2021 17:07:43 +0000 Subject: [PATCH 443/909] use consistent target attribute, otherwise compilation fails with cray compiler --- src/gen_modules_partitioning.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 1c74cc724..29b6767e0 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -25,7 +25,7 @@ subroutine init_mpi_types(partit, mesh) USE MOD_PARTIT USE MOD_PARSUP implicit none - type(t_partit), intent(in), target :: partit + type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh end subroutine From 807c472b3d21bc5a5d084f682ddddcebfa061fca Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 3 Nov 2021 23:58:38 +0100 Subject: [PATCH 444/909] tracer advection part has been fullly OpenMP parallelized. It turns out that OpenMP does not slow down the modes as compared to MPI. On ollie: a setup with 288 MPI tasks is only 5% faster than a setup with 8MPI x 36 OpenMP a setup with 16MPI tasks x 18 OpenMP is faster than a setup with 288MPI tasks further increase in throuput has been observed up to 20 MPI x 18 OpenMP I didnt chek for more resources since the mesh is small (CORE2) --- src/MOD_PARTIT.F90 | 9 ++- src/gen_modules_partitioning.F90 | 8 +++ src/oce_adv_tra_driver.F90 | 78 ++++++++++++++------- src/oce_adv_tra_fct.F90 | 28 ++------ src/oce_adv_tra_hor.F90 | 69 ++++++++++++------- src/oce_adv_tra_ver.F90 | 112 ++++++++++++++++++++----------- 6 files changed, 191 insertions(+), 113 deletions(-) diff --git a/src/MOD_PARTIT.F90 b/src/MOD_PARTIT.F90 index bd3b7dec2..5d6b917c3 100644 --- a/src/MOD_PARTIT.F90 +++ b/src/MOD_PARTIT.F90 @@ -5,6 +5,9 @@ module MOD_PARTIT USE, intrinsic :: ISO_FORTRAN_ENV USE MOD_WRITE_BINARY_ARRAYS USE MOD_READ_BINARY_ARRAYS +#if defined(_OPENMP) + USE OMP_LIB +#endif IMPLICIT NONE SAVE include 'mpif.h' @@ -69,11 +72,15 @@ module MOD_PARTIT integer, allocatable, dimension(:) :: myList_edge2D integer :: pe_status = 0 ! if /=0 then something is wrong - !!! remPtr_* are constructed during the runtime ans shall not be dumped!!! + !!! remPtr_* are constructed during the runtime and shall not be dumped!!! integer, allocatable :: remPtr_nod2D(:), remList_nod2D(:) integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) logical :: elem_full_flag +#if defined(_OPENMP) + !!! plock is constructed during the runtime and shall not be dumped!!! + integer(omp_lock_kind), allocatable :: plock(:) +#endif contains procedure WRITE_T_PARTIT procedure READ_T_PARTIT diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 1c74cc724..552349af7 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -506,6 +506,14 @@ subroutine init_gatherLists(partit) call MPI_SEND(myList_elem2D, myDim_elem2D, MPI_INTEGER, 0, 3, MPI_COMM_FESOM, MPIerr ) endif +!$OMP MASTER +#if defined(_OPENMP) + allocate(partit%plock(partit%myDim_nod2D+partit%eDim_nod2D)) + do n=1, myDim_nod2D+partit%eDim_nod2D + call omp_init_lock_with_hint(partit%plock(n),omp_sync_hint_speculative+omp_sync_hint_uncontended) + enddo +#endif +!$OMP END MASTER end subroutine init_gatherLists !=================================================================== subroutine status_check(partit) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index a82a6b99b..734cf55bd 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -104,11 +104,16 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) ! part of antidiffusive flux if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then ! compute the low order upwind horizontal flux - ! init_zero=.true. : zero the horizontal flux before computation - ! init_zero=.false. : input flux will be substracted - call adv_tra_hor_upw1(vel, ttf, partit, mesh, adv_flux_hor, init_zero=.true.) + ! o_init_zero=.true. : zero the horizontal flux before computation + ! o_init_zero=.false. : input flux will be substracted + call adv_tra_hor_upw1(vel, ttf, partit, mesh, adv_flux_hor, o_init_zero=.true.) ! update the LO solution for horizontal contribution - fct_LO=0.0_WP +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + fct_LO(:,n)=0.0_WP + end do +!$OMP END PARALLEL DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(e, enodes, el, nl1, nu1, nl2, nu2, nz) do e=1, myDim_edge2D enodes=edges(:,e) el=edge_tri(:,e) @@ -126,15 +131,29 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) if (nu2>0) nu12 = min(nu1,nu2) !!PS do nz=1, max(nl1, nl2) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) +#endif + do nz=nu12, nl12 + fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e) + end do +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) +#endif do nz=nu12, nl12 - fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e) - fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e) + fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e) end do - end do +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif + end do +!$OMP END PARALLEL DO ! compute the low order upwind vertical flux (explicit part only) ! zero the input/output flux before computation - call adv_tra_ver_upw1(we, ttf, partit, mesh, adv_flux_ver, init_zero=.true.) + call adv_tra_ver_upw1(we, ttf, partit, mesh, adv_flux_ver, o_init_zero=.true.) ! update the LO solution for vertical contribution +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nu1, nl1, nz) do n=1, myDim_nod2D nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -143,6 +162,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n) end do end do +!$OMP END PARALLEL DO if (w_split) then !wvel/=wvel_e ! update for implicit contribution (w_split option) call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) @@ -150,23 +170,22 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) ! zero the input/output flux before computation ! --> compute here low order part of vertical anti diffusive fluxes, ! has to be done on the full vertical velocity w - call adv_tra_ver_upw1(w, ttf, partit, mesh, adv_flux_ver, init_zero=.true.) - end if + call adv_tra_ver_upw1(w, ttf, partit, mesh, adv_flux_ver, o_init_zero=.true.) + end if call exchange_nod(fct_LO, partit) end if - do_zero_flux=.true. if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') do_zero_flux=.false. !___________________________________________________________________________ ! do horizontal tracer advection, in case of FCT high order solution SELECT CASE(trim(tracers%data(tr_num)%tra_adv_hor)) CASE('MUSCL') - ! compute the untidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above) - call adv_tra_hor_muscl(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, init_zero=do_zero_flux) + ! compute the untidiffusive horizontal flux (o_init_zero=.false.: input is the LO horizontal flux computed above) + call adv_tra_hor_muscl(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, o_init_zero=do_zero_flux) CASE('MFCT') - call adv_tra_hor_mfct(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, init_zero=do_zero_flux) + call adv_tra_hor_mfct(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, o_init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_hor_upw1(vel, ttfAB, partit, mesh, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_upw1(vel, ttfAB, partit, mesh, adv_flux_hor, o_init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracers%data(tr_num)%tra_adv_hor), '! Check your namelists!' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) @@ -180,14 +199,14 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) ! do vertical tracer advection, in case of FCT high order solution SELECT CASE(trim(tracers%data(tr_num)%tra_adv_ver)) CASE('QR4C') - ! compute the untidiffusive vertical flux (init_zero=.false.:input is the LO vertical flux computed above) - call adv_tra_ver_qr4c ( pwvel, ttfAB, partit, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) + ! compute the untidiffusive vertical flux (o_init_zero=.false.:input is the LO vertical flux computed above) + call adv_tra_ver_qr4c ( pwvel, ttfAB, partit, mesh, optv, adv_flux_ver, o_init_zero=do_zero_flux) CASE('CDIFF') - call adv_tra_ver_cdiff( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_cdiff( pwvel, ttfAB, partit, mesh, adv_flux_ver, o_init_zero=do_zero_flux) CASE('PPM') - call adv_tra_vert_ppm(dt, pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_vert_ppm(dt, pwvel, ttfAB, partit, mesh, adv_flux_ver, o_init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_ver_upw1 ( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_upw1 ( pwvel, ttfAB, partit, mesh, adv_flux_ver, o_init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) @@ -256,9 +275,9 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, dttf_v(nz,n)=dttf_v(nz,n) + (flux_v(nz,n)-flux_v(nz+1,n))*dt/areasvol(nz,n) end do end do -!$OMP END DO -!$OMP END PARALLEL +!$OMP END DO ! Horizontal +!$OMP DO do edge=1, myDim_edge2D enodes(1:2)=edges(:,edge) el=edge_tri(:,edge) @@ -276,10 +295,23 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, nu12 = nu1 if (nu2>0) nu12 = min(nu1,nu2) - !!PS do nz=1, max(nl1, nl2) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) +#endif do nz=nu12, nl12 dttf_h(nz,enodes(1))=dttf_h(nz,enodes(1))+flux_h(nz,edge)*dt/areasvol(nz,enodes(1)) + end do +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) +#endif + do nz=nu12, nl12 dttf_h(nz,enodes(2))=dttf_h(nz,enodes(2))-flux_h(nz,edge)*dt/areasvol(nz,enodes(2)) end do +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif end do +!$OMP END DO +!$OMP END PARALLEL end subroutine oce_tra_adv_flux2dtracer diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index c68facd23..3b176d1ab 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -81,9 +81,6 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, USE MOD_PARTIT USE MOD_PARSUP USE g_comm_auto -#if defined(_OPENMP) - USE OMP_LIB -#endif implicit none real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh @@ -101,12 +98,6 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, real(kind=WP) :: flux, ae,tvert_max(mesh%nl-1),tvert_min(mesh%nl-1) real(kind=WP) :: flux_eps=1e-16 real(kind=WP) :: bignumber=1e3 - integer :: vlimit=1 -#if defined(_OPENMP) - integer(omp_lock_kind), allocatable, save :: plock(:) - integer(omp_lock_kind) :: mlock(partit%myDim_nod2D) -#endif - logical, save :: l_first=.true. #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -114,17 +105,6 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, k, elem, enodes, num, el, nl1, nl2, nu1, nu2, nl12, nu12, edge, & !$OMP flux, ae,tvert_max, tvert_min) -!$OMP MASTER -#if defined(_OPENMP) - if (l_first) then - allocate(plock(partit%myDim_nod2D+partit%eDim_nod2D)) - do n=1, myDim_nod2D+partit%eDim_nod2D - call omp_init_lock_with_hint(plock(n),omp_sync_hint_speculative+omp_sync_hint_uncontended) - enddo - l_first = .false. - endif -#endif -!$OMP END MASTER ! -------------------------------------------------------------------------- ! ttf is the tracer field on step n ! del_ttf is the increment @@ -246,22 +226,22 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, nu12 = nu1 if (nu2>0) nu12 = min(nu1,nu2) #if defined(_OPENMP) - call omp_set_lock(plock(enodes(1))) + call omp_set_lock(partit%plock(enodes(1))) #endif do nz=nu12, nl12 fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) end do #if defined(_OPENMP) - call omp_unset_lock(plock(enodes(1))) - call omp_set_lock(plock(enodes(2))) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) #endif do nz=nu12, nl12 fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) end do #if defined(_OPENMP) - call omp_unset_lock(plock(enodes(2))) + call omp_unset_lock(partit%plock(enodes(2))) #endif end do !$OMP END DO diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 9214a277d..01ae06a26 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -8,7 +8,7 @@ module oce_adv_tra_hor_interfaces ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) + subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -18,7 +18,7 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine !=============================================================================== ! MUSCL @@ -27,7 +27,7 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) + subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -39,11 +39,11 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine ! a not stable version of MUSCL (reconstruction in the vicinity of bottom topography is not upwind) ! it runs with FCT option only - subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -54,14 +54,14 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine end interface end module ! ! !=============================================================================== -subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) +subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -72,7 +72,8 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: a, vflux integer :: el(2), enodes(2), nz, edge @@ -83,10 +84,16 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do edge=1, myDim_edge2D + flux(:,edge)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! The result is the low-order solution horizontal fluxes @@ -223,7 +230,7 @@ end subroutine adv_tra_hor_upw1 ! ! !=============================================================================== -subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) +subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -238,7 +245,8 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: Tmean1, Tmean2, cHO real(kind=WP) :: c_lo(2) @@ -251,10 +259,16 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do edge=1, myDim_edge2D + flux(:,edge)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! The result is the low-order solution horizontal fluxes @@ -501,7 +515,7 @@ end subroutine adv_tra_hor_muscl ! ! !=============================================================================== - subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -515,7 +529,8 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: Tmean1, Tmean2, cHO real(kind=WP) :: a, vflux @@ -527,10 +542,16 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do edge=1, myDim_edge2D + flux(:,edge)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! The result is the low-order solution horizontal fluxes diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 03a7cb4e8..d8f3bea5e 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -15,10 +15,10 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) !=============================================================================== ! 1st order upwind (explicit) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) + subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -27,15 +27,15 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine !=============================================================================== ! QR (4th order centerd) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) + subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -45,34 +45,33 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine !=============================================================================== ! Vertical advection with PPM reconstruction (5th order) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) + subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP real(kind=WP), intent(in), target :: dt type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - integer :: n, nz, nl1 real(kind=WP) :: tvert(mesh%nl), tv real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine ! central difference reconstruction (2nd order, use only with FCT) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) + subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -83,7 +82,7 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine end interface end module @@ -240,7 +239,7 @@ end subroutine adv_tra_vert_impl ! ! !=============================================================================== -subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) +subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -255,19 +254,27 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO end if !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert, n, nz, nzmax, nzmin) !$OMP DO + do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n) @@ -301,7 +308,7 @@ end subroutine adv_tra_ver_upw1 ! ! !=============================================================================== -subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) +subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, o_init_zero) use MOD_MESH use o_ARRAYS use o_PARAM @@ -314,7 +321,8 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: tvert(mesh%nl) integer :: n, nz, nzmax, nzmin real(kind=WP) :: Tmean, Tmean1, Tmean2 @@ -325,13 +333,20 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO end if !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert,n, nz, nzmax, nzmin, Tmean, Tmean1, Tmean2, qc, qu,qd) !$OMP DO + do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n) @@ -380,7 +395,7 @@ end subroutine adv_tra_ver_qr4c ! ! !=============================================================================== -subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) +subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -393,7 +408,8 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: tvert(mesh%nl), tv(mesh%nl), aL, aR, aj, x real(kind=WP) :: dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1 integer :: n, nz, nzmax, nzmin @@ -404,10 +420,16 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! -------------------------------------------------------------------------- @@ -568,7 +590,7 @@ end subroutine adv_tra_vert_ppm ! ! !=============================================================================== -subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) +subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -580,7 +602,8 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero integer :: n, nz, nzmax, nzmin real(kind=WP) :: tvert(mesh%nl), tv #include "associate_part_def.h" @@ -588,11 +611,18 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO + end if + !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, tv, tvert) !$OMP DO do n=1, myDim_nod2D From 75dafe7219f674e5100aa3e3c9220faa2049b131 Mon Sep 17 00:00:00 2001 From: a270042 Date: Thu, 4 Nov 2021 10:54:29 +0100 Subject: [PATCH 445/909] add derived type for dynamics working arrays --- src/MOD_DYN.F90 | 111 +++++++++++++++++++++++++++++++++++++--- src/oce_ale_vel_rhs.F90 | 2 +- src/oce_dyn.F90 | 31 +++++++---- src/oce_setup_step.F90 | 30 +++++------ 4 files changed, 141 insertions(+), 33 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index 18ab6f46b..86b6cbfea 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -10,7 +10,7 @@ MODULE MOD_DYN ! ! !_______________________________________________________________________________ -TYPE T_solverinfo +TYPE T_SOLVERINFO integer :: ident = 1 integer :: maxiter = 2000 integer :: restart = 15 @@ -18,19 +18,37 @@ MODULE MOD_DYN integer :: lutype = 2 real(kind=WP) :: droptol = 1.e-8 real(kind=WP) :: soltol = 1e-10 !1.e-10 -END TYPE T_solverinfo - + contains + procedure WRITE_T_SOLVERINFO + procedure READ_T_SOLVERINFO + generic :: write(unformatted) => WRITE_T_SOLVERINFO + generic :: read(unformatted) => READ_T_SOLVERINFO +END TYPE T_SOLVERINFO +! +! +!_______________________________________________________________________________ +TYPE T_DYN_WORK + real(kind=WP), allocatable, dimension(:,:,:) :: uvnode_rhs + real(kind=WP), allocatable, dimension(:,:) :: u_c, v_c + real(kind=WP), allocatable, dimension(:,:) :: u_b, v_b + contains + procedure WRITE_T_DYN_WORK + procedure READ_T_DYN_WORK + generic :: write(unformatted) => WRITE_T_DYN_WORK + generic :: read(unformatted) => READ_T_DYN_WORK +END TYPE T_DYN_WORK ! ! !_______________________________________________________________________________ ! set main structure for dynamicss, contains viscosity options and parameters + ! option for momentum advection TYPE T_DYN +!___________________________________________________________________________ ! instant zonal merdional velocity & Adams-Bashfort rhs real(kind=WP), allocatable, dimension(:,:,:):: uv, uv_rhs, uv_rhsAB, fer_uv ! horizontal velocities at nodes - real(kind=WP), allocatable, dimension(:,:,:):: uvnode, uvnode_rhs + real(kind=WP), allocatable, dimension(:,:,:):: uvnode ! instant vertical vel arrays real(kind=WP), allocatable, dimension(:,:) :: w, w_e, w_i, cfl_z, fer_w @@ -38,10 +56,15 @@ MODULE MOD_DYN ! sea surface height arrays real(kind=WP), allocatable, dimension(:) :: eta_n, d_eta, ssh_rhs, ssh_rhs_old + !___________________________________________________________________________ ! summarizes solver input parameter type(t_solverinfo) :: solverinfo + !___________________________________________________________________________ + ! put dynmiacs working arrays + type(t_dyn_work) :: work + !___________________________________________________________________________ ! visc_option=... ! 5=Kinematic (easy) Backscatter ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) @@ -83,6 +106,71 @@ MODULE MOD_DYN contains +! +! +!_______________________________________________________________________________ +! set unformatted writing and reading for T_DYN_WORK +subroutine WRITE_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) + IMPLICIT NONE + class(T_SOLVERINFO), intent(in) :: tsolverinfo + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%ident + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%maxiter + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%restart + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%fillin + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%lutype + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%droptol + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%soltol +end subroutine WRITE_T_SOLVERINFO + +subroutine READ_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) + IMPLICIT NONE + class(T_SOLVERINFO), intent(inout) :: tsolverinfo + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%ident + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%maxiter + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%restart + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%fillin + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%lutype + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%droptol + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%soltol +end subroutine READ_T_SOLVERINFO + +! +! +!_______________________________________________________________________________ +! set unformatted writing and reading for T_DYN_WORK +subroutine WRITE_T_DYN_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN_WORK), intent(in) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call write_bin_array(twork%uvnode_rhs, unit, iostat, iomsg) + call write_bin_array(twork%u_c, unit, iostat, iomsg) + call write_bin_array(twork%v_c, unit, iostat, iomsg) + call write_bin_array(twork%u_b, unit, iostat, iomsg) + call write_bin_array(twork%v_b, unit, iostat, iomsg) +end subroutine WRITE_T_DYN_WORK + +subroutine READ_T_DYN_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN_WORK), intent(inout) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call read_bin_array(twork%uvnode_rhs, unit, iostat, iomsg) + call read_bin_array(twork%u_c, unit, iostat, iomsg) + call read_bin_array(twork%v_c, unit, iostat, iomsg) + call read_bin_array(twork%u_b, unit, iostat, iomsg) + call read_bin_array(twork%v_b, unit, iostat, iomsg) +end subroutine READ_T_DYN_WORK + ! ! !_______________________________________________________________________________ @@ -99,7 +187,6 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) call write_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) call write_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) call write_bin_array(dynamics%uvnode , unit, iostat, iomsg) - call write_bin_array(dynamics%uvnode_rhs, unit, iostat, iomsg) call write_bin_array(dynamics%w , unit, iostat, iomsg) call write_bin_array(dynamics%w_e , unit, iostat, iomsg) @@ -107,10 +194,16 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) call write_bin_array(dynamics%cfl_z , unit, iostat, iomsg) if (Fer_GM) then - call write_bin_array(dynamics%fer_w , unit, iostat, iomsg) - call write_bin_array(dynamics%fer_uv , unit, iostat, iomsg) + call write_bin_array(dynamics%fer_w , unit, iostat, iomsg) + call write_bin_array(dynamics%fer_uv, unit, iostat, iomsg) end if + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%work + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%solverinfo + !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc @@ -140,7 +233,6 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) call read_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) call read_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) call read_bin_array(dynamics%uvnode , unit, iostat, iomsg) - call read_bin_array(dynamics%uvnode_rhs, unit, iostat, iomsg) call read_bin_array(dynamics%w , unit, iostat, iomsg) call read_bin_array(dynamics%w_e , unit, iostat, iomsg) @@ -152,6 +244,9 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) call read_bin_array(dynamics%fer_uv , unit, iostat, iomsg) end if + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%work + !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 71e0487c2..bee2c0b7d 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -213,7 +213,7 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) #include "associate_mesh_ass.h" UV =>dynamics%uv(:,:,:) UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) - UVnode_rhs=>dynamics%uvnode_rhs(:,:,:) + UVnode_rhs=>dynamics%work%uvnode_rhs(:,:,:) Wvel_e =>dynamics%w_e(:,:) !___________________________________________________________________________ diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index b045b28b2..036aaefea 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -11,7 +11,6 @@ ! (5) visc_filt_bcksct, (6) visc_filt_bilapl, (7) visc_filt_bidiff ! 4. Div_c =1. should be default ! 5. Leith_c=? (need to be adjusted) - module visc_filt_bcksct_interface interface subroutine visc_filt_bcksct(dynamics, partit, mesh) @@ -267,17 +266,21 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax - real(kind=8), allocatable :: U_b(:,:), V_b(:,:), U_c(:,:), V_c(:,:) +!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) + real(kind=8), allocatable :: U_b(:,:), V_b(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. @@ -370,19 +373,22 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax - real(kind=8), allocatable :: U_c(:,:), V_c(:,:) +!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -459,18 +465,21 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax - real(kind=8), allocatable :: U_c(:,:), V_c(:,:) +!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) ! ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -543,18 +552,22 @@ SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) real(kind=8) :: u1, v1, le(2), len, crosslen, vi, uke1 integer :: nz, ed, el(2) -real(kind=8), allocatable :: U_c(:,:), V_c(:,:), UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) -real(kind=8), allocatable :: uuu(:) +!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) +real(kind=8) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) +real(kind=8) , allocatable :: uuu(:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh -real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs +real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs +real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -UV => dynamics%uv(:,:,:) +UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) +U_c => dynamics%work%u_c(:,:) +V_c => dynamics%work%v_c(:,:) ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 4a2bfbc60..cfe11d459 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -403,35 +403,35 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) allocate(dynamics%uv_rhs( 2, nl-1, elem_size)) allocate(dynamics%uv_rhsAB( 2, nl-1, elem_size)) allocate(dynamics%uvnode( 2, nl-1, node_size)) - allocate(dynamics%uvnode_rhs(2, nl-1, node_size)) - dynamics%uv = 0.0_WP - dynamics%uv_rhs = 0.0_WP - dynamics%uv_rhsAB = 0.0_WP - dynamics%uvnode = 0.0_WP - dynamics%uvnode_rhs = 0.0_WP + allocate(dynamics%work%uvnode_rhs(2, nl-1, node_size)) + dynamics%uv = 0.0_WP + dynamics%uv_rhs = 0.0_WP + dynamics%uv_rhsAB = 0.0_WP + dynamics%uvnode = 0.0_WP + dynamics%work%uvnode_rhs = 0.0_WP allocate(dynamics%w( nl, node_size)) allocate(dynamics%w_e( nl, node_size)) allocate(dynamics%w_i( nl, node_size)) allocate(dynamics%cfl_z( nl, node_size)) - dynamics%w = 0.0_WP - dynamics%w_e = 0.0_WP - dynamics%w_i = 0.0_WP - dynamics%cfl_z = 0.0_WP + dynamics%w = 0.0_WP + dynamics%w_e = 0.0_WP + dynamics%w_i = 0.0_WP + dynamics%cfl_z = 0.0_WP allocate(dynamics%eta_n( node_size)) allocate(dynamics%d_eta( node_size)) allocate(dynamics%ssh_rhs( node_size)) !!PS allocate(dynamics%ssh_rhs_old(node_size)) - dynamics%eta_n = 0.0_WP - dynamics%d_eta = 0.0_WP - dynamics%ssh_rhs = 0.0_WP + dynamics%eta_n = 0.0_WP + dynamics%d_eta = 0.0_WP + dynamics%ssh_rhs = 0.0_WP if (Fer_GM) then allocate(dynamics%fer_uv(2, nl-1, elem_size)) allocate(dynamics%fer_w( nl, node_size)) - dynamics%fer_uv = 0.0_WP - dynamics%fer_w = 0.0_WP + dynamics%fer_uv = 0.0_WP + dynamics%fer_w = 0.0_WP end if !!PS dynamics%ssh_rhs_old= 0.0_WP From fefe11421683004d28ecc30b25a337695ae1b261 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 4 Nov 2021 11:55:36 +0100 Subject: [PATCH 446/909] omp_init_lock_with_hint is implemented only OPENMP v.5 and we recommend to use it. For older versions omp_init_lock will be used and is less efficient. --- src/gen_modules_partitioning.F90 | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 552349af7..4f69080c6 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -510,7 +510,13 @@ subroutine init_gatherLists(partit) #if defined(_OPENMP) allocate(partit%plock(partit%myDim_nod2D+partit%eDim_nod2D)) do n=1, myDim_nod2D+partit%eDim_nod2D +!experiments showd that OPENMP5 implementation of the lock (201811) is >10% more efficient +!make sure you use OPENMP v. 5.0 +#if _OPENMP >= 201811 call omp_init_lock_with_hint(partit%plock(n),omp_sync_hint_speculative+omp_sync_hint_uncontended) +#else + call omp_init_lock(partit%plock(n)) +#endif enddo #endif !$OMP END MASTER From bc3546d4603a7c59f9b81f2e519104f7425e24fe Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 5 Nov 2021 09:10:35 +0000 Subject: [PATCH 447/909] make attribute statements consistent between interface and subroutine definitions --- src/gen_forcing_couple.F90 | 3 ++- src/ice_oce_coupling.F90 | 4 ++-- src/oce_adv_tra_hor.F90 | 2 +- src/oce_mesh.F90 | 12 ++++++------ 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index af1d2469d..5aacb7f93 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -97,10 +97,11 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) use force_flux_consv_interface implicit none + integer, intent(in) :: istep type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers - integer :: i, istep,itime,n2,n,nz,k,elem + integer :: i, itime,n2,n,nz,k,elem real(kind=WP) :: i_coef, aux real(kind=WP) :: dux, dvy,tx,ty,tvol real(kind=WP) :: t1, t2 diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 30dedc505..5a7218013 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -128,7 +128,7 @@ subroutine ocean2ice(tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(inout), target :: tracers integer :: n, elem, k real(kind=WP) :: uw, vw, vol real(kind=WP), dimension(:,:), pointer :: temp, salt @@ -217,7 +217,7 @@ subroutine oce_fluxes(tracers, partit, mesh) implicit none type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(inout), target :: tracers integer :: n, elem, elnodes(3),n1 real(kind=WP) :: rsss, net real(kind=WP), allocatable :: flux(:) diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 01ae06a26..c7e209dfd 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -47,7 +47,7 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - type(t_partit),intent(in), target :: partit + type(t_partit),intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 0bb876ae7..e205e8f2a 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -82,8 +82,8 @@ subroutine elem_center(elem, x, y, mesh) USE MOD_PARTIT USE MOD_PARSUP integer :: elem - real(kind=WP) :: x, y - type(t_mesh), intent(inout), target :: mesh + real(kind=WP), intent(inout), :: x, y + type(t_mesh), intent(inout), target :: mesh end subroutine end interface end module @@ -94,8 +94,8 @@ subroutine edge_center(n1, n2, x, y, mesh) USE MOD_PARTIT USE MOD_PARSUP integer :: n1, n2 - real(kind=WP) :: x, y - type(t_mesh), intent(inout), target :: mesh + real(kind=WP), intent(inout):: x, y + type(t_mesh), intent(inout), target :: mesh end subroutine end interface end module @@ -1857,7 +1857,7 @@ subroutine edge_center(n1, n2, x, y, mesh) implicit none integer :: n1, n2 ! nodes of the edge real(kind=WP), intent(inout) :: x, y -type(t_mesh), intent(in) :: mesh +type(t_mesh), intent(inout), target :: mesh real(kind=WP) :: a(2), b(2) a=mesh%coord_nod2D(:,n1) @@ -1875,7 +1875,7 @@ subroutine elem_center(elem, x, y, mesh) USE g_CONFIG implicit none real(kind=WP), intent(inout) :: x, y -type(t_mesh), intent(in) :: mesh +type(t_mesh), intent(inout), target :: mesh integer :: elem, elnodes(3), k real(kind=WP) :: ax(3), amin From 5013fc4e8f7abe258c5e4819f1b0ebda5d96dda5 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 5 Nov 2021 09:22:18 +0000 Subject: [PATCH 448/909] delete comma --- src/oce_mesh.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index e205e8f2a..b4222164c 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -82,7 +82,7 @@ subroutine elem_center(elem, x, y, mesh) USE MOD_PARTIT USE MOD_PARSUP integer :: elem - real(kind=WP), intent(inout), :: x, y + real(kind=WP), intent(inout) :: x, y type(t_mesh), intent(inout), target :: mesh end subroutine end interface From 663e955a2efb98324f4d8fb130a369fd029caf1f Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 5 Nov 2021 09:32:39 +0000 Subject: [PATCH 449/909] Continuation of character context in free source form requires an & as the first nonblank character on the next line for the cray compiler --- src/io_meandata.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 75395e0a6..a9c508e8d 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -1077,8 +1077,8 @@ subroutine associate_new_stream(name, entry) do i=1, io_NSTREAMS if(trim(io_stream(i)%name) .eq. name) then print *,"variable '"//name//"' already exists, & - check if you define it multiple times, for example in namelist.io, & - namelist.icepack, io_meandata.F90 or other place that add I/O stream." + &check if you define it multiple times, for example in namelist.io, & + &namelist.icepack, io_meandata.F90 or other place that add I/O stream." call assert(.false., __LINE__) end if end do From e1c92397f293bc0d4dc7be4d16b8aae83464769d Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 5 Nov 2021 15:07:50 +0000 Subject: [PATCH 450/909] fesom2 compiles on the ECMWF cray machine --- src/oce_ale.F90 | 31 ++++++++++++++++--------------- src/oce_ale_tracer.F90 | 2 +- src/oce_setup_step.F90 | 2 +- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index fb4ee6336..15e6ce308 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -4,7 +4,7 @@ subroutine init_bottom_elem_thickness(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -12,7 +12,7 @@ subroutine init_bottom_node_thickness(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -20,7 +20,7 @@ subroutine init_surface_elem_depth(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -28,7 +28,7 @@ subroutine init_surface_node_depth(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -36,7 +36,7 @@ subroutine impl_vert_visc_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -44,7 +44,7 @@ subroutine update_stiff_mat_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -52,7 +52,7 @@ subroutine compute_ssh_rhs_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -60,7 +60,7 @@ subroutine solve_ssh_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -68,7 +68,7 @@ subroutine compute_hbar_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -76,7 +76,7 @@ subroutine vert_vel_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine @@ -84,7 +84,7 @@ subroutine update_thickness_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface @@ -97,8 +97,8 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - integer, intent(in) :: n - type(t_mesh), intent(in), target :: mesh + integer, intent(in) :: n + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers end subroutine @@ -2700,12 +2700,13 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) use write_step_info_interface use check_blowup_interface IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh + integer, intent(in) :: n + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo - integer :: n, node + integer :: node #include "associate_part_def.h" #include "associate_mesh_def.h" diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index ed5145ec2..5e3c02282 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -233,7 +233,7 @@ subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) implicit none real(kind=WP), intent(in), target :: dt integer :: node, nz - integer, intent(in) :: tr_num + integer, intent(in), target :: tr_num type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 2842b69f0..216e36b9b 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -31,7 +31,7 @@ subroutine ocean_setup(tracers, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers end subroutine From 96bcb0b97858d94f480bcdae737c2f7b98fe4f19 Mon Sep 17 00:00:00 2001 From: a270042 Date: Fri, 5 Nov 2021 16:56:06 +0100 Subject: [PATCH 451/909] exchange arrays U_b, V_b against derived type for dynamics working arrays dynamics%work%u_b ... --- src/MOD_DYN.F90 | 2 + src/oce_dyn.F90 | 11 ++---- src/oce_setup_step.F90 | 88 ++++++++++++++++++++++++++---------------- 3 files changed, 61 insertions(+), 40 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index 86b6cbfea..a3b570afc 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -30,6 +30,8 @@ MODULE MOD_DYN TYPE T_DYN_WORK real(kind=WP), allocatable, dimension(:,:,:) :: uvnode_rhs real(kind=WP), allocatable, dimension(:,:) :: u_c, v_c + + ! easy backscatter contribution real(kind=WP), allocatable, dimension(:,:) :: u_b, v_b contains procedure WRITE_T_DYN_WORK diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 036aaefea..f2834253d 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -266,21 +266,21 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax -!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) - real(kind=8), allocatable :: U_b(:,:), V_b(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs - real(kind=WP), dimension(:,:) , pointer :: U_c, V_c + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c, U_b, V_b #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv( :,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) U_c => dynamics%work%u_c(:,:) V_c => dynamics%work%v_c(:,:) + U_b => dynamics%work%u_b(:,:) + V_b => dynamics%work%v_b(:,:) ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. @@ -351,7 +351,6 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -easy_bs_return*sum(V_c(nz,nelem))/3.0_WP END DO end do - deallocate(V_c,U_c,V_b,U_b) end subroutine visc_filt_bcksct ! ! @@ -373,7 +372,6 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax -!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit @@ -465,7 +463,6 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax -!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index cfe11d459..7d142e043 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -394,22 +394,50 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) !!PS read(nm_unit, nml=dynamics_general, iostat=iost) !!PS close(nm_unit) + !___________________________________________________________________________ + ! set parameters in derived type +!!PS dynamics%visc_opt = visc_opt +!!PS dynamics%gamma0_visc = gamma0_visc +!!PS dynamics%gamma1_visc = gamma1_visc +!!PS dynamics%gamma2_visc = gamma2_visc +!!PS dynamics%use_ivertvisc = use_ivertvisc +!!PS dynamics%momadv_opt = momadv_opt +!!PS dynamics%use_freeslip = use_freeslip +!!PS dynamics%use_wsplit = use_wsplit +!!PS dynamics%wsplit_maxcfl = wsplit_maxcfl + + dynamics%visc_opt = visc_option + dynamics%gamma0_visc = gamma0 + dynamics%gamma1_visc = gamma1 + dynamics%gamma2_visc = gamma2 + dynamics%use_ivertvisc = i_vert_visc + dynamics%momadv_opt = mom_adv + dynamics%use_freeslip = free_slip + dynamics%use_wsplit = w_split + dynamics%wsplit_maxcfl = w_max_cfl + + !___________________________________________________________________________ ! define local vertice & elem array size elem_size=myDim_elem2D+eDim_elem2D node_size=myDim_nod2D+eDim_nod2D - - ! allocate data arrays in derived type + + !___________________________________________________________________________ + ! allocate/initialise horizontal velocity arrays in derived type allocate(dynamics%uv( 2, nl-1, elem_size)) allocate(dynamics%uv_rhs( 2, nl-1, elem_size)) allocate(dynamics%uv_rhsAB( 2, nl-1, elem_size)) allocate(dynamics%uvnode( 2, nl-1, node_size)) - allocate(dynamics%work%uvnode_rhs(2, nl-1, node_size)) dynamics%uv = 0.0_WP dynamics%uv_rhs = 0.0_WP dynamics%uv_rhsAB = 0.0_WP dynamics%uvnode = 0.0_WP - dynamics%work%uvnode_rhs = 0.0_WP + if (Fer_GM) then + allocate(dynamics%fer_uv(2, nl-1, elem_size)) + dynamics%fer_uv = 0.0_WP + end if + !___________________________________________________________________________ + ! allocate/initialise vertical velocity arrays in derived type allocate(dynamics%w( nl, node_size)) allocate(dynamics%w_e( nl, node_size)) allocate(dynamics%w_i( nl, node_size)) @@ -418,44 +446,38 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%w_e = 0.0_WP dynamics%w_i = 0.0_WP dynamics%cfl_z = 0.0_WP + if (Fer_GM) then + allocate(dynamics%fer_w( nl, node_size)) + dynamics%fer_w = 0.0_WP + end if + !___________________________________________________________________________ + ! allocate/initialise ssh arrays in derived type allocate(dynamics%eta_n( node_size)) allocate(dynamics%d_eta( node_size)) allocate(dynamics%ssh_rhs( node_size)) - !!PS allocate(dynamics%ssh_rhs_old(node_size)) dynamics%eta_n = 0.0_WP dynamics%d_eta = 0.0_WP dynamics%ssh_rhs = 0.0_WP - - if (Fer_GM) then - allocate(dynamics%fer_uv(2, nl-1, elem_size)) - allocate(dynamics%fer_w( nl, node_size)) - dynamics%fer_uv = 0.0_WP - dynamics%fer_w = 0.0_WP + !!PS allocate(dynamics%ssh_rhs_old(node_size)) + !!PS dynamics%ssh_rhs_old= 0.0_WP + + !___________________________________________________________________________ + ! inititalise working arrays + allocate(dynamics%work%uvnode_rhs(2, nl-1, node_size)) + allocate(dynamics%work%u_c(nl-1, elem_size)) + allocate(dynamics%work%v_c(nl-1, elem_size)) + dynamics%work%uvnode_rhs = 0.0_WP + dynamics%work%u_c = 0.0_WP + dynamics%work%v_c = 0.0_WP + if (dynamics%visc_opt==5) then + allocate(dynamics%work%u_b(nl-1, elem_size)) + allocate(dynamics%work%v_b(nl-1, elem_size)) + dynamics%work%u_b = 0.0_WP + dynamics%work%v_b = 0.0_WP end if - -!!PS dynamics%ssh_rhs_old= 0.0_WP - - ! set parameters in derived type -!!PS dynamics%visc_opt = visc_opt -!!PS dynamics%gamma0_visc = gamma0_visc -!!PS dynamics%gamma1_visc = gamma1_visc -!!PS dynamics%gamma2_visc = gamma2_visc -!!PS dynamics%use_ivertvisc = use_ivertvisc -!!PS dynamics%momadv_opt = momadv_opt -!!PS dynamics%use_freeslip = use_freeslip -!!PS dynamics%use_wsplit = use_wsplit -!!PS dynamics%wsplit_maxcfl = wsplit_maxcfl - dynamics%visc_opt = visc_option - dynamics%gamma0_visc = gamma0 - dynamics%gamma1_visc = gamma1 - dynamics%gamma2_visc = gamma2 - dynamics%use_ivertvisc = i_vert_visc - dynamics%momadv_opt = mom_adv - dynamics%use_freeslip = free_slip - dynamics%use_wsplit = w_split - dynamics%wsplit_maxcfl = w_max_cfl + END SUBROUTINE dynamics_init ! ! From 1a40e24d5b85902ee249ae77ee6c9a1ebab69fb7 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 5 Nov 2021 18:10:25 +0000 Subject: [PATCH 452/909] adapt CMakeLists.txt for cray with OpenMP --- src/CMakeLists.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9500ef1ea..9b956c421 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -81,8 +81,12 @@ elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) +if(${ENABLE_OPENMP}) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -homp) +else() target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) endif() +endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${SCRIP_Fortran_INCLUDE_DIRECTORIES}) @@ -90,7 +94,7 @@ target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARI target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) target_link_libraries(${PROJECT_NAME} async_threads_cpp) set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) -if(${ENABLE_OPENMP}) +if(${ENABLE_OPENMP} AND NOT ${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray) target_link_libraries(${PROJECT_NAME} OpenMP::OpenMP_Fortran) endif() From 9a4c30720801defcdf55fc22616fad6412db44b7 Mon Sep 17 00:00:00 2001 From: a270042 Date: Sat, 6 Nov 2021 23:14:22 +0100 Subject: [PATCH 453/909] outsource stochastic backscatter into an own module src/gen_modules_backscatter.F90, where backscatter varaibles are declared and initialse by subroutine init_backscatter --- src/gen_modules_backscatter.F90 | 411 +++++++++++++++ src/oce_dyn.F90 | 856 ++++++++++++++++---------------- src/oce_setup_step.F90 | 48 +- 3 files changed, 869 insertions(+), 446 deletions(-) create mode 100644 src/gen_modules_backscatter.F90 diff --git a/src/gen_modules_backscatter.F90 b/src/gen_modules_backscatter.F90 new file mode 100644 index 000000000..f602c39c0 --- /dev/null +++ b/src/gen_modules_backscatter.F90 @@ -0,0 +1,411 @@ +module g_backscatter + + !___________________________________________________________________________ + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + + !___________________________________________________________________________ + USE o_ARRAYS, only: bvfreq, coriolis_node + + !___________________________________________________________________________ + USE o_param + USE g_CONFIG + USE g_comm_auto + USE g_support + USE g_rotate_grid + IMPLICIT NONE + + !___________________________________________________________________________ + ! allocate backscatter arrays + real(kind=WP), allocatable, dimension(:,:) :: v_back + real(kind=WP), allocatable, dimension(:,:) :: uke, uke_back, uke_dis, uke_dif + real(kind=WP), allocatable, dimension(:,:) :: uke_rhs, uke_rhs_old + real(kind=WP), allocatable, dimension(:,:) :: UV_dis_posdef_b2, UV_dis_posdef, UV_back_posdef + real(kind=WP), allocatable, dimension(:,:,:):: UV_back, UV_dis + real(kind=WP), allocatable, dimension(:,:,:):: UV_dis_tend, UV_total_tend, UV_back_tend + + contains + ! + ! + !___________________________________________________________________________ + ! allocate/initialise backscatter arrays + subroutine init_backscatter(partit, mesh) + implicit none + integer :: elem_size + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + elem_size = myDim_elem2D + eDim_elem2D + allocate(v_back( nl-1, elem_size)) ! Backscatter viscosity + allocate(uke( nl-1, elem_size)) ! Unresolved kinetic energy for backscatter coefficient + allocate(uke_dis( nl-1, elem_size)) + allocate(uke_back( nl-1, elem_size)) + allocate(uke_dif( nl-1, elem_size)) + allocate(uke_rhs( nl-1, elem_size)) + allocate(uke_rhs_old( nl-1, elem_size)) + allocate(UV_dis( 2, nl-1, elem_size)) + allocate(UV_back( 2, nl-1, elem_size)) + allocate(UV_dis_tend( 2, nl-1, elem_size)) + allocate(UV_back_tend( 2, nl-1, elem_size)) + allocate(UV_total_tend(2, nl-1, elem_size)) + uke = 0.0_WP + v_back = 0.0_WP + uke_dis = 0.0_WP + uke_dif = 0.0_WP + uke_back = 0.0_WP + uke_rhs = 0.0_WP + uke_rhs_old = 0.0_WP + UV_dis = 0.0_WP + UV_dis_tend = 0.0_WP + UV_back = 0.0_WP + UV_back_tend = 0.0_WP + UV_total_tend = 0.0_WP + + end subroutine init_backscatter + + ! + ! + !_______________________________________________________________________________ + subroutine visc_filt_dbcksc(dynamics, partit, mesh) + IMPLICIT NONE + + real(kind=WP) :: u1, v1, le(2), len, crosslen, vi, uke1 + integer :: nz, ed, el(2) + real(kind=WP) , allocatable :: uke_d(:,:) + !!PS real(kind=WP) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:) + real(kind=WP) , allocatable :: uuu(:) + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) + + ! An analog of harmonic viscosity operator. + ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area + ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. + ! The contribution from boundary edges is neglected (free slip). + ! Filter is applied twice. + ed=myDim_elem2D+eDim_elem2D + !!PS allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) + allocate(uke_d(nl-1,ed)) + allocate(uuu(ed)) + UV_back= 0.0_WP + UV_dis = 0.0_WP + uke_d = 0.0_WP + U_c = 0.0_WP + V_c = 0.0_WP + + DO ed=1, myDim_edge2D+eDim_edge2D + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + DO nz=1,minval(nlevels(el))-1 + u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) + v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) + + U_c(nz,el(1))=U_c(nz,el(1))-u1 + U_c(nz,el(2))=U_c(nz,el(2))+u1 + V_c(nz,el(1))=V_c(nz,el(1))-v1 + V_c(nz,el(2))=V_c(nz,el(2))+v1 + END DO + END DO + + Do ed=1,myDim_elem2D + len=sqrt(elem_area(ed)) + len=dt*len/30.0_WP + Do nz=1,nlevels(ed)-1 + ! vi has the sense of harmonic viscosity coefficient because of + ! the division by area in the end + ! ==== + ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) + ! ==== + vi=max(0.2_WP,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len + U_c(nz,ed)=-U_c(nz,ed)*vi + V_c(nz,ed)=-V_c(nz,ed)*vi + END DO + end do + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) + + DO ed=1, myDim_edge2D+eDim_edge2D + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + le=edge_dxdy(:,ed) + le(1)=le(1)*sum(elem_cos(el))*0.25_WP + len=sqrt(le(1)**2+le(2)**2)*r_earth + le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) + le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) + crosslen=sqrt(le(1)**2+le(2)**2) + + DO nz=1,minval(nlevels(el))-1 + vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen + !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed + !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed + !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where + ! Visc is small and decoupling might happen + !Backscatter contribution + u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi + v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi + + !UKE diffusion + vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen + uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi + + UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) + UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) + UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) + UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) + + !Correct scaling for the diffusion? + uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) + uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) + + !Biharmonic contribution + u1=(U_c(nz,el(1))-U_c(nz,el(2))) + v1=(V_c(nz,el(1))-V_c(nz,el(2))) + + UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) + UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) + UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) + UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) + + END DO + END DO + call exchange_elem(UV_back, partit) + + DO nz=1, nl-1 + uuu=0.0_WP + uuu=UV_back(1,nz,:) + call smooth_elem(uuu,smooth_back_tend, partit, mesh) + UV_back(1,nz,:)=uuu + uuu=0.0_WP + uuu=UV_back(2,nz,:) + call smooth_elem(uuu,smooth_back_tend, partit, mesh) + UV_back(2,nz,:)=uuu + END DO + + DO ed=1, myDim_elem2D + DO nz=1,nlevels(ed)-1 + UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) + UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) + END DO + END DO + + UV_dis_tend=UV_dis!+UV_back + UV_total_tend=UV_dis+UV_back + UV_back_tend=UV_back + uke_dif=uke_d + + call uke_update(dynamics, partit, mesh) + + !!PS deallocate(UV_dis,UV_back) + deallocate(uke_d) + deallocate(uuu) + end subroutine visc_filt_dbcksc + + ! + ! + !_______________________________________________________________________________ + subroutine backscatter_coef(partit, mesh) + IMPLICIT NONE + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, nz +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !Potentially add the Rossby number scaling to the script... + !check if sign is right! Different in the Jansen paper + !Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) + v_back=0.0_WP + DO elem=1, myDim_elem2D + DO nz=1,nlevels(elem)-1 + !v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) + !v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct + v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct + !Scaling by sqrt(2*elem_area) or sqrt(elem_area)? + END DO + END DO + call exchange_elem(v_back, partit) + end subroutine backscatter_coef + ! + ! + !_______________________________________________________________________________ + subroutine uke_update(dynamics, partit, mesh) + IMPLICIT NONE + + !I had to change uke(:) to uke(:,:) to make output and restart work!! + !Why is it necessary to implement the length of the array? It doesn't work without! + !integer, intent(in) :: t_levels + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + + real(kind=WP) :: hall, h1_eta, hnz, vol + integer :: elnodes(3), nz, ed, edi, node, j, elem, q + real(kind=WP), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) + integer :: kk, nzmax, el + real(kind=WP) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso + real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? + real(kind=WP), dimension(:,:,:), pointer :: UV +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + + !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered + !rossby_num=2 + + ed=myDim_elem2D+eDim_elem2D + allocate(uuu(ed)) + + uke_back=0.0_WP + uke_dis=0.0_WP + DO ed=1, myDim_elem2D + DO nz=1, nlevels(ed)-1 + uke_dis(nz,ed) =(UV(1,nz,ed)*UV_dis_tend( 1,nz,ed)+UV(2,nz,ed)*UV_dis_tend( 2,nz,ed)) + uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) + END DO + END DO + + DO nz=1,nl-1 + uuu=0.0_8 + uuu=uke_back(nz,:) + call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? + uke_back(nz,:)=uuu + END DO + + !Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice + !Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic + !uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) + ed=myDim_elem2D+eDim_elem2D + allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) + allocate(work_uv(myDim_nod2D+eDim_nod2D)) + allocate(rosb_array(nl-1,ed)) + call exchange_elem(UV, partit) + rosb_array=0._WP + DO nz=1, nl-1 + work_uv=0._WP + DO node=1, myDim_nod2D + vol=0._WP + U_work(nz,node)=0._WP + V_work(nz,node)=0._WP + DO j=1, nod_in_elem2D_num(node) + elem=nod_in_elem2D(j, node) + U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) + V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) + vol=vol+elem_area(elem) + END DO + U_work(nz,node)=U_work(nz,node)/vol + V_work(nz,node)=U_work(nz,node)/vol + END DO + work_uv=U_work(nz,:) + call exchange_nod(work_uv, partit) + U_work(nz,:)=work_uv + work_uv=V_work(nz,:) + call exchange_nod(work_uv, partit) + V_work(nz,:)=work_uv + END DO + + DO el=1,myDim_elem2D + DO nz=1, nlevels(el)-1 + rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& + sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& + (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& + sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) + ! hall=hall+hnz + END DO + ! rosb_array(el)=rosb_array(el)/hall + END DO + + DO ed=1, myDim_elem2D + scaling=1._WP + IF(uke_scaling) then + reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) + rosb=0._wp + elnodes=elem2D_nodes(:, ed) + DO kk=1,3 + c1=0._wp + nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) + !Vertical average; same scaling in the vertical + DO nz=1, nzmax-1 + c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. + END DO + c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min + !Cutoff K_GM depending on (Resolution/Rossby radius) ratio + rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) + END DO + rosb=rosb/3._WP + scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) + END IF + + DO nz=1, nlevels(ed)-1 + elnodes=elem2D_nodes(:,ed) + + !Taking out that one place where it is always weird (Pacific Southern Ocean) + !Should not really be used later on, once we fix the issue with the 1/4 degree grid + if(.not. (TRIM(which_toy)=="soufflet")) then + call elem_center(ed, ex, ey) + !a1=-104.*rad + !a2=-49.*rad + call g2r(-104.*rad, -49.*rad, a1, a2) + dist_reg(1)=ex-a1 + dist_reg(2)=ey-a2 + call trim_cyclic(dist_reg(1)) + dist_reg(1)=dist_reg(1)*elem_cos(ed) + dist_reg=dist_reg*r_earth + len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) + + !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 + rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) + !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) + uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._WP/(1._WP+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) + else + rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) + !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) + uke_dis(nz,ed)=scaling*1._WP/(1._WP+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) + end if + END DO + END DO + + deallocate(U_work, V_work) + deallocate(rosb_array) + deallocate(work_uv) + + call exchange_elem(uke_dis, partit) + DO nz=1, nl-1 + uuu=uke_dis(nz,:) + call smooth_elem(uuu,smooth_dis, partit, mesh) + uke_dis(nz,:)=uuu + END DO + DO ed=1, myDim_elem2D + DO nz=1,nlevels(ed)-1 + uke_rhs_old(nz,ed)=uke_rhs(nz,ed) + uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) + uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) + END DO + END DO + + call exchange_elem(uke, partit) + deallocate(uuu) + + end subroutine uke_update +end module g_backscatter + diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index f2834253d..cd78ac3c9 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -53,48 +53,48 @@ subroutine visc_filt_bidiff(dynamics, partit, mesh) end subroutine end interface end module -module visc_filt_dbcksc_interface - interface - subroutine visc_filt_dbcksc(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module -module backscatter_coef_interface - interface - subroutine backscatter_coef(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module -module uke_update_interface - interface - subroutine uke_update(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module +!!PS module visc_filt_dbcksc_interface +!!PS interface +!!PS subroutine visc_filt_dbcksc(dynamics, partit, mesh) +!!PS use mod_mesh +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS USE MOD_DYN +!!PS type(t_dyn) , intent(inout), target :: dynamics +!!PS type(t_partit), intent(inout), target :: partit +!!PS type(t_mesh) , intent(in) , target :: mesh +!!PS +!!PS end subroutine +!!PS end interface +!!PS end module +!!PS module backscatter_coef_interface +!!PS interface +!!PS subroutine backscatter_coef(dynamics, partit, mesh) +!!PS use mod_mesh +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS USE MOD_DYN +!!PS type(t_dyn) , intent(inout), target :: dynamics +!!PS type(t_partit), intent(inout), target :: partit +!!PS type(t_mesh) , intent(in) , target :: mesh +!!PS +!!PS end subroutine +!!PS end interface +!!PS end module +!!PS module uke_update_interface +!!PS interface +!!PS subroutine uke_update(dynamics, partit, mesh) +!!PS use mod_mesh +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS USE MOD_DYN +!!PS type(t_dyn) , intent(inout), target :: dynamics +!!PS type(t_partit), intent(inout), target :: partit +!!PS type(t_mesh) , intent(in) , target :: mesh +!!PS +!!PS end subroutine +!!PS end interface +!!PS end module module relative_vorticity_interface interface @@ -219,8 +219,9 @@ subroutine viscosity_filter(option, dynamics, partit, mesh) use visc_filt_bcksct_interface use visc_filt_bilapl_interface use visc_filt_bidiff_interface - use visc_filt_dbcksc_interface - use backscatter_coef_interface +!!PS use visc_filt_dbcksc_interface +!!PS use backscatter_coef_interface + use g_backscatter IMPLICIT NONE integer :: option type(t_dyn) , intent(inout), target :: dynamics @@ -237,13 +238,18 @@ subroutine viscosity_filter(option, dynamics, partit, mesh) ! h_viscosity. SELECT CASE (option) CASE (5) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bcksct'//achar(27)//'[0m' call visc_filt_bcksct(dynamics, partit, mesh) CASE (6) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bilapl'//achar(27)//'[0m' call visc_filt_bilapl(dynamics, partit, mesh) CASE (7) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bidiff'//achar(27)//'[0m' call visc_filt_bidiff(dynamics, partit, mesh) CASE (8) - call backscatter_coef(dynamics, partit, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call backscatter_coef'//achar(27)//'[0m' + call backscatter_coef(partit, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_dbcksc'//achar(27)//'[0m' call visc_filt_dbcksc(dynamics, partit, mesh) CASE DEFAULT if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' @@ -530,386 +536,386 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) deallocate(V_c, U_c) end subroutine visc_filt_bidiff -! -! -!_______________________________________________________________________________ -SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -use MOD_DYN -USE o_ARRAYS, only: v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & - uke, uke_dif -USE o_PARAM -USE g_CONFIG -USE g_comm_auto -USE g_support -USE uke_update_interface -IMPLICIT NONE - -real(kind=8) :: u1, v1, le(2), len, crosslen, vi, uke1 -integer :: nz, ed, el(2) -!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) -real(kind=8) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) -real(kind=8) , allocatable :: uuu(:) -type(t_dyn) , intent(inout), target :: dynamics -type(t_partit), intent(inout), target :: partit -type(t_mesh) , intent(in) , target :: mesh -real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs -real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" -UV => dynamics%uv(:,:,:) -UV_rhs => dynamics%uv_rhs(:,:,:) -U_c => dynamics%work%u_c(:,:) -V_c => dynamics%work%v_c(:,:) - - ! An analog of harmonic viscosity operator. - ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area - ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. - ! The contribution from boundary edges is neglected (free slip). - ! Filter is applied twice. - -ed=myDim_elem2D+eDim_elem2D -allocate(U_c(nl-1,ed), V_c(nl-1, ed)) -allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) -allocate(uke_d(nl-1,ed)) -allocate(uuu(ed)) - - U_c=0.0_8 - V_c=0.0_8 - UV_back=0.0_8 - UV_dis=0.0_8 - uke_d=0.0_8 - - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - DO nz=1,minval(nlevels(el))-1 - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO - END DO - - - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - len=dt*len/30.0_8 - Do nz=1,nlevels(ed)-1 - ! vi has the sense of harmonic viscosity coefficient because of - ! the division by area in the end - ! ==== - ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) - ! ==== - vi=max(0.2_8,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - - - call exchange_elem(U_c, partit) - call exchange_elem(V_c, partit) - - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - le=edge_dxdy(:,ed) - le(1)=le(1)*sum(elem_cos(el))*0.25_8 - len=sqrt(le(1)**2+le(2)**2)*r_earth - le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) - le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) - crosslen=sqrt(le(1)**2+le(2)**2) - DO nz=1,minval(nlevels(el))-1 - vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen - !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed - !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed - !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where - ! Visc is small and decoupling might happen - !Backscatter contribution - u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi - v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi - - !UKE diffusion - vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen - - uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi - - - UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) - UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) - UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) - UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) - - !Correct scaling for the diffusion? - uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) - uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) - - - - !Biharmonic contribution - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - - UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) - UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) - UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) - UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) - - END DO - END DO - -call exchange_elem(UV_back, partit) - -DO nz=1, nl-1 - uuu=0.0_8 - uuu=UV_back(1,nz,:) - call smooth_elem(uuu,smooth_back_tend, partit, mesh) - UV_back(1,nz,:)=uuu - uuu=0.0_8 - uuu=UV_back(2,nz,:) - call smooth_elem(uuu,smooth_back_tend, partit, mesh) - UV_back(2,nz,:)=uuu -END DO - - DO ed=1, myDim_elem2D - DO nz=1,nlevels(ed)-1 - UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) - UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) - END DO - END DO - - UV_dis_tend=UV_dis!+UV_back - UV_total_tend=UV_dis+UV_back - UV_back_tend=UV_back - uke_dif=uke_d - - call uke_update(dynamics, partit, mesh) - deallocate(V_c,U_c) - deallocate(UV_dis,UV_back) - deallocate(uke_d) - deallocate(uuu) - -end subroutine visc_filt_dbcksc -! -! -!_______________________________________________________________________________ -SUBROUTINE backscatter_coef(partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -USE o_ARRAYS -USE o_PARAM -USE g_CONFIG -use g_comm_auto -IMPLICIT NONE -type(t_mesh), intent(in), target :: mesh -type(t_partit), intent(inout), target :: partit -integer :: elem, nz -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - -!Potentially add the Rossby number scaling to the script... -!check if sign is right! Different in the Jansen paper -!Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) - -v_back=0.0_8 -DO elem=1, myDim_elem2D - DO nz=1,nlevels(elem)-1 -!v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) -!v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct -v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct -!Scaling by sqrt(2*elem_area) or sqrt(elem_area)? - END DO -END DO - -call exchange_elem(v_back, partit) - -end subroutine backscatter_coef -! -! -!_______________________________________________________________________________ -SUBROUTINE uke_update(dynamics, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -use MOD_DYN -USE o_ARRAYS, only: uke_rhs, uke_dif, uke_back, uke_dis, uke, UV_dis_tend, uv_back_tend, uke_rhs_old, & - bvfreq, coriolis_node -USE o_PARAM -USE g_CONFIG -use g_comm_auto -USE g_support -USE g_rotate_grid -IMPLICIT NONE - -!I had to change uke(:) to uke(:,:) to make output and restart work!! - -!Why is it necessary to implement the length of the array? It doesn't work without! -!integer, intent(in) :: t_levels -type(t_dyn) , intent(inout), target :: dynamics -type(t_partit), intent(inout), target :: partit -type(t_mesh) , intent(in) , target :: mesh - -real(kind=8) :: hall, h1_eta, hnz, vol -integer :: elnodes(3), nz, ed, edi, node, j, elem, q -real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) -integer :: kk, nzmax, el -real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso -real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? -real(kind=WP), dimension(:,:,:), pointer :: UV -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" -UV => dynamics%uv(:,:,:) - -!rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered -!rossby_num=2 - -ed=myDim_elem2D+eDim_elem2D -allocate(uuu(ed)) - -uke_back=0.0_8 -uke_dis=0.0_8 -DO ed=1, myDim_elem2D -DO nz=1, nlevels(ed)-1 - uke_dis(nz,ed)=(UV(1,nz,ed)*UV_dis_tend(1,nz,ed)+UV(2,nz,ed)*UV_dis_tend(2,nz,ed)) - uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) -END DO -END DO - -DO nz=1,nl-1 - uuu=0.0_8 - uuu=uke_back(nz,:) - call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? - uke_back(nz,:)=uuu -END DO - - - -!Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice -!Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic -!uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) -ed=myDim_elem2D+eDim_elem2D -allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) -allocate(work_uv(myDim_nod2D+eDim_nod2D)) -allocate(rosb_array(nl-1,ed)) -call exchange_elem(UV, partit) -rosb_array=0._8 -DO nz=1, nl-1 - work_uv=0._WP - DO node=1, myDim_nod2D - vol=0._WP - U_work(nz,node)=0._WP - V_work(nz,node)=0._WP - DO j=1, nod_in_elem2D_num(node) - elem=nod_in_elem2D(j, node) - U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) - V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) - vol=vol+elem_area(elem) - END DO - U_work(nz,node)=U_work(nz,node)/vol - V_work(nz,node)=U_work(nz,node)/vol - END DO - work_uv=U_work(nz,:) - call exchange_nod(work_uv, partit) - U_work(nz,:)=work_uv - work_uv=V_work(nz,:) - call exchange_nod(work_uv, partit) - V_work(nz,:)=work_uv -END DO - - DO el=1,myDim_elem2D - DO nz=1, nlevels(el)-1 - rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& - sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& - (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& - sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) -! hall=hall+hnz - END DO -! rosb_array(el)=rosb_array(el)/hall - END DO -DO ed=1, myDim_elem2D - scaling=1._WP - IF(uke_scaling) then - reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) - rosb=0._wp - elnodes=elem2D_nodes(:, ed) - DO kk=1,3 - c1=0._wp - nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) - !Vertical average; same scaling in the vertical - DO nz=1, nzmax-1 - c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. - END DO - c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min - !Cutoff K_GM depending on (Resolution/Rossby radius) ratio - rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) - END DO - rosb=rosb/3._8 - scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) - END IF - - DO nz=1, nlevels(ed)-1 - elnodes=elem2D_nodes(:,ed) - - !Taking out that one place where it is always weird (Pacific Southern Ocean) - !Should not really be used later on, once we fix the issue with the 1/4 degree grid - if(.not. (TRIM(which_toy)=="soufflet")) then - call elem_center(ed, ex, ey) - !a1=-104.*rad - !a2=-49.*rad - call g2r(-104.*rad, -49.*rad, a1, a2) - dist_reg(1)=ex-a1 - dist_reg(2)=ey-a2 - call trim_cyclic(dist_reg(1)) - dist_reg(1)=dist_reg(1)*elem_cos(ed) - dist_reg=dist_reg*r_earth - len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) - - - !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 - rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) - !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) - uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) - else - rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) - !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) - uke_dis(nz,ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) - end if - - END DO -END DO -deallocate(U_work, V_work) -deallocate(rosb_array) -deallocate(work_uv) -call exchange_elem(uke_dis, partit) -DO nz=1, nl-1 - uuu=uke_dis(nz,:) - call smooth_elem(uuu,smooth_dis, partit, mesh) - uke_dis(nz,:)=uuu -END DO -DO ed=1, myDim_elem2D - DO nz=1,nlevels(ed)-1 - uke_rhs_old(nz,ed)=uke_rhs(nz,ed) - uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) - uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) - END DO -END DO -call exchange_elem(uke, partit) - -deallocate(uuu) -end subroutine uke_update +!!PS ! +!!PS ! +!!PS !_______________________________________________________________________________ +!!PS SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) +!!PS USE MOD_MESH +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS use MOD_DYN +!!PS USE o_ARRAYS, only: v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & +!!PS uke, uke_dif +!!PS USE o_PARAM +!!PS USE g_CONFIG +!!PS USE g_comm_auto +!!PS USE g_support +!!PS USE uke_update_interface +!!PS IMPLICIT NONE +!!PS +!!PS real(kind=8) :: u1, v1, le(2), len, crosslen, vi, uke1 +!!PS integer :: nz, ed, el(2) +!!PS !!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) +!!PS real(kind=8) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) +!!PS real(kind=8) , allocatable :: uuu(:) +!!PS type(t_dyn) , intent(inout), target :: dynamics +!!PS type(t_partit), intent(inout), target :: partit +!!PS type(t_mesh) , intent(in) , target :: mesh +!!PS real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs +!!PS real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c +!!PS #include "associate_part_def.h" +!!PS #include "associate_mesh_def.h" +!!PS #include "associate_part_ass.h" +!!PS #include "associate_mesh_ass.h" +!!PS UV => dynamics%uv(:,:,:) +!!PS UV_rhs => dynamics%uv_rhs(:,:,:) +!!PS U_c => dynamics%work%u_c(:,:) +!!PS V_c => dynamics%work%v_c(:,:) +!!PS +!!PS ! An analog of harmonic viscosity operator. +!!PS ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area +!!PS ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. +!!PS ! The contribution from boundary edges is neglected (free slip). +!!PS ! Filter is applied twice. +!!PS +!!PS ed=myDim_elem2D+eDim_elem2D +!!PS allocate(U_c(nl-1,ed), V_c(nl-1, ed)) +!!PS allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) +!!PS allocate(uke_d(nl-1,ed)) +!!PS allocate(uuu(ed)) +!!PS +!!PS U_c=0.0_8 +!!PS V_c=0.0_8 +!!PS UV_back=0.0_8 +!!PS UV_dis=0.0_8 +!!PS uke_d=0.0_8 +!!PS +!!PS DO ed=1, myDim_edge2D+eDim_edge2D +!!PS if(myList_edge2D(ed)>edge2D_in) cycle +!!PS el=edge_tri(:,ed) +!!PS DO nz=1,minval(nlevels(el))-1 +!!PS u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) +!!PS v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) +!!PS +!!PS U_c(nz,el(1))=U_c(nz,el(1))-u1 +!!PS U_c(nz,el(2))=U_c(nz,el(2))+u1 +!!PS V_c(nz,el(1))=V_c(nz,el(1))-v1 +!!PS V_c(nz,el(2))=V_c(nz,el(2))+v1 +!!PS END DO +!!PS END DO +!!PS +!!PS +!!PS Do ed=1,myDim_elem2D +!!PS len=sqrt(elem_area(ed)) +!!PS len=dt*len/30.0_8 +!!PS Do nz=1,nlevels(ed)-1 +!!PS ! vi has the sense of harmonic viscosity coefficient because of +!!PS ! the division by area in the end +!!PS ! ==== +!!PS ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) +!!PS ! ==== +!!PS vi=max(0.2_8,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len +!!PS U_c(nz,ed)=-U_c(nz,ed)*vi +!!PS V_c(nz,ed)=-V_c(nz,ed)*vi +!!PS END DO +!!PS end do +!!PS +!!PS +!!PS call exchange_elem(U_c, partit) +!!PS call exchange_elem(V_c, partit) +!!PS +!!PS DO ed=1, myDim_edge2D+eDim_edge2D +!!PS if(myList_edge2D(ed)>edge2D_in) cycle +!!PS el=edge_tri(:,ed) +!!PS le=edge_dxdy(:,ed) +!!PS le(1)=le(1)*sum(elem_cos(el))*0.25_8 +!!PS len=sqrt(le(1)**2+le(2)**2)*r_earth +!!PS le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) +!!PS le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) +!!PS crosslen=sqrt(le(1)**2+le(2)**2) +!!PS DO nz=1,minval(nlevels(el))-1 +!!PS vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen +!!PS !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed +!!PS !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed +!!PS !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where +!!PS ! Visc is small and decoupling might happen +!!PS !Backscatter contribution +!!PS u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi +!!PS v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi +!!PS +!!PS !UKE diffusion +!!PS vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen +!!PS +!!PS uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi +!!PS +!!PS +!!PS UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) +!!PS UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) +!!PS UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) +!!PS UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) +!!PS +!!PS !Correct scaling for the diffusion? +!!PS uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) +!!PS uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) +!!PS +!!PS +!!PS +!!PS !Biharmonic contribution +!!PS u1=(U_c(nz,el(1))-U_c(nz,el(2))) +!!PS v1=(V_c(nz,el(1))-V_c(nz,el(2))) +!!PS +!!PS UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) +!!PS UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) +!!PS UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) +!!PS UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) +!!PS +!!PS END DO +!!PS END DO +!!PS +!!PS call exchange_elem(UV_back, partit) +!!PS +!!PS DO nz=1, nl-1 +!!PS uuu=0.0_8 +!!PS uuu=UV_back(1,nz,:) +!!PS call smooth_elem(uuu,smooth_back_tend, partit, mesh) +!!PS UV_back(1,nz,:)=uuu +!!PS uuu=0.0_8 +!!PS uuu=UV_back(2,nz,:) +!!PS call smooth_elem(uuu,smooth_back_tend, partit, mesh) +!!PS UV_back(2,nz,:)=uuu +!!PS END DO +!!PS +!!PS DO ed=1, myDim_elem2D +!!PS DO nz=1,nlevels(ed)-1 +!!PS UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) +!!PS UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) +!!PS END DO +!!PS END DO +!!PS +!!PS UV_dis_tend=UV_dis!+UV_back +!!PS UV_total_tend=UV_dis+UV_back +!!PS UV_back_tend=UV_back +!!PS uke_dif=uke_d +!!PS +!!PS call uke_update(dynamics, partit, mesh) +!!PS deallocate(V_c,U_c) +!!PS deallocate(UV_dis,UV_back) +!!PS deallocate(uke_d) +!!PS deallocate(uuu) +!!PS +!!PS end subroutine visc_filt_dbcksc +!!PS ! +!!PS ! +!!PS !_______________________________________________________________________________ +!!PS SUBROUTINE backscatter_coef(partit, mesh) +!!PS USE MOD_MESH +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS USE o_ARRAYS +!!PS USE o_PARAM +!!PS USE g_CONFIG +!!PS use g_comm_auto +!!PS IMPLICIT NONE +!!PS type(t_mesh), intent(in), target :: mesh +!!PS type(t_partit), intent(inout), target :: partit +!!PS integer :: elem, nz +!!PS #include "associate_part_def.h" +!!PS #include "associate_mesh_def.h" +!!PS #include "associate_part_ass.h" +!!PS #include "associate_mesh_ass.h" +!!PS +!!PS !Potentially add the Rossby number scaling to the script... +!!PS !check if sign is right! Different in the Jansen paper +!!PS !Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) +!!PS +!!PS v_back=0.0_8 +!!PS DO elem=1, myDim_elem2D +!!PS DO nz=1,nlevels(elem)-1 +!!PS !v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) +!!PS !v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct +!!PS v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct +!!PS !Scaling by sqrt(2*elem_area) or sqrt(elem_area)? +!!PS END DO +!!PS END DO +!!PS +!!PS call exchange_elem(v_back, partit) +!!PS +!!PS end subroutine backscatter_coef +!!PS ! +!!PS ! +!!PS !_______________________________________________________________________________ +!!PS SUBROUTINE uke_update(dynamics, partit, mesh) +!!PS USE MOD_MESH +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS use MOD_DYN +!!PS USE o_ARRAYS, only: uke_rhs, uke_dif, uke_back, uke_dis, uke, UV_dis_tend, uv_back_tend, uke_rhs_old, & +!!PS bvfreq, coriolis_node +!!PS USE o_PARAM +!!PS USE g_CONFIG +!!PS use g_comm_auto +!!PS USE g_support +!!PS USE g_rotate_grid +!!PS IMPLICIT NONE +!!PS +!!PS !I had to change uke(:) to uke(:,:) to make output and restart work!! +!!PS +!!PS !Why is it necessary to implement the length of the array? It doesn't work without! +!!PS !integer, intent(in) :: t_levels +!!PS type(t_dyn) , intent(inout), target :: dynamics +!!PS type(t_partit), intent(inout), target :: partit +!!PS type(t_mesh) , intent(in) , target :: mesh +!!PS +!!PS real(kind=8) :: hall, h1_eta, hnz, vol +!!PS integer :: elnodes(3), nz, ed, edi, node, j, elem, q +!!PS real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) +!!PS integer :: kk, nzmax, el +!!PS real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso +!!PS real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? +!!PS real(kind=WP), dimension(:,:,:), pointer :: UV +!!PS #include "associate_part_def.h" +!!PS #include "associate_mesh_def.h" +!!PS #include "associate_part_ass.h" +!!PS #include "associate_mesh_ass.h" +!!PS UV => dynamics%uv(:,:,:) +!!PS +!!PS !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered +!!PS !rossby_num=2 +!!PS +!!PS ed=myDim_elem2D+eDim_elem2D +!!PS allocate(uuu(ed)) +!!PS +!!PS uke_back=0.0_8 +!!PS uke_dis=0.0_8 +!!PS DO ed=1, myDim_elem2D +!!PS DO nz=1, nlevels(ed)-1 +!!PS uke_dis(nz,ed)=(UV(1,nz,ed)*UV_dis_tend(1,nz,ed)+UV(2,nz,ed)*UV_dis_tend(2,nz,ed)) +!!PS uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) +!!PS END DO +!!PS END DO +!!PS +!!PS DO nz=1,nl-1 +!!PS uuu=0.0_8 +!!PS uuu=uke_back(nz,:) +!!PS call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? +!!PS uke_back(nz,:)=uuu +!!PS END DO +!!PS +!!PS +!!PS +!!PS !Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice +!!PS !Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic +!!PS !uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) +!!PS ed=myDim_elem2D+eDim_elem2D +!!PS allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) +!!PS allocate(work_uv(myDim_nod2D+eDim_nod2D)) +!!PS allocate(rosb_array(nl-1,ed)) +!!PS call exchange_elem(UV, partit) +!!PS rosb_array=0._8 +!!PS DO nz=1, nl-1 +!!PS work_uv=0._WP +!!PS DO node=1, myDim_nod2D +!!PS vol=0._WP +!!PS U_work(nz,node)=0._WP +!!PS V_work(nz,node)=0._WP +!!PS DO j=1, nod_in_elem2D_num(node) +!!PS elem=nod_in_elem2D(j, node) +!!PS U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) +!!PS V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) +!!PS vol=vol+elem_area(elem) +!!PS END DO +!!PS U_work(nz,node)=U_work(nz,node)/vol +!!PS V_work(nz,node)=U_work(nz,node)/vol +!!PS END DO +!!PS work_uv=U_work(nz,:) +!!PS call exchange_nod(work_uv, partit) +!!PS U_work(nz,:)=work_uv +!!PS work_uv=V_work(nz,:) +!!PS call exchange_nod(work_uv, partit) +!!PS V_work(nz,:)=work_uv +!!PS END DO +!!PS +!!PS DO el=1,myDim_elem2D +!!PS DO nz=1, nlevels(el)-1 +!!PS rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& +!!PS sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& +!!PS (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& +!!PS sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) +!!PS ! hall=hall+hnz +!!PS END DO +!!PS ! rosb_array(el)=rosb_array(el)/hall +!!PS END DO +!!PS DO ed=1, myDim_elem2D +!!PS scaling=1._WP +!!PS IF(uke_scaling) then +!!PS reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) +!!PS rosb=0._wp +!!PS elnodes=elem2D_nodes(:, ed) +!!PS DO kk=1,3 +!!PS c1=0._wp +!!PS nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) +!!PS !Vertical average; same scaling in the vertical +!!PS DO nz=1, nzmax-1 +!!PS c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. +!!PS END DO +!!PS c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min +!!PS !Cutoff K_GM depending on (Resolution/Rossby radius) ratio +!!PS rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) +!!PS END DO +!!PS rosb=rosb/3._8 +!!PS scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) +!!PS END IF +!!PS +!!PS DO nz=1, nlevels(ed)-1 +!!PS elnodes=elem2D_nodes(:,ed) +!!PS +!!PS !Taking out that one place where it is always weird (Pacific Southern Ocean) +!!PS !Should not really be used later on, once we fix the issue with the 1/4 degree grid +!!PS if(.not. (TRIM(which_toy)=="soufflet")) then +!!PS call elem_center(ed, ex, ey) +!!PS !a1=-104.*rad +!!PS !a2=-49.*rad +!!PS call g2r(-104.*rad, -49.*rad, a1, a2) +!!PS dist_reg(1)=ex-a1 +!!PS dist_reg(2)=ey-a2 +!!PS call trim_cyclic(dist_reg(1)) +!!PS dist_reg(1)=dist_reg(1)*elem_cos(ed) +!!PS dist_reg=dist_reg*r_earth +!!PS len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) +!!PS +!!PS +!!PS !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 +!!PS rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) +!!PS !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) +!!PS uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) +!!PS else +!!PS rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) +!!PS !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) +!!PS uke_dis(nz,ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) +!!PS end if +!!PS +!!PS END DO +!!PS END DO +!!PS deallocate(U_work, V_work) +!!PS deallocate(rosb_array) +!!PS deallocate(work_uv) +!!PS call exchange_elem(uke_dis, partit) +!!PS DO nz=1, nl-1 +!!PS uuu=uke_dis(nz,:) +!!PS call smooth_elem(uuu,smooth_dis, partit, mesh) +!!PS uke_dis(nz,:)=uuu +!!PS END DO +!!PS DO ed=1, myDim_elem2D +!!PS DO nz=1,nlevels(ed)-1 +!!PS uke_rhs_old(nz,ed)=uke_rhs(nz,ed) +!!PS uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) +!!PS uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) +!!PS END DO +!!PS END DO +!!PS call exchange_elem(uke, partit) +!!PS +!!PS deallocate(uuu) +!!PS end subroutine uke_update ! ! !_______________________________________________________________________________ diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 7d142e043..071627d84 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -86,6 +86,7 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) use g_cvmix_pp use g_cvmix_kpp use g_cvmix_tidal +use g_backscatter use Toy_Channel_Soufflet use oce_initial_state_interface use oce_adv_tra_fct_interfaces @@ -240,6 +241,11 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_thickness_ale'//achar(27)//'[0m' call init_thickness_ale(dynamics, partit, mesh) + !___________________________________________________________________________ + ! initialise arrays that are needed for backscatter_coef + if(dynamics%visc_opt==8) call init_backscatter(partit, mesh) + + !___________________________________________________________________________ if(partit%mype==0) write(*,*) 'Initial state' if (w_split .and. partit%mype==0) then @@ -577,27 +583,27 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Backscatter arrays ! ================= -if(visc_option==8) then - -allocate(uke(nl-1,elem_size)) ! Unresolved kinetic energy for backscatter coefficient -allocate(v_back(nl-1,elem_size)) ! Backscatter viscosity -allocate(uke_dis(nl-1,elem_size), uke_back(nl-1,elem_size)) -allocate(uke_dif(nl-1,elem_size)) -allocate(uke_rhs(nl-1,elem_size), uke_rhs_old(nl-1,elem_size)) -allocate(UV_dis_tend(2,nl-1,elem_size), UV_back_tend(2,nl-1,elem_size)) -allocate(UV_total_tend(2,nl-1,elem_size)) - -uke=0.0_8 -v_back=0.0_8 -uke_dis=0.0_8 -uke_dif=0.0_8 -uke_back=0.0_8 -uke_rhs=0.0_8 -uke_rhs_old=0.0_8 -UV_dis_tend=0.0_8 -UV_back_tend=0.0_8 -UV_total_tend=0.0_8 -end if +!!PS if(visc_option==8) then +!!PS +!!PS allocate(uke(nl-1,elem_size)) ! Unresolved kinetic energy for backscatter coefficient +!!PS allocate(v_back(nl-1,elem_size)) ! Backscatter viscosity +!!PS allocate(uke_dis(nl-1,elem_size), uke_back(nl-1,elem_size)) +!!PS allocate(uke_dif(nl-1,elem_size)) +!!PS allocate(uke_rhs(nl-1,elem_size), uke_rhs_old(nl-1,elem_size)) +!!PS allocate(UV_dis_tend(2,nl-1,elem_size), UV_back_tend(2,nl-1,elem_size)) +!!PS allocate(UV_total_tend(2,nl-1,elem_size)) +!!PS +!!PS uke=0.0_8 +!!PS v_back=0.0_8 +!!PS uke_dis=0.0_8 +!!PS uke_dif=0.0_8 +!!PS uke_back=0.0_8 +!!PS uke_rhs=0.0_8 +!!PS uke_rhs_old=0.0_8 +!!PS UV_dis_tend=0.0_8 +!!PS UV_back_tend=0.0_8 +!!PS UV_total_tend=0.0_8 +!!PS end if !Velocities at nodes !!PS allocate(Unode(2,nl-1,node_size)) From 7d77f70a1ab2b0fbb2fe9968a3cb1dcbeeca07e4 Mon Sep 17 00:00:00 2001 From: a270042 Date: Sun, 7 Nov 2021 16:56:42 +0100 Subject: [PATCH 454/909] set back flag_debug=.false. --- src/gen_modules_config.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index b9d3d1807..f265ea898 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -107,7 +107,7 @@ module g_config real(kind=WP) :: cavity_partial_cell_thresh=0.0_WP ! same as partial_cell_tresh but for surface logical :: toy_ocean=.false. ! Ersatz forcing has to be supplied character(100) :: which_toy="soufflet" - logical :: flag_debug=.true. ! prints name of actual subroutine he is in + logical :: flag_debug=.false. ! prints name of actual subroutine he is in logical :: flag_warn_cflz=.true. ! switches off cflz warning namelist /run_config/ use_ice,use_floatice, use_sw_pene, use_cavity, & use_cavity_partial_cell, cavity_partial_cell_thresh, toy_ocean, which_toy, flag_debug, flag_warn_cflz From 82468eca6614335850b42a4bb53710ba1c974231 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 8 Nov 2021 11:08:24 +0100 Subject: [PATCH 455/909] reshaped structure of oce_ale_tracers calls before making OpenMP directives --- src/oce_ale_tracer.F90 | 138 +++++++++++----------------------------- src/oce_tracer_mod.F90 | 139 +++++++++++++++++++---------------------- 2 files changed, 99 insertions(+), 178 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index ed5145ec2..f28c9d855 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -12,21 +12,6 @@ subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) end subroutine end interface end module -module adv_tracers_ale_interface - interface - subroutine adv_tracers_ale(dt, tr_num, tracer, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - real(kind=WP), intent(in), target :: dt - integer, intent(in), target :: tr_num - type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface -end module module diff_ver_part_expl_ale_interface interface subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) @@ -138,19 +123,21 @@ subroutine solve_tracers_ale(tracers, partit, mesh) use g_comm_auto use o_tracers use Toy_Channel_Soufflet - use adv_tracers_ale_interface use diff_tracers_ale_interface - + use oce_adv_tra_driver_interfaces implicit none - type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - integer :: tr_num, node, nzmax, nzmin - + type(t_tracer), intent(inout), target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: tr_num, node, elem, nzmax, nzmin + real(kind=WP), pointer, dimension (:,:) :: del_ttf #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + + del_ttf => tracers%work%del_ttf + !___________________________________________________________________________ if (SPP) call cal_rejected_salt(partit, mesh) if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) @@ -159,9 +146,13 @@ subroutine solve_tracers_ale(tracers, partit, mesh) ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 ! 2. bolus velocities are used only for advecting tracers and shall be subtracted back afterwards if (Fer_GM) then - UV =UV +fer_UV - Wvel_e=Wvel_e+fer_Wvel - Wvel =Wvel +fer_Wvel + do elem=1, myDim_elem2D+eDim_elem2D + UV(:, :, elem) =UV(:, :, elem) + fer_UV(:, :, elem) + end do + do node=1, myDim_nod2D+eDim_nod2D + Wvel_e(:, node)=Wvel_e(:, node)+fer_Wvel(:, node) + Wvel (:, node)=Wvel (:, node)+fer_Wvel(:, node) + end do end if !___________________________________________________________________________ ! loop over all tracers @@ -172,7 +163,12 @@ subroutine solve_tracers_ale(tracers, partit, mesh) call init_tracers_AB(tr_num, tracers, partit, mesh) ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' - call adv_tracers_ale(dt, tr_num, tracers, partit, mesh) + ! it will update del_ttf with contributions from horizontal and vertical advection parts (del_ttf_advhoriz and del_ttf_advvert) + call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) + !___________________________________________________________________________ + ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. + ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. + tracers%data(tr_num)%valuesAB(:,:)=tracers%data(tr_num)%values(:,:) !DS: check that this is the right place! ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' call diff_tracers_ale(tr_num, tracers, partit, mesh) @@ -194,10 +190,14 @@ subroutine solve_tracers_ale(tracers, partit, mesh) !___________________________________________________________________________ ! subtract the the bolus velocities back from 3D velocities: if (Fer_GM) then - UV =UV -fer_UV - Wvel_e=Wvel_e-fer_Wvel - Wvel =Wvel -fer_Wvel - end if + do elem=1, myDim_elem2D+eDim_elem2D + UV(:, :, elem) =UV(:, :, elem) - fer_UV(:, :, elem) + end do + do node=1, myDim_nod2D+eDim_nod2D + Wvel_e(:, node)=Wvel_e(:, node)-fer_Wvel(:, node) + Wvel (:, node)=Wvel (:, node)-fer_Wvel(:, node) + end do + end if !___________________________________________________________________________ ! to avoid crash with high salinities when coupled to atmosphere ! --> if we do only where (tr_arr(:,:,2) < 3._WP ) we also fill up the bottom @@ -218,61 +218,6 @@ end subroutine solve_tracers_ale ! ! !=============================================================================== -subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) - use g_config, only: flag_debug - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use o_arrays - use diagnostics, only: ldiag_DVD, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & - compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd -! use adv_tracers_muscle_ale_interface -! use adv_tracers_vert_ppm_ale_interface - use oce_adv_tra_driver_interfaces - implicit none - real(kind=WP), intent(in), target :: dt - integer :: node, nz - integer, intent(in) :: tr_num - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - ! del_ttf ... initialised and setted to zero in call init_tracers_AB(tr_num) - ! --> del_ttf ... equivalent to R_T^n in Danilov etal FESOM2: "from finite element - ! to finite volume". At the end R_T^n should contain all advection therms and - ! the terms due to diffusion. - ! del_ttf=0d0 - - !___________________________________________________________________________ - ! if ldiag_DVD=.true. --> compute tracer second moments for the calcualtion - ! of discret variance decay - if (ldiag_DVD .and. tr_num <= 2) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd_2ndmoment'//achar(27)//'[0m' - call compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers, partit, mesh) - end if - - !___________________________________________________________________________ - ! horizontal ale tracer advection - ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... - tracers%work%del_ttf_advhoriz = 0.0_WP - tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) - !___________________________________________________________________________ - ! update array for total tracer flux del_ttf with the fluxes from horizontal - ! and vertical advection - tracers%work%del_ttf=tracers%work%del_ttf+tracers%work%del_ttf_advhoriz+tracers%work%del_ttf_advvert - - !___________________________________________________________________________ - ! compute discrete variance decay after Burchard and Rennau 2008 - if (ldiag_DVD .and. tr_num <= 2) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd'//achar(27)//'[0m' - call compute_diag_dvd(tr_num, tracers, partit, mesh) - end if - -end subroutine adv_tracers_ale -! -! -!=============================================================================== subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT @@ -301,35 +246,23 @@ subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) del_ttf => tracers%work%del_ttf !___________________________________________________________________________ - ! convert tr_arr_old(:,:,tr_num)=ttr_n-0.5 --> prepare to calc ttr_n+0.5 - ! eliminate AB (adams bashfort) interpolates tracer, which is only needed for - ! tracer advection. For diffusion only need tracer from previouse time step - tracers%data(tr_num)%valuesAB(:,:)=tracers%data(tr_num)%values(:,:) !DS: check that this is the right place! - !___________________________________________________________________________ ! do horizontal diffusiion ! write there also horizontal diffusion rhs to del_ttf which is equal the R_T^n ! in danilovs srcipt ! includes Redi diffusivity if Redi=.true. call diff_part_hor_redi(tr_num, tracers, partit, mesh) ! seems to be ~9% faster than diff_part_hor !___________________________________________________________________________ - ! do vertical diffusion: explicite + ! do vertical diffusion: explicit if (.not. tracers%i_vert_diff) call diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) ! A projection of horizontal Redi diffussivity onto vertical. This par contains horizontal ! derivatives and has to be computed explicitly! - if (Redi) call diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) - + if (Redi) call diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) !___________________________________________________________________________ - ! Update tracers --> calculate T* see Danilov etal "FESOM2 from finite elements - ! to finite volume" + ! Update tracers --> calculate T* see Danilov et al. (2017) ! T* = (dt*R_T^n + h^(n-0.5)*T^(n-0.5))/h^(n+0.5) do n=1, myDim_nod2D nzmax=nlevels_nod2D(n)-1 nzmin=ulevels_nod2D(n) - !!PS del_ttf(1:nzmax,n)=del_ttf(1:nzmax,n)+tr_arr(1:nzmax,n,tr_num)* & - !!PS (hnode(1:nzmax,n)-hnode_new(1:nzmax,n)) - !!PS tr_arr(1:nzmax,n,tr_num)=tr_arr(1:nzmax,n,tr_num)+ & - !!PS del_ttf(1:nzmax,n)/hnode_new(1:nzmax,n) - del_ttf(nzmin:nzmax,n)=del_ttf(nzmin:nzmax,n)+tracers%data(tr_num)%values(nzmin:nzmax,n)* & (hnode(nzmin:nzmax,n)-hnode_new(nzmin:nzmax,n)) tracers%data(tr_num)%values(nzmin:nzmax,n)=tracers%data(tr_num)%values(nzmin:nzmax,n)+ & @@ -347,9 +280,8 @@ subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) end if !We DO not set del_ttf to zero because it will not be used in this timestep anymore - !init_tracers will set it to zero for the next timestep - !init_tracers will set it to zero for the next timestep - if (tracers%smooth_bh_tra) then + !init_tracers_AB will set it to zero for the next timestep + if (tracers%smooth_bh_tra) then call diff_part_bh(tr_num, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) end if end subroutine diff_tracers_ale diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index 5b0528724..0334328b4 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -6,20 +6,50 @@ MODULE o_tracers USE MOD_PARSUP IMPLICIT NONE -interface - subroutine tracer_gradient_z(ttf, partit, mesh) +CONTAINS +! +! +!=============================================================================== +SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) USE MOD_MESH - USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP + USE MOD_TRACER + use g_config, only: flag_debug + use o_arrays + use g_comm_auto IMPLICIT NONE + integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - real(kind=WP) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) - end subroutine -end interface + type(t_tracer), intent(inout), target :: tracers + integer :: n,nz -CONTAINS + do n=1, partit%myDim_nod2D+partit%eDim_nod2D + ! del_ttf will contain all advection / diffusion contributions for this tracer. Set it to 0 at the beginning! + tracers%work%del_ttf(:, n) = 0.0_WP + ! AB interpolation + tracers%data(tr_num)%valuesAB(:, n)=-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:, n)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:, n) + end do + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' + call tracer_gradient_elements(tracers%data(tr_num)%valuesAB, partit, mesh) + call exchange_elem_begin(tr_xy, partit) + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' + call tracer_gradient_z(tracers%data(tr_num)%values, partit, mesh) !WHY NOT AB HERE? DSIDOREN! + call exchange_elem_end(partit) ! tr_xy used in fill_up_dn_grad + call exchange_nod_begin(tr_z, partit) ! not used in fill_up_dn_grad + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call fill_up_dn_grad'//achar(27)//'[0m' + call fill_up_dn_grad(tracers%work, partit, mesh) + call exchange_nod_end(partit) ! tr_z halos should have arrived by now. + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' + call tracer_gradient_elements(tracers%data(tr_num)%values, partit, mesh) !redefine tr_arr to the current timestep + call exchange_elem(tr_xy, partit) + +END SUBROUTINE init_tracers_AB ! ! !======================================================================= @@ -58,44 +88,42 @@ END SUBROUTINE tracer_gradient_elements ! ! !======================================================================================== -SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) +SUBROUTINE tracer_gradient_z(ttf, partit, mesh) + !computes vertical gradient of tracer USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER - use g_config, only: flag_debug - use o_arrays - use g_comm_auto + USE o_PARAM + USE o_ARRAYS + USE g_CONFIG IMPLICIT NONE - integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - integer :: n,nz - !filling work arrays - tracers%work%del_ttf=0.0_WP - - !AB interpolation - tracers%data(tr_num)%valuesAB(:,:)=-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:,:)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:,:) - - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tracers%data(tr_num)%valuesAB, partit, mesh) - call exchange_elem_begin(tr_xy, partit) - - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' - call tracer_gradient_z(tracers%data(tr_num)%values, partit, mesh) !WHY NOT AB HERE? DSIDOREN! - call exchange_elem_end(partit) ! tr_xy used in fill_up_dn_grad - call exchange_nod_begin(tr_z, partit) ! not used in fill_up_dn_grad - - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call fill_up_dn_grad'//achar(27)//'[0m' - call fill_up_dn_grad(tracers%work, partit, mesh) - call exchange_nod_end(partit) ! tr_z halos should have arrived by now. + real(kind=WP) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: dz + integer :: n, nz, nzmin, nzmax - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tracers%data(tr_num)%values, partit, mesh) !redefine tr_arr to the current timestep - call exchange_elem(tr_xy, partit) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" -END SUBROUTINE init_tracers_AB + DO n=1, myDim_nod2D+eDim_nod2D + !!PS nlev=nlevels_nod2D(n) + nzmax=nlevels_nod2D(n) + nzmin=ulevels_nod2D(n) + !!PS DO nz=2, nlev-1 + DO nz=nzmin+1, nzmax-1 + dz=0.5_WP*(hnode_new(nz-1,n)+hnode_new(nz,n)) + tr_z(nz, n)=(ttf(nz-1,n)-ttf(nz,n))/dz + END DO + !!PS tr_z(1, n)=0.0_WP + !!PS tr_z(nlev, n)=0.0_WP + tr_z(nzmin, n)=0.0_WP + tr_z(nzmax, n)=0.0_WP + END DO +END SUBROUTINE tracer_gradient_z ! ! !======================================================================================== @@ -141,42 +169,3 @@ SUBROUTINE relax_to_clim(tr_num, tracers, partit, mesh) END IF END SUBROUTINE relax_to_clim END MODULE o_tracers -! -! -!======================================================================================== -SUBROUTINE tracer_gradient_z(ttf, partit, mesh) - !computes vertical gradient of tracer - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_TRACER - USE o_PARAM - USE o_ARRAYS - USE g_CONFIG - IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - real(kind=WP) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP) :: dz - integer :: n, nz, nzmin, nzmax - -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - - DO n=1, myDim_nod2D+eDim_nod2D - !!PS nlev=nlevels_nod2D(n) - nzmax=nlevels_nod2D(n) - nzmin=ulevels_nod2D(n) - !!PS DO nz=2, nlev-1 - DO nz=nzmin+1, nzmax-1 - dz=0.5_WP*(hnode_new(nz-1,n)+hnode_new(nz,n)) - tr_z(nz, n)=(ttf(nz-1,n)-ttf(nz,n))/dz - END DO - !!PS tr_z(1, n)=0.0_WP - !!PS tr_z(nlev, n)=0.0_WP - tr_z(nzmin, n)=0.0_WP - tr_z(nzmax, n)=0.0_WP - END DO -END SUBROUTINE tracer_gradient_z From 8be5c914047d32a40d680e9864896c9750e8211c Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 8 Nov 2021 11:22:21 +0100 Subject: [PATCH 456/909] forgot to set del_ttf_advhoriz and del_ttf_advvert to zero before doing tracer advection --- src/oce_tracer_mod.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index 0334328b4..da2d7baa7 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -27,7 +27,9 @@ SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) do n=1, partit%myDim_nod2D+partit%eDim_nod2D ! del_ttf will contain all advection / diffusion contributions for this tracer. Set it to 0 at the beginning! - tracers%work%del_ttf(:, n) = 0.0_WP + tracers%work%del_ttf (:, n) = 0.0_WP + tracers%work%del_ttf_advhoriz (:, n) = 0.0_WP + tracers%work%del_ttf_advvert (:, n) = 0.0_WP ! AB interpolation tracers%data(tr_num)%valuesAB(:, n)=-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:, n)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:, n) end do From 73f761bbdc82515b0c4087b57aa4ed959ce760e3 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 8 Nov 2021 11:55:30 +0100 Subject: [PATCH 457/909] as usually some bug fixes due to the lack of git knowledge :) --- src/oce_adv_tra_driver.F90 | 2 +- src/oce_ale_tracer.F90 | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 734cf55bd..b3a1bd8b1 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -110,7 +110,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) ! update the LO solution for horizontal contribution !$OMP PARALLEL DO do n=1, myDim_nod2D+eDim_nod2D - fct_LO(:,n)=0.0_WP + fct_LO(:,n) = 0.0_WP end do !$OMP END PARALLEL DO !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(e, enodes, el, nl1, nu1, nl2, nu2, nz) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index f28c9d855..f4c1f75e2 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -165,6 +165,14 @@ subroutine solve_tracers_ale(tracers, partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' ! it will update del_ttf with contributions from horizontal and vertical advection parts (del_ttf_advhoriz and del_ttf_advvert) call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) + !___________________________________________________________________________ + ! update array for total tracer flux del_ttf with the fluxes from horizontal + ! and vertical advection +!$OMP DO + do node=1, myDim_nod2d + tracers%work%del_ttf(:, node)=tracers%work%del_ttf(:, node)+tracers%work%del_ttf_advhoriz(:, node)+tracers%work%del_ttf_advvert(:, node) + end do +!$OMP END DO !___________________________________________________________________________ ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. From 59a0c15d1edee1c11021ca5cf4ae82d5d611770b Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 8 Nov 2021 12:00:26 +0100 Subject: [PATCH 458/909] let us update del_ttf due to advection terms in solve_tracers_ale directly to make it visible what happens --- src/oce_ale_tracer.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index f4c1f75e2..addb6e4a4 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -168,11 +168,11 @@ subroutine solve_tracers_ale(tracers, partit, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection -!$OMP DO +!$OMP PARALLEL DO do node=1, myDim_nod2d tracers%work%del_ttf(:, node)=tracers%work%del_ttf(:, node)+tracers%work%del_ttf_advhoriz(:, node)+tracers%work%del_ttf_advvert(:, node) end do -!$OMP END DO +!$OMP END PARALLEL DO !___________________________________________________________________________ ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. From beb9fe92a459cfc34d01cbba0cd37ef66428314a Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 8 Nov 2021 13:19:46 +0100 Subject: [PATCH 459/909] OpenMP in the main loop if solve_tracers_ale. the main job just starts :) --- src/oce_ale_tracer.F90 | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index addb6e4a4..a73719608 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -146,13 +146,17 @@ subroutine solve_tracers_ale(tracers, partit, mesh) ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 ! 2. bolus velocities are used only for advecting tracers and shall be subtracted back afterwards if (Fer_GM) then +!$OMP PARALLEL DO do elem=1, myDim_elem2D+eDim_elem2D UV(:, :, elem) =UV(:, :, elem) + fer_UV(:, :, elem) end do +!$OMP END PARALLEL DO +!$OMP PARALLEL DO do node=1, myDim_nod2D+eDim_nod2D Wvel_e(:, node)=Wvel_e(:, node)+fer_Wvel(:, node) Wvel (:, node)=Wvel (:, node)+fer_Wvel(:, node) end do +!$OMP END PARALLEL DO end if !___________________________________________________________________________ ! loop over all tracers @@ -171,12 +175,12 @@ subroutine solve_tracers_ale(tracers, partit, mesh) !$OMP PARALLEL DO do node=1, myDim_nod2d tracers%work%del_ttf(:, node)=tracers%work%del_ttf(:, node)+tracers%work%del_ttf_advhoriz(:, node)+tracers%work%del_ttf_advvert(:, node) + !___________________________________________________________________________ + ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. + ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. + tracers%data(tr_num)%valuesAB(:, node)=tracers%data(tr_num)%values(:, node) !DS: check that this is the right place! end do !$OMP END PARALLEL DO - !___________________________________________________________________________ - ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. - ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. - tracers%data(tr_num)%valuesAB(:,:)=tracers%data(tr_num)%values(:,:) !DS: check that this is the right place! ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' call diff_tracers_ale(tr_num, tracers, partit, mesh) @@ -191,26 +195,32 @@ subroutine solve_tracers_ale(tracers, partit, mesh) call exchange_nod(tracers%data(tr_num)%values(:,:), partit) end do !___________________________________________________________________________ + ! 3D restoring for "passive" tracers + !!!$OMPTODO: add OpenMP later, not needed right now! do tr_num=1, ptracers_restore_total - tracers%data(ptracers_restore(tr_num)%locid)%values(:,ptracers_restore(tr_num)%ind2)=1.0_WP + tracers%data(ptracers_restore(tr_num)%locid)%values(:, ptracers_restore(tr_num)%ind2)=1.0_WP end do - !___________________________________________________________________________ ! subtract the the bolus velocities back from 3D velocities: if (Fer_GM) then +!$OMP PARALLEL DO do elem=1, myDim_elem2D+eDim_elem2D UV(:, :, elem) =UV(:, :, elem) - fer_UV(:, :, elem) end do +!$OMP END PARALLEL DO +!$OMP PARALLEL DO do node=1, myDim_nod2D+eDim_nod2D Wvel_e(:, node)=Wvel_e(:, node)-fer_Wvel(:, node) Wvel (:, node)=Wvel (:, node)-fer_Wvel(:, node) end do +!$OMP END PARALLEL DO end if !___________________________________________________________________________ ! to avoid crash with high salinities when coupled to atmosphere ! --> if we do only where (tr_arr(:,:,2) < 3._WP ) we also fill up the bottom ! topogrpahy with values which are then writte into the output --> thats why ! do node=1,.... and tr_arr(node,1:nzmax,2) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nzmin, nzmax) do node=1,myDim_nod2D+eDim_nod2D nzmax=nlevels_nod2D(node)-1 nzmin=ulevels_nod2D(node) @@ -222,6 +232,7 @@ subroutine solve_tracers_ale(tracers, partit, mesh) tracers%data(2)%values(nzmin:nzmax,node) = 3._WP end where end do +!$OMP END PARALLEL DO end subroutine solve_tracers_ale ! ! From ea2e1ea05c37ef711a9022bb6484ab08b5e50d3b Mon Sep 17 00:00:00 2001 From: a270042 Date: Mon, 8 Nov 2021 16:11:38 +0100 Subject: [PATCH 460/909] add new namelist for dynamics derived type --> namelist.dyn. exchange namelist and derived type parameters throughout the code. Since new namelist is introduced github testcase will fail for the moment --- config/namelist.dyn | 27 ++ src/MOD_DYN.F90 | 32 +- src/gen_modules_diag.F90 | 128 +++++++- src/io_meandata.F90 | 34 ++- src/io_restart.F90 | 2 +- src/oce_adv_tra_driver.F90 | 16 +- src/oce_ale.F90 | 18 +- src/oce_ale_tracer.F90 | 14 +- src/oce_ale_vel_rhs.F90 | 4 +- src/oce_dyn.F90 | 583 ++----------------------------------- src/oce_modules.F90 | 25 +- src/oce_setup_step.F90 | 121 +++----- 12 files changed, 288 insertions(+), 716 deletions(-) create mode 100644 config/namelist.dyn diff --git a/config/namelist.dyn b/config/namelist.dyn new file mode 100644 index 000000000..c729acea1 --- /dev/null +++ b/config/namelist.dyn @@ -0,0 +1,27 @@ +&dynamics_visc +visc_gamma0 = 0.003 ! [m/s], backgroung viscosity= gamma0*len, it should be as small a s possible (keep it < 0.01 m/s). +visc_gamma1 = 0.1 ! [nodim], for computation of the flow aware viscosity +visc_gamma2 = 0.285 ! [s/m], is only used in easy backscatter option +visc_easybsreturn= 1.5 + +opt_visc = 5 +! 5=Kinematic (easy) Backscatter +! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) +! 7=Biharmonic flow aware (viscosity depends on velocity differences) +! 8=Dynamic Backscatter + +use_ivertvisc= .true. +/ + +&dynamics_general +momadv_opt = 2 ! option for momentum advection in moment only =2 +use_freeslip = .false. ! Switch on free slip +use_wsplit = .false. ! Switch for implicite/explicte splitting of vert. velocity +wsplit_maxcfl= 1.0 ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) + ! in older FESOM it used to be w_exp_max=1.e-3 +/ + +!&dynamics_phys +!A_ver = 1.e-4 ! Vertical viscosity, m^2/s +!scale_area = 5.8e9 ! Visc. and diffus. are for an element with scale_area +!/ \ No newline at end of file diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index a3b570afc..77438f64b 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -67,24 +67,24 @@ MODULE MOD_DYN type(t_dyn_work) :: work !___________________________________________________________________________ - ! visc_option=... + ! opt_visc=... ! 5=Kinematic (easy) Backscatter ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) ! 7=Biharmonic flow aware (viscosity depends on velocity differences) ! 8=Dynamic Backscatter - integer :: visc_opt = 5 + integer :: opt_visc = 5 ! gamma0 [m/s], backgroung viscosity= gamma0*len, it should be as small ! as possible (keep it < 0.01 m/s). ! gamma1 [nodim], for computation of the flow aware viscosity ! gamma2 [s/m], is only used in easy backscatter option - real(kind=WP) :: gamma0_visc = 0.03 - real(kind=WP) :: gamma1_visc = 0.1 - real(kind=WP) :: gamma2_visc = 0.285 + real(kind=WP) :: visc_gamma0 = 0.03 + real(kind=WP) :: visc_gamma1 = 0.1 + real(kind=WP) :: visc_gamma2 = 0.285 - ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 + ! coefficient for returned sub-gridscale energy, to be used with opt_visc=5 ! (easy backscatter) - real(kind=WP) :: easy_bs_return= 1.5 + real(kind=WP) :: visc_easybsreturn = 1.5 logical :: use_ivertvisc = .true. integer :: momadv_opt = 2 @@ -207,10 +207,11 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) dynamics%solverinfo !___________________________________________________________________________ - write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt - write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc - write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc - write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%opt_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma0 + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma1 + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma2 + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_easybsreturn !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc @@ -250,10 +251,11 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) dynamics%work !___________________________________________________________________________ - read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt - read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc - read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc - read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%opt_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma0 + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma1 + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma2 + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_easybsreturn !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index ddb845731..036afca1d 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -17,11 +17,15 @@ module diagnostics implicit none private - public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, & - compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & - u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, v_surf, u_bott, v_bott, & - std_dens_min, std_dens_max, std_dens_N, std_dens, std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, dens_flux_e, & - compute_diag_dvd_2ndmoment_klingbeil_etal_2014, compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd + public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, & + ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, ldiag_vorticity, & + compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & + u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, & + utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, & + v_surf, u_bott, v_bott, std_dens_min, std_dens_max, std_dens_N, std_dens, & + std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, & + dens_flux_e, vorticity, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & + compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd ! Arrays used for diagnostics, some shall be accessible to the I/O ! 1. solver diagnostics: A*x=rhs? ! A=ssh_stiff, x=d_eta, rhs=ssh_rhs; rhs_diag=A*x; @@ -33,6 +37,7 @@ module diagnostics real(kind=WP), save, allocatable, target :: dudx(:,:), dudy(:,:), dvdx(:,:), dvdy(:,:), dudz(:,:), dvdz(:,:), av_dudz(:,:), av_dvdz(:,:), av_dudz_sq(:,:) real(kind=WP), save, allocatable, target :: utau_surf(:), utau_bott(:) real(kind=WP), save, allocatable, target :: stress_bott(:,:), u_bott(:), v_bott(:), u_surf(:), v_surf(:) + real(kind=WP), save, allocatable, target :: vorticity(:,:) ! defining a set of standard density bins which will be used for computing densMOC ! integer, parameter :: std_dens_N = 100 @@ -69,8 +74,10 @@ module diagnostics logical :: ldiag_forc =.false. + logical :: ldiag_vorticity =.false. + namelist /diag_list/ ldiag_solver, lcurt_stress_surf, ldiag_curl_vel3, ldiag_energy, & - ldiag_dMOC, ldiag_DVD, ldiag_salt3D, ldiag_forc + ldiag_dMOC, ldiag_DVD, ldiag_salt3D, ldiag_forc, ldiag_vorticity contains @@ -670,8 +677,112 @@ subroutine diag_densMOC(mode, dynamics, tracers, partit, mesh) std_dens_VOL1=std_dens_VOL2 firstcall_e=.false. end subroutine diag_densMOC -! ============================================================== +! +! +!_______________________________________________________________________________ +subroutine relative_vorticity(mode, dynamics, partit, mesh) + IMPLICIT NONE + integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 + integer, intent(in) :: mode + logical, save :: firstcall=.true. + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + + !___________________________________________________________________________ + if (firstcall) then !allocate the stuff at the first call + allocate(vorticity(nl-1, myDim_nod2D+eDim_nod2D)) + firstcall=.false. + if (mode==0) return + end if + !!PS DO n=1,myDim_nod2D + !!PS nl1 = nlevels_nod2D(n)-1 + !!PS ul1 = ulevels_nod2D(n) + !!PS vorticity(ul1:nl1,n)=0.0_WP + !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 + !!PS !!PS vorticity(nz,n)=0.0_WP + !!PS !!PS END DO + !!PS END DO + vorticity = 0.0_WP + DO edge=1,myDim_edge2D + !! edge=myList_edge2D(m) + enodes=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + ul1=ulevels(el(1)) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + nl2=0 + ul2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + nl2=nlevels(el(2))-1 + ul2=ulevels(el(2)) + end if + nl12 = min(nl1,nl2) + ul12 = max(ul1,ul2) + + DO nz=ul1,ul12-1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + if (ul2>0) then + DO nz=ul2,ul12-1 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + endif + !!PS DO nz=1,min(nl1,nl2) + DO nz=ul12,nl12 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & + deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl1 + DO nz=nl12+1,nl1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl2 + DO nz=nl12+1,nl2 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + END DO + + ! vorticity = vorticity*area at this stage + ! It is correct only on myDim nodes + DO n=1,myDim_nod2D + !! n=myList_nod2D(m) + ul1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !!PS DO nz=1,nlevels_nod2D(n)-1 + DO nz=ul1,nl1-1 + vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) + END DO + END DO + + call exchange_nod(vorticity, partit) + +! Now it the relative vorticity known on neighbors too +end subroutine relative_vorticity + + +! ============================================================== subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) implicit none type(t_mesh) , intent(in) , target :: mesh @@ -698,6 +809,9 @@ subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) end if !6. MOC in density coordinate if (ldiag_dMOC) call diag_densMOC(mode, dynamics, tracers, partit, mesh) + + ! compute relative vorticity + if (ldiag_vorticity) call relative_vorticity(mode, dynamics, partit, mesh) end subroutine compute_diagnostics diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 0b1e3c7ee..f32b52e43 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -325,27 +325,27 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('Av ') call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u_dis_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_dis_tend', 'horizontal velocity viscosity tendency', 'm/s', UV_dis_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_dis_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_dis_tend', 'meridional velocity viscosity tendency', 'm/s', UV_dis_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('u_back_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_back_tend', 'horizontal velocity backscatter tendency', 'm2/s2', UV_back_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_back_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_back_tend', 'meridional velocity backscatter tendency', 'm2/s2', UV_back_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('u_total_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_total_tend', 'horizontal velocity total viscosity tendency', 'm/s', UV_total_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_total_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_total_tend', 'meridional velocity total viscosity tendency', 'm/s', UV_total_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ @@ -582,11 +582,12 @@ function mesh_dimname_from_dimsize(size, partit, mesh) result(name) ! !-------------------------------------------------------------------------------------------- ! -subroutine create_new_file(entry, partit, mesh) +subroutine create_new_file(entry, dynamics, partit, mesh) use g_clock use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use fesom_version_info_module use g_config use i_PARAM @@ -596,6 +597,7 @@ subroutine create_new_file(entry, partit, mesh) character(2000) :: att_text type(t_mesh) , intent(in) :: mesh type(t_partit), intent(in) :: partit + type(t_dyn) , intent(in) :: dynamics type(Meandata), intent(inout) :: entry character(len=*), parameter :: global_attributes_prefix = "FESOM_" @@ -665,15 +667,15 @@ subroutine create_new_file(entry, partit, mesh) ! call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_lim', len_trim(tra_adv_lim), trim(tra_adv_lim)), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell', NF_INT, 1, use_partial_cell), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'force_rotation', NF_INT, 1, force_rotation), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell' , NF_INT, 1, use_partial_cell), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'force_rotation' , NF_INT, 1, force_rotation), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'include_fleapyear', NF_INT, 1, include_fleapyear), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice', NF_INT, 1, use_floatice), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP', NF_INT, 1, whichEVP), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps', NF_INT, 1, evp_rheol_steps), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'visc_option', NF_INT, 1, visc_option), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'w_split', NF_INT, 1, w_split), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell', NF_INT, 1, use_partial_cell), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice' , NF_INT, 1, use_floatice), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP' , NF_INT, 1, whichEVP), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps' , NF_INT, 1, evp_rheol_steps), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'opt_visc' , NF_INT, 1, dynamics%opt_visc), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_wsplit' , NF_INT, 1, dynamics%use_wsplit), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell' , NF_INT, 1, use_partial_cell), __LINE__) @@ -881,7 +883,7 @@ subroutine output(istep, dynamics, tracers, partit, mesh) entry%filename = filepath ! use any existing file with this name or create a new one if( nf_open(entry%filename, nf_write, entry%ncid) /= nf_noerr ) then - call create_new_file(entry, partit, mesh) + call create_new_file(entry, dynamics, partit, mesh) call assert_nf( nf_open(entry%filename, nf_write, entry%ncid), __LINE__) end if call assoc_ids(entry) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index e5a3f6b5b..c5112b7f2 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -134,7 +134,7 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) if (trim(mix_scheme)=='cvmix_IDEMIX' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then call def_variable(oid, 'iwe', (/nl, nod2d/), 'Internal Wave eneryy', 'm2/s2', tke(:,:)); endif - if (visc_option==8) then + if (dynamics%opt_visc==8) then call def_variable(oid, 'uke', (/nl-1, elem2D/), 'unresolved kinetic energy', 'm2/s2', uke(:,:)); call def_variable(oid, 'uke_rhs', (/nl-1, elem2D/), 'unresolved kinetic energy rhs', 'm2/s2', uke_rhs(:,:)); endif diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 54d9603c7..916133a3d 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -1,15 +1,17 @@ module oce_adv_tra_driver_interfaces interface - subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) + subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) @@ -41,11 +43,12 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, ! ! !=============================================================================== -subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) +subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_comm_auto use oce_adv_tra_hor_interfaces use oce_adv_tra_ver_interfaces @@ -54,9 +57,10 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) implicit none real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) @@ -143,8 +147,8 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n) end do end do - if (w_split) then !wvel/=wvel_e - ! update for implicit contribution (w_split option) + if (dynamics%use_wsplit) then !wvel/=wvel_e + ! update for implicit contribution (use_wsplit option) call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 561d5c2b2..8652312f5 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2396,19 +2396,19 @@ subroutine vert_vel_ale(dynamics, partit, mesh) !___________________________________________________________________________ ! Split implicit vertical velocity onto implicit and explicit components using CFL criteria: - ! w_max_cfl constrains the allowed explicit w according to the CFL at this place - ! w_max_cfl=1 means w_exp is cut at at the maximum of allowed CFL - ! w_max_cfl=0 means w_exp is zero (everything computed implicitly) - ! w_max_cfl=inf menas w_impl is zero (everything computed explicitly) - ! a guess for optimal choice of w_max_cfl would be 0.95 + ! wsplit_maxcfl constrains the allowed explicit w according to the CFL at this place + ! wsplit_maxcfl=1 means w_exp is cut at at the maximum of allowed CFL + ! wsplit_maxcfl=0 means w_exp is zero (everything computed implicitly) + ! wsplit_maxcfl=inf menas w_impl is zero (everything computed explicitly) + ! a guess for optimal choice of wsplit_maxcfl would be 0.95 do n=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) do nz=nzmin,nzmax c1=1.0_WP c2=0.0_WP - if (w_split .and. (CFL_z(nz, n) > w_max_cfl)) then - dd=max((CFL_z(nz, n)-w_max_cfl), 0.0_WP)/max(w_max_cfl, 1.e-12) + if (dynamics%use_wsplit .and. (CFL_z(nz, n) > dynamics%wsplit_maxcfl)) then + dd=max((CFL_z(nz, n)-dynamics%wsplit_maxcfl), 0.0_WP)/max(dynamics%wsplit_maxcfl, 1.e-12) c1=1.0_WP/(1.0_WP+dd) !explicit part =1. if dd=0. c2=dd /(1.0_WP+dd) !implicit part =1. if dd=inf end if @@ -2866,11 +2866,11 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call viscosity_filter'//achar(27)//'[0m' - call viscosity_filter(visc_option, dynamics, partit, mesh) + call viscosity_filter(dynamics%opt_visc, dynamics, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call impl_vert_visc_ale'//achar(27)//'[0m' - if(i_vert_visc) call impl_vert_visc_ale(dynamics,partit, mesh) + if(dynamics%use_ivertvisc) call impl_vert_visc_ale(dynamics,partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 78a8e5e94..6a941cf91 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -279,7 +279,7 @@ subroutine adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(dt, dynamics%uv, dynamics%w, dynamics%w_i, dynamics%w_e, tr_num, tracers, partit, mesh) + call do_oce_adv_tra(dt, dynamics%uv, dynamics%w, dynamics%w_i, dynamics%w_e, tr_num, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -504,7 +504,7 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) trarr => tracers%data(tr_num)%values(:,:) Wvel_i => dynamics%w_i(:,:) !___________________________________________________________________________ - if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. + if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. dynamics%use_wsplit)) do_wimpl=.false. if (Redi) isredi=1._WP dt_inv=1.0_WP/dt @@ -1221,7 +1221,10 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 tt=ttf(nz,en(1))-ttf(nz,en(2)) - vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) + vi=sqrt(max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(vi), & + dynamics%visc_gamma2*vi) & + )*len) !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 10m^2/s for 10 km (0.04 h/50) !vi=sqrt(10.*le) tt=tt*vi @@ -1245,7 +1248,10 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 tt=temporary_ttf(nz,en(1))-temporary_ttf(nz,en(2)) - vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) + vi=sqrt(max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(vi), & + dynamics%visc_gamma2*vi) & + )*len) !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 10m^2/s for 10 km (0.04 h/50) !vi=sqrt(10.*le) tt=-tt*vi*dt diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index bee2c0b7d..ea77166ed 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -146,10 +146,10 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ ! advection - if (mom_adv==1) then + if (dynamics%momadv_opt==1) then if (mype==0) write(*,*) 'in moment not adapted mom_adv advection typ for ALE, check your namelist' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) - elseif (mom_adv==2) then + elseif (dynamics%momadv_opt==2) then call momentum_adv_scalar(dynamics, partit, mesh) end if t3=MPI_Wtime() diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index cd78ac3c9..5b80ccf7a 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -1,13 +1,13 @@ ! A set of routines for computing the horizonlal viscosity ! the control parameters (their default values) are: -! gamma0 (0.01 [m/s]), gamma1 (0.1 [no dim.]), gamma2 (10.[s/m]), Div_c [1.], Leith_c[1.?] -! 1. gamma0 has the dimension of velocity. It should be as small as possible, but in any case smaller than 0.01 m/s. +! dynamics%visc_gamma0 (0.01 [m/s]), dynamics%visc_gamma1 (0.1 [no dim.]), dynamics%visc_gamma2 (10.[s/m]), Div_c [1.], Leith_c[1.?] +! 1. dynamics%visc_gamma0 has the dimension of velocity. It should be as small as possible, but in any case smaller than 0.01 m/s. ! All major ocean circulation models are stable with harmonic viscosity 0.01*len. -! 2. gamma1 is nondimensional. In commonly used Leith or Smagorinsky parameterizations it is C/pi^2=0.1 (C is about 1). +! 2. dynamics%visc_gamma1 is nondimensional. In commonly used Leith or Smagorinsky parameterizations it is C/pi^2=0.1 (C is about 1). ! We therefore try to follow this, allowing some adjustments (because our mesh is triangular, our resolution is different, etc.). -! We however, try to keep gamma1<0.1 -! 3. gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. It is only used in: +! We however, try to keep dynamics%visc_gamma1<0.1 +! 3. dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. It is only used in: ! (5) visc_filt_bcksct, (6) visc_filt_bilapl, (7) visc_filt_bidiff ! 4. Div_c =1. should be default ! 5. Leith_c=? (need to be adjusted) @@ -25,6 +25,7 @@ subroutine visc_filt_bcksct(dynamics, partit, mesh) end subroutine end interface end module + module visc_filt_bilapl_interface interface subroutine visc_filt_bilapl(dynamics, partit, mesh) @@ -39,6 +40,7 @@ subroutine visc_filt_bilapl(dynamics, partit, mesh) end subroutine end interface end module + module visc_filt_bidiff_interface interface subroutine visc_filt_bidiff(dynamics, partit, mesh) @@ -53,63 +55,6 @@ subroutine visc_filt_bidiff(dynamics, partit, mesh) end subroutine end interface end module -!!PS module visc_filt_dbcksc_interface -!!PS interface -!!PS subroutine visc_filt_dbcksc(dynamics, partit, mesh) -!!PS use mod_mesh -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS USE MOD_DYN -!!PS type(t_dyn) , intent(inout), target :: dynamics -!!PS type(t_partit), intent(inout), target :: partit -!!PS type(t_mesh) , intent(in) , target :: mesh -!!PS -!!PS end subroutine -!!PS end interface -!!PS end module -!!PS module backscatter_coef_interface -!!PS interface -!!PS subroutine backscatter_coef(dynamics, partit, mesh) -!!PS use mod_mesh -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS USE MOD_DYN -!!PS type(t_dyn) , intent(inout), target :: dynamics -!!PS type(t_partit), intent(inout), target :: partit -!!PS type(t_mesh) , intent(in) , target :: mesh -!!PS -!!PS end subroutine -!!PS end interface -!!PS end module -!!PS module uke_update_interface -!!PS interface -!!PS subroutine uke_update(dynamics, partit, mesh) -!!PS use mod_mesh -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS USE MOD_DYN -!!PS type(t_dyn) , intent(inout), target :: dynamics -!!PS type(t_partit), intent(inout), target :: partit -!!PS type(t_mesh) , intent(in) , target :: mesh -!!PS -!!PS end subroutine -!!PS end interface -!!PS end module - -module relative_vorticity_interface - interface - subroutine relative_vorticity(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module ! ! Contains routines needed for computations of dynamics. @@ -311,9 +256,12 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) DO nz=nzmin,nzmax-1 u1=UV(1,nz,el(1))-UV(1,nz,el(2)) v1=UV(2,nz,el(1))-UV(2,nz,el(2)) - vi=dt*max(gamma0, max(gamma1*sqrt(u1*u1+v1*v1), gamma2*(u1*u1+v1*v1)))*len -! vi=dt*max(gamma0, gamma1*max(sqrt(u1*u1+v1*v1), gamma2*(u1*u1+v1*v1)))*len - !here gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. + vi=dt*max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(u1*u1+v1*v1), & + dynamics%visc_gamma2*(u1*u1+v1*v1)) & + )*len +! vi=dt*max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(u1*u1+v1*v1), dynamics%visc_gamma2*(u1*u1+v1*v1)))*len + !here dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. u1=u1*vi v1=v1*vi U_b(nz,el(1))=U_b(nz,el(1))-u1/elem_area(el(1)) @@ -353,8 +301,8 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) nzmax = nlevels(ed) !!PS Do nz=1, nlevels(ed)-1 Do nz=nzmin, nzmax-1 - UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+U_b(nz,ed) -easy_bs_return*sum(U_c(nz,nelem))/3.0_WP - UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -easy_bs_return*sum(V_c(nz,nelem))/3.0_WP + UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+U_b(nz,ed) -dynamics%visc_easybsreturn*sum(U_c(nz,nelem))/3.0_WP + UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -dynamics%visc_easybsreturn*sum(V_c(nz,nelem))/3.0_WP END DO end do end subroutine visc_filt_bcksct @@ -423,7 +371,10 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) ! vi has the sense of harmonic viscosity coef. because of ! division by area in the end u1=U_c(nz,ed)**2+V_c(nz,ed)**2 - vi=max(gamma0, max(gamma1*sqrt(u1), gamma2*u1))*len*dt + vi=max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(u1), & + dynamics%visc_gamma2*u1) & + )*len*dt U_c(nz,ed)=-U_c(nz,ed)*vi V_c(nz,ed)=-V_c(nz,ed)*vi END DO @@ -499,8 +450,11 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) vi=u1*u1+v1*v1 - vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) - ! vi=sqrt(max(gamma0, gamma1*max(sqrt(vi), gamma2*vi))*len) + vi=sqrt(max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(vi), & + dynamics%visc_gamma2*vi) & + )*len) + ! vi=sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) u1=u1*vi v1=v1*vi U_c(nz,el(1))=U_c(nz,el(1))-u1 @@ -523,8 +477,11 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) vi=u1*u1+v1*v1 - vi=-dt*sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) - ! vi=-dt*sqrt(max(gamma0, gamma1*max(sqrt(vi), gamma2*vi))*len) + vi=-dt*sqrt(max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(vi), & + dynamics%visc_gamma2*vi) & + )*len) + ! vi=-dt*sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) u1=vi*(U_c(nz,el(1))-U_c(nz,el(2))) v1=vi*(V_c(nz,el(1))-V_c(nz,el(2))) UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) @@ -534,487 +491,5 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) END DO END DO deallocate(V_c, U_c) - end subroutine visc_filt_bidiff -!!PS ! -!!PS ! -!!PS !_______________________________________________________________________________ -!!PS SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) -!!PS USE MOD_MESH -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS use MOD_DYN -!!PS USE o_ARRAYS, only: v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & -!!PS uke, uke_dif -!!PS USE o_PARAM -!!PS USE g_CONFIG -!!PS USE g_comm_auto -!!PS USE g_support -!!PS USE uke_update_interface -!!PS IMPLICIT NONE -!!PS -!!PS real(kind=8) :: u1, v1, le(2), len, crosslen, vi, uke1 -!!PS integer :: nz, ed, el(2) -!!PS !!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) -!!PS real(kind=8) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) -!!PS real(kind=8) , allocatable :: uuu(:) -!!PS type(t_dyn) , intent(inout), target :: dynamics -!!PS type(t_partit), intent(inout), target :: partit -!!PS type(t_mesh) , intent(in) , target :: mesh -!!PS real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs -!!PS real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c -!!PS #include "associate_part_def.h" -!!PS #include "associate_mesh_def.h" -!!PS #include "associate_part_ass.h" -!!PS #include "associate_mesh_ass.h" -!!PS UV => dynamics%uv(:,:,:) -!!PS UV_rhs => dynamics%uv_rhs(:,:,:) -!!PS U_c => dynamics%work%u_c(:,:) -!!PS V_c => dynamics%work%v_c(:,:) -!!PS -!!PS ! An analog of harmonic viscosity operator. -!!PS ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area -!!PS ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. -!!PS ! The contribution from boundary edges is neglected (free slip). -!!PS ! Filter is applied twice. -!!PS -!!PS ed=myDim_elem2D+eDim_elem2D -!!PS allocate(U_c(nl-1,ed), V_c(nl-1, ed)) -!!PS allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) -!!PS allocate(uke_d(nl-1,ed)) -!!PS allocate(uuu(ed)) -!!PS -!!PS U_c=0.0_8 -!!PS V_c=0.0_8 -!!PS UV_back=0.0_8 -!!PS UV_dis=0.0_8 -!!PS uke_d=0.0_8 -!!PS -!!PS DO ed=1, myDim_edge2D+eDim_edge2D -!!PS if(myList_edge2D(ed)>edge2D_in) cycle -!!PS el=edge_tri(:,ed) -!!PS DO nz=1,minval(nlevels(el))-1 -!!PS u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) -!!PS v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) -!!PS -!!PS U_c(nz,el(1))=U_c(nz,el(1))-u1 -!!PS U_c(nz,el(2))=U_c(nz,el(2))+u1 -!!PS V_c(nz,el(1))=V_c(nz,el(1))-v1 -!!PS V_c(nz,el(2))=V_c(nz,el(2))+v1 -!!PS END DO -!!PS END DO -!!PS -!!PS -!!PS Do ed=1,myDim_elem2D -!!PS len=sqrt(elem_area(ed)) -!!PS len=dt*len/30.0_8 -!!PS Do nz=1,nlevels(ed)-1 -!!PS ! vi has the sense of harmonic viscosity coefficient because of -!!PS ! the division by area in the end -!!PS ! ==== -!!PS ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) -!!PS ! ==== -!!PS vi=max(0.2_8,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len -!!PS U_c(nz,ed)=-U_c(nz,ed)*vi -!!PS V_c(nz,ed)=-V_c(nz,ed)*vi -!!PS END DO -!!PS end do -!!PS -!!PS -!!PS call exchange_elem(U_c, partit) -!!PS call exchange_elem(V_c, partit) -!!PS -!!PS DO ed=1, myDim_edge2D+eDim_edge2D -!!PS if(myList_edge2D(ed)>edge2D_in) cycle -!!PS el=edge_tri(:,ed) -!!PS le=edge_dxdy(:,ed) -!!PS le(1)=le(1)*sum(elem_cos(el))*0.25_8 -!!PS len=sqrt(le(1)**2+le(2)**2)*r_earth -!!PS le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) -!!PS le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) -!!PS crosslen=sqrt(le(1)**2+le(2)**2) -!!PS DO nz=1,minval(nlevels(el))-1 -!!PS vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen -!!PS !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed -!!PS !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed -!!PS !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where -!!PS ! Visc is small and decoupling might happen -!!PS !Backscatter contribution -!!PS u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi -!!PS v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi -!!PS -!!PS !UKE diffusion -!!PS vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen -!!PS -!!PS uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi -!!PS -!!PS -!!PS UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) -!!PS UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) -!!PS UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) -!!PS UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) -!!PS -!!PS !Correct scaling for the diffusion? -!!PS uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) -!!PS uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) -!!PS -!!PS -!!PS -!!PS !Biharmonic contribution -!!PS u1=(U_c(nz,el(1))-U_c(nz,el(2))) -!!PS v1=(V_c(nz,el(1))-V_c(nz,el(2))) -!!PS -!!PS UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) -!!PS UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) -!!PS UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) -!!PS UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) -!!PS -!!PS END DO -!!PS END DO -!!PS -!!PS call exchange_elem(UV_back, partit) -!!PS -!!PS DO nz=1, nl-1 -!!PS uuu=0.0_8 -!!PS uuu=UV_back(1,nz,:) -!!PS call smooth_elem(uuu,smooth_back_tend, partit, mesh) -!!PS UV_back(1,nz,:)=uuu -!!PS uuu=0.0_8 -!!PS uuu=UV_back(2,nz,:) -!!PS call smooth_elem(uuu,smooth_back_tend, partit, mesh) -!!PS UV_back(2,nz,:)=uuu -!!PS END DO -!!PS -!!PS DO ed=1, myDim_elem2D -!!PS DO nz=1,nlevels(ed)-1 -!!PS UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) -!!PS UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) -!!PS END DO -!!PS END DO -!!PS -!!PS UV_dis_tend=UV_dis!+UV_back -!!PS UV_total_tend=UV_dis+UV_back -!!PS UV_back_tend=UV_back -!!PS uke_dif=uke_d -!!PS -!!PS call uke_update(dynamics, partit, mesh) -!!PS deallocate(V_c,U_c) -!!PS deallocate(UV_dis,UV_back) -!!PS deallocate(uke_d) -!!PS deallocate(uuu) -!!PS -!!PS end subroutine visc_filt_dbcksc -!!PS ! -!!PS ! -!!PS !_______________________________________________________________________________ -!!PS SUBROUTINE backscatter_coef(partit, mesh) -!!PS USE MOD_MESH -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS USE o_ARRAYS -!!PS USE o_PARAM -!!PS USE g_CONFIG -!!PS use g_comm_auto -!!PS IMPLICIT NONE -!!PS type(t_mesh), intent(in), target :: mesh -!!PS type(t_partit), intent(inout), target :: partit -!!PS integer :: elem, nz -!!PS #include "associate_part_def.h" -!!PS #include "associate_mesh_def.h" -!!PS #include "associate_part_ass.h" -!!PS #include "associate_mesh_ass.h" -!!PS -!!PS !Potentially add the Rossby number scaling to the script... -!!PS !check if sign is right! Different in the Jansen paper -!!PS !Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) -!!PS -!!PS v_back=0.0_8 -!!PS DO elem=1, myDim_elem2D -!!PS DO nz=1,nlevels(elem)-1 -!!PS !v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) -!!PS !v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct -!!PS v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct -!!PS !Scaling by sqrt(2*elem_area) or sqrt(elem_area)? -!!PS END DO -!!PS END DO -!!PS -!!PS call exchange_elem(v_back, partit) -!!PS -!!PS end subroutine backscatter_coef -!!PS ! -!!PS ! -!!PS !_______________________________________________________________________________ -!!PS SUBROUTINE uke_update(dynamics, partit, mesh) -!!PS USE MOD_MESH -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS use MOD_DYN -!!PS USE o_ARRAYS, only: uke_rhs, uke_dif, uke_back, uke_dis, uke, UV_dis_tend, uv_back_tend, uke_rhs_old, & -!!PS bvfreq, coriolis_node -!!PS USE o_PARAM -!!PS USE g_CONFIG -!!PS use g_comm_auto -!!PS USE g_support -!!PS USE g_rotate_grid -!!PS IMPLICIT NONE -!!PS -!!PS !I had to change uke(:) to uke(:,:) to make output and restart work!! -!!PS -!!PS !Why is it necessary to implement the length of the array? It doesn't work without! -!!PS !integer, intent(in) :: t_levels -!!PS type(t_dyn) , intent(inout), target :: dynamics -!!PS type(t_partit), intent(inout), target :: partit -!!PS type(t_mesh) , intent(in) , target :: mesh -!!PS -!!PS real(kind=8) :: hall, h1_eta, hnz, vol -!!PS integer :: elnodes(3), nz, ed, edi, node, j, elem, q -!!PS real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) -!!PS integer :: kk, nzmax, el -!!PS real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso -!!PS real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? -!!PS real(kind=WP), dimension(:,:,:), pointer :: UV -!!PS #include "associate_part_def.h" -!!PS #include "associate_mesh_def.h" -!!PS #include "associate_part_ass.h" -!!PS #include "associate_mesh_ass.h" -!!PS UV => dynamics%uv(:,:,:) -!!PS -!!PS !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered -!!PS !rossby_num=2 -!!PS -!!PS ed=myDim_elem2D+eDim_elem2D -!!PS allocate(uuu(ed)) -!!PS -!!PS uke_back=0.0_8 -!!PS uke_dis=0.0_8 -!!PS DO ed=1, myDim_elem2D -!!PS DO nz=1, nlevels(ed)-1 -!!PS uke_dis(nz,ed)=(UV(1,nz,ed)*UV_dis_tend(1,nz,ed)+UV(2,nz,ed)*UV_dis_tend(2,nz,ed)) -!!PS uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) -!!PS END DO -!!PS END DO -!!PS -!!PS DO nz=1,nl-1 -!!PS uuu=0.0_8 -!!PS uuu=uke_back(nz,:) -!!PS call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? -!!PS uke_back(nz,:)=uuu -!!PS END DO -!!PS -!!PS -!!PS -!!PS !Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice -!!PS !Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic -!!PS !uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) -!!PS ed=myDim_elem2D+eDim_elem2D -!!PS allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) -!!PS allocate(work_uv(myDim_nod2D+eDim_nod2D)) -!!PS allocate(rosb_array(nl-1,ed)) -!!PS call exchange_elem(UV, partit) -!!PS rosb_array=0._8 -!!PS DO nz=1, nl-1 -!!PS work_uv=0._WP -!!PS DO node=1, myDim_nod2D -!!PS vol=0._WP -!!PS U_work(nz,node)=0._WP -!!PS V_work(nz,node)=0._WP -!!PS DO j=1, nod_in_elem2D_num(node) -!!PS elem=nod_in_elem2D(j, node) -!!PS U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) -!!PS V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) -!!PS vol=vol+elem_area(elem) -!!PS END DO -!!PS U_work(nz,node)=U_work(nz,node)/vol -!!PS V_work(nz,node)=U_work(nz,node)/vol -!!PS END DO -!!PS work_uv=U_work(nz,:) -!!PS call exchange_nod(work_uv, partit) -!!PS U_work(nz,:)=work_uv -!!PS work_uv=V_work(nz,:) -!!PS call exchange_nod(work_uv, partit) -!!PS V_work(nz,:)=work_uv -!!PS END DO -!!PS -!!PS DO el=1,myDim_elem2D -!!PS DO nz=1, nlevels(el)-1 -!!PS rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& -!!PS sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& -!!PS (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& -!!PS sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) -!!PS ! hall=hall+hnz -!!PS END DO -!!PS ! rosb_array(el)=rosb_array(el)/hall -!!PS END DO -!!PS DO ed=1, myDim_elem2D -!!PS scaling=1._WP -!!PS IF(uke_scaling) then -!!PS reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) -!!PS rosb=0._wp -!!PS elnodes=elem2D_nodes(:, ed) -!!PS DO kk=1,3 -!!PS c1=0._wp -!!PS nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) -!!PS !Vertical average; same scaling in the vertical -!!PS DO nz=1, nzmax-1 -!!PS c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. -!!PS END DO -!!PS c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min -!!PS !Cutoff K_GM depending on (Resolution/Rossby radius) ratio -!!PS rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) -!!PS END DO -!!PS rosb=rosb/3._8 -!!PS scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) -!!PS END IF -!!PS -!!PS DO nz=1, nlevels(ed)-1 -!!PS elnodes=elem2D_nodes(:,ed) -!!PS -!!PS !Taking out that one place where it is always weird (Pacific Southern Ocean) -!!PS !Should not really be used later on, once we fix the issue with the 1/4 degree grid -!!PS if(.not. (TRIM(which_toy)=="soufflet")) then -!!PS call elem_center(ed, ex, ey) -!!PS !a1=-104.*rad -!!PS !a2=-49.*rad -!!PS call g2r(-104.*rad, -49.*rad, a1, a2) -!!PS dist_reg(1)=ex-a1 -!!PS dist_reg(2)=ey-a2 -!!PS call trim_cyclic(dist_reg(1)) -!!PS dist_reg(1)=dist_reg(1)*elem_cos(ed) -!!PS dist_reg=dist_reg*r_earth -!!PS len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) -!!PS -!!PS -!!PS !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 -!!PS rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) -!!PS !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) -!!PS uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) -!!PS else -!!PS rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) -!!PS !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) -!!PS uke_dis(nz,ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) -!!PS end if -!!PS -!!PS END DO -!!PS END DO -!!PS deallocate(U_work, V_work) -!!PS deallocate(rosb_array) -!!PS deallocate(work_uv) -!!PS call exchange_elem(uke_dis, partit) -!!PS DO nz=1, nl-1 -!!PS uuu=uke_dis(nz,:) -!!PS call smooth_elem(uuu,smooth_dis, partit, mesh) -!!PS uke_dis(nz,:)=uuu -!!PS END DO -!!PS DO ed=1, myDim_elem2D -!!PS DO nz=1,nlevels(ed)-1 -!!PS uke_rhs_old(nz,ed)=uke_rhs(nz,ed) -!!PS uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) -!!PS uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) -!!PS END DO -!!PS END DO -!!PS call exchange_elem(uke, partit) -!!PS -!!PS deallocate(uuu) -!!PS end subroutine uke_update -! -! -!_______________________________________________________________________________ -subroutine relative_vorticity(dynamics, partit, mesh) - USE o_ARRAYS, only: vorticity - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - use g_comm_auto - IMPLICIT NONE - integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 - real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - - !!PS DO n=1,myDim_nod2D - !!PS nl1 = nlevels_nod2D(n)-1 - !!PS ul1 = ulevels_nod2D(n) - !!PS vorticity(ul1:nl1,n)=0.0_WP - !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 - !!PS !!PS vorticity(nz,n)=0.0_WP - !!PS !!PS END DO - !!PS END DO - vorticity(:,1:myDim_nod2D) = 0.0_WP - DO edge=1,myDim_edge2D - !! edge=myList_edge2D(m) - enodes=edges(:,edge) - el=edge_tri(:,edge) - nl1=nlevels(el(1))-1 - ul1=ulevels(el(1)) - deltaX1=edge_cross_dxdy(1,edge) - deltaY1=edge_cross_dxdy(2,edge) - nl2=0 - ul2=0 - if(el(2)>0) then - deltaX2=edge_cross_dxdy(3,edge) - deltaY2=edge_cross_dxdy(4,edge) - nl2=nlevels(el(2))-1 - ul2=ulevels(el(2)) - end if - nl12 = min(nl1,nl2) - ul12 = max(ul1,ul2) - - DO nz=ul1,ul12-1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - if (ul2>0) then - DO nz=ul2,ul12-1 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - endif - !!PS DO nz=1,min(nl1,nl2) - DO nz=ul12,nl12 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & - deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl1 - DO nz=nl12+1,nl1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl2 - DO nz=nl12+1,nl2 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - END DO - - ! vorticity = vorticity*area at this stage - ! It is correct only on myDim nodes - DO n=1,myDim_nod2D - !! n=myList_nod2D(m) - ul1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - !!PS DO nz=1,nlevels_nod2D(n)-1 - DO nz=ul1,nl1-1 - vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) - END DO - END DO - - call exchange_nod(vorticity, partit) - -! Now it the relative vorticity known on neighbors too -end subroutine relative_vorticity diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 013495860..1a0c078a0 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -23,14 +23,7 @@ MODULE o_PARAM real(kind=WP) :: C_d= 0.0025_WP ! Bottom drag coefficient real(kind=WP) :: kappa=0.4 !von Karman's constant real(kind=WP) :: mix_coeff_PP=0.01_WP ! mixing coef for PP scheme -real(kind=WP) :: gamma0=0.01! [m/s], gamma0*len*dt is the background viscosity -real(kind=WP) :: gamma1=0.1! [non dim.], or computation of the flow aware viscosity -real(kind=WP) :: gamma2=10.! [s/m], is only used in easy backscatter option -real(kind=WP) :: Div_c =1.0_WP !modified Leith viscosity weight -real(kind=WP) :: Leith_c=1.0_WP !Leith viscosity weight. It needs vorticity! -real(kind=WP) :: easy_bs_return=1.0 !backscatter option only (how much to return) real(kind=WP) :: A_ver=0.001_WP ! Vertical harm. visc. -integer :: visc_option=5 logical :: uke_scaling=.true. real(kind=WP) :: uke_scaling_factor=1._WP real(kind=WP) :: rosb_dis=1._WP @@ -81,9 +74,6 @@ MODULE o_PARAM ! elevation and divergence real(kind=WP) :: epsilon=0.1_WP ! AB2 offset ! Tracers -logical :: i_vert_visc= .true. -logical :: w_split =.false. -real(kind=WP) :: w_max_cfl=1.e-5_WP logical :: SPP=.false. @@ -99,9 +89,9 @@ MODULE o_PARAM ! Momentum -logical :: free_slip=.false. - ! false=no slip -integer :: mom_adv=2 +!!PS logical :: free_slip=.false. +!!PS ! false=no slip +!!PS integer :: mom_adv=2 ! 1 vector control volumes, p1 velocities ! 2 scalar control volumes ! 3 vector invariant @@ -161,11 +151,11 @@ MODULE o_PARAM character(20) :: which_pgf='shchepetkin' - NAMELIST /oce_dyn/ state_equation, C_d, A_ver, gamma0, gamma1, gamma2, Leith_c, Div_c, easy_bs_return, & - scale_area, mom_adv, free_slip, i_vert_visc, w_split, w_max_cfl, SPP,& + NAMELIST /oce_dyn/ state_equation, C_d, A_ver, & + scale_area, SPP,& Fer_GM, K_GM_max, K_GM_min, K_GM_bvref, K_GM_resscalorder, K_GM_rampmax, K_GM_rampmin, & scaling_Ferreira, scaling_Rossby, scaling_resolution, scaling_FESOM14, & - Redi, visc_sh_limit, mix_scheme, Ricr, concv, which_pgf, visc_option, alpha, theta, use_density_ref, & + Redi, visc_sh_limit, mix_scheme, Ricr, concv, which_pgf, alpha, theta, use_density_ref, & K_back, c_back, uke_scaling, uke_scaling_factor, smooth_back, smooth_dis, & smooth_back_tend, rosb_dis @@ -210,9 +200,6 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: tr_xy(:,:,:) real(kind=WP), allocatable :: tr_z(:,:) -! Auxiliary arrays for vector-invariant form of momentum advection -real(kind=WP), allocatable,dimension(:,:) :: vorticity - !Viscosity and diff coefs real(kind=WP), allocatable,dimension(:,:) :: Av,Kv real(kind=WP), allocatable,dimension(:,:,:) :: Kv_double diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 071627d84..8309a1ca6 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -243,15 +243,15 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! initialise arrays that are needed for backscatter_coef - if(dynamics%visc_opt==8) call init_backscatter(partit, mesh) + if(dynamics%opt_visc==8) call init_backscatter(partit, mesh) !___________________________________________________________________________ if(partit%mype==0) write(*,*) 'Initial state' - if (w_split .and. partit%mype==0) then + if (dynamics%use_wsplit .and. partit%mype==0) then write(*,*) '******************************************************************************' write(*,*) 'vertical velocity will be split onto explicit and implicit constitutes;' - write(*,*) 'maximum allowed CDF on explicit W is set to: ', w_max_cfl + write(*,*) 'maximum allowed CDF on explicit W is set to: ', dynamics%wsplit_maxcfl write(*,*) '******************************************************************************' end if end subroutine ocean_setup @@ -362,12 +362,12 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) USE o_param IMPLICIT NONE integer :: elem_size, node_size - integer, save :: nm_unit = 104 ! unit to open namelist file, skip 100-102 for cray + integer, save :: nm_unit = 105 ! unit to open namelist file, skip 100-102 for cray integer :: iost - integer :: visc_opt - real(kind=WP) :: gamma0_visc, gamma1_visc, gamma2_visc - real(kind=WP) :: div_c_visc, leith_c_visc, easybackscat_return + integer :: opt_visc + real(kind=WP) :: visc_gamma0, visc_gamma1, visc_gamma2 + real(kind=WP) :: visc_easybsreturn logical :: use_ivertvisc integer :: momadv_opt logical :: use_freeslip @@ -377,50 +377,42 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics + + ! define dynamics namelist parameter + namelist /dynamics_visc / opt_visc, visc_gamma0, visc_gamma1, visc_gamma2, & + use_ivertvisc, visc_easybsreturn + namelist /dynamics_general/ momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" - -!!PS ! define dynamics namelist parameter -!!PS namelist /dynamics_visc / visc_opt, gamma0_visc, gamma1_visc, gamma2_visc, & -!!PS div_c_visc, leith_c_visc, use_ivertvisc, easy_bs_return -!!PS namelist /dynamics_general / momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl -!!PS -!!PS ! open and read namelist for I/O -!!PS open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) -!!PS if (iost == 0) then -!!PS if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' -!!PS else -!!PS if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost -!!PS call par_ex(partit%MPI_COMM_FESOM, partit%mype) -!!PS stop -!!PS end if -!!PS read(nm_unit, nml=dynamics_visc , iostat=iost) -!!PS read(nm_unit, nml=dynamics_general, iostat=iost) -!!PS close(nm_unit) +#include "associate_mesh_ass.h" + + ! open and read namelist for I/O + open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) + if (iost == 0) then + if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' + else + if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + read(nm_unit, nml=dynamics_visc, iostat=iost) + read(nm_unit, nml=dynamics_general, iostat=iost) + close(nm_unit) !___________________________________________________________________________ ! set parameters in derived type -!!PS dynamics%visc_opt = visc_opt -!!PS dynamics%gamma0_visc = gamma0_visc -!!PS dynamics%gamma1_visc = gamma1_visc -!!PS dynamics%gamma2_visc = gamma2_visc -!!PS dynamics%use_ivertvisc = use_ivertvisc -!!PS dynamics%momadv_opt = momadv_opt -!!PS dynamics%use_freeslip = use_freeslip -!!PS dynamics%use_wsplit = use_wsplit -!!PS dynamics%wsplit_maxcfl = wsplit_maxcfl - - dynamics%visc_opt = visc_option - dynamics%gamma0_visc = gamma0 - dynamics%gamma1_visc = gamma1 - dynamics%gamma2_visc = gamma2 - dynamics%use_ivertvisc = i_vert_visc - dynamics%momadv_opt = mom_adv - dynamics%use_freeslip = free_slip - dynamics%use_wsplit = w_split - dynamics%wsplit_maxcfl = w_max_cfl + dynamics%opt_visc = opt_visc + dynamics%visc_gamma0 = visc_gamma0 + dynamics%visc_gamma1 = visc_gamma1 + dynamics%visc_gamma2 = visc_gamma2 + dynamics%visc_easybsreturn = visc_easybsreturn + dynamics%use_ivertvisc = use_ivertvisc + dynamics%momadv_opt = momadv_opt + dynamics%use_freeslip = use_freeslip + dynamics%use_wsplit = use_wsplit + dynamics%wsplit_maxcfl = wsplit_maxcfl !___________________________________________________________________________ ! define local vertice & elem array size @@ -476,7 +468,7 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%work%uvnode_rhs = 0.0_WP dynamics%work%u_c = 0.0_WP dynamics%work%v_c = 0.0_WP - if (dynamics%visc_opt==5) then + if (dynamics%opt_visc==5) then allocate(dynamics%work%u_b(nl-1, elem_size)) allocate(dynamics%work%v_b(nl-1, elem_size)) dynamics%work%u_b = 0.0_WP @@ -556,14 +548,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) allocate(Tsurf_t(node_size,2), Ssurf_t(node_size,2)) allocate(tau_x_t(node_size,2), tau_y_t(node_size,2)) -! ================= -! All auxiliary arrays -! ================= - -!if(mom_adv==3) then -allocate(vorticity(nl-1,node_size)) -vorticity=0.0_WP -!end if ! ================= ! Visc and Diff coefs @@ -579,35 +563,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) !!PS call oce_mixing_kpp_init ! Setup constants, allocate arrays and construct look up table end if -! ================= -! Backscatter arrays -! ================= - -!!PS if(visc_option==8) then -!!PS -!!PS allocate(uke(nl-1,elem_size)) ! Unresolved kinetic energy for backscatter coefficient -!!PS allocate(v_back(nl-1,elem_size)) ! Backscatter viscosity -!!PS allocate(uke_dis(nl-1,elem_size), uke_back(nl-1,elem_size)) -!!PS allocate(uke_dif(nl-1,elem_size)) -!!PS allocate(uke_rhs(nl-1,elem_size), uke_rhs_old(nl-1,elem_size)) -!!PS allocate(UV_dis_tend(2,nl-1,elem_size), UV_back_tend(2,nl-1,elem_size)) -!!PS allocate(UV_total_tend(2,nl-1,elem_size)) -!!PS -!!PS uke=0.0_8 -!!PS v_back=0.0_8 -!!PS uke_dis=0.0_8 -!!PS uke_dif=0.0_8 -!!PS uke_back=0.0_8 -!!PS uke_rhs=0.0_8 -!!PS uke_rhs_old=0.0_8 -!!PS UV_dis_tend=0.0_8 -!!PS UV_back_tend=0.0_8 -!!PS UV_total_tend=0.0_8 -!!PS end if - -!Velocities at nodes -!!PS allocate(Unode(2,nl-1,node_size)) - ! tracer gradients & RHS allocate(ttrhs(nl-1,node_size)) allocate(tr_xy(2,nl-1,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) From 4793a8d045565c3bb642cd762011ab2268f35820 Mon Sep 17 00:00:00 2001 From: a270042 Date: Mon, 8 Nov 2021 16:26:38 +0100 Subject: [PATCH 461/909] update ../config/namelist.dyn --- config/namelist.dyn | 4 ---- 1 file changed, 4 deletions(-) diff --git a/config/namelist.dyn b/config/namelist.dyn index c729acea1..e35508f2f 100644 --- a/config/namelist.dyn +++ b/config/namelist.dyn @@ -21,7 +21,3 @@ wsplit_maxcfl= 1.0 ! maximum allowed CFL criteria in vertical (0.5 < w_max_c ! in older FESOM it used to be w_exp_max=1.e-3 / -!&dynamics_phys -!A_ver = 1.e-4 ! Vertical viscosity, m^2/s -!scale_area = 5.8e9 ! Visc. and diffus. are for an element with scale_area -!/ \ No newline at end of file From 392daed4f9ca9a3afd3c262855e755bbfb19920f Mon Sep 17 00:00:00 2001 From: a270042 Date: Mon, 8 Nov 2021 17:42:36 +0100 Subject: [PATCH 462/909] solve conflicts from merging with refactoring branch --- src/fvom.F90 | 54 ++++-- src/fvom_main.F90 | 388 +------------------------------------ src/oce_adv_tra_driver.F90 | 9 +- src/oce_ale_tracer.F90 | 136 +++---------- 4 files changed, 67 insertions(+), 520 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 4ab323df9..82752d507 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -5,6 +5,7 @@ module fesom_main_storage_module USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN USE o_ARRAYS USE o_PARAM USE i_PARAM @@ -51,15 +52,17 @@ module fesom_main_storage_module real(kind=real32) :: runtime_alltimesteps - type(t_mesh) mesh + type(t_mesh) mesh type(t_tracer) tracers + type(t_dyn) dynamics type(t_partit) partit character(LEN=256) :: dump_dir, dump_filename logical :: L_EXISTS - type(t_mesh) mesh_copy + type(t_mesh) mesh_copy type(t_tracer) tracers_copy + type(t_dyn) dynamics_copy character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt integer mpi_version_len @@ -124,6 +127,7 @@ subroutine fesom_init(fesom_total_nsteps) call setup_model(f%partit) ! Read Namelists, always before clock_init call clock_init(f%partit) ! read the clock file call get_run_steps(fesom_total_nsteps, f%partit) + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call mesh_setup'//achar(27)//'[0m' call mesh_setup(f%partit, f%mesh) if (f%mype==0) write(*,*) 'FESOM mesh_setup... complete' @@ -133,12 +137,21 @@ subroutine fesom_init(fesom_total_nsteps) ! and additional arrays needed for ! fancy advection etc. !===================== + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call check_mesh_consistency'//achar(27)//'[0m' call check_mesh_consistency(f%partit, f%mesh) if (f%mype==0) f%t2=MPI_Wtime() + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call dynamics_init'//achar(27)//'[0m' + call dynamics_init(f%dynamics, f%partit, f%mesh) + + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call tracer_init'//achar(27)//'[0m' call tracer_init(f%tracers, f%partit, f%mesh) ! allocate array of ocean tracers (derived type "t_tracer") + + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call arrays_init'//achar(27)//'[0m' call arrays_init(f%tracers%num_tracers, f%partit, f%mesh) ! allocate other arrays (to be refactured same as tracers in the future) - call ocean_setup(f%tracers, f%partit, f%mesh) + + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean_setup'//achar(27)//'[0m' + call ocean_setup(f%dynamics, f%tracers, f%partit, f%mesh) if (f%mype==0) then write(*,*) 'FESOM ocean_setup... complete' @@ -154,7 +167,7 @@ subroutine fesom_init(fesom_total_nsteps) if (f%mype==0) write(*,*) 'EVP scheme option=', whichEVP endif if (f%mype==0) f%t5=MPI_Wtime() - call compute_diagnostics(0, f%tracers, f%partit, f%mesh) ! allocate arrays for diagnostic + call compute_diagnostics(0, f%dynamics, f%tracers, f%partit, f%mesh) ! allocate arrays for diagnostic #if defined (__oasis) call cpl_oasis3mct_define_unstr(f%partit, f%mesh) if(f%mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv @@ -179,7 +192,7 @@ subroutine fesom_init(fesom_total_nsteps) ! if l_write is TRUE the restart will be forced ! if l_read the restart will be read ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read + call restart(0, .false., r_restart, f%dynamics, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read if (f%mype==0) f%t7=MPI_Wtime() ! store grid information into netcdf file if (.not. r_restart) call write_mesh_info(f%partit, f%mesh) @@ -230,9 +243,18 @@ subroutine fesom_init(fesom_total_nsteps) close (f%mype+300) ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") - ! read (f%mype+300) f%tracers_copy + ! read (f%mype+300) f%dynamics_copy ! close (f%mype+300) + write (f%dump_filename, "(A9,I7.7)") "t_dynamics.", f%mype + open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + write (f%mype+300) f%dynamics + close (f%mype+300) + + ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") + ! read (f%mype+300) f%tracers_copy + ! close (f%mype+300) + !call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) !stop ! @@ -292,13 +314,15 @@ subroutine fesom_runloop(current_nsteps) #endif call clock !___compute horizontal velocity on nodes (originaly on elements)________ - call compute_vel_nodes(f%partit, f%mesh) + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call compute_vel_nodes'//achar(27)//'[0m' + call compute_vel_nodes(f%dynamics, f%partit, f%mesh) + !___model sea-ice step__________________________________________________ f%t1 = MPI_Wtime() if(use_ice) then !___compute fluxes from ocean to ice________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(f%tracers, f%partit, f%mesh) + call ocean2ice(f%dynamics, f%tracers, f%partit, f%mesh) !___compute update of atmospheric forcing____________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' @@ -317,28 +341,28 @@ subroutine fesom_runloop(current_nsteps) if (ice_update) call ice_timestep(n, f%partit, f%mesh) !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(f%partit, f%mesh) ! momentum only + call oce_fluxes_mom(f%dynamics, f%partit, f%mesh) ! momentum only call oce_fluxes(f%tracers, f%partit, f%mesh) end if - call before_oce_step(f%tracers, f%partit, f%mesh) ! prepare the things if required + call before_oce_step(f%dynamics, f%tracers, f%partit, f%mesh) ! prepare the things if required f%t2 = MPI_Wtime() + !___model ocean step____________________________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - - call oce_timestep_ale(n, f%tracers, f%partit, f%mesh) + call oce_timestep_ale(n, f%dynamics, f%tracers, f%partit, f%mesh) f%t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, f%tracers, f%partit, f%mesh) + call compute_diagnostics(1, f%dynamics, f%tracers, f%partit, f%mesh) f%t4 = MPI_Wtime() !___prepare output______________________________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, f%tracers, f%partit, f%mesh) + call output (n, f%dynamics, f%tracers, f%partit, f%mesh) f%t5 = MPI_Wtime() - call restart(n, .false., .false., f%tracers, f%partit, f%mesh) + call restart(n, .false., .false., f%dynamics, f%tracers, f%partit, f%mesh) f%t6 = MPI_Wtime() f%rtime_fullice = f%rtime_fullice + f%t2 - f%t1 diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index c3ffb89b6..2793532d7 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -7,44 +7,8 @@ !=============================================================================! program main -<<<<<<< HEAD -USE MOD_MESH -USE MOD_TRACER -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_DYN -USE o_ARRAYS -USE o_PARAM -USE i_PARAM -use i_ARRAYS -use g_clock -use g_config -use g_comm_auto -use g_forcing_arrays -use io_RESTART -use io_MEANDATA -use io_mesh_info -use diagnostics -use mo_tidal -use dynamics_init_interface -use tracer_init_interface -use ocean_setup_interface -use ice_setup_interface -use ocean2ice_interface -use oce_fluxes_interface -use update_atm_forcing_interface -use before_oce_step_interface -use oce_timestep_ale_interface -use read_mesh_interface -use fesom_version_info_module -use command_line_options_module -! Define icepack module -#if defined (__icepack) -use icedrv_main, only: set_icepack, init_icepack, alloc_icepack -#endif -======= + use fvom_module ->>>>>>> beb9fe92a459cfc34d01cbba0cd37ef66428314a integer nsteps @@ -52,354 +16,4 @@ program main call fesom_runloop(nsteps) call fesom_finalize -<<<<<<< HEAD -integer :: n, nsteps, offset, row, i, provided -integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM -real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc -real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing -real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing -real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart -real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) -real(kind=real32) :: runtime_alltimesteps - - -type(t_mesh) , target, save :: mesh -type(t_tracer), target, save :: tracers -type(t_partit), target, save :: partit -type(t_dyn) , target, save :: dynamics - - -character(LEN=256) :: dump_dir, dump_filename -logical :: L_EXISTS -type(t_mesh), target, save :: mesh_copy -type(t_tracer), target, save :: tracers_copy - -character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt -integer mpi_version_len - - - if(command_argument_count() > 0) then - call command_line_options%parse() - stop - end if - -#ifndef __oifs - !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) - !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS - call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, i) -#endif - - -#if defined (__oasis) - call cpl_oasis3mct_init(partit%MPI_COMM_FESOM) -#endif - t1 = MPI_Wtime() - - call par_init(partit) - - mype =>partit%mype - MPIerr =>partit%MPIerr - MPI_COMM_FESOM=>partit%MPI_COMM_FESOM - npes =>partit%npes - if(mype==0) then - write(*,*) - print *,"FESOM2 git SHA: "//fesom_git_sha() - call MPI_Get_library_version(mpi_version_txt, mpi_version_len, MPIERR) - print *,"MPI library version: "//trim(mpi_version_txt) - print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;32m'//' --> FESOM BUILDS UP MODEL CONFIGURATION '//achar(27)//'[0m' - end if - !===================== - ! Read configuration data, - ! load the mesh and fill in - ! auxiliary mesh arrays - !===================== - call setup_model(partit) ! Read Namelists, always before clock_init - call clock_init(partit) ! read the clock file - call get_run_steps(nsteps, partit) - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call mesh_setup'//achar(27)//'[0m' - call mesh_setup(partit, mesh) - - if (mype==0) write(*,*) 'FESOM mesh_setup... complete' - - !===================== - ! Allocate field variables - ! and additional arrays needed for - ! fancy advection etc. - !===================== - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call check_mesh_consistency'//achar(27)//'[0m' - call check_mesh_consistency(partit, mesh) - if (mype==0) t2=MPI_Wtime() - - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call xxxx_init'//achar(27)//'[0m' - call dynamics_init(dynamics, partit, mesh) - call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") - call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) - - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean_setup'//achar(27)//'[0m' - call ocean_setup(dynamics, tracers, partit, mesh) - - if (mype==0) then - write(*,*) 'FESOM ocean_setup... complete' - t3=MPI_Wtime() - endif - call forcing_setup(partit, mesh) - - if (mype==0) t4=MPI_Wtime() - if (use_ice) then - call ice_setup(tracers, partit, mesh) - ice_steps_since_upd = ice_ave_steps-1 - ice_update=.true. - if (mype==0) write(*,*) 'EVP scheme option=', whichEVP - endif - if (mype==0) t5=MPI_Wtime() - call compute_diagnostics(0, dynamics, tracers, partit, mesh) ! allocate arrays for diagnostic -#if defined (__oasis) - call cpl_oasis3mct_define_unstr(partit, mesh) - if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv -#endif - -#if defined (__icepack) - !===================== - ! Setup icepack - !===================== - if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' - call set_icepack(partit) - call alloc_icepack - call init_icepack(tracers%data(1), mesh) - if (mype==0) write(*,*) 'Icepack: setup complete' -#endif - call clock_newyear ! check if it is a new year - if (mype==0) t6=MPI_Wtime() - !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ - ! The interface to the restart module is made via call restart ! - ! The inputs are: istep, l_write, l_create - ! if istep is not zero it will be decided whether restart shall be written - ! if l_write is TRUE the restart will be forced - ! if l_read the restart will be read - ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, dynamics, tracers, partit, mesh) ! istep, l_write, l_read - if (mype==0) t7=MPI_Wtime() - ! store grid information into netcdf file - if (.not. r_restart) call write_mesh_info(partit, mesh) - - !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ - !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ - if (r_restart) then - call restart_thickness_ale(partit, mesh) - end if - if (mype==0) then - t8=MPI_Wtime() - - rtime_setup_mesh = real( t2 - t1 ,real32) - rtime_setup_ocean = real( t3 - t2 ,real32) - rtime_setup_forcing = real( t4 - t3 ,real32) - rtime_setup_ice = real( t5 - t4 ,real32) - rtime_setup_restart = real( t7 - t6 ,real32) - rtime_setup_other = real((t8 - t7) + (t6 - t5) ,real32) - - write(*,*) '==========================================' - write(*,*) 'MODEL SETUP took on mype=0 [seconds] ' - write(*,*) 'runtime setup total ',real(t8-t1,real32) - write(*,*) ' > runtime setup mesh ',rtime_setup_mesh - write(*,*) ' > runtime setup ocean ',rtime_setup_ocean - write(*,*) ' > runtime setup forcing ',rtime_setup_forcing - write(*,*) ' > runtime setup ice ',rtime_setup_ice - write(*,*) ' > runtime setup restart ',rtime_setup_restart - write(*,*) ' > runtime setup other ',rtime_setup_other - write(*,*) '============================================' - endif - - DUMP_DIR='DUMP/' - INQUIRE(file=trim(dump_dir), EXIST=L_EXISTS) - if (.not. L_EXISTS) call system('mkdir '//trim(dump_dir)) - - write (dump_filename, "(A7,I7.7)") "t_mesh.", mype - open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") - write (mype+300) mesh - close (mype+300) - -! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") -! read (mype+300) mesh_copy -! close (mype+300) - - write (dump_filename, "(A9,I7.7)") "t_tracer.", mype - open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") - write (mype+300) tracers - close (mype+300) - -! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") -! read (mype+300) tracers_copy -! close (mype+300) - -!call par_ex(partit%MPI_COMM_FESOM, partit%mype) -!stop -! -! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value - - !===================== - ! Time stepping - !===================== - -! Initialize timers - rtime_fullice = 0._WP - rtime_write_restart = 0._WP - rtime_write_means = 0._WP - rtime_compute_diag = 0._WP - rtime_read_forcing = 0._WP - - if (mype==0) write(*,*) 'FESOM start iteration before the barrier...' - call MPI_Barrier(MPI_COMM_FESOM, MPIERR) - - if (mype==0) then - write(*,*) 'FESOM start iteration after the barrier...' - t0 = MPI_Wtime() - endif - if(mype==0) then - write(*,*) - print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;32m'//' --> FESOM STARTS TIME LOOP '//achar(27)//'[0m' - end if - !___MODEL TIME STEPPING LOOP________________________________________________ - if (use_global_tides) then - call foreph_ini(yearnew, month, partit) - end if - do n=1, nsteps - if (use_global_tides) then - call foreph(partit, mesh) - end if - mstep = n - if (mod(n,logfile_outfreq)==0 .and. mype==0) then - write(*,*) 'FESOM =======================================================' -! write(*,*) 'FESOM step:',n,' day:', n*dt/24./3600., - write(*,*) 'FESOM step:',n,' day:', daynew,' year:',yearnew - write(*,*) - end if -#if defined (__oifs) || defined (__oasis) - seconds_til_now=INT(dt)*(n-1) -#endif - call clock - !___compute horizontal velocity on nodes (originaly on elements)________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_vel_nodes'//achar(27)//'[0m' - call compute_vel_nodes(dynamics, partit, mesh) - - !___model sea-ice step__________________________________________________ - t1 = MPI_Wtime() - if(use_ice) then - !___compute fluxes from ocean to ice________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(dynamics, tracers, partit, mesh) - - !___compute update of atmospheric forcing____________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' - t0_frc = MPI_Wtime() - call update_atm_forcing(n, tracers, partit, mesh) - t1_frc = MPI_Wtime() - !___compute ice step________________________________________________ - if (ice_steps_since_upd>=ice_ave_steps-1) then - ice_update=.true. - ice_steps_since_upd = 0 - else - ice_update=.false. - ice_steps_since_upd=ice_steps_since_upd+1 - endif - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ice_timestep(n)'//achar(27)//'[0m' - if (ice_update) call ice_timestep(n, partit, mesh) - !___compute fluxes to the ocean: heat, freshwater, momentum_________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(dynamics, partit, mesh) ! momentum only - call oce_fluxes(tracers, partit, mesh) - end if - call before_oce_step(dynamics, tracers, partit, mesh) ! prepare the things if required - t2 = MPI_Wtime() - !___model ocean step____________________________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - - call oce_timestep_ale(n, dynamics, tracers, partit, mesh) - - t3 = MPI_Wtime() - !___compute energy diagnostics..._______________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, dynamics, tracers, partit, mesh) - - t4 = MPI_Wtime() - !___prepare output______________________________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, dynamics, tracers, partit, mesh) - - t5 = MPI_Wtime() - call restart(n, .false., .false., dynamics, tracers, partit, mesh) - t6 = MPI_Wtime() - - rtime_fullice = rtime_fullice + t2 - t1 - rtime_compute_diag = rtime_compute_diag + t4 - t3 - rtime_write_means = rtime_write_means + t5 - t4 - rtime_write_restart = rtime_write_restart + t6 - t5 - rtime_read_forcing = rtime_read_forcing + t1_frc - t0_frc - end do - - call finalize_output() - - !___FINISH MODEL RUN________________________________________________________ - - call MPI_Barrier(MPI_COMM_FESOM, MPIERR) - if (mype==0) then - t1 = MPI_Wtime() - runtime_alltimesteps = real(t1-t0,real32) - write(*,*) 'FESOM Run is finished, updating clock' - endif - - mean_rtime(1) = rtime_oce - mean_rtime(2) = rtime_oce_mixpres - mean_rtime(3) = rtime_oce_dyn - mean_rtime(4) = rtime_oce_dynssh - mean_rtime(5) = rtime_oce_solvessh - mean_rtime(6) = rtime_oce_GMRedi - mean_rtime(7) = rtime_oce_solvetra - mean_rtime(8) = rtime_ice - mean_rtime(9) = rtime_tot - mean_rtime(10) = rtime_fullice - rtime_read_forcing - mean_rtime(11) = rtime_compute_diag - mean_rtime(12) = rtime_write_means - mean_rtime(13) = rtime_write_restart - mean_rtime(14) = rtime_read_forcing - - max_rtime(1:14) = mean_rtime(1:14) - min_rtime(1:14) = mean_rtime(1:14) - - call MPI_AllREDUCE(MPI_IN_PLACE, mean_rtime, 14, MPI_REAL, MPI_SUM, MPI_COMM_FESOM, MPIerr) - mean_rtime(1:14) = mean_rtime(1:14) / real(npes,real32) - call MPI_AllREDUCE(MPI_IN_PLACE, max_rtime, 14, MPI_REAL, MPI_MAX, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(MPI_IN_PLACE, min_rtime, 14, MPI_REAL, MPI_MIN, MPI_COMM_FESOM, MPIerr) - - if (mype==0) then - write(*,*) '___MODEL RUNTIME mean, min, max per task [seconds]________________________' - write(*,*) ' runtime ocean:',mean_rtime(1), min_rtime(1), max_rtime(1) - write(*,*) ' > runtime oce. mix,pres. :',mean_rtime(2), min_rtime(2), max_rtime(2) - write(*,*) ' > runtime oce. dyn. u,v,w:',mean_rtime(3), min_rtime(3), max_rtime(3) - write(*,*) ' > runtime oce. dyn. ssh :',mean_rtime(4), min_rtime(4), max_rtime(4) - write(*,*) ' > runtime oce. solve ssh:',mean_rtime(5), min_rtime(5), max_rtime(5) - write(*,*) ' > runtime oce. GM/Redi :',mean_rtime(6), min_rtime(6), max_rtime(6) - write(*,*) ' > runtime oce. tracer :',mean_rtime(7), min_rtime(7), max_rtime(7) - write(*,*) ' runtime ice :',mean_rtime(10), min_rtime(10), max_rtime(10) - write(*,*) ' > runtime ice step :',mean_rtime(8), min_rtime(8), max_rtime(8) - write(*,*) ' runtime diag: ', mean_rtime(11), min_rtime(11), max_rtime(11) - write(*,*) ' runtime output: ', mean_rtime(12), min_rtime(12), max_rtime(12) - write(*,*) ' runtime restart:', mean_rtime(13), min_rtime(13), max_rtime(13) - write(*,*) ' runtime forcing:', mean_rtime(14), min_rtime(14), max_rtime(14) - write(*,*) ' runtime total (ice+oce):',mean_rtime(9), min_rtime(9), max_rtime(9) - write(*,*) - write(*,*) '============================================' - write(*,*) '=========== BENCHMARK RUNTIME ==============' - write(*,*) ' Number of cores : ',npes - write(*,*) ' Runtime for all timesteps : ',runtime_alltimesteps,' sec' - write(*,*) '============================================' - write(*,*) - end if -! call clock_finish - call par_ex(partit%MPI_COMM_FESOM, partit%mype) -end program main - -======= end program ->>>>>>> beb9fe92a459cfc34d01cbba0cd37ef66428314a diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index fd56c3143..acc15253c 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -166,14 +166,11 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n) end do end do -<<<<<<< HEAD - if (dynamics%use_wsplit) then !wvel/=wvel_e - ! update for implicit contribution (use_wsplit option) -======= !$OMP END PARALLEL DO - if (w_split) then !wvel/=wvel_e + + if (dynamics%use_wsplit) then !wvel/=wvel_e ! update for implicit contribution (w_split option) ->>>>>>> beb9fe92a459cfc34d01cbba0cd37ef66428314a + call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index dba2e42a1..748927421 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -12,26 +12,7 @@ subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) end subroutine end interface end module -<<<<<<< HEAD -module adv_tracers_ale_interface - interface - subroutine adv_tracers_ale(dt, tr_num, dynamics, tracer, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use MOD_DYN - real(kind=WP), intent(in), target :: dt - integer, intent(in), target :: tr_num - type(t_dyn) , intent(inout), target :: dynamics - type(t_tracer), intent(inout), target :: tracer - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface -end module -======= ->>>>>>> beb9fe92a459cfc34d01cbba0cd37ef66428314a + module diff_ver_part_expl_ale_interface interface subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) @@ -46,6 +27,7 @@ subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) end subroutine end interface end module + module diff_ver_part_redi_expl_interface interface subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) @@ -60,6 +42,7 @@ subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) end subroutine end interface end module + module diff_ver_part_impl_ale_interface interface subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracer, partit, mesh) @@ -76,6 +59,7 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracer, partit, mesh) end subroutine end interface end module + module diff_tracers_ale_interface interface subroutine diff_tracers_ale(tr_num, dynamics, tracer, partit, mesh) @@ -92,6 +76,7 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracer, partit, mesh) end subroutine end interface end module + module bc_surface_interface interface function bc_surface(n, id, sval, partit) @@ -105,6 +90,7 @@ function bc_surface(n, id, sval, partit) end function end interface end module + module diff_part_bh_interface interface subroutine diff_part_bh(tr_num, dynamics, tracer, partit, mesh) @@ -121,6 +107,7 @@ subroutine diff_part_bh(tr_num, dynamics, tracer, partit, mesh) end subroutine end interface end module + module solve_tracers_ale_interface interface subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) @@ -154,42 +141,34 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use diff_tracers_ale_interface use oce_adv_tra_driver_interfaces implicit none -<<<<<<< HEAD - type(t_dyn) , intent(inout), target :: dynamics - type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - integer :: tr_num, node, nzmax, nzmin - real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, fer_Wvel - -======= + + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit integer :: tr_num, node, elem, nzmax, nzmin - real(kind=WP), pointer, dimension (:,:) :: del_ttf ->>>>>>> beb9fe92a459cfc34d01cbba0cd37ef66428314a + real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, fer_Wvel + real(kind=WP), dimension(:,:) , pointer :: del_ttf #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -<<<<<<< HEAD + UV => dynamics%uv(:,:,:) Wvel => dynamics%w(:,:) Wvel_e => dynamics%w_e(:,:) + Wvel_i => dynamics%w_i(:,:) if (Fer_GM) then fer_UV => dynamics%fer_uv(:,:,:) fer_Wvel => dynamics%fer_w(:,:) end if -======= - del_ttf => tracers%work%del_ttf ->>>>>>> beb9fe92a459cfc34d01cbba0cd37ef66428314a !___________________________________________________________________________ if (SPP) call cal_rejected_salt(partit, mesh) - if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) + if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) + !___________________________________________________________________________ ! update 3D velocities with the bolus velocities: ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 @@ -207,6 +186,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) end do !$OMP END PARALLEL DO end if + !___________________________________________________________________________ ! loop over all tracers do tr_num=1, tracers%num_tracers @@ -214,13 +194,12 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) ! needed if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call init_tracers_AB'//achar(27)//'[0m' call init_tracers_AB(tr_num, tracers, partit, mesh) + ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' -<<<<<<< HEAD - call adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) -======= ! it will update del_ttf with contributions from horizontal and vertical advection parts (del_ttf_advhoriz and del_ttf_advvert) - call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) + call do_oce_adv_tra(dt, UV, Wvel, Wvel_i, Wvel_e, tr_num, dynamics, tracers, partit, mesh) + !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -233,10 +212,11 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) tracers%data(tr_num)%valuesAB(:, node)=tracers%data(tr_num)%values(:, node) !DS: check that this is the right place! end do !$OMP END PARALLEL DO ->>>>>>> beb9fe92a459cfc34d01cbba0cd37ef66428314a + ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' call diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) + ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' ! if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then @@ -290,68 +270,7 @@ end subroutine solve_tracers_ale ! ! !=============================================================================== -<<<<<<< HEAD -subroutine adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) - use g_config, only: flag_debug - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_TRACER - use MOD_DYN - use o_arrays - use diagnostics, only: ldiag_DVD, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & - compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd -! use adv_tracers_muscle_ale_interface -! use adv_tracers_vert_ppm_ale_interface - use oce_adv_tra_driver_interfaces - implicit none - real(kind=WP), intent(in), target :: dt - integer :: node, nz - integer, intent(in) :: tr_num - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics - ! del_ttf ... initialised and setted to zero in call init_tracers_AB(tr_num) - ! --> del_ttf ... equivalent to R_T^n in Danilov etal FESOM2: "from finite element - ! to finite volume". At the end R_T^n should contain all advection therms and - ! the terms due to diffusion. - ! del_ttf=0d0 - - !___________________________________________________________________________ - ! if ldiag_DVD=.true. --> compute tracer second moments for the calcualtion - ! of discret variance decay - if (ldiag_DVD .and. tr_num <= 2) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd_2ndmoment'//achar(27)//'[0m' - call compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers, partit, mesh) - end if - - !___________________________________________________________________________ - ! horizontal ale tracer advection - ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... - tracers%work%del_ttf_advhoriz = 0.0_WP - tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(dt, dynamics%uv, dynamics%w, dynamics%w_i, dynamics%w_e, tr_num, dynamics, tracers, partit, mesh) - !___________________________________________________________________________ - ! update array for total tracer flux del_ttf with the fluxes from horizontal - ! and vertical advection - tracers%work%del_ttf=tracers%work%del_ttf+tracers%work%del_ttf_advhoriz+tracers%work%del_ttf_advvert - - !___________________________________________________________________________ - ! compute discrete variance decay after Burchard and Rennau 2008 - if (ldiag_DVD .and. tr_num <= 2) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd'//achar(27)//'[0m' - call compute_diag_dvd(tr_num, tracers, partit, mesh) - end if - -end subroutine adv_tracers_ale -! -! -!=============================================================================== subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) -======= -subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) ->>>>>>> beb9fe92a459cfc34d01cbba0cd37ef66428314a use mod_mesh USE MOD_PARTIT USE MOD_PARSUP @@ -415,16 +334,9 @@ subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) end if !We DO not set del_ttf to zero because it will not be used in this timestep anymore -<<<<<<< HEAD - !init_tracers will set it to zero for the next timestep - !init_tracers will set it to zero for the next timestep + !init_tracers_AB will set it to zero for the next timestep if (tracers%smooth_bh_tra) then call diff_part_bh(tr_num, dynamics, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) -======= - !init_tracers_AB will set it to zero for the next timestep - if (tracers%smooth_bh_tra) then - call diff_part_bh(tr_num, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) ->>>>>>> beb9fe92a459cfc34d01cbba0cd37ef66428314a end if end subroutine diff_tracers_ale ! From 02ddb878ea1dc09cfeb97afca626df251bc010c0 Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 9 Nov 2021 11:04:16 +0100 Subject: [PATCH 463/909] add reduced namelist.oce --- config/namelist.oce | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/config/namelist.oce b/config/namelist.oce index a69770154..7af6867f7 100644 --- a/config/namelist.oce +++ b/config/namelist.oce @@ -2,27 +2,8 @@ &oce_dyn C_d=0.0025 ! Bottom drag, nondimensional -gamma0=0.003 ! [m/s], backgroung viscosity= gamma0*len, it should be as small as possible (keep it < 0.01 m/s). -gamma1=0.1 ! [nodim], for computation of the flow aware viscosity -gamma2=0.285 ! [s/m], is only used in easy backscatter option -Div_c=.5 ! the strength of the modified Leith viscosity, nondimensional, 0.3 -- 1.0 -Leith_c=.05 ! the strength of the Leith viscosity -visc_option=5 ! 1=Harmonic Leith parameterization; - ! 2=Laplacian+Leith+biharmonic background - ! 3=Biharmonic Leith parameterization - ! 4=Biharmonic flow aware - ! 5=Kinematic (easy) Backscatter - ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) - ! 7=Biharmonic flow aware (viscosity depends on velocity differences) - ! 8=Dynamic Backscatter -easy_bs_return= 1.5 ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 (easy backscatter) A_ver= 1.e-4 ! Vertical viscosity, m^2/s scale_area=5.8e9 ! Visc. and diffus. are for an element with scale_area -mom_adv=2 ! 1=vector CV, p1 vel, 2=sca. CV, 3=vector inv. -free_slip=.false. ! Switch on free slip -i_vert_visc=.true. -w_split=.false. -w_max_cfl=1.0 ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) ! in older FESOM it used to be w_exp_max=1.e-3 SPP=.false. ! Salt Plume Parameterization Fer_GM=.true. ! to swith on/off GM after Ferrari et al. 2010 K_GM_max = 2000.0 ! max. GM thickness diffusivity (m2/s) From b6d94b761029e5bb95e8a3e777883ebd830e1365 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 9 Nov 2021 13:45:53 +0100 Subject: [PATCH 464/909] Revert "add reduced namelist.oce" --- config/namelist.oce | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/config/namelist.oce b/config/namelist.oce index 7af6867f7..a69770154 100644 --- a/config/namelist.oce +++ b/config/namelist.oce @@ -2,8 +2,27 @@ &oce_dyn C_d=0.0025 ! Bottom drag, nondimensional +gamma0=0.003 ! [m/s], backgroung viscosity= gamma0*len, it should be as small as possible (keep it < 0.01 m/s). +gamma1=0.1 ! [nodim], for computation of the flow aware viscosity +gamma2=0.285 ! [s/m], is only used in easy backscatter option +Div_c=.5 ! the strength of the modified Leith viscosity, nondimensional, 0.3 -- 1.0 +Leith_c=.05 ! the strength of the Leith viscosity +visc_option=5 ! 1=Harmonic Leith parameterization; + ! 2=Laplacian+Leith+biharmonic background + ! 3=Biharmonic Leith parameterization + ! 4=Biharmonic flow aware + ! 5=Kinematic (easy) Backscatter + ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) + ! 7=Biharmonic flow aware (viscosity depends on velocity differences) + ! 8=Dynamic Backscatter +easy_bs_return= 1.5 ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 (easy backscatter) A_ver= 1.e-4 ! Vertical viscosity, m^2/s scale_area=5.8e9 ! Visc. and diffus. are for an element with scale_area +mom_adv=2 ! 1=vector CV, p1 vel, 2=sca. CV, 3=vector inv. +free_slip=.false. ! Switch on free slip +i_vert_visc=.true. +w_split=.false. +w_max_cfl=1.0 ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) ! in older FESOM it used to be w_exp_max=1.e-3 SPP=.false. ! Salt Plume Parameterization Fer_GM=.true. ! to swith on/off GM after Ferrari et al. 2010 K_GM_max = 2000.0 ! max. GM thickness diffusivity (m2/s) From 8c5c21041bca594545aa599466a7104afeea69e1 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 9 Nov 2021 13:46:40 +0100 Subject: [PATCH 465/909] Revert "Refactoring dynamics: part4" --- config/namelist.dyn | 23 - src/MOD_DYN.F90 | 271 -------- src/cavity_param.F90 | 30 +- src/fvom.F90 | 54 +- src/fvom_main.F90 | 1 - src/gen_modules_backscatter.F90 | 411 ------------ src/gen_modules_cvmix_kpp.F90 | 31 +- src/gen_modules_cvmix_pp.F90 | 12 +- src/gen_modules_cvmix_tke.F90 | 16 +- src/gen_modules_diag.F90 | 210 ++---- src/ice_oce_coupling.F90 | 41 +- src/io_blowup.F90 | 57 +- src/io_meandata.F90 | 89 ++- src/io_restart.F90 | 39 +- src/oce_adv_tra_driver.F90 | 16 +- src/oce_ale.F90 | 418 ++++++------ src/oce_ale_mixing_kpp.F90 | 27 +- src/oce_ale_mixing_pp.F90 | 13 +- src/oce_ale_tracer.F90 | 108 +--- src/oce_ale_vel_rhs.F90 | 100 +-- src/oce_dyn.F90 | 1070 ++++++++++++++++++++++++++----- src/oce_fer_gm.F90 | 47 +- src/oce_modules.F90 | 37 +- src/oce_setup_step.F90 | 256 +++----- src/oce_vel_rhs_vinv.F90 | 335 ++++++++++ src/toy_channel_soufflet.F90 | 37 +- src/write_step_info.F90 | 100 +-- 27 files changed, 1858 insertions(+), 1991 deletions(-) delete mode 100644 config/namelist.dyn delete mode 100644 src/MOD_DYN.F90 delete mode 100644 src/gen_modules_backscatter.F90 create mode 100755 src/oce_vel_rhs_vinv.F90 diff --git a/config/namelist.dyn b/config/namelist.dyn deleted file mode 100644 index e35508f2f..000000000 --- a/config/namelist.dyn +++ /dev/null @@ -1,23 +0,0 @@ -&dynamics_visc -visc_gamma0 = 0.003 ! [m/s], backgroung viscosity= gamma0*len, it should be as small a s possible (keep it < 0.01 m/s). -visc_gamma1 = 0.1 ! [nodim], for computation of the flow aware viscosity -visc_gamma2 = 0.285 ! [s/m], is only used in easy backscatter option -visc_easybsreturn= 1.5 - -opt_visc = 5 -! 5=Kinematic (easy) Backscatter -! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) -! 7=Biharmonic flow aware (viscosity depends on velocity differences) -! 8=Dynamic Backscatter - -use_ivertvisc= .true. -/ - -&dynamics_general -momadv_opt = 2 ! option for momentum advection in moment only =2 -use_freeslip = .false. ! Switch on free slip -use_wsplit = .false. ! Switch for implicite/explicte splitting of vert. velocity -wsplit_maxcfl= 1.0 ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) - ! in older FESOM it used to be w_exp_max=1.e-3 -/ - diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 deleted file mode 100644 index 77438f64b..000000000 --- a/src/MOD_DYN.F90 +++ /dev/null @@ -1,271 +0,0 @@ -!========================================================== -MODULE MOD_DYN -USE O_PARAM -USE, intrinsic :: ISO_FORTRAN_ENV -USE MOD_WRITE_BINARY_ARRAYS -USE MOD_READ_BINARY_ARRAYS -IMPLICIT NONE -SAVE - -! -! -!_______________________________________________________________________________ -TYPE T_SOLVERINFO - integer :: ident = 1 - integer :: maxiter = 2000 - integer :: restart = 15 - integer :: fillin = 3 - integer :: lutype = 2 - real(kind=WP) :: droptol = 1.e-8 - real(kind=WP) :: soltol = 1e-10 !1.e-10 - contains - procedure WRITE_T_SOLVERINFO - procedure READ_T_SOLVERINFO - generic :: write(unformatted) => WRITE_T_SOLVERINFO - generic :: read(unformatted) => READ_T_SOLVERINFO -END TYPE T_SOLVERINFO -! -! -!_______________________________________________________________________________ -TYPE T_DYN_WORK - real(kind=WP), allocatable, dimension(:,:,:) :: uvnode_rhs - real(kind=WP), allocatable, dimension(:,:) :: u_c, v_c - - ! easy backscatter contribution - real(kind=WP), allocatable, dimension(:,:) :: u_b, v_b - contains - procedure WRITE_T_DYN_WORK - procedure READ_T_DYN_WORK - generic :: write(unformatted) => WRITE_T_DYN_WORK - generic :: read(unformatted) => READ_T_DYN_WORK -END TYPE T_DYN_WORK -! -! -!_______________________________________________________________________________ -! set main structure for dynamicss, contains viscosity options and parameters + -! option for momentum advection -TYPE T_DYN -!___________________________________________________________________________ - ! instant zonal merdional velocity & Adams-Bashfort rhs - real(kind=WP), allocatable, dimension(:,:,:):: uv, uv_rhs, uv_rhsAB, fer_uv - - ! horizontal velocities at nodes - real(kind=WP), allocatable, dimension(:,:,:):: uvnode - - ! instant vertical vel arrays - real(kind=WP), allocatable, dimension(:,:) :: w, w_e, w_i, cfl_z, fer_w - - ! sea surface height arrays - real(kind=WP), allocatable, dimension(:) :: eta_n, d_eta, ssh_rhs, ssh_rhs_old - - !___________________________________________________________________________ - ! summarizes solver input parameter - type(t_solverinfo) :: solverinfo - - !___________________________________________________________________________ - ! put dynmiacs working arrays - type(t_dyn_work) :: work - - !___________________________________________________________________________ - ! opt_visc=... - ! 5=Kinematic (easy) Backscatter - ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) - ! 7=Biharmonic flow aware (viscosity depends on velocity differences) - ! 8=Dynamic Backscatter - integer :: opt_visc = 5 - - ! gamma0 [m/s], backgroung viscosity= gamma0*len, it should be as small - ! as possible (keep it < 0.01 m/s). - ! gamma1 [nodim], for computation of the flow aware viscosity - ! gamma2 [s/m], is only used in easy backscatter option - real(kind=WP) :: visc_gamma0 = 0.03 - real(kind=WP) :: visc_gamma1 = 0.1 - real(kind=WP) :: visc_gamma2 = 0.285 - - ! coefficient for returned sub-gridscale energy, to be used with opt_visc=5 - ! (easy backscatter) - real(kind=WP) :: visc_easybsreturn = 1.5 - - logical :: use_ivertvisc = .true. - integer :: momadv_opt = 2 - - ! Switch on free slip - logical :: use_freeslip = .false. - - ! do implicite, explicite spliting of vertical velocity - logical :: use_wsplit = .false. - ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) - ! in older FESOM it used to be w_exp_max=1.e-3 - real(kind=WP) :: wsplit_maxcfl= 1.0 - - !___________________________________________________________________________ - contains - procedure WRITE_T_DYN - procedure READ_T_DYN - generic :: write(unformatted) => WRITE_T_DYN - generic :: read(unformatted) => READ_T_DYN -END TYPE T_DYN - -contains - -! -! -!_______________________________________________________________________________ -! set unformatted writing and reading for T_DYN_WORK -subroutine WRITE_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) - IMPLICIT NONE - class(T_SOLVERINFO), intent(in) :: tsolverinfo - integer, intent(in) :: unit - integer, intent(out) :: iostat - character(*), intent(inout) :: iomsg - !___________________________________________________________________________ - write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%ident - write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%maxiter - write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%restart - write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%fillin - write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%lutype - write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%droptol - write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%soltol -end subroutine WRITE_T_SOLVERINFO - -subroutine READ_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) - IMPLICIT NONE - class(T_SOLVERINFO), intent(inout) :: tsolverinfo - integer, intent(in) :: unit - integer, intent(out) :: iostat - character(*), intent(inout) :: iomsg - read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%ident - read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%maxiter - read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%restart - read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%fillin - read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%lutype - read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%droptol - read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%soltol -end subroutine READ_T_SOLVERINFO - -! -! -!_______________________________________________________________________________ -! set unformatted writing and reading for T_DYN_WORK -subroutine WRITE_T_DYN_WORK(twork, unit, iostat, iomsg) - IMPLICIT NONE - class(T_DYN_WORK), intent(in) :: twork - integer, intent(in) :: unit - integer, intent(out) :: iostat - character(*), intent(inout) :: iomsg - call write_bin_array(twork%uvnode_rhs, unit, iostat, iomsg) - call write_bin_array(twork%u_c, unit, iostat, iomsg) - call write_bin_array(twork%v_c, unit, iostat, iomsg) - call write_bin_array(twork%u_b, unit, iostat, iomsg) - call write_bin_array(twork%v_b, unit, iostat, iomsg) -end subroutine WRITE_T_DYN_WORK - -subroutine READ_T_DYN_WORK(twork, unit, iostat, iomsg) - IMPLICIT NONE - class(T_DYN_WORK), intent(inout) :: twork - integer, intent(in) :: unit - integer, intent(out) :: iostat - character(*), intent(inout) :: iomsg - call read_bin_array(twork%uvnode_rhs, unit, iostat, iomsg) - call read_bin_array(twork%u_c, unit, iostat, iomsg) - call read_bin_array(twork%v_c, unit, iostat, iomsg) - call read_bin_array(twork%u_b, unit, iostat, iomsg) - call read_bin_array(twork%v_b, unit, iostat, iomsg) -end subroutine READ_T_DYN_WORK - -! -! -!_______________________________________________________________________________ -! set unformatted writing and reading for T_DYN -subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) - IMPLICIT NONE - class(T_DYN), intent(in) :: dynamics - integer, intent(in) :: unit - integer, intent(out) :: iostat - character(*), intent(inout) :: iomsg - - !___________________________________________________________________________ - call write_bin_array(dynamics%uv , unit, iostat, iomsg) - call write_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) - call write_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) - call write_bin_array(dynamics%uvnode , unit, iostat, iomsg) - - call write_bin_array(dynamics%w , unit, iostat, iomsg) - call write_bin_array(dynamics%w_e , unit, iostat, iomsg) - call write_bin_array(dynamics%w_i , unit, iostat, iomsg) - call write_bin_array(dynamics%cfl_z , unit, iostat, iomsg) - - if (Fer_GM) then - call write_bin_array(dynamics%fer_w , unit, iostat, iomsg) - call write_bin_array(dynamics%fer_uv, unit, iostat, iomsg) - end if - - !___________________________________________________________________________ - write(unit, iostat=iostat, iomsg=iomsg) dynamics%work - - !___________________________________________________________________________ - write(unit, iostat=iostat, iomsg=iomsg) dynamics%solverinfo - - !___________________________________________________________________________ - write(unit, iostat=iostat, iomsg=iomsg) dynamics%opt_visc - write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma0 - write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma1 - write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma2 - write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_easybsreturn - - !___________________________________________________________________________ - write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc - write(unit, iostat=iostat, iomsg=iomsg) dynamics%momadv_opt - - !___________________________________________________________________________ - write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_freeslip - write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_wsplit - write(unit, iostat=iostat, iomsg=iomsg) dynamics%wsplit_maxcfl - -end subroutine WRITE_T_DYN - -subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) - IMPLICIT NONE - class(T_DYN), intent(inout) :: dynamics - integer, intent(in) :: unit - integer, intent(out) :: iostat - character(*), intent(inout) :: iomsg - - !___________________________________________________________________________ - call read_bin_array(dynamics%uv , unit, iostat, iomsg) - call read_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) - call read_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) - call read_bin_array(dynamics%uvnode , unit, iostat, iomsg) - - call read_bin_array(dynamics%w , unit, iostat, iomsg) - call read_bin_array(dynamics%w_e , unit, iostat, iomsg) - call read_bin_array(dynamics%w_i , unit, iostat, iomsg) - call read_bin_array(dynamics%cfl_z , unit, iostat, iomsg) - - if (Fer_GM) then - call read_bin_array(dynamics%fer_w , unit, iostat, iomsg) - call read_bin_array(dynamics%fer_uv , unit, iostat, iomsg) - end if - - !___________________________________________________________________________ - read(unit, iostat=iostat, iomsg=iomsg) dynamics%work - - !___________________________________________________________________________ - read(unit, iostat=iostat, iomsg=iomsg) dynamics%opt_visc - read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma0 - read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma1 - read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma2 - read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_easybsreturn - - !___________________________________________________________________________ - read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc - read(unit, iostat=iostat, iomsg=iomsg) dynamics%momadv_opt - - !___________________________________________________________________________ - read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_freeslip - read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_wsplit - read(unit, iostat=iostat, iomsg=iomsg) dynamics%wsplit_maxcfl - -end subroutine READ_T_DYN - -END MODULE MOD_DYN \ No newline at end of file diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 35ed3bdf5..eb8591754 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -137,21 +137,19 @@ end subroutine compute_nrst_pnt2cavline ! adjusted for use in FESOM by Ralph Timmermann, 16.02.2011 ! Reviewed by ? ! adapted by P. SCholz for FESOM2.0 -subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) +subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER - use MOD_DYN use o_PARAM , only: density_0, WP - use o_ARRAYS, only: heat_flux, water_flux, density_m_rho0, density_ref + use o_ARRAYS, only: heat_flux, water_flux, Unode, density_m_rho0,density_ref use i_ARRAYS, only: net_heat_flux, fresh_wa_flux implicit none !___________________________________________________________________________ type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(in), target :: tracers - type(t_dyn), intent(in), target :: dynamics real (kind=WP) :: temp,sal,tin,zice real (kind=WP) :: rhow, rhor, rho real (kind=WP) :: gats1, gats2, gas, gat @@ -189,12 +187,11 @@ subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) ! hemw= 4.02*14. ! oomw= -30. ! oofw= -2.5 - real(kind=WP), dimension(:,:,:), pointer :: UVnode + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UVnode=>dynamics%uvnode(:,:,:) !___________________________________________________________________________ do node=1,myDim_nod2D !+eDim_nod2D @@ -219,7 +216,7 @@ subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) ! if(vt1.eq.0.) vt1=0.001 !rt re = Hz_r(i,j,N)*ds/un !Reynolds number - vt1 = sqrt(UVnode(1,nzmin,node)*UVnode(1,nzmin,node)+UVnode(2,nzmin,node)*UVnode(2,nzmin,node)) + vt1 = sqrt(Unode(1,nzmin,node)*Unode(1,nzmin,node)+Unode(2,nzmin,node)*Unode(2,nzmin,node)) vt1 = max(vt1,0.001_WP) !vt1 = max(vt1,0.005) ! CW re = 10._WP/un !vt1*re (=velocity times length scale over kinematic viscosity) is the Reynolds number @@ -385,30 +382,25 @@ end subroutine cavity_heat_water_fluxes_2eq !_______________________________________________________________________________ ! Compute the momentum fluxes under ice cavity ! Moved to this separated routine by Qiang, 20.1.2012 -subroutine cavity_momentum_fluxes(dynamics, partit, mesh) +subroutine cavity_momentum_fluxes(partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use o_PARAM , only: density_0, C_d, WP - use o_ARRAYS, only: stress_surf, stress_node_surf + use o_ARRAYS, only: UV, Unode, stress_surf, stress_node_surf use i_ARRAYS, only: u_w, v_w implicit none !___________________________________________________________________________ - type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh integer :: elem, elnodes(3), nzmin, node real(kind=WP) :: aux - real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode - + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) - UVnode=>dynamics%uvnode(:,:,:) !___________________________________________________________________________ do elem=1,myDim_elem2D @@ -432,9 +424,9 @@ subroutine cavity_momentum_fluxes(dynamics, partit, mesh) ! momentum stress: ! need to check the sensitivity to the drag coefficient ! here I use the bottom stress coefficient, which is 3e-3, for this FO2 work. - aux=sqrt(UVnode(1,nzmin,node)**2+UVnode(2,nzmin,node)**2)*density_0*C_d - stress_node_surf(1,node)=-aux*UVnode(1,nzmin,node) - stress_node_surf(2,node)=-aux*UVnode(2,nzmin,node) + aux=sqrt(Unode(1,nzmin,node)**2+Unode(2,nzmin,node)**2)*density_0*C_d + stress_node_surf(1,node)=-aux*Unode(1,nzmin,node) + stress_node_surf(2,node)=-aux*Unode(2,nzmin,node) end do end subroutine cavity_momentum_fluxes ! diff --git a/src/fvom.F90 b/src/fvom.F90 index 82752d507..4ab323df9 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -5,7 +5,6 @@ module fesom_main_storage_module USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN USE o_ARRAYS USE o_PARAM USE i_PARAM @@ -52,17 +51,15 @@ module fesom_main_storage_module real(kind=real32) :: runtime_alltimesteps - type(t_mesh) mesh + type(t_mesh) mesh type(t_tracer) tracers - type(t_dyn) dynamics type(t_partit) partit character(LEN=256) :: dump_dir, dump_filename logical :: L_EXISTS - type(t_mesh) mesh_copy + type(t_mesh) mesh_copy type(t_tracer) tracers_copy - type(t_dyn) dynamics_copy character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt integer mpi_version_len @@ -127,7 +124,6 @@ subroutine fesom_init(fesom_total_nsteps) call setup_model(f%partit) ! Read Namelists, always before clock_init call clock_init(f%partit) ! read the clock file call get_run_steps(fesom_total_nsteps, f%partit) - if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call mesh_setup'//achar(27)//'[0m' call mesh_setup(f%partit, f%mesh) if (f%mype==0) write(*,*) 'FESOM mesh_setup... complete' @@ -137,21 +133,12 @@ subroutine fesom_init(fesom_total_nsteps) ! and additional arrays needed for ! fancy advection etc. !===================== - if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call check_mesh_consistency'//achar(27)//'[0m' call check_mesh_consistency(f%partit, f%mesh) if (f%mype==0) f%t2=MPI_Wtime() - if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call dynamics_init'//achar(27)//'[0m' - call dynamics_init(f%dynamics, f%partit, f%mesh) - - if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call tracer_init'//achar(27)//'[0m' call tracer_init(f%tracers, f%partit, f%mesh) ! allocate array of ocean tracers (derived type "t_tracer") - - if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call arrays_init'//achar(27)//'[0m' call arrays_init(f%tracers%num_tracers, f%partit, f%mesh) ! allocate other arrays (to be refactured same as tracers in the future) - - if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean_setup'//achar(27)//'[0m' - call ocean_setup(f%dynamics, f%tracers, f%partit, f%mesh) + call ocean_setup(f%tracers, f%partit, f%mesh) if (f%mype==0) then write(*,*) 'FESOM ocean_setup... complete' @@ -167,7 +154,7 @@ subroutine fesom_init(fesom_total_nsteps) if (f%mype==0) write(*,*) 'EVP scheme option=', whichEVP endif if (f%mype==0) f%t5=MPI_Wtime() - call compute_diagnostics(0, f%dynamics, f%tracers, f%partit, f%mesh) ! allocate arrays for diagnostic + call compute_diagnostics(0, f%tracers, f%partit, f%mesh) ! allocate arrays for diagnostic #if defined (__oasis) call cpl_oasis3mct_define_unstr(f%partit, f%mesh) if(f%mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv @@ -192,7 +179,7 @@ subroutine fesom_init(fesom_total_nsteps) ! if l_write is TRUE the restart will be forced ! if l_read the restart will be read ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, f%dynamics, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read + call restart(0, .false., r_restart, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read if (f%mype==0) f%t7=MPI_Wtime() ! store grid information into netcdf file if (.not. r_restart) call write_mesh_info(f%partit, f%mesh) @@ -242,19 +229,10 @@ subroutine fesom_init(fesom_total_nsteps) write (f%mype+300) f%tracers close (f%mype+300) - ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") - ! read (f%mype+300) f%dynamics_copy - ! close (f%mype+300) - - write (f%dump_filename, "(A9,I7.7)") "t_dynamics.", f%mype - open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") - write (f%mype+300) f%dynamics - close (f%mype+300) - ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") ! read (f%mype+300) f%tracers_copy ! close (f%mype+300) - + !call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) !stop ! @@ -314,15 +292,13 @@ subroutine fesom_runloop(current_nsteps) #endif call clock !___compute horizontal velocity on nodes (originaly on elements)________ - if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call compute_vel_nodes'//achar(27)//'[0m' - call compute_vel_nodes(f%dynamics, f%partit, f%mesh) - + call compute_vel_nodes(f%partit, f%mesh) !___model sea-ice step__________________________________________________ f%t1 = MPI_Wtime() if(use_ice) then !___compute fluxes from ocean to ice________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(f%dynamics, f%tracers, f%partit, f%mesh) + call ocean2ice(f%tracers, f%partit, f%mesh) !___compute update of atmospheric forcing____________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' @@ -341,28 +317,28 @@ subroutine fesom_runloop(current_nsteps) if (ice_update) call ice_timestep(n, f%partit, f%mesh) !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(f%dynamics, f%partit, f%mesh) ! momentum only + call oce_fluxes_mom(f%partit, f%mesh) ! momentum only call oce_fluxes(f%tracers, f%partit, f%mesh) end if - call before_oce_step(f%dynamics, f%tracers, f%partit, f%mesh) ! prepare the things if required + call before_oce_step(f%tracers, f%partit, f%mesh) ! prepare the things if required f%t2 = MPI_Wtime() - !___model ocean step____________________________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - call oce_timestep_ale(n, f%dynamics, f%tracers, f%partit, f%mesh) + + call oce_timestep_ale(n, f%tracers, f%partit, f%mesh) f%t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, f%dynamics, f%tracers, f%partit, f%mesh) + call compute_diagnostics(1, f%tracers, f%partit, f%mesh) f%t4 = MPI_Wtime() !___prepare output______________________________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, f%dynamics, f%tracers, f%partit, f%mesh) + call output (n, f%tracers, f%partit, f%mesh) f%t5 = MPI_Wtime() - call restart(n, .false., .false., f%dynamics, f%tracers, f%partit, f%mesh) + call restart(n, .false., .false., f%tracers, f%partit, f%mesh) f%t6 = MPI_Wtime() f%rtime_fullice = f%rtime_fullice + f%t2 - f%t1 diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 2793532d7..a48953ed2 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -7,7 +7,6 @@ !=============================================================================! program main - use fvom_module integer nsteps diff --git a/src/gen_modules_backscatter.F90 b/src/gen_modules_backscatter.F90 deleted file mode 100644 index f602c39c0..000000000 --- a/src/gen_modules_backscatter.F90 +++ /dev/null @@ -1,411 +0,0 @@ -module g_backscatter - - !___________________________________________________________________________ - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - - !___________________________________________________________________________ - USE o_ARRAYS, only: bvfreq, coriolis_node - - !___________________________________________________________________________ - USE o_param - USE g_CONFIG - USE g_comm_auto - USE g_support - USE g_rotate_grid - IMPLICIT NONE - - !___________________________________________________________________________ - ! allocate backscatter arrays - real(kind=WP), allocatable, dimension(:,:) :: v_back - real(kind=WP), allocatable, dimension(:,:) :: uke, uke_back, uke_dis, uke_dif - real(kind=WP), allocatable, dimension(:,:) :: uke_rhs, uke_rhs_old - real(kind=WP), allocatable, dimension(:,:) :: UV_dis_posdef_b2, UV_dis_posdef, UV_back_posdef - real(kind=WP), allocatable, dimension(:,:,:):: UV_back, UV_dis - real(kind=WP), allocatable, dimension(:,:,:):: UV_dis_tend, UV_total_tend, UV_back_tend - - contains - ! - ! - !___________________________________________________________________________ - ! allocate/initialise backscatter arrays - subroutine init_backscatter(partit, mesh) - implicit none - integer :: elem_size - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - - elem_size = myDim_elem2D + eDim_elem2D - allocate(v_back( nl-1, elem_size)) ! Backscatter viscosity - allocate(uke( nl-1, elem_size)) ! Unresolved kinetic energy for backscatter coefficient - allocate(uke_dis( nl-1, elem_size)) - allocate(uke_back( nl-1, elem_size)) - allocate(uke_dif( nl-1, elem_size)) - allocate(uke_rhs( nl-1, elem_size)) - allocate(uke_rhs_old( nl-1, elem_size)) - allocate(UV_dis( 2, nl-1, elem_size)) - allocate(UV_back( 2, nl-1, elem_size)) - allocate(UV_dis_tend( 2, nl-1, elem_size)) - allocate(UV_back_tend( 2, nl-1, elem_size)) - allocate(UV_total_tend(2, nl-1, elem_size)) - uke = 0.0_WP - v_back = 0.0_WP - uke_dis = 0.0_WP - uke_dif = 0.0_WP - uke_back = 0.0_WP - uke_rhs = 0.0_WP - uke_rhs_old = 0.0_WP - UV_dis = 0.0_WP - UV_dis_tend = 0.0_WP - UV_back = 0.0_WP - UV_back_tend = 0.0_WP - UV_total_tend = 0.0_WP - - end subroutine init_backscatter - - ! - ! - !_______________________________________________________________________________ - subroutine visc_filt_dbcksc(dynamics, partit, mesh) - IMPLICIT NONE - - real(kind=WP) :: u1, v1, le(2), len, crosslen, vi, uke1 - integer :: nz, ed, el(2) - real(kind=WP) , allocatable :: uke_d(:,:) - !!PS real(kind=WP) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:) - real(kind=WP) , allocatable :: uuu(:) - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs - real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - - UV => dynamics%uv(:,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - U_c => dynamics%work%u_c(:,:) - V_c => dynamics%work%v_c(:,:) - - ! An analog of harmonic viscosity operator. - ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area - ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. - ! The contribution from boundary edges is neglected (free slip). - ! Filter is applied twice. - ed=myDim_elem2D+eDim_elem2D - !!PS allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) - allocate(uke_d(nl-1,ed)) - allocate(uuu(ed)) - UV_back= 0.0_WP - UV_dis = 0.0_WP - uke_d = 0.0_WP - U_c = 0.0_WP - V_c = 0.0_WP - - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - DO nz=1,minval(nlevels(el))-1 - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO - END DO - - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - len=dt*len/30.0_WP - Do nz=1,nlevels(ed)-1 - ! vi has the sense of harmonic viscosity coefficient because of - ! the division by area in the end - ! ==== - ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) - ! ==== - vi=max(0.2_WP,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - call exchange_elem(U_c, partit) - call exchange_elem(V_c, partit) - - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - le=edge_dxdy(:,ed) - le(1)=le(1)*sum(elem_cos(el))*0.25_WP - len=sqrt(le(1)**2+le(2)**2)*r_earth - le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) - le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) - crosslen=sqrt(le(1)**2+le(2)**2) - - DO nz=1,minval(nlevels(el))-1 - vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen - !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed - !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed - !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where - ! Visc is small and decoupling might happen - !Backscatter contribution - u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi - v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi - - !UKE diffusion - vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen - uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi - - UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) - UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) - UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) - UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) - - !Correct scaling for the diffusion? - uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) - uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) - - !Biharmonic contribution - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - - UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) - UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) - UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) - UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) - - END DO - END DO - call exchange_elem(UV_back, partit) - - DO nz=1, nl-1 - uuu=0.0_WP - uuu=UV_back(1,nz,:) - call smooth_elem(uuu,smooth_back_tend, partit, mesh) - UV_back(1,nz,:)=uuu - uuu=0.0_WP - uuu=UV_back(2,nz,:) - call smooth_elem(uuu,smooth_back_tend, partit, mesh) - UV_back(2,nz,:)=uuu - END DO - - DO ed=1, myDim_elem2D - DO nz=1,nlevels(ed)-1 - UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) - UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) - END DO - END DO - - UV_dis_tend=UV_dis!+UV_back - UV_total_tend=UV_dis+UV_back - UV_back_tend=UV_back - uke_dif=uke_d - - call uke_update(dynamics, partit, mesh) - - !!PS deallocate(UV_dis,UV_back) - deallocate(uke_d) - deallocate(uuu) - end subroutine visc_filt_dbcksc - - ! - ! - !_______________________________________________________________________________ - subroutine backscatter_coef(partit, mesh) - IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - integer :: elem, nz -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - - !Potentially add the Rossby number scaling to the script... - !check if sign is right! Different in the Jansen paper - !Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) - v_back=0.0_WP - DO elem=1, myDim_elem2D - DO nz=1,nlevels(elem)-1 - !v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) - !v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct - v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct - !Scaling by sqrt(2*elem_area) or sqrt(elem_area)? - END DO - END DO - call exchange_elem(v_back, partit) - end subroutine backscatter_coef - ! - ! - !_______________________________________________________________________________ - subroutine uke_update(dynamics, partit, mesh) - IMPLICIT NONE - - !I had to change uke(:) to uke(:,:) to make output and restart work!! - !Why is it necessary to implement the length of the array? It doesn't work without! - !integer, intent(in) :: t_levels - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - real(kind=WP) :: hall, h1_eta, hnz, vol - integer :: elnodes(3), nz, ed, edi, node, j, elem, q - real(kind=WP), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) - integer :: kk, nzmax, el - real(kind=WP) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso - real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? - real(kind=WP), dimension(:,:,:), pointer :: UV -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - - !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered - !rossby_num=2 - - ed=myDim_elem2D+eDim_elem2D - allocate(uuu(ed)) - - uke_back=0.0_WP - uke_dis=0.0_WP - DO ed=1, myDim_elem2D - DO nz=1, nlevels(ed)-1 - uke_dis(nz,ed) =(UV(1,nz,ed)*UV_dis_tend( 1,nz,ed)+UV(2,nz,ed)*UV_dis_tend( 2,nz,ed)) - uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) - END DO - END DO - - DO nz=1,nl-1 - uuu=0.0_8 - uuu=uke_back(nz,:) - call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? - uke_back(nz,:)=uuu - END DO - - !Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice - !Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic - !uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) - ed=myDim_elem2D+eDim_elem2D - allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) - allocate(work_uv(myDim_nod2D+eDim_nod2D)) - allocate(rosb_array(nl-1,ed)) - call exchange_elem(UV, partit) - rosb_array=0._WP - DO nz=1, nl-1 - work_uv=0._WP - DO node=1, myDim_nod2D - vol=0._WP - U_work(nz,node)=0._WP - V_work(nz,node)=0._WP - DO j=1, nod_in_elem2D_num(node) - elem=nod_in_elem2D(j, node) - U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) - V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) - vol=vol+elem_area(elem) - END DO - U_work(nz,node)=U_work(nz,node)/vol - V_work(nz,node)=U_work(nz,node)/vol - END DO - work_uv=U_work(nz,:) - call exchange_nod(work_uv, partit) - U_work(nz,:)=work_uv - work_uv=V_work(nz,:) - call exchange_nod(work_uv, partit) - V_work(nz,:)=work_uv - END DO - - DO el=1,myDim_elem2D - DO nz=1, nlevels(el)-1 - rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& - sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& - (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& - sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) - ! hall=hall+hnz - END DO - ! rosb_array(el)=rosb_array(el)/hall - END DO - - DO ed=1, myDim_elem2D - scaling=1._WP - IF(uke_scaling) then - reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) - rosb=0._wp - elnodes=elem2D_nodes(:, ed) - DO kk=1,3 - c1=0._wp - nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) - !Vertical average; same scaling in the vertical - DO nz=1, nzmax-1 - c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. - END DO - c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min - !Cutoff K_GM depending on (Resolution/Rossby radius) ratio - rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) - END DO - rosb=rosb/3._WP - scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) - END IF - - DO nz=1, nlevels(ed)-1 - elnodes=elem2D_nodes(:,ed) - - !Taking out that one place where it is always weird (Pacific Southern Ocean) - !Should not really be used later on, once we fix the issue with the 1/4 degree grid - if(.not. (TRIM(which_toy)=="soufflet")) then - call elem_center(ed, ex, ey) - !a1=-104.*rad - !a2=-49.*rad - call g2r(-104.*rad, -49.*rad, a1, a2) - dist_reg(1)=ex-a1 - dist_reg(2)=ey-a2 - call trim_cyclic(dist_reg(1)) - dist_reg(1)=dist_reg(1)*elem_cos(ed) - dist_reg=dist_reg*r_earth - len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) - - !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 - rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) - !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) - uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._WP/(1._WP+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) - else - rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) - !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) - uke_dis(nz,ed)=scaling*1._WP/(1._WP+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) - end if - END DO - END DO - - deallocate(U_work, V_work) - deallocate(rosb_array) - deallocate(work_uv) - - call exchange_elem(uke_dis, partit) - DO nz=1, nl-1 - uuu=uke_dis(nz,:) - call smooth_elem(uuu,smooth_dis, partit, mesh) - uke_dis(nz,:)=uuu - END DO - DO ed=1, myDim_elem2D - DO nz=1,nlevels(ed)-1 - uke_rhs_old(nz,ed)=uke_rhs(nz,ed) - uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) - uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) - END DO - END DO - - call exchange_elem(uke, partit) - deallocate(uuu) - - end subroutine uke_update -end module g_backscatter - diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 33c587016..81c35cfdd 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -26,7 +26,6 @@ module g_cvmix_kpp USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - use MOD_DYN use o_arrays use g_comm_auto use i_arrays @@ -348,11 +347,10 @@ end subroutine init_cvmix_kpp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) + subroutine calc_cvmix_kpp(tracers, partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers - type(t_dyn) , intent(in), target :: dynamics integer :: node, elem, nz, nln, nun, elnodes(3), aux_nz real(kind=WP) :: vshear2, dz2, aux, aux_wm(mesh%nl), aux_ws(mesh%nl) real(kind=WP) :: aux_coeff, sigma, stable @@ -363,15 +361,12 @@ subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 real(kind=WP) :: sfc_rhopot, sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2 real(kind=WP), dimension(:,:), pointer :: temp, salt - real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) - UVnode=>dynamics%uvnode(:,:,:) - !_______________________________________________________________________ kpp_Av = 0.0_WP kpp_Kv = 0.0_WP @@ -407,15 +402,15 @@ subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) !___________________________________________________________ ! calculate squared velocity shear referenced to the surface ! --> cvmix wants to have it with respect to the midlevel rather than full levels - !!PS kpp_dvsurf2(nz) = ((UVnode(1,nz-1,node)+UVnode(1,nz,node))*0.5 - UVnode( 1,1,node) )**2 + & - !!PS ((UVnode(2,nz-1,node)+UVnode(2,nz,node))*0.5 - UVnode( 2,1,node) )**2 - kpp_dvsurf2(nz) = ((UVnode(1,nz-1,node)+UVnode(1,nz,node))*0.5 - UVnode( 1,nun,node) )**2 + & - ((UVnode(2,nz-1,node)+UVnode(2,nz,node))*0.5 - UVnode( 2,nun,node) )**2 + !!PS kpp_dvsurf2(nz) = ((Unode(1,nz-1,node)+Unode(1,nz,node))*0.5 - Unode( 1,1,node) )**2 + & + !!PS ((Unode(2,nz-1,node)+Unode(2,nz,node))*0.5 - Unode( 2,1,node) )**2 + kpp_dvsurf2(nz) = ((Unode(1,nz-1,node)+Unode(1,nz,node))*0.5 - Unode( 1,nun,node) )**2 + & + ((Unode(2,nz-1,node)+Unode(2,nz,node))*0.5 - Unode( 2,nun,node) )**2 !___________________________________________________________ ! calculate shear Richardson number Ri = N^2/(du/dz)^2 dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 + & - (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 + vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 + & + (Unode(2,nz-1,node)-Unode(2,nz,node))**2 vshear2 = vshear2/dz2 kpp_shearRi(nz) = max(bvfreq(nz,node),0.0_WP)/(vshear2+kpp_epsln) @@ -462,8 +457,8 @@ subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) htot = htot+delh sfc_temp = sfc_temp + temp(nztmp,node)*delh sfc_salt = sfc_salt + salt(nztmp,node)*delh - sfc_u = sfc_u + UVnode(1,nztmp,node) *delh - sfc_v = sfc_v + UVnode(2,nztmp,node) *delh + sfc_u = sfc_u + Unode(1,nztmp,node) *delh + sfc_v = sfc_v + Unode(2,nztmp,node) *delh end do sfc_temp = sfc_temp/htot sfc_salt = sfc_salt/htot @@ -473,8 +468,8 @@ subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) !___________________________________________________________ ! calculate vertical shear between present layer and surface ! averaged sfc_u and sfc_v - kpp_dvsurf2(nz) = (UVnode(1,nz,node)-sfc_u)**2 + & - (UVnode(2,nz,node)-sfc_v)**2 + kpp_dvsurf2(nz) = (Unode(1,nz,node)-sfc_u)**2 + & + (Unode(2,nz,node)-sfc_v)**2 !___________________________________________________________ ! calculate buoyancy difference between the surface averaged @@ -497,8 +492,8 @@ subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) ! calculate shear Richardson number Ri = N^2/(du/dz)^2 for ! mixing parameterisation below ocean boundary layer dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 + & - (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 + vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 + & + (Unode(2,nz-1,node)-Unode(2,nz,node))**2 vshear2 = vshear2/dz2 kpp_shearRi(nz) = max(bvfreq(nz,node),0.0_WP)/(vshear2+kpp_epsln) end do ! --> do nz=1, nln diff --git a/src/gen_modules_cvmix_pp.F90 b/src/gen_modules_cvmix_pp.F90 index 58e9f2104..39dfa5673 100644 --- a/src/gen_modules_cvmix_pp.F90 +++ b/src/gen_modules_cvmix_pp.F90 @@ -27,7 +27,6 @@ module g_cvmix_pp use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use o_arrays use g_comm_auto use i_arrays @@ -67,6 +66,7 @@ module g_cvmix_pp ! allocate and initialize CVMIX PP variables --> call initialisation ! routine from cvmix library subroutine init_cvmix_pp(partit, mesh) + use MOD_MESH implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -162,21 +162,17 @@ end subroutine init_cvmix_pp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_pp(dynamics, partit, mesh) + subroutine calc_cvmix_pp(partit, mesh) use MOD_MESH - implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics integer :: node, elem, nz, nln, nun, elnodes(3), windnl=2, node_size real(kind=WP) :: vshear2, dz2, Kvb - real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UVnode=>dynamics%uvnode(:,:,:) node_size = myDim_nod2D !_______________________________________________________________________ do node = 1,node_size @@ -190,8 +186,8 @@ subroutine calc_cvmix_pp(dynamics, partit, mesh) !!PS do nz=2,nln do nz=nun+1,nln dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 +& - (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 + vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 +& + (Unode(2,nz-1,node)-Unode(2,nz,node))**2 vshear2 = vshear2/dz2 ! WIKIPEDIA: The Richardson number is always ! considered positive. A negative value of N² (i.e. complex N) diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index aa1deae21..c286cf5f4 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -28,7 +28,6 @@ module g_cvmix_tke use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use o_arrays use g_comm_auto implicit none @@ -250,23 +249,20 @@ end subroutine init_cvmix_tke ! !=========================================================================== ! calculate TKE vertical mixing coefficients from CVMIX library - subroutine calc_cvmix_tke(dynamics, partit, mesh) + subroutine calc_cvmix_tke(partit, mesh) implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics integer :: node, elem, nelem, nz, nln, nun, elnodes(3), node_size real(kind=WP) :: tvol real(kind=WP) :: dz_trr(mesh%nl), bvfreq2(mesh%nl), vshear2(mesh%nl) real(kind=WP) :: tke_Av_old(mesh%nl), tke_Kv_old(mesh%nl), tke_old(mesh%nl) - real(kind=WP), dimension(:,:,:), pointer :: UVnode - + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UVnode=>dynamics%uvnode(:,:,:) - + node_size = myDim_nod2D !_______________________________________________________________________ ! calculate all neccessary forcing for TKE @@ -301,8 +297,8 @@ subroutine calc_cvmix_tke(dynamics, partit, mesh) ! calculate for TKE 3D vertical velocity shear vshear2=0.0_WP do nz=nun+1,nln - vshear2(nz)=(( UVnode(1, nz-1, node) - UVnode(1, nz, node))**2 + & - ( UVnode(2, nz-1, node) - UVnode(2, nz, node))**2)/ & + vshear2(nz)=(( Unode(1, nz-1, node) - Unode(1, nz, node))**2 + & + ( Unode(2, nz-1, node) - Unode(2, nz, node))**2)/ & ((Z_3d_n(nz-1,node)-Z_3d_n(nz,node))**2) end do @@ -403,4 +399,4 @@ subroutine calc_cvmix_tke(dynamics, partit, mesh) end do end do end subroutine calc_cvmix_tke -end module g_cvmix_tke \ No newline at end of file +end module g_cvmix_tke diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 036afca1d..231345f2d 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -4,8 +4,7 @@ module diagnostics use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use MOD_TRACER - use MOD_DYN + use mod_tracer use g_clock use g_comm_auto use o_ARRAYS @@ -17,15 +16,11 @@ module diagnostics implicit none private - public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, & - ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, ldiag_vorticity, & - compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & - u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, & - utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, & - v_surf, u_bott, v_bott, std_dens_min, std_dens_max, std_dens_N, std_dens, & - std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, & - dens_flux_e, vorticity, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & - compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd + public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, & + compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & + u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, v_surf, u_bott, v_bott, & + std_dens_min, std_dens_max, std_dens_N, std_dens, std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, dens_flux_e, & + compute_diag_dvd_2ndmoment_klingbeil_etal_2014, compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd ! Arrays used for diagnostics, some shall be accessible to the I/O ! 1. solver diagnostics: A*x=rhs? ! A=ssh_stiff, x=d_eta, rhs=ssh_rhs; rhs_diag=A*x; @@ -37,7 +32,6 @@ module diagnostics real(kind=WP), save, allocatable, target :: dudx(:,:), dudy(:,:), dvdx(:,:), dvdy(:,:), dudz(:,:), dvdz(:,:), av_dudz(:,:), av_dvdz(:,:), av_dudz_sq(:,:) real(kind=WP), save, allocatable, target :: utau_surf(:), utau_bott(:) real(kind=WP), save, allocatable, target :: stress_bott(:,:), u_bott(:), v_bott(:), u_surf(:), v_surf(:) - real(kind=WP), save, allocatable, target :: vorticity(:,:) ! defining a set of standard density bins which will be used for computing densMOC ! integer, parameter :: std_dens_N = 100 @@ -74,29 +68,24 @@ module diagnostics logical :: ldiag_forc =.false. - logical :: ldiag_vorticity =.false. - namelist /diag_list/ ldiag_solver, lcurt_stress_surf, ldiag_curl_vel3, ldiag_energy, & - ldiag_dMOC, ldiag_DVD, ldiag_salt3D, ldiag_forc, ldiag_vorticity + ldiag_dMOC, ldiag_DVD, ldiag_salt3D, ldiag_forc contains ! ============================================================== !rhs_diag=ssh_rhs? -subroutine diag_solver(mode, dynamics, partit, mesh) +subroutine diag_solver(mode, partit, mesh) implicit none - type(t_mesh) , intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics integer, intent(in) :: mode integer :: n, is, ie logical, save :: firstcall=.true. - real(kind=WP), dimension(:) , pointer :: d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - d_eta =>dynamics%d_eta(:) !===================== if (firstcall) then !allocate the stuff at the first call @@ -160,21 +149,19 @@ subroutine diag_curl_stress_surf(mode, partit, mesh) end subroutine diag_curl_stress_surf ! ============================================================== !3D curl(velocity) -subroutine diag_curl_vel3(mode, dynamics, partit, mesh) +subroutine diag_curl_vel3(mode, partit, mesh) implicit none - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh integer, intent(in) :: mode logical, save :: firstcall=.true. integer :: enodes(2), el(2), ed, n, nz, nl1, nl2, nl12, nu1, nu2, nu12 real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - real(kind=WP), dimension(:,:,:), pointer :: UV + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) +#include "associate_mesh_ass.h" !===================== if (firstcall) then !allocate the stuff at the first call @@ -242,27 +229,21 @@ subroutine diag_curl_vel3(mode, dynamics, partit, mesh) end subroutine diag_curl_vel3 ! ============================================================== !energy budget -subroutine diag_energy(mode, dynamics, partit, mesh) +subroutine diag_energy(mode, partit, mesh) implicit none - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh integer, intent(in) :: mode logical, save :: firstcall=.true. integer :: n, nz, k, i, elem, nzmax, nzmin, elnodes(3) integer :: iup, ilo real(kind=WP) :: ux, vx, uy, vy, tvol, rval(2) real(kind=WP) :: geo_grad_x(3), geo_grad_y(3), geo_u(3), geo_v(3) - real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode - real(kind=WP), dimension(:,:), pointer :: Wvel + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UVnode=> dynamics%uvnode(:,:,:) - Wvel => dynamics%w(:,:) - +#include "associate_mesh_ass.h" !===================== if (firstcall) then !allocate the stuff at the first call allocate(wrhof(nl, myDim_nod2D), rhof(nl, myDim_nod2D)) @@ -302,9 +283,9 @@ subroutine diag_energy(mode, dynamics, partit, mesh) if (mode==0) return end if - u_x_u=UVnode(1,1:nl-1,1:myDim_nod2D)*UVnode(1,1:nl-1,1:myDim_nod2D) - u_x_v=UVnode(1,1:nl-1,1:myDim_nod2D)*UVnode(2,1:nl-1,1:myDim_nod2D) - v_x_v=UVnode(2,1:nl-1,1:myDim_nod2D)*UVnode(2,1:nl-1,1:myDim_nod2D) + u_x_u=Unode(1,1:nl-1,1:myDim_nod2D)*Unode(1,1:nl-1,1:myDim_nod2D) + u_x_v=Unode(1,1:nl-1,1:myDim_nod2D)*Unode(2,1:nl-1,1:myDim_nod2D) + v_x_v=Unode(2,1:nl-1,1:myDim_nod2D)*Unode(2,1:nl-1,1:myDim_nod2D) ! this loop might be very expensive DO n=1, myDim_elem2D nzmax = nlevels(n) @@ -407,10 +388,10 @@ subroutine diag_energy(mode, dynamics, partit, mesh) if (nlevels(elem)-1 < nz) cycle elnodes=elem2D_nodes(:, elem) tvol=tvol+elem_area(elem) - ux=ux+sum(gradient_sca(1:3,elem)*UVnode(1,nz,elnodes))*elem_area(elem) !accumulate tensor of velocity derivatives - vx=vx+sum(gradient_sca(1:3,elem)*UVnode(2,nz,elnodes))*elem_area(elem) - uy=uy+sum(gradient_sca(4:6,elem)*UVnode(1,nz,elnodes))*elem_area(elem) - vy=vy+sum(gradient_sca(4:6,elem)*UVnode(2,nz,elnodes))*elem_area(elem) + ux=ux+sum(gradient_sca(1:3,elem)*Unode(1,nz,elnodes))*elem_area(elem) !accumulate tensor of velocity derivatives + vx=vx+sum(gradient_sca(1:3,elem)*Unode(2,nz,elnodes))*elem_area(elem) + uy=uy+sum(gradient_sca(4:6,elem)*Unode(1,nz,elnodes))*elem_area(elem) + vy=vy+sum(gradient_sca(4:6,elem)*Unode(2,nz,elnodes))*elem_area(elem) END DO dudx(nz,n)=ux/tvol!/area(nz, n)/3. dvdx(nz,n)=vx/tvol @@ -420,13 +401,12 @@ subroutine diag_energy(mode, dynamics, partit, mesh) END DO end subroutine diag_energy ! ============================================================== -subroutine diag_densMOC(mode, dynamics, tracers, partit, mesh) +subroutine diag_densMOC(mode, tracers, partit, mesh) implicit none integer, intent(in) :: mode - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers integer :: nz, snz, elem, nzmax, nzmin, elnodes(3), is, ie, pos integer :: e, edge, enodes(2), eelems(2) real(kind=WP) :: div, deltaX, deltaY, locz @@ -437,15 +417,13 @@ subroutine diag_densMOC(mode, dynamics, tracers, partit, mesh) real(kind=WP), save, allocatable :: std_dens_w(:,:), std_dens_VOL1(:,:), std_dens_VOL2(:,:) logical, save :: firstcall_s=.true., firstcall_e=.true. real(kind=WP), dimension(:,:), pointer :: temp, salt - real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - temp => tracers%data(1)%values(:,:) - salt => tracers%data(2)%values(:,:) - fer_UV => dynamics%fer_uv(:,:,:) +#include "associate_mesh_ass.h" + + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) if (firstcall_s) then !allocate the stuff at the first call allocate(std_dens_UVDZ(2,std_dens_N, myDim_elem2D)) @@ -677,127 +655,22 @@ subroutine diag_densMOC(mode, dynamics, tracers, partit, mesh) std_dens_VOL1=std_dens_VOL2 firstcall_e=.false. end subroutine diag_densMOC -! -! -!_______________________________________________________________________________ -subroutine relative_vorticity(mode, dynamics, partit, mesh) - IMPLICIT NONE - integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 - real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - integer, intent(in) :: mode - logical, save :: firstcall=.true. - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - - !___________________________________________________________________________ - if (firstcall) then !allocate the stuff at the first call - allocate(vorticity(nl-1, myDim_nod2D+eDim_nod2D)) - firstcall=.false. - if (mode==0) return - end if - !!PS DO n=1,myDim_nod2D - !!PS nl1 = nlevels_nod2D(n)-1 - !!PS ul1 = ulevels_nod2D(n) - !!PS vorticity(ul1:nl1,n)=0.0_WP - !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 - !!PS !!PS vorticity(nz,n)=0.0_WP - !!PS !!PS END DO - !!PS END DO - vorticity = 0.0_WP - DO edge=1,myDim_edge2D - !! edge=myList_edge2D(m) - enodes=edges(:,edge) - el=edge_tri(:,edge) - nl1=nlevels(el(1))-1 - ul1=ulevels(el(1)) - deltaX1=edge_cross_dxdy(1,edge) - deltaY1=edge_cross_dxdy(2,edge) - nl2=0 - ul2=0 - if(el(2)>0) then - deltaX2=edge_cross_dxdy(3,edge) - deltaY2=edge_cross_dxdy(4,edge) - nl2=nlevels(el(2))-1 - ul2=ulevels(el(2)) - end if - nl12 = min(nl1,nl2) - ul12 = max(ul1,ul2) - - DO nz=ul1,ul12-1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - if (ul2>0) then - DO nz=ul2,ul12-1 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - endif - !!PS DO nz=1,min(nl1,nl2) - DO nz=ul12,nl12 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & - deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl1 - DO nz=nl12+1,nl1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl2 - DO nz=nl12+1,nl2 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - END DO - - ! vorticity = vorticity*area at this stage - ! It is correct only on myDim nodes - DO n=1,myDim_nod2D - !! n=myList_nod2D(m) - ul1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - !!PS DO nz=1,nlevels_nod2D(n)-1 - DO nz=ul1,nl1-1 - vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) - END DO - END DO - - call exchange_nod(vorticity, partit) - -! Now it the relative vorticity known on neighbors too -end subroutine relative_vorticity - - - ! ============================================================== -subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) + +subroutine compute_diagnostics(mode, tracers, partit, mesh) implicit none - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(in), target :: tracers integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) real(kind=WP) :: val !1. solver diagnostic - if (ldiag_solver) call diag_solver(mode, dynamics, partit, mesh) + if (ldiag_solver) call diag_solver(mode, partit, mesh) !2. compute curl(stress_surf) if (lcurt_stress_surf) call diag_curl_stress_surf(mode, partit, mesh) !3. compute curl(velocity) - if (ldiag_curl_vel3) call diag_curl_vel3(mode, dynamics, partit, mesh) + if (ldiag_curl_vel3) call diag_curl_vel3(mode, partit, mesh) !4. compute energy budget - if (ldiag_energy) call diag_energy(mode, dynamics, partit, mesh) + if (ldiag_energy) call diag_energy(mode, partit, mesh) !5. print integrated temperature if (ldiag_salt3d) then if (mod(mstep,logfile_outfreq)==0) then @@ -808,10 +681,7 @@ subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) end if end if !6. MOC in density coordinate - if (ldiag_dMOC) call diag_densMOC(mode, dynamics, tracers, partit, mesh) - - ! compute relative vorticity - if (ldiag_vorticity) call relative_vorticity(mode, dynamics, partit, mesh) + if (ldiag_dMOC) call diag_densMOC(mode, tracers, partit, mesh) end subroutine compute_diagnostics diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index a0aa70ceb..30dedc505 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -1,16 +1,13 @@ module ocean2ice_interface interface - subroutine ocean2ice(dynamics, tracers, partit, mesh) + subroutine ocean2ice(tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - use MOD_DYN - type(t_dyn) , intent(in) , target :: dynamics - type(t_tracer), intent(in) , target :: tracers type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module @@ -23,8 +20,8 @@ subroutine oce_fluxes(tracers, partit, mesh) USE MOD_PARSUP use mod_tracer type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - type(t_tracer), intent(in) , target :: tracers + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers end subroutine end interface end module @@ -32,7 +29,7 @@ subroutine oce_fluxes(tracers, partit, mesh) ! ! !_______________________________________________________________________________ -subroutine oce_fluxes_mom(dynamics, partit, mesh) +subroutine oce_fluxes_mom(partit, mesh) ! transmits the relevant fields from the ice to the ocean model ! use o_PARAM @@ -40,7 +37,6 @@ subroutine oce_fluxes_mom(dynamics, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use i_ARRAYS use i_PARAM USE g_CONFIG @@ -54,9 +50,8 @@ subroutine oce_fluxes_mom(dynamics, partit, mesh) integer :: n, elem, elnodes(3),n1 real(kind=WP) :: aux, aux1 - type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -110,43 +105,39 @@ subroutine oce_fluxes_mom(dynamics, partit, mesh) END DO !___________________________________________________________________________ - if (use_cavity) call cavity_momentum_fluxes(dynamics, partit, mesh) + if (use_cavity) call cavity_momentum_fluxes(partit, mesh) end subroutine oce_fluxes_mom ! ! !_______________________________________________________________________________ -subroutine ocean2ice(dynamics, tracers, partit, mesh) +subroutine ocean2ice(tracers, partit, mesh) ! transmits the relevant fields from the ocean to the ice model use o_PARAM + use o_ARRAYS use i_ARRAYS use MOD_MESH - use MOD_DYN use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP USE g_CONFIG use g_comm_auto implicit none - type(t_dyn) , intent(in) , target :: dynamics - type(t_tracer), intent(in) , target :: tracers + type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers integer :: n, elem, k real(kind=WP) :: uw, vw, vol - real(kind=WP), dimension(:,:) , pointer :: temp, salt - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - temp => tracers%data(1)%values(:,:) - salt => tracers%data(2)%values(:,:) - UV => dynamics%uv(:,:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) ! the arrays in the ice model are renamed diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 52b83d251..a4bbae11d 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -2,11 +2,10 @@ MODULE io_BLOWUP use g_config use g_clock use g_comm_auto - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_TRACER - USE MOD_DYN + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER use o_arrays use i_arrays implicit none @@ -65,13 +64,12 @@ MODULE io_BLOWUP !_______________________________________________________________________________ ! ini_ocean_io initializes bid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! - subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) + subroutine ini_blowup_io(year, tracers, partit, mesh) implicit none integer, intent(in) :: year - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers integer :: ncid, j integer :: varid character(500) :: longname @@ -100,22 +98,22 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(bid, 'eta_n' , (/nod2D/) , 'sea surface elevation', 'm', dynamics%eta_n); - call def_variable(bid, 'd_eta' , (/nod2D/) , 'change in ssh from solver', 'm', dynamics%d_eta); + call def_variable(bid, 'eta_n' , (/nod2D/) , 'sea surface elevation', 'm', eta_n); + call def_variable(bid, 'd_eta' , (/nod2D/) , 'change in ssh from solver', 'm', d_eta); !___ALE related fields______________________________________________________ call def_variable(bid, 'hbar' , (/nod2D/) , 'ALE surface elevation hbar_n+0.5', 'm', hbar); !!PS call def_variable(bid, 'hbar_old' , (/nod2D/) , 'ALE surface elevation hbar_n-0.5', 'm', hbar_old); - call def_variable(bid, 'ssh_rhs' , (/nod2D/) , 'RHS for the elevation', '?', dynamics%ssh_rhs); - call def_variable(bid, 'ssh_rhs_old', (/nod2D/) , 'RHS for the elevation', '?', dynamics%ssh_rhs_old); + call def_variable(bid, 'ssh_rhs' , (/nod2D/) , 'RHS for the elevation', '?', ssh_rhs); + call def_variable(bid, 'ssh_rhs_old', (/nod2D/) , 'RHS for the elevation', '?', ssh_rhs_old); !___Define the netCDF variables for 3D fields_______________________________ call def_variable(bid, 'hnode' , (/nl-1, nod2D/) , 'ALE stuff', '?', hnode); call def_variable(bid, 'helem' , (/nl-1, elem2D/) , 'Element layer thickness', 'm/s', helem(:,:)); - call def_variable(bid, 'u' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); - call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); - call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', dynamics%uv_rhs(1,:,:)); - call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv_rhs(2,:,:)); - call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:)); - call def_variable(bid, 'vrhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:)); + call def_variable(bid, 'u' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV(1,:,:)); + call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV(2,:,:)); + call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV_rhs(1,:,:)); + call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV_rhs(2,:,:)); + call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); + call def_variable(bid, 'vrhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); call def_variable(bid, 'zbar_n_bot' , (/nod2D/) , 'node bottom depth', 'm', zbar_n_bot); call def_variable(bid, 'zbar_e_bot' , (/elem2d/) , 'elem bottom depth', 'm', zbar_e_bot); call def_variable(bid, 'bottom_node_thickness' , (/nod2D/) , 'node bottom thickness', 'm', bottom_node_thickness); @@ -143,10 +141,10 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) !!PS longname=trim(longname)//', Adams–Bashforth' !!PS call def_variable(bid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%valuesAB(:,:)(:,:)); end do - call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w); - call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_e); - call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_i); - call def_variable(bid, 'cfl_z' , (/nl-1, nod2D/) , 'vertical CFL criteria', '', dynamics%cfl_z); + call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel); + call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel_e); + call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel_i); + call def_variable(bid, 'cfl_z' , (/nl-1, nod2D/) , 'vertical CFL criteria', '', CFL_z); !_____________________________________________________________________________ ! write snapshot ice variables to blowup file @@ -175,16 +173,15 @@ end subroutine ini_blowup_io ! ! !_______________________________________________________________________________ - subroutine blowup(istep, dynamics, tracers, partit, mesh) + subroutine blowup(istep, tracers, partit, mesh) implicit none - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers integer :: istep ctime=timeold+(dayold-1.)*86400 - call ini_blowup_io(yearnew, dynamics, tracers, partit, mesh) + call ini_blowup_io(yearnew, tracers, partit, mesh) if(partit%mype==0) write(*,*)'Do output (netCDF, blowup) ...' if(partit%mype==0) write(*,*)' --> call assoc_ids(bid)' call assoc_ids(bid, partit) ; call was_error(bid, partit) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index f32b52e43..75395e0a6 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -93,12 +93,11 @@ subroutine destructor(this) end subroutine -subroutine ini_mean_io(dynamics, tracers, partit, mesh) +subroutine ini_mean_io(tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use g_cvmix_tke use g_cvmix_idemix use g_cvmix_kpp @@ -112,10 +111,9 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) integer,dimension(15) :: sel_forcvar=0 character(len=10) :: id_string - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in), target :: tracers namelist /nml_listsize/ io_listsize namelist /nml_list / io_list @@ -155,14 +153,14 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('sss ') call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tracers%data(2)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh ') - call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', dynamics%eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('vve_5 ') - call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', dynamics%w(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', Wvel(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh_rhs ') - call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', dynamics%ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh_rhs_old ') - call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', dynamics%ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) !___________________________________________________________________________________________________________________________________ ! output sea ice @@ -317,50 +315,50 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('Kv ') call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', dynamics%uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('v ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics%uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('w ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', dynamics%w(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Av ') call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u_dis_tend') - if(dynamics%opt_visc==8) then + if(visc_option==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_dis_tend', 'horizontal velocity viscosity tendency', 'm/s', UV_dis_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_dis_tend') - if(dynamics%opt_visc==8) then + if(visc_option==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_dis_tend', 'meridional velocity viscosity tendency', 'm/s', UV_dis_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('u_back_tend') - if(dynamics%opt_visc==8) then + if(visc_option==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_back_tend', 'horizontal velocity backscatter tendency', 'm2/s2', UV_back_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_back_tend') - if(dynamics%opt_visc==8) then + if(visc_option==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_back_tend', 'meridional velocity backscatter tendency', 'm2/s2', UV_back_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('u_total_tend') - if(dynamics%opt_visc==8) then + if(visc_option==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_total_tend', 'horizontal velocity total viscosity tendency', 'm/s', UV_total_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_total_tend') - if(dynamics%opt_visc==8) then + if(visc_option==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_total_tend', 'meridional velocity total viscosity tendency', 'm/s', UV_total_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ ! output Ferrari/GM parameterisation CASE ('bolus_u ') if (Fer_GM) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_u', 'GM bolus velocity U','m/s', dynamics%fer_uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_u', 'GM bolus velocity U','m/s', fer_uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('bolus_v ') if (Fer_GM) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_v', 'GM bolus velocity V','m/s', dynamics%fer_uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_v', 'GM bolus velocity V','m/s', fer_uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('bolus_w ') if (Fer_GM) then - call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'bolus_w', 'GM bolus velocity W','m/s', dynamics%fer_w(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'bolus_w', 'GM bolus velocity W','m/s', fer_Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('fer_K ') if (Fer_GM) then @@ -428,12 +426,12 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dvdz', 'int(Av * dv/dz)', 'm3/s2', av_dvdz(:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz_sq', 'Av * (du/dz)^2', 'm^2/s^3', av_dudz_sq(:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'Vertical mixing A', 'm2/s', Av(:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', dynamics%uvnode(1,:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', dynamics%uvnode(2,:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', Unode(1,:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', Unode(2,:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', dynamics%w(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', uv(1,:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', uv(2,:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', Wvel(:,:), 1, 'm', i_real8, partit, mesh) call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) call def_stream(elem2D, myDim_elem2D, 'utau_bott', '(u, tau) at the bottom', 'N/(m s)', utau_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) @@ -582,12 +580,11 @@ function mesh_dimname_from_dimsize(size, partit, mesh) result(name) ! !-------------------------------------------------------------------------------------------- ! -subroutine create_new_file(entry, dynamics, partit, mesh) +subroutine create_new_file(entry, partit, mesh) use g_clock use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use fesom_version_info_module use g_config use i_PARAM @@ -597,7 +594,6 @@ subroutine create_new_file(entry, dynamics, partit, mesh) character(2000) :: att_text type(t_mesh) , intent(in) :: mesh type(t_partit), intent(in) :: partit - type(t_dyn) , intent(in) :: dynamics type(Meandata), intent(inout) :: entry character(len=*), parameter :: global_attributes_prefix = "FESOM_" @@ -641,8 +637,7 @@ subroutine create_new_file(entry, dynamics, partit, mesh) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'axis', len_trim('T'), trim('T')), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'stored_direction', len_trim('increasing'), trim('increasing')), __LINE__) - call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, & - (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) + call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(entry%ndim:1:-1), entry%recID/), entry%varID), __LINE__) !CHUNKING stuff (netcdf libraries not always compited with it) !if (entry%ndim==2) then ! call assert_nf( nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)), __LINE__); @@ -667,15 +662,15 @@ subroutine create_new_file(entry, dynamics, partit, mesh) ! call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_lim', len_trim(tra_adv_lim), trim(tra_adv_lim)), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell' , NF_INT, 1, use_partial_cell), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'force_rotation' , NF_INT, 1, force_rotation), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell', NF_INT, 1, use_partial_cell), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'force_rotation', NF_INT, 1, force_rotation), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'include_fleapyear', NF_INT, 1, include_fleapyear), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice' , NF_INT, 1, use_floatice), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP' , NF_INT, 1, whichEVP), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps' , NF_INT, 1, evp_rheol_steps), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'opt_visc' , NF_INT, 1, dynamics%opt_visc), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_wsplit' , NF_INT, 1, dynamics%use_wsplit), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell' , NF_INT, 1, use_partial_cell), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice', NF_INT, 1, use_floatice), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP', NF_INT, 1, whichEVP), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps', NF_INT, 1, evp_rheol_steps), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'visc_option', NF_INT, 1, visc_option), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'w_split', NF_INT, 1, w_split), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell', NF_INT, 1, use_partial_cell), __LINE__) @@ -752,7 +747,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r8, 1), __LINE__) + call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r8, 1), __LINE__) end if end if end do @@ -772,7 +767,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r4, 1), __LINE__) + call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__) end if end if end do @@ -809,12 +804,11 @@ subroutine update_means ! !-------------------------------------------------------------------------------------------- ! -subroutine output(istep, dynamics, tracers, partit, mesh) +subroutine output(istep, tracers, partit, mesh) use g_clock use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use MOD_DYN use mod_tracer use io_gather_module #if defined (__icepack) @@ -826,16 +820,15 @@ subroutine output(istep, dynamics, tracers, partit, mesh) integer :: n, k logical :: do_output type(Meandata), pointer :: entry - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in), target :: tracers character(:), allocatable :: filepath real(real64) :: rtime !timestamp of the record ctime=timeold+(dayold-1.)*86400 if (lfirst) then - call ini_mean_io(dynamics, tracers, partit, mesh) + call ini_mean_io(tracers, partit, mesh) #if defined (__icepack) call init_io_icepack(mesh) !icapack has its copy of p_partit => partit #endif @@ -883,7 +876,7 @@ subroutine output(istep, dynamics, tracers, partit, mesh) entry%filename = filepath ! use any existing file with this name or create a new one if( nf_open(entry%filename, nf_write, entry%ncid) /= nf_noerr ) then - call create_new_file(entry, dynamics, partit, mesh) + call create_new_file(entry, partit, mesh) call assert_nf( nf_open(entry%filename, nf_write, entry%ncid), __LINE__) end if call assoc_ids(entry) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index c5112b7f2..a9d2aac22 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -6,7 +6,6 @@ MODULE io_RESTART USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - use MOD_DYN use o_arrays use i_arrays use g_cvmix_tke @@ -80,7 +79,7 @@ MODULE io_RESTART !-------------------------------------------------------------------------------------------- ! ini_ocean_io initializes oid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) +subroutine ini_ocean_io(year, tracers, partit, mesh) implicit none integer, intent(in) :: year @@ -90,10 +89,9 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) character(500) :: filename character(500) :: trname, units character(4) :: cyear - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in), target :: tracers #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -114,18 +112,18 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(oid, 'ssh', (/nod2D/), 'sea surface elevation', 'm', dynamics%eta_n); + call def_variable(oid, 'ssh', (/nod2D/), 'sea surface elevation', 'm', eta_n); !___ALE related fields______________________________________________________ call def_variable(oid, 'hbar', (/nod2D/), 'ALE surface elevation', 'm', hbar); !!PS call def_variable(oid, 'ssh_rhs', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs); - call def_variable(oid, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', dynamics%ssh_rhs_old); + call def_variable(oid, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs_old); call def_variable(oid, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); !___Define the netCDF variables for 3D fields_______________________________ - call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); - call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); - call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:)); - call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:)); + call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', UV(1,:,:)); + call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', UV(2,:,:)); + call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); + call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); !___Save restart variables for TKE and IDEMIX_________________________________ if (trim(mix_scheme)=='cvmix_TKE' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then @@ -134,7 +132,7 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) if (trim(mix_scheme)=='cvmix_IDEMIX' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then call def_variable(oid, 'iwe', (/nl, nod2d/), 'Internal Wave eneryy', 'm2/s2', tke(:,:)); endif - if (dynamics%opt_visc==8) then + if (visc_option==8) then call def_variable(oid, 'uke', (/nl-1, elem2D/), 'unresolved kinetic energy', 'm2/s2', uke(:,:)); call def_variable(oid, 'uke_rhs', (/nl-1, elem2D/), 'unresolved kinetic energy rhs', 'm2/s2', uke_rhs(:,:)); endif @@ -158,9 +156,9 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) longname=trim(longname)//', Adams–Bashforth' call def_variable(oid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%valuesAB(:,:)); end do - call def_variable(oid, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', dynamics%w); - call def_variable(oid, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', dynamics%w_e); - call def_variable(oid, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', dynamics%w_i); + call def_variable(oid, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel); + call def_variable(oid, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e); + call def_variable(oid, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_i); end subroutine ini_ocean_io ! !-------------------------------------------------------------------------------------------- @@ -209,7 +207,7 @@ end subroutine ini_ice_io ! !-------------------------------------------------------------------------------------------- ! -subroutine restart(istep, l_write, l_read, dynamics, tracers, partit, mesh) +subroutine restart(istep, l_write, l_read, tracers, partit, mesh) #if defined(__icepack) use icedrv_main, only: init_restart_icepack @@ -224,19 +222,18 @@ subroutine restart(istep, l_write, l_read, dynamics, tracers, partit, mesh) logical :: l_write, l_read logical :: is_restart integer :: mpierr - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in), target :: tracers ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then - call ini_ocean_io(yearnew, dynamics, tracers, partit, mesh) + call ini_ocean_io(yearnew, tracers, partit, mesh) if (use_ice) call ini_ice_io (yearnew, partit, mesh) #if defined(__icepack) if (use_ice) call init_restart_icepack(yearnew, mesh) !icapack has its copy of p_partit => partit #endif else - call ini_ocean_io(yearold, dynamics, tracers, partit, mesh) + call ini_ocean_io(yearold, tracers, partit, mesh) if (use_ice) call ini_ice_io (yearold, partit, mesh) #if defined(__icepack) if (use_ice) call init_restart_icepack(yearold, mesh) !icapack has its copy of p_partit => partit diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index acc15253c..b3a1bd8b1 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -1,17 +1,15 @@ module oce_adv_tra_driver_interfaces interface - subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, mesh) + subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) @@ -43,12 +41,11 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, ! ! !=============================================================================== -subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, mesh) +subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use g_comm_auto use oce_adv_tra_hor_interfaces use oce_adv_tra_ver_interfaces @@ -57,10 +54,9 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, implicit none real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) @@ -167,10 +163,8 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, end do end do !$OMP END PARALLEL DO - - if (dynamics%use_wsplit) then !wvel/=wvel_e + if (w_split) then !wvel/=wvel_e ! update for implicit contribution (w_split option) - call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 8652312f5..fb4ee6336 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -32,14 +32,12 @@ subroutine init_surface_node_depth(partit, mesh) type(t_partit), intent(inout), target :: partit end subroutine - subroutine impl_vert_visc_ale(dynamics, partit, mesh) + subroutine impl_vert_visc_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine update_stiff_mat_ale(partit, mesh) @@ -50,44 +48,36 @@ subroutine update_stiff_mat_ale(partit, mesh) type(t_partit), intent(inout), target :: partit end subroutine - subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) + subroutine compute_ssh_rhs_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use MOD_DYN type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics end subroutine - subroutine solve_ssh_ale(dynamics, partit, mesh) + subroutine solve_ssh_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics end subroutine - subroutine compute_hbar_ale(dynamics, partit, mesh) + subroutine compute_hbar_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics end subroutine - subroutine vert_vel_ale(dynamics, partit, mesh) + subroutine vert_vel_ale(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine update_thickness_ale(partit, mesh) @@ -100,47 +90,17 @@ subroutine update_thickness_ale(partit, mesh) end interface end module -module init_ale_interface - interface - subroutine init_ale(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - end subroutine - end interface -end module - -module init_thickness_ale_interface - interface - subroutine init_thickness_ale(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - end subroutine - end interface -end module - module oce_timestep_ale_interface interface - subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) + subroutine oce_timestep_ale(n, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - use MOD_DYN integer, intent(in) :: n type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(inout), target :: dynamics end subroutine end interface end module @@ -166,12 +126,11 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! !=============================================================================== ! allocate & initialise arrays for Arbitrary-Langrangian-Eularian (ALE) method -subroutine init_ale(dynamics, partit, mesh) +subroutine init_ale(partit, mesh) USE o_PARAM USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN USE o_ARRAYS USE g_config, only: which_ale, use_cavity, use_partial_cell USE g_forcing_param, only: use_virt_salt @@ -181,7 +140,6 @@ subroutine init_ale(dynamics, partit, mesh) integer :: n, nzmax, nzmin, elnodes(3), elem type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -192,8 +150,7 @@ subroutine init_ale(dynamics, partit, mesh) allocate(mesh%hnode_new(1:nl-1, myDim_nod2D+eDim_nod2D)) ! ssh_rhs_old: auxiliary array to store an intermediate part of the rhs computations. - allocate(dynamics%ssh_rhs_old(myDim_nod2D+eDim_nod2D)) - dynamics%ssh_rhs_old = 0.0_WP + allocate(ssh_rhs_old(myDim_nod2D+eDim_nod2D)) ! hbar, hbar_old: correspond to the elevation, but on semi-integer time steps. allocate(mesh%hbar(myDim_nod2D+eDim_nod2D)) @@ -680,7 +637,7 @@ end subroutine init_surface_node_depth ! !=============================================================================== ! initialize thickness arrays based on the current hbar -subroutine init_thickness_ale(dynamics, partit, mesh) +subroutine init_thickness_ale(partit, mesh) ! For z-star case: we stretch scalar thicknesses (nodal) ! through nlevels_nod2D_min -2 layers. Layer nlevels_nod2D_min-1 ! should not be touched if partial cell is implemented (it is). @@ -691,20 +648,17 @@ subroutine init_thickness_ale(dynamics, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN + use o_ARRAYS implicit none integer :: n, nz, elem, elnodes(3), nzmin, nzmax real(kind=WP) :: dd type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics - real(kind=WP), dimension(:), pointer :: ssh_rhs_old, eta_n + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - ssh_rhs_old=>dynamics%ssh_rhs_old(:) - eta_n =>dynamics%eta_n(:) if(mype==0) then write(*,*) '____________________________________________________________' @@ -1645,14 +1599,13 @@ end subroutine update_stiff_mat_ale !"FESOM2: from finite elements to finite volumes" ! ! ssh_rhs = alpha * grad[ int_hbot^hbar(n+0.5)( u^n+deltau)dz + W(n+0.5) ] -subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) +subroutine compute_ssh_rhs_ale(partit, mesh) use g_config,only: which_ALE,dt use MOD_MESH - use o_ARRAYS, only: water_flux + use o_ARRAYS use o_PARAM USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use g_comm_auto implicit none @@ -1664,17 +1617,12 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) real(kind=WP) :: dumc1_1, dumc1_2, dumc2_1, dumc2_2 !!PS type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs - real(kind=WP), dimension(:), pointer :: ssh_rhs, ssh_rhs_old + + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) - UV_rhs=>dynamics%uv_rhs(:,:,:) - ssh_rhs=>dynamics%ssh_rhs(:) - ssh_rhs_old=>dynamics%ssh_rhs_old(:) ssh_rhs=0.0_WP !___________________________________________________________________________ @@ -1764,14 +1712,13 @@ end subroutine compute_ssh_rhs_ale ! hbar(n+0.5) = hbar(n-0.5) - tau*ssh_rhs_old ! ! in S. Danilov et al.: "FESOM2: from finite elements to finite volumes" -subroutine compute_hbar_ale(dynamics, partit, mesh) +subroutine compute_hbar_ale(partit, mesh) use g_config,only: dt, which_ALE, use_cavity use MOD_MESH - use o_ARRAYS, only: water_flux + use o_ARRAYS use o_PARAM USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use g_comm_auto implicit none @@ -1785,17 +1732,11 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:), pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) - ssh_rhs=>dynamics%ssh_rhs(:) - ssh_rhs_old=>dynamics%ssh_rhs_old(:) !___________________________________________________________________________ ! compute the rhs @@ -1883,14 +1824,13 @@ end subroutine compute_hbar_ale ! > for zlevel: dh_k/dt_k=1 != 0 ! > for zstar : dh_k/dt_k=1...kbot-1 != 0 ! -subroutine vert_vel_ale(dynamics, partit, mesh) +subroutine vert_vel_ale(partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: water_flux + use o_ARRAYS use o_PARAM USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use g_comm_auto use io_RESTART !!PS use i_arrays !!PS @@ -1905,30 +1845,14 @@ subroutine vert_vel_ale(dynamics, partit, mesh) real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int !PS real(kind=WP), dimension(:), allocatable :: max_dhbar2distr,cumsum_maxdhbar,distrib_dhbar integer , dimension(:), allocatable :: idx - type(t_dyn) , intent(inout), target :: dynamics type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z, fer_Wvel - real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old - real(kind=WP), dimension(:) , pointer :: eta_n, d_eta + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV =>dynamics%uv(:,:,:) - Wvel =>dynamics%w(:,:) - Wvel_e=>dynamics%w_e(:,:) - Wvel_i=>dynamics%w_i(:,:) - CFL_z =>dynamics%cfl_z(:,:) - ssh_rhs =>dynamics%ssh_rhs(:) - ssh_rhs_old =>dynamics%ssh_rhs_old(:) - eta_n =>dynamics%eta_n(:) - d_eta =>dynamics%d_eta(:) - if (Fer_GM) then - fer_UV =>dynamics%fer_uv(:,:,:) - fer_Wvel =>dynamics%fer_w(:,:) - end if + !___________________________________________________________________________ ! Contributions from levels in divergence Wvel=0.0_WP @@ -2396,19 +2320,19 @@ subroutine vert_vel_ale(dynamics, partit, mesh) !___________________________________________________________________________ ! Split implicit vertical velocity onto implicit and explicit components using CFL criteria: - ! wsplit_maxcfl constrains the allowed explicit w according to the CFL at this place - ! wsplit_maxcfl=1 means w_exp is cut at at the maximum of allowed CFL - ! wsplit_maxcfl=0 means w_exp is zero (everything computed implicitly) - ! wsplit_maxcfl=inf menas w_impl is zero (everything computed explicitly) - ! a guess for optimal choice of wsplit_maxcfl would be 0.95 + ! w_max_cfl constrains the allowed explicit w according to the CFL at this place + ! w_max_cfl=1 means w_exp is cut at at the maximum of allowed CFL + ! w_max_cfl=0 means w_exp is zero (everything computed implicitly) + ! w_max_cfl=inf menas w_impl is zero (everything computed explicitly) + ! a guess for optimal choice of w_max_cfl would be 0.95 do n=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) do nz=nzmin,nzmax c1=1.0_WP c2=0.0_WP - if (dynamics%use_wsplit .and. (CFL_z(nz, n) > dynamics%wsplit_maxcfl)) then - dd=max((CFL_z(nz, n)-dynamics%wsplit_maxcfl), 0.0_WP)/max(dynamics%wsplit_maxcfl, 1.e-12) + if (w_split .and. (CFL_z(nz, n) > w_max_cfl)) then + dd=max((CFL_z(nz, n)-w_max_cfl), 0.0_WP)/max(w_max_cfl, 1.e-12) c1=1.0_WP/(1.0_WP+dd) !explicit part =1. if dd=0. c2=dd /(1.0_WP+dd) !implicit part =1. if dd=inf end if @@ -2422,132 +2346,176 @@ end subroutine vert_vel_ale !=============================================================================== ! solve eq.18 in S. Danilov et al. : FESOM2: from finite elements to finite volumes. ! for (eta^(n+1)-eta^n) = d_eta -subroutine solve_ssh_ale(dynamics, partit, mesh) - use o_PARAM - use MOD_MESH - use o_ARRAYS - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - use g_comm_auto - use g_config, only: which_ale - use iso_c_binding, only: C_INT, C_DOUBLE - implicit none +subroutine solve_ssh_ale(partit, mesh) +use o_PARAM +use MOD_MESH +use o_ARRAYS +USE MOD_PARTIT +USE MOD_PARSUP +use g_comm_auto +use g_config, only: which_ale + ! + ! + !___USE PETSC SOLVER________________________________________________________ + ! this is not longer used but is still kept in the code +#ifdef PETSC +implicit none +#include "petscf.h" +integer :: myrows +integer :: Pmode +real(kind=WP) :: rinfo(20,20) +integer :: maxiter=2000 +integer :: restarts=15 +integer :: fillin=3 +integer :: lutype=2 +integer :: nrhs=1 +real(kind=WP) :: droptol=1.e-7 +real(kind=WP) :: soltol =1e-10 !1.e-10 +logical, save :: lfirst=.true. +real(kind=WP), allocatable :: arr_nod2D(:),arr_nod2D2(:,:),arr_nod2D3(:) +real(kind=WP) :: cssh1,cssh2,crhs +integer :: i +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit + + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + +Pmode = PET_BLOCKP+PET_SOLVE + PET_BICGSTAB +PET_REPORT + PET_QUIET+ PET_RCM+PET_PCBJ +if (lfirst) then + Pmode = Pmode+PET_STRUCT+PET_PMVALS + PET_PCASM+PET_OVL_2 !+PET_PCBJ+PET_ILU + lfirst=.false. +end if +call PETSC_S(Pmode, 1, ssh_stiff%dim, ssh_stiff%nza, myrows, & + maxiter, & + restarts, & + fillin, & + droptol, & + soltol, & + part, ssh_stiff%rowptr, ssh_stiff%colind, ssh_stiff%values, & + ssh_rhs, d_eta, & + rinfo, MPI_COMM_FESOM, mesh) + ! + ! + !___USE PARMS SOLVER (recommended)__________________________________________ +#elif defined(PARMS) + + use iso_c_binding, only: C_INT, C_DOUBLE + implicit none #include "fparms.h" - logical, save :: lfirst=.true. - integer(kind=C_INT) :: n3, reuse, new_values - integer :: n - type(t_mesh) , intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - real(kind=C_DOUBLE), pointer :: droptol, soltol - integer(kind=C_INT), pointer :: maxiter, restart, lutype, fillin, ident - - interface - subroutine psolver_init(ident, SOL, PCGLOB, PCLOC, lutype, & - fillin, droptol, maxiter, restart, soltol, & - part, rowptr, colind, values, reuse, MPI_COMM) bind(C) - use iso_c_binding, only: C_INT, C_DOUBLE - integer(kind=C_INT) :: ident, SOL, PCGLOB, PCLOC, lutype, & - fillin, maxiter, restart, & - part(*), rowptr(*), colind(*), reuse, MPI_COMM - real(kind=C_DOUBLE) :: droptol, soltol, values(*) - end subroutine psolver_init - end interface - interface - subroutine psolve(ident, ssh_rhs, values, d_eta, newvalues) bind(C) - use iso_c_binding, only: C_INT, C_DOUBLE - integer(kind=C_INT) :: ident, newvalues - real(kind=C_DOUBLE) :: values(*), ssh_rhs(*), d_eta(*) - end subroutine psolve - end interface +logical, save :: lfirst=.true. +integer(kind=C_INT) :: ident +integer(kind=C_INT) :: n3, reuse, new_values +integer(kind=C_INT) :: maxiter, restart, lutype, fillin +real(kind=C_DOUBLE) :: droptol, soltol +integer :: n +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit + + +interface + subroutine psolver_init(ident, SOL, PCGLOB, PCLOC, lutype, & + fillin, droptol, maxiter, restart, soltol, & + part, rowptr, colind, values, reuse, MPI_COMM) bind(C) + use iso_c_binding, only: C_INT, C_DOUBLE + integer(kind=C_INT) :: ident, SOL, PCGLOB, PCLOC, lutype, & + fillin, maxiter, restart, & + part(*), rowptr(*), colind(*), reuse, MPI_COMM + real(kind=C_DOUBLE) :: droptol, soltol, values(*) + end subroutine psolver_init +end interface +interface + subroutine psolve(ident, ssh_rhs, values, d_eta, newvalues) bind(C) + + use iso_c_binding, only: C_INT, C_DOUBLE + integer(kind=C_INT) :: ident, newvalues + real(kind=C_DOUBLE) :: values(*), ssh_rhs(*), d_eta(*) + + end subroutine psolve +end interface #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - ident => dynamics%solverinfo%ident - maxiter => dynamics%solverinfo%maxiter - restart => dynamics%solverinfo%restart - lutype => dynamics%solverinfo%lutype - fillin => dynamics%solverinfo%fillin - droptol => dynamics%solverinfo%droptol - soltol => dynamics%solverinfo%soltol - - if (trim(which_ale)=='linfs') then - reuse=0 - new_values=0 - else - reuse=1 ! For varying coefficients, set reuse=1 - new_values=1 !PS 1 ! and new_values=1, as soon as the coefficients have changed - end if - ! reuse=0: matrix remains static - ! reuse=1: keeps a copy of the matrix structure to apply scaling of the matrix fast +ident=1 +maxiter=2000 +restart=15 +fillin=3 +lutype=2 +droptol=1.e-8 +soltol=1.e-10 - ! new_values=0: matrix coefficients unchanged (compared to the last call of psolve) - ! new_values=1: replaces the matrix values (keeps the structure and the preconditioner) - ! new_values=2: replaces the matrix values and recomputes the preconditioner (keeps the structure) +if (trim(which_ale)=='linfs') then + reuse=0 + new_values=0 +else + reuse=1 ! For varying coefficients, set reuse=1 + new_values=1 !PS 1 ! and new_values=1, as soon as the coefficients have changed +end if - ! new_values>0 requires reuse=1 in psolver_init! +! reuse=0: matrix remains static +! reuse=1: keeps a copy of the matrix structure to apply scaling of the matrix fast - ! - !___________________________________________________________________________ - if (lfirst) then - ! Set SOLCG for CG solver (symmetric, positiv definit matrices only, no precond available!!) - ! SOLBICGS for BiCGstab solver (arbitrary matrices) - ! SOLBICGS_RAS for BiCGstab solver (arbitrary matrices) with integrated RAS - the global - ! preconditioner setting is ignored! It saves a 4 vector copies per iteration - ! compared to SOLBICGS + PCRAS. - ! SOLPBICGS for pipelined BiCGstab solver (arbitrary matrices) - ! Should scale better than SOLBICGS, but be careful, it is still experimental. - ! SOLPBICGS_RAS is SOLPBICGS with integrated RAS (global preconditioner setting is ignored!) - ! for even better scalability, well, in the end, it does not matter much. - call psolver_init(ident, SOLBICGS_RAS, PCRAS, PCILUK, lutype, & - fillin, droptol, maxiter, restart, soltol, & - part-1, ssh_stiff%rowptr(:)-ssh_stiff%rowptr(1), & - ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) - lfirst=.false. - end if - ! - !___________________________________________________________________________ - call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, dynamics%d_eta, new_values) +! new_values=0: matrix coefficients unchanged (compared to the last call of psolve) +! new_values=1: replaces the matrix values (keeps the structure and the preconditioner) +! new_values=2: replaces the matrix values and recomputes the preconditioner (keeps the structure) + +! new_values>0 requires reuse=1 in psolver_init! +if (lfirst) then + ! Set SOLCG for CG solver (symmetric, positiv definit matrices only, no precond available!!) + ! SOLBICGS for BiCGstab solver (arbitrary matrices) + ! SOLBICGS_RAS for BiCGstab solver (arbitrary matrices) with integrated RAS - the global + ! preconditioner setting is ignored! It saves a 4 vector copies per iteration + ! compared to SOLBICGS + PCRAS. + ! SOLPBICGS for pipelined BiCGstab solver (arbitrary matrices) + ! Should scale better than SOLBICGS, but be careful, it is still experimental. + ! SOLPBICGS_RAS is SOLPBICGS with integrated RAS (global preconditioner setting is ignored!) + ! for even better scalability, well, in the end, it does not matter much. + call psolver_init(ident, SOLBICGS_RAS, PCRAS, PCILUK, lutype, & + fillin, droptol, maxiter, restart, soltol, & + part-1, ssh_stiff%rowptr(:)-ssh_stiff%rowptr(1), & + ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) + lfirst=.false. +end if + call psolve(ident, ssh_rhs, ssh_stiff%values, d_eta, new_values) + +#endif + ! ! !___________________________________________________________________________ - call exchange_nod(dynamics%d_eta, partit) !is this required after calling psolve ? +call exchange_nod(d_eta, partit) !is this required after calling psolve ? end subroutine solve_ssh_ale ! ! !=============================================================================== -subroutine impl_vert_visc_ale(dynamics, partit, mesh) +subroutine impl_vert_visc_ale(partit, mesh) USE MOD_MESH USE o_PARAM -USE o_ARRAYS, only: Av, stress_surf +USE o_ARRAYS USE MOD_PARTIT USE MOD_PARSUP -USE MOD_DYN USE g_CONFIG,only: dt IMPLICIT NONE type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit -type(t_dyn) , intent(inout), target :: dynamics real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) integer :: nz, elem, nzmax, nzmin, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd -real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs -real(kind=WP), dimension(:,:) , pointer :: Wvel_i + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -UV =>dynamics%uv(:,:,:) -UV_rhs =>dynamics%uv_rhs(:,:,:) -Wvel_i =>dynamics%w_i(:,:) DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -2706,11 +2674,10 @@ end subroutine impl_vert_visc_ale ! ! !=============================================================================== -subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) +subroutine oce_timestep_ale(n, tracers, partit, mesh) use g_config use MOD_MESH use MOD_TRACER - use MOD_DYN use o_ARRAYS use o_PARAM USE MOD_PARTIT @@ -2729,26 +2696,22 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use pressure_bv_interface use pressure_force_4_linfs_interface use pressure_force_4_zxxxx_interface - use compute_vel_rhs_interface use solve_tracers_ale_interface use write_step_info_interface use check_blowup_interface - use fer_solve_interface IMPLICIT NONE type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(inout), target :: dynamics real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: n, node - real(kind=WP), dimension(:), pointer :: eta_n + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - eta_n => dynamics%eta_n(:) - + t0=MPI_Wtime() ! water_flux = 0.0_WP @@ -2811,7 +2774,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! use FESOM2.0 tuned k-profile parameterization for vertical mixing if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_KPP'//achar(27)//'[0m' - call oce_mixing_KPP(Av, Kv_double, dynamics, tracers, partit, mesh) + call oce_mixing_KPP(Av, Kv_double, tracers, partit, mesh) Kv=Kv_double(:,:,1) call mo_convect(partit, mesh) @@ -2819,13 +2782,13 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! mixing else if(mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_PP'//achar(27)//'[0m' - call oce_mixing_PP(dynamics, partit, mesh) + call oce_mixing_PP(partit, mesh) call mo_convect(partit, mesh) ! use CVMIX KPP (Large at al. 1994) else if(mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_kpp'//achar(27)//'[0m' - call calc_cvmix_kpp(dynamics, tracers, partit, mesh) + call calc_cvmix_kpp(tracers, partit, mesh) call mo_convect(partit, mesh) ! use CVMIX PP (Pacanowski and Philander 1981) parameterisation for mixing @@ -2833,7 +2796,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! N^2 and vertical horizontal velocity shear dui/dz else if(mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_pp'//achar(27)//'[0m' - call calc_cvmix_pp(dynamics, partit, mesh) + call calc_cvmix_pp(partit, mesh) call mo_convect(partit, mesh) ! use CVMIX TKE (turbulent kinetic energy closure) parameterisation for @@ -2842,7 +2805,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! Model for the diapycnal diffusivity induced by internal gravity waves" else if(mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_tke'//achar(27)//'[0m' - call calc_cvmix_tke(dynamics, partit, mesh) + call calc_cvmix_tke(partit, mesh) call mo_convect(partit, mesh) end if @@ -2862,15 +2825,25 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_vel_rhs'//achar(27)//'[0m' - call compute_vel_rhs(dynamics, partit, mesh) + +!!PS if (any(UV_rhs/=UV_rhs)) write(*,*) n, mype,' --> found NaN UV_rhs before compute_vel_rhs' +!!PS if (any(UV/=UV)) write(*,*) n, mype,' --> found NaN UV before compute_vel_rhs' +!!PS if (any(ssh_rhs/=ssh_rhs)) write(*,*) n, mype,' --> found NaN ssh_rhs before compute_vel_rhs' +!!PS if (any(ssh_rhs_old/=ssh_rhs_old)) write(*,*) n, mype,' --> found NaN ssh_rhs_old before compute_vel_rhs' +!!PS if (any(abs(Wvel_e)>1.0e20)) write(*,*) n, mype,' --> found Inf Wvel_e before compute_vel_rhs' + + if(mom_adv/=3) then + call compute_vel_rhs(partit, mesh) + else + call compute_vel_rhs_vinv(partit, mesh) + end if !___________________________________________________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call viscosity_filter'//achar(27)//'[0m' - call viscosity_filter(dynamics%opt_visc, dynamics, partit, mesh) + call viscosity_filter(visc_option, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call impl_vert_visc_ale'//achar(27)//'[0m' - if(dynamics%use_ivertvisc) call impl_vert_visc_ale(dynamics,partit, mesh) + if(i_vert_visc) call impl_vert_visc_ale(partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ @@ -2882,26 +2855,26 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_ssh_rhs_ale'//achar(27)//'[0m' ! ssh_rhs=-alpha*\nabla\int(U_n+U_rhs)dz-(1-alpha)*... ! see "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (18) rhs - call compute_ssh_rhs_ale(dynamics, partit, mesh) + call compute_ssh_rhs_ale(partit, mesh) ! Take updated ssh matrix and solve --> new ssh! t30=MPI_Wtime() - call solve_ssh_ale(dynamics, partit, mesh) + call solve_ssh_ale(partit, mesh) - if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(dynamics, partit, mesh) + if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(partit, mesh) t3=MPI_Wtime() ! estimate new horizontal velocity u^(n+1) ! u^(n+1) = u* + [-g * tau * theta * grad(eta^(n+1)-eta^(n)) ] if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call update_vel'//achar(27)//'[0m' - call update_vel(dynamics, partit, mesh) + call update_vel(partit, mesh) ! --> eta_(n) --> eta_(n+1) = eta_(n) + deta = eta_(n) + (eta_(n+1) + eta_(n)) t4=MPI_Wtime() ! Update to hbar(n+3/2) and compute dhe to be used on the next step if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_hbar_ale'//achar(27)//'[0m' - call compute_hbar_ale(dynamics, partit, mesh) + call compute_hbar_ale(partit, mesh) !___________________________________________________________________________ ! - Current dynamic elevation alpha*hbar(n+1/2)+(1-alpha)*hbar(n-1/2) @@ -2929,20 +2902,20 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) if (Fer_GM) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call fer_solve_Gamma'//achar(27)//'[0m' call fer_solve_Gamma(partit, mesh) - call fer_gamma2vel(dynamics, partit, mesh) + call fer_gamma2vel(partit, mesh) end if t6=MPI_Wtime() !___________________________________________________________________________ ! The main step of ALE procedure --> this is were the magic happens --> here ! is decided how change in hbar is distributed over the vertical layers if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call vert_vel_ale'//achar(27)//'[0m' - call vert_vel_ale(dynamics, partit, mesh) + call vert_vel_ale(partit, mesh) t7=MPI_Wtime() !___________________________________________________________________________ ! solve tracer equation if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call solve_tracers_ale'//achar(27)//'[0m' - call solve_tracers_ale(dynamics, tracers, partit, mesh) + call solve_tracers_ale(tracers, partit, mesh) t8=MPI_Wtime() !___________________________________________________________________________ @@ -2952,13 +2925,11 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) t9=MPI_Wtime() !___________________________________________________________________________ ! write out global fields for debugging - if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call write_step_info'//achar(27)//'[0m' - call write_step_info(n,logfile_outfreq, dynamics, tracers, partit, mesh) + call write_step_info(n,logfile_outfreq, tracers, partit, mesh) ! check model for blowup --> ! write_step_info and check_blowup require ! togeather around 2.5% of model runtime - if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call check_blowup'//achar(27)//'[0m' - call check_blowup(n, dynamics, tracers, partit, mesh) + call check_blowup(n, tracers, partit, mesh) t10=MPI_Wtime() !___________________________________________________________________________ @@ -2991,4 +2962,3 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) write(*,*) end if end subroutine oce_timestep_ale - diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index cc2a54890..5c62871e1 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -11,7 +11,6 @@ MODULE o_mixing_KPP_mod USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER - USE MOD_DYN USE o_ARRAYS USE g_config USE i_arrays @@ -243,7 +242,7 @@ end subroutine oce_mixing_kpp_init ! diffK = diffusion coefficient (m^2/s) ! !--------------------------------------------------------------- - subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) + subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) IMPLICIT NONE @@ -254,7 +253,6 @@ subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers - type(t_dyn) , intent(in), target :: dynamics integer :: node, kn, elem, elnodes(3) integer :: nz, ns, j, q, lay, lay_mi, nzmin, nzmax real(KIND=WP) :: smftu, smftv, aux, vol @@ -267,12 +265,11 @@ subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) real(KIND=WP), dimension(mesh%nl, partit%myDim_elem2D+partit%eDim_elem2D), intent(inout) :: viscAE!for momentum (elements) real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D +partit%eDim_nod2D) :: viscA !for momentum (nodes) real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D +partit%eDim_nod2D, tracers%num_tracers), intent(inout) :: diffK !for T and S - real(kind=WP), dimension(:,:,:), pointer :: UVnode + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UVnode=>dynamics%uvnode(:,:,:) ViscA=0.0_WP DO node=1, myDim_nod2D !+eDim_nod2D @@ -302,15 +299,15 @@ subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) dbsfc(nzmin,node) = 0.0_WP ! Surface velocity - usurf = UVnode(1,nzmin,node) - vsurf = UVnode(2,nzmin,node) + usurf = Unode(1,nzmin,node) + vsurf = Unode(2,nzmin,node) !!PS DO nz=2, nlevels_nod2d(node)-1 DO nz=nzmin+1, nzmax-1 ! Squared velocity shear referenced to surface (@ Z) - u_loc = 0.5_WP * ( UVnode(1,nz-1,node) + UVnode(1,nz,node) ) - v_loc = 0.5_WP * ( UVnode(2,nz-1,node) + UVnode(2,nz,node) ) + u_loc = 0.5_WP * ( Unode(1,nz-1,node) + Unode(1,nz,node) ) + v_loc = 0.5_WP * ( Unode(2,nz-1,node) + Unode(2,nz,node) ) dVsq(nz,node) = ( usurf - u_loc )**2 + ( vsurf - v_loc )**2 @@ -350,7 +347,7 @@ subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) ! compute interior mixing coefficients everywhere, due to constant ! internal wave activity, static instability, and local shear ! instability. - CALL ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) + CALL ri_iwmix(viscA, diffK, tracers, partit, mesh) ! add double diffusion IF (double_diffusion) then CALL ddmix(diffK, tracers, partit, mesh) @@ -732,12 +729,11 @@ END SUBROUTINE wscale ! visc = viscosity coefficient (m**2/s) ! diff = diffusion coefficient (m**2/s) ! - subroutine ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) + subroutine ri_iwmix(viscA, diffK, tracers, partit, mesh) IMPLICIT NONE type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers - type(t_dyn), intent(in), target :: dynamics integer :: node, nz, mr, nzmin, nzmax real(KIND=WP) , parameter :: Riinfty = 0.8_WP ! local Richardson Number limit for shear instability (LMD 1994 uses 0.7) real(KIND=WP) :: ri_prev, tmp @@ -750,12 +746,11 @@ subroutine ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) ! Put them under the namelist.oce logical :: smooth_richardson_number = .false. integer :: num_smoothings = 1 ! for vertical smoothing of Richardson number - real(kind=WP), dimension(:,:,:), pointer :: UVnode + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UVnode=>dynamics%uvnode(:,:,:) ! Compute Richardson number and store it as diffK to save memory DO node=1, myDim_nod2D! +eDim_nod2D @@ -764,8 +759,8 @@ subroutine ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) !!PS DO nz=2,nlevels_nod2d(node)-1 DO nz=nzmin+1,nzmax-1 dz_inv = 1.0_WP / (Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) ! > 0 - shear = ( UVnode(1, nz-1, node) - UVnode(1, nz, node) )**2 + & - ( UVnode(2, nz-1, node) - UVnode(2, nz, node) )**2 + shear = ( Unode(1, nz-1, node) - Unode(1, nz, node) )**2 + & + ( Unode(2, nz-1, node) - Unode(2, nz, node) )**2 shear = shear * dz_inv * dz_inv diffK(nz,node,1) = MAX( bvfreq(nz,node), 0.0_WP ) / (shear + epsln) ! To avoid NaNs at start END DO ! minimum Richardson number is 0 diff --git a/src/oce_ale_mixing_pp.F90 b/src/oce_ale_mixing_pp.F90 index 36cf7d519..b4c7958d2 100644 --- a/src/oce_ale_mixing_pp.F90 +++ b/src/oce_ale_mixing_pp.F90 @@ -1,5 +1,5 @@ !======================================================================= -subroutine oce_mixing_pp(dynamics, partit, mesh) +subroutine oce_mixing_pp(partit, mesh) ! Compute Richardson number dependent Av and Kv following ! Pacanowski and Philander, 1981 ! Av = Avmax * factor**2 + Av0, @@ -18,7 +18,6 @@ subroutine oce_mixing_pp(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP -USE MOD_DYN USE o_PARAM USE o_ARRAYS USE g_config @@ -27,17 +26,13 @@ subroutine oce_mixing_pp(dynamics, partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit -type(t_dyn), intent(inout), target :: dynamics real(kind=WP) :: dz_inv, bv, shear, a, rho_up, rho_dn, t, s, Kv0_b integer :: node, nz, nzmax, nzmin, elem, elnodes(3), i -real(kind=WP), dimension(:,:,:), pointer :: UVnode + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -UVnode=>dynamics%uvnode(:,:,:) - - !___________________________________________________________________________ do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2d(node) @@ -48,8 +43,8 @@ subroutine oce_mixing_pp(dynamics, partit, mesh) !!PS do nz=2,nlevels_nod2d(node)-1 do nz=nzmin+1,nzmax-1 dz_inv=1.0_WP/(Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) - shear = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 +& - (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 + shear = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 +& + (Unode(2,nz-1,node)-Unode(2,nz,node))**2 shear = shear*dz_inv*dz_inv Kv(nz,node) = shear/(shear+5._WP*max(bvfreq(nz,node),0.0_WP)+1.0e-14) ! To avoid NaNs at start end do diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 748927421..a73719608 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -12,7 +12,6 @@ subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) end subroutine end interface end module - module diff_ver_part_expl_ale_interface interface subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) @@ -27,7 +26,6 @@ subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) end subroutine end interface end module - module diff_ver_part_redi_expl_interface interface subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) @@ -42,41 +40,34 @@ subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) end subroutine end interface end module - module diff_ver_part_impl_ale_interface interface - subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracer, partit, mesh) + subroutine diff_ver_part_impl_ale(tr_num, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - use MOD_DYN integer, intent(in), target :: tr_num - type(t_dyn), intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface end module - module diff_tracers_ale_interface interface - subroutine diff_tracers_ale(tr_num, dynamics, tracer, partit, mesh) + subroutine diff_tracers_ale(tr_num, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - use MOD_DYN integer, intent(in), target :: tr_num - type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface end module - module bc_surface_interface interface function bc_surface(n, id, sval, partit) @@ -90,36 +81,30 @@ function bc_surface(n, id, sval, partit) end function end interface end module - module diff_part_bh_interface interface - subroutine diff_part_bh(tr_num, dynamics, tracer, partit, mesh) + subroutine diff_part_bh(tr_num, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - use MOD_DYN integer, intent(in), target :: tr_num - type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface end module - module solve_tracers_ale_interface interface - subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) + subroutine solve_tracers_ale(tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer - use MOD_DYN + use mod_tracer type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics end subroutine end interface end module @@ -127,13 +112,13 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) ! !=============================================================================== ! Driving routine Here with ALE changes!!! -subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) +subroutine solve_tracers_ale(tracers, partit, mesh) use g_config use o_PARAM, only: SPP, Fer_GM + use o_arrays use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN use mod_tracer use g_comm_auto use o_tracers @@ -141,34 +126,21 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use diff_tracers_ale_interface use oce_adv_tra_driver_interfaces implicit none - - type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit integer :: tr_num, node, elem, nzmax, nzmin - real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, fer_Wvel - real(kind=WP), dimension(:,:) , pointer :: del_ttf + real(kind=WP), pointer, dimension (:,:) :: del_ttf #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - Wvel => dynamics%w(:,:) - Wvel_e => dynamics%w_e(:,:) - Wvel_i => dynamics%w_i(:,:) - if (Fer_GM) then - fer_UV => dynamics%fer_uv(:,:,:) - fer_Wvel => dynamics%fer_w(:,:) - end if del_ttf => tracers%work%del_ttf !___________________________________________________________________________ if (SPP) call cal_rejected_salt(partit, mesh) - if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) - + if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) !___________________________________________________________________________ ! update 3D velocities with the bolus velocities: ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 @@ -186,7 +158,6 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) end do !$OMP END PARALLEL DO end if - !___________________________________________________________________________ ! loop over all tracers do tr_num=1, tracers%num_tracers @@ -194,12 +165,10 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) ! needed if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call init_tracers_AB'//achar(27)//'[0m' call init_tracers_AB(tr_num, tracers, partit, mesh) - ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' ! it will update del_ttf with contributions from horizontal and vertical advection parts (del_ttf_advhoriz and del_ttf_advvert) - call do_oce_adv_tra(dt, UV, Wvel, Wvel_i, Wvel_e, tr_num, dynamics, tracers, partit, mesh) - + call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -212,11 +181,9 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) tracers%data(tr_num)%valuesAB(:, node)=tracers%data(tr_num)%values(:, node) !DS: check that this is the right place! end do !$OMP END PARALLEL DO - ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' - call diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) - + call diff_tracers_ale(tr_num, tracers, partit, mesh) ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' ! if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then @@ -270,12 +237,11 @@ end subroutine solve_tracers_ale ! ! !=============================================================================== -subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) +subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - use MOD_DYN use o_arrays use o_tracers use diff_part_hor_redi_interface @@ -287,7 +253,6 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) integer :: n, nzmax, nzmin integer, intent(in), target :: tr_num - type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -330,13 +295,13 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (tracers%i_vert_diff) then ! do vertical diffusion: implicite - call diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) + call diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) end if !We DO not set del_ttf to zero because it will not be used in this timestep anymore !init_tracers_AB will set it to zero for the next timestep - if (tracers%smooth_bh_tra) then - call diff_part_bh(tr_num, dynamics, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) + if (tracers%smooth_bh_tra) then + call diff_part_bh(tr_num, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) end if end subroutine diff_tracers_ale ! @@ -423,14 +388,13 @@ end subroutine diff_ver_part_expl_ale ! !=============================================================================== ! vertical diffusivity augmented with Redi contribution [vertical flux of K(3,3)*d_zT] -subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) +subroutine diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER - use MOD_DYN use o_PARAM - use o_ARRAYS, only: Ki, Kv, heat_flux, water_flux, slope_tapered + use o_ARRAYS use i_ARRAYS USE MOD_PARTIT USE MOD_PARSUP @@ -442,9 +406,8 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) implicit none integer, intent(in), target :: tr_num - type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) @@ -456,16 +419,14 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) logical :: do_wimpl=.true. real(kind=WP), dimension(:,:), pointer :: trarr - real(kind=WP), dimension(:,:), pointer :: Wvel_i #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - trarr => tracers%data(tr_num)%values(:,:) - Wvel_i => dynamics%w_i(:,:) + trarr=>tracers%data(tr_num)%values(:,:) !___________________________________________________________________________ - if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. dynamics%use_wsplit)) do_wimpl=.false. + if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. if (Redi) isredi=1._WP dt_inv=1.0_WP/dt @@ -1136,34 +1097,29 @@ end subroutine diff_part_hor_redi ! ! !=============================================================================== -SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) - use o_ARRAYS, only: +SUBROUTINE diff_part_bh(tr_num, tracers, partit, mesh) + use o_ARRAYS use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER - use MOD_DYN use o_param use g_config use g_comm_auto IMPLICIT NONE integer, intent(in), target :: tr_num - type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP) :: u1, v1, len, vi, tt, ww integer :: nz, ed, el(2), en(2), k, elem, nl1, ul1 real(kind=WP), allocatable :: temporary_ttf(:,:) real(kind=WP), pointer :: ttf(:,:) - real(kind=WP), dimension(:,:,:), pointer :: UV - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) ttf => tracers%data(tr_num)%values ed=myDim_nod2D+eDim_nod2D @@ -1182,10 +1138,7 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 tt=ttf(nz,en(1))-ttf(nz,en(2)) - vi=sqrt(max(dynamics%visc_gamma0, & - max(dynamics%visc_gamma1*sqrt(vi), & - dynamics%visc_gamma2*vi) & - )*len) + vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 10m^2/s for 10 km (0.04 h/50) !vi=sqrt(10.*le) tt=tt*vi @@ -1209,10 +1162,7 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 tt=temporary_ttf(nz,en(1))-temporary_ttf(nz,en(2)) - vi=sqrt(max(dynamics%visc_gamma0, & - max(dynamics%visc_gamma1*sqrt(vi), & - dynamics%visc_gamma2*vi) & - )*len) + vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 10m^2/s for 10 km (0.04 h/50) !vi=sqrt(10.*le) tt=-tt*vi*dt diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index ea77166ed..98c730732 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -1,44 +1,23 @@ - -module compute_vel_rhs_interface - interface - subroutine compute_vel_rhs(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module - module momentum_adv_scalar_interface interface - subroutine momentum_adv_scalar(dynamics, partit, mesh) + subroutine momentum_adv_scalar(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - end subroutine end interface end module - ! ! !_______________________________________________________________________________ -subroutine compute_vel_rhs(dynamics, partit, mesh) +subroutine compute_vel_rhs(partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN - use o_ARRAYS, only: coriolis, ssh_gp, pgf_x, pgf_y + use o_ARRAYS use i_ARRAYS use i_therm_param use o_PARAM @@ -50,9 +29,8 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) use momentum_adv_scalar_interface implicit none - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh integer :: elem, elnodes(3), nz, nzmax, nzmin real(kind=WP) :: ff, mm real(kind=WP) :: Fx, Fy, pre(3) @@ -60,17 +38,10 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UV_rhs - real(kind=WP), dimension(:) , pointer :: eta_n - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV =>dynamics%uv(:,:,:) - UV_rhs =>dynamics%uv_rhs(:,:,:) - UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) - eta_n =>dynamics%eta_n(:) t1=MPI_Wtime() use_pice=0 @@ -146,11 +117,11 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ ! advection - if (dynamics%momadv_opt==1) then + if (mom_adv==1) then if (mype==0) write(*,*) 'in moment not adapted mom_adv advection typ for ALE, check your namelist' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) - elseif (dynamics%momadv_opt==2) then - call momentum_adv_scalar(dynamics, partit, mesh) + elseif (mom_adv==2) then + call momentum_adv_scalar(partit, mesh) end if t3=MPI_Wtime() @@ -187,34 +158,27 @@ END SUBROUTINE compute_vel_rhs ! Momentum advection on scalar control volumes with ALE adaption--> exchange zinv(nz) ! against hnode(nz,node) !_______________________________________________________________________________ -subroutine momentum_adv_scalar(dynamics, partit, mesh) +subroutine momentum_adv_scalar(partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP -use MOD_DYN +USE o_ARRAYS USE o_PARAM use g_comm_auto IMPLICIT NONE -type(t_dyn) , intent(inout), target :: dynamics +type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit -type(t_mesh) , intent(in) , target :: mesh - integer :: n, nz, el1, el2 integer :: nl1, nl2, ul1, ul2, nod(2), el, ed, k, nle, ule real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) -real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UVnode_rhs -real(kind=WP), dimension(:,:), pointer :: Wvel_e +real(kind=WP) :: Unode_rhs(2,mesh%nl-1,partit%myDim_nod2d+partit%eDim_nod2D) #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV =>dynamics%uv(:,:,:) - UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) - UVnode_rhs=>dynamics%work%uvnode_rhs(:,:,:) - Wvel_e =>dynamics%w_e(:,:) !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz @@ -261,15 +225,15 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) !!PS if (ul1>1) write(*,*) mype, wu(ul1:nl1) ! Here 1/3 because 1/3 of the area is related to the node --> comes from ! averaging the elemental velocities - UVnode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) - UVnode_rhs(2,nz,n) = - (wv(nz) - wv(nz+1) ) / (3._WP*hnode(nz,n)) + Unode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) + Unode_rhs(2,nz,n) = - (wv(nz) - wv(nz+1) ) / (3._WP*hnode(nz,n)) enddo !_______________________________________________________________________ ! To get a clean checksum, set the remaining values to zero - UVnode_rhs(1:2,nl1+1:nl-1,n) = 0._WP - UVnode_rhs(1:2,1:ul1-1 ,n) = 0._WP + Unode_rhs(1:2,nl1+1:nl-1,n) = 0._WP + Unode_rhs(1:2,1:ul1-1 ,n) = 0._WP end do @@ -327,8 +291,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(1) <= myDim_nod2d) then do nz=min(ul1,ul2), max(nl1,nl2) ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) - UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) + Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) + Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) end do endif @@ -336,8 +300,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(2) <= myDim_nod2d) then do nz=min(ul1,ul2), max(nl1,nl2) ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) - UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) + Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) + Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) end do endif @@ -346,8 +310,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(1) <= myDim_nod2d) then do nz=ul1, nl1 ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) - UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) end do ! --> do nz=ul1, nl1 endif @@ -356,8 +320,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) !!PS do nz=1, nl1 do nz=ul1, nl1 ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) + Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) + Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) end do ! --> do nz=ul1, nl1 endif endif ! --> if (el2>0) then @@ -368,14 +332,14 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) do n=1,myDim_nod2d nl1 = nlevels_nod2D(n)-1 ul1 = ulevels_nod2D(n) -!!PS UVnode_rhs(1,ul1:nl1,n) = UVnode_rhs(1,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity -!!PS UVnode_rhs(2,ul1:nl1,n) = UVnode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity - UVnode_rhs(1,ul1:nl1,n) = UVnode_rhs(1,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) - UVnode_rhs(2,ul1:nl1,n) = UVnode_rhs(2,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) +!!PS Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity +!!PS Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity + Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) + Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) end do !-->do n=1,myDim_nod2d !___________________________________________________________________________ - call exchange_nod(UVnode_rhs, partit) + call exchange_nod(Unode_rhs, partit) !___________________________________________________________________________ ! convert total nodal advection from vertice --> elements @@ -383,9 +347,9 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) nl1 = nlevels(el)-1 ul1 = ulevels(el) UV_rhsAB(1:2,ul1:nl1,el) = UV_rhsAB(1:2,ul1:nl1,el) & - + elem_area(el)*(UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(1,el)) & - + UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(2,el)) & - + UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP + + elem_area(el)*(Unode_rhs(1:2,ul1:nl1,elem2D_nodes(1,el)) & + + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(2,el)) & + + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP end do ! --> do el=1, myDim_elem2D end subroutine momentum_adv_scalar diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 5b80ccf7a..36b9f6d04 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -1,70 +1,138 @@ ! A set of routines for computing the horizonlal viscosity ! the control parameters (their default values) are: -! dynamics%visc_gamma0 (0.01 [m/s]), dynamics%visc_gamma1 (0.1 [no dim.]), dynamics%visc_gamma2 (10.[s/m]), Div_c [1.], Leith_c[1.?] -! 1. dynamics%visc_gamma0 has the dimension of velocity. It should be as small as possible, but in any case smaller than 0.01 m/s. +! gamma0 (0.01 [m/s]), gamma1 (0.1 [no dim.]), gamma2 (10.[s/m]), Div_c [1.], Leith_c[1.?] +! 1. gamma0 has the dimension of velocity. It should be as small as possible, but in any case smaller than 0.01 m/s. ! All major ocean circulation models are stable with harmonic viscosity 0.01*len. -! 2. dynamics%visc_gamma1 is nondimensional. In commonly used Leith or Smagorinsky parameterizations it is C/pi^2=0.1 (C is about 1). +! 2. gamma1 is nondimensional. In commonly used Leith or Smagorinsky parameterizations it is C/pi^2=0.1 (C is about 1). ! We therefore try to follow this, allowing some adjustments (because our mesh is triangular, our resolution is different, etc.). -! We however, try to keep dynamics%visc_gamma1<0.1 -! 3. dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. It is only used in: +! We however, try to keep gamma1<0.1 +! 3. gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. It is only used in: ! (5) visc_filt_bcksct, (6) visc_filt_bilapl, (7) visc_filt_bidiff ! 4. Div_c =1. should be default ! 5. Leith_c=? (need to be adjusted) +module h_viscosity_leith_interface + interface + subroutine h_viscosity_leith(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module +module visc_filt_harmon_interface + interface + subroutine visc_filt_harmon(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module +module visc_filt_hbhmix_interface + interface + subroutine visc_filt_hbhmix(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module +module visc_filt_biharm_interface + interface + subroutine visc_filt_biharm(option, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + integer :: option + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module module visc_filt_bcksct_interface interface - subroutine visc_filt_bcksct(dynamics, partit, mesh) + subroutine visc_filt_bcksct(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - end subroutine end interface end module - module visc_filt_bilapl_interface interface - subroutine visc_filt_bilapl(dynamics, partit, mesh) + subroutine visc_filt_bilapl(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - end subroutine end interface end module - module visc_filt_bidiff_interface interface - subroutine visc_filt_bidiff(dynamics, partit, mesh) + subroutine visc_filt_bidiff(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - end subroutine end interface end module +module visc_filt_dbcksc_interface + interface + subroutine visc_filt_dbcksc(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module +module backscatter_coef_interface + interface + subroutine backscatter_coef(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module +module uke_update_interface + interface + subroutine uke_update(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module + -! +! =================================================================== ! Contains routines needed for computations of dynamics. ! includes: update_vel, compute_vel_nodes -!_______________________________________________________________________________ -SUBROUTINE update_vel(dynamics, partit, mesh) +! =================================================================== +SUBROUTINE update_vel(partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN + USE o_ARRAYS USE o_PARAM USE g_CONFIG use g_comm_auto @@ -72,21 +140,14 @@ SUBROUTINE update_vel(dynamics, partit, mesh) integer :: elem, elnodes(3), nz, m, nzmax, nzmin real(kind=WP) :: eta(3) real(kind=WP) :: Fx, Fy - type(t_dyn) , intent(inout), target :: dynamics - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs - real(kind=WP), dimension(:), pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) - UV_rhs=>dynamics%uv_rhs(:,:,:) - eta_n=>dynamics%eta_n(:) - d_eta=>dynamics%d_eta(:) - + DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) eta=-g*theta*dt*d_eta(elnodes) @@ -103,30 +164,24 @@ SUBROUTINE update_vel(dynamics, partit, mesh) eta_n=eta_n+d_eta call exchange_elem(UV, partit) end subroutine update_vel -! -! -!_______________________________________________________________________________ -subroutine compute_vel_nodes(dynamics, partit, mesh) +!========================================================================== +subroutine compute_vel_nodes(partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN USE o_PARAM + USE o_ARRAYS use g_comm_auto IMPLICIT NONE integer :: n, nz, k, elem, nln, uln, nle, ule real(kind=WP) :: tx, ty, tvol - - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) - UVnode=>dynamics%uvnode(:,:,:) DO n=1, myDim_nod2D uln = ulevels_nod2D(n) @@ -146,70 +201,432 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) tx=tx+UV(1,nz,elem)*elem_area(elem) ty=ty+UV(2,nz,elem)*elem_area(elem) END DO - UVnode(1,nz,n)=tx/tvol - UVnode(2,nz,n)=ty/tvol + Unode(1,nz,n)=tx/tvol + Unode(2,nz,n)=ty/tvol END DO END DO - call exchange_nod(UVnode, partit) + call exchange_nod(Unode, partit) end subroutine compute_vel_nodes -! -! -!_______________________________________________________________________________ -subroutine viscosity_filter(option, dynamics, partit, mesh) - use o_PARAM - use MOD_MESH +!=========================================================================== +subroutine viscosity_filter(option, partit, mesh) +use o_PARAM +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +use h_viscosity_leith_interface +use visc_filt_harmon_interface +use visc_filt_hbhmix_interface +use visc_filt_biharm_interface +use visc_filt_bcksct_interface +use visc_filt_bilapl_interface +use visc_filt_bidiff_interface +use visc_filt_dbcksc_interface +use backscatter_coef_interface +IMPLICIT NONE +integer :: option +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit + +! Driving routine +! Background viscosity is selected in terms of Vl, where V is +! background velocity scale and l is the resolution. V is 0.005 +! or 0.01, perhaps it would be better to pass it as a parameter. + +! h_viscosity_leiht needs vorticity, so vorticity array should be +! allocated. At present, there are two rounds of smoothing in +! h_viscosity. + +SELECT CASE (option) +CASE (1) + ! ==== + ! Harmonic Leith parameterization + ! ==== + call h_viscosity_leith(partit, mesh) + call visc_filt_harmon(partit, mesh) +CASE (2) + ! === + ! Laplacian+Leith+biharmonic background + ! === + call h_viscosity_leith(partit, mesh) + call visc_filt_hbhmix(partit, mesh) +CASE (3) + ! === + ! Biharmonic Leith parameterization + ! === + call h_viscosity_leith(partit, mesh) + call visc_filt_biharm(2, partit, mesh) +CASE (4) + ! === + ! Biharmonic+upwind-type + ! === + call visc_filt_biharm(1, partit, mesh) +CASE (5) + call visc_filt_bcksct(partit, mesh) +CASE (6) + call visc_filt_bilapl(partit, mesh) +CASE (7) + call visc_filt_bidiff(partit, mesh) +CASE (8) + call backscatter_coef(partit, mesh) + call visc_filt_dbcksc(partit, mesh) +CASE DEFAULT + if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop +END SELECT +end subroutine viscosity_filter +! =================================================================== +SUBROUTINE visc_filt_harmon(partit, mesh) +USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +USE o_ARRAYS +USE o_PARAM +USE g_CONFIG +IMPLICIT NONE + +real(kind=WP) :: u1, v1, le(2), len, vi +integer :: nz, ed, el(2) , nzmin,nzmax +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + ! An analog of harmonic viscosity operator. + ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area + ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. + ! The contribution from boundary edges is neglected (free slip). + DO ed=1, myDim_edge2D+eDim_edge2D + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + len=sqrt(sum(elem_area(el(1:2)))) + nzmax = minval(nlevels(el)) + nzmin = maxval(ulevels(el)) + !!PS DO nz=1,minval(nlevels(el))-1 + DO nz=nzmin,nzmax-1 + vi=0.5_WP*(Visc(nz,el(1))+Visc(nz,el(2))) + vi=max(vi, gamma0*len)*dt ! limited from below by backgroung + u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi + v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi + + UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) + UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) + UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) + UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) + END DO + END DO +end subroutine visc_filt_harmon +! =================================================================== +SUBROUTINE visc_filt_biharm(option, partit, mesh) + USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - use MOD_DYN - use visc_filt_bcksct_interface - use visc_filt_bilapl_interface - use visc_filt_bidiff_interface -!!PS use visc_filt_dbcksc_interface -!!PS use backscatter_coef_interface - use g_backscatter - IMPLICIT NONE - integer :: option - type(t_dyn) , intent(inout), target :: dynamics - type(t_mesh) , intent(in) , target :: mesh + USE o_ARRAYS + USE o_PARAM + USE g_CONFIG + use g_comm_auto + IMPLICIT NONE + ! An energy conserving version + ! Also, we use the Leith viscosity + ! + real(kind=WP) :: u1, v1, vi, len + integer :: ed, el(2), nz, option, nzmin, nzmax + real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - ! Driving routine - ! Background viscosity is selected in terms of Vl, where V is - ! background velocity scale and l is the resolution. V is 0.005 - ! or 0.01, perhaps it would be better to pass it as a parameter. - - ! h_viscosity_leiht needs vorticity, so vorticity array should be - ! allocated. At present, there are two rounds of smoothing in - ! h_viscosity. - SELECT CASE (option) - CASE (5) - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bcksct'//achar(27)//'[0m' - call visc_filt_bcksct(dynamics, partit, mesh) - CASE (6) - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bilapl'//achar(27)//'[0m' - call visc_filt_bilapl(dynamics, partit, mesh) - CASE (7) - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bidiff'//achar(27)//'[0m' - call visc_filt_bidiff(dynamics, partit, mesh) - CASE (8) - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call backscatter_coef'//achar(27)//'[0m' - call backscatter_coef(partit, mesh) - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_dbcksc'//achar(27)//'[0m' - call visc_filt_dbcksc(dynamics, partit, mesh) - CASE DEFAULT - if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - stop - END SELECT -end subroutine viscosity_filter -! -! -!_______________________________________________________________________________ -SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + ! Filter is applied twice. + ed=myDim_elem2D+eDim_elem2D + allocate(U_c(nl-1,ed), V_c(nl-1, ed)) + U_c=0.0_WP + V_c=0.0_WP + DO ed=1, myDim_edge2D+eDim_edge2D + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + nzmax = minval(nlevels(el)) + nzmin = maxval(ulevels(el)) + !!PS DO nz=1,minval(nlevels(el))-1 + DO nz=nzmin,nzmax-1 + u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) + v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) + U_c(nz,el(1))=U_c(nz,el(1))-u1 + U_c(nz,el(2))=U_c(nz,el(2))+u1 + V_c(nz,el(1))=V_c(nz,el(1))-v1 + V_c(nz,el(2))=V_c(nz,el(2))+v1 + END DO + END DO + + if(option==1) then + Do ed=1,myDim_elem2D + len=sqrt(elem_area(ed)) + nzmin = ulevels(ed) + nzmax = nlevels(ed) + !!PS Do nz=1,nlevels(ed)-1 + Do nz=nzmin,nzmax-1 + ! vi has the sense of harmonic viscosity coefficient because of + ! the division by area in the end + ! ==== + ! Case 1 -- an analog to the third-order upwind (vi=gamma1 * |u| * l) + ! ==== + vi=max(gamma0, gamma1*sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len*dt + U_c(nz,ed)=-U_c(nz,ed)*vi + V_c(nz,ed)=-V_c(nz,ed)*vi + END DO + end do + end if + + if(option==2) then + Do ed=1,myDim_elem2D + len=sqrt(elem_area(ed)) + nzmin = ulevels(ed) + nzmax = nlevels(ed) + !!PS Do nz=1,nlevels(ed)-1 + Do nz=nzmin,nzmax-1 + ! vi has the sense of harmonic viscosity coefficient because of + ! the division by area in the end + ! === + ! Case 2 -- Leith +background (do not forget to call h_viscosity_leith before using this option) + ! === + vi=max(Visc(nz,ed), gamma0*len)*dt ! limited from below by backgroung + ! + U_c(nz,ed)=-U_c(nz,ed)*vi + V_c(nz,ed)=-V_c(nz,ed)*vi + END DO + end do + end if + + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) + DO ed=1, myDim_edge2D+eDim_edge2D + ! check if its a boudnary edge + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + nzmin = maxval(ulevels(el)) + nzmax = minval(nlevels(el)) + !!PS DO nz=1,minval(nlevels(el))-1 + DO nz=nzmin,nzmax-1 + u1=(U_c(nz,el(1))-U_c(nz,el(2))) + v1=(V_c(nz,el(1))-V_c(nz,el(2))) + UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) + UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) + UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) + UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) + END DO + END DO + + deallocate(V_c,U_c) + +end subroutine visc_filt_biharm +! =================================================================== +SUBROUTINE visc_filt_hbhmix(partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - use MOD_DYN + USE o_ARRAYS + USE o_PARAM + USE g_CONFIG + use g_comm_auto + IMPLICIT NONE + + ! An energy and momentum conserving version. + ! We use the harmonic Leith viscosity + biharmonic background viscosity + ! + + real(kind=WP) :: u1, v1, vi, len, crosslen, le(2) + integer :: ed, el(2), nz, nzmin, nzmax + real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + ! Filter is applied twice. + ed=myDim_elem2D+eDim_elem2D + allocate(U_c(nl-1,ed), V_c(nl-1, ed)) + U_c=0.0_WP + V_c=0.0_WP + DO ed=1, myDim_edge2D+eDim_edge2D + ! check if its a boudnary edge + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + nzmin = maxval(ulevels(el)) + nzmax = minval(nlevels(el)) + !!PS DO nz=1,minval(nlevels(el))-1 + DO nz=nzmin,nzmax-1 + vi=dt*0.5_WP*(Visc(nz,el(1))+Visc(nz,el(2))) + ! backgroung is added later (biharmonically) + u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) + v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) + U_c(nz,el(1))=U_c(nz,el(1))-u1 + U_c(nz,el(2))=U_c(nz,el(2))+u1 + V_c(nz,el(1))=V_c(nz,el(1))-v1 + V_c(nz,el(2))=V_c(nz,el(2))+v1 + u1=u1*vi + v1=v1*vi + UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) + UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) + UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) + UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) + END DO + END DO + + Do ed=1,myDim_elem2D + len=sqrt(elem_area(ed)) + nzmin = ulevels(ed) + nzmax = nlevels(ed) + !!PS Do nz=1,nlevels(ed)-1 + Do nz=nzmin,nzmax-1 + vi=dt*gamma0*len ! add biharmonic backgroung + U_c(nz,ed)=-U_c(nz,ed)*vi + V_c(nz,ed)=-V_c(nz,ed)*vi + END DO + end do + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) + DO ed=1, myDim_edge2D+eDim_edge2D + ! check if its a boudnary edge + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + nzmin = maxval(ulevels(el)) + nzmax = minval(nlevels(el)) + !!PS DO nz=1,minval(nlevels(el))-1 + DO nz=nzmin,nzmax-1 + u1=(U_c(nz,el(1))-U_c(nz,el(2))) + v1=(V_c(nz,el(1))-V_c(nz,el(2))) + UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) + UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) + UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) + UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) + END DO + END DO + + deallocate(V_c,U_c) + +end subroutine visc_filt_hbhmix + +! =================================================================== +SUBROUTINE h_viscosity_leith(partit, mesh) + ! + ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE o_ARRAYS + USE o_PARAM + USE g_CONFIG + use g_comm_auto + IMPLICIT NONE + real(kind=WP) :: dz, div_elem(3), xe, ye, vi + integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 + real(kind=WP) :: leithx, leithy + real(kind=WP), allocatable :: aux(:,:) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ! + if(mom_adv<4) call relative_vorticity(partit, mesh) !!! vorticity array should be allocated + ! Fill in viscosity: + Visc = 0.0_WP + DO elem=1, myDim_elem2D !! m=1, myDim_elem2D + !! elem=myList_elem2D(m) + !_______________________________________________________________________ + ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because + ! they run over elements here + nl1 =nlevels(elem)-1 + ul1 =ulevels(elem) + + zbar_n=0.0_WP + ! in case of partial cells zbar_n(nzmax) is not any more at zbar(nzmax), + ! zbar_n(nzmax) is now zbar_e_bot(elem), + zbar_n(nl1+1)=zbar_e_bot(elem) + !!PS do nz=nl1,2,-1 + do nz=nl1,ul1+1,-1 + zbar_n(nz) = zbar_n(nz+1) + helem(nz,elem) + end do + !!PS zbar_n(1) = zbar_n(2) + helem(1,elem) + zbar_n(ul1) = zbar_n(ul1+1) + helem(ul1,elem) + + !_______________________________________________________________________ + elnodes=elem2D_nodes(:,elem) + !!PS do nz=1,nl1 + do nz=ul1,nl1 + dz=zbar_n(nz)-zbar_n(nz+1) + div_elem=(Wvel(nz,elnodes)-Wvel(nz+1,elnodes))/dz + xe=sum(gradient_sca(1:3,elem)*div_elem) + ye=sum(gradient_sca(4:6,elem)*div_elem) + div_elem=vorticity(nz,elnodes) + leithx=sum(gradient_sca(1:3,elem)*div_elem) + leithy=sum(gradient_sca(4:6,elem)*div_elem) + Visc(nz,elem)=min(gamma1*elem_area(elem)*sqrt((Div_c*(xe**2+ye**2) & + + Leith_c*(leithx**2+leithy**2))*elem_area(elem)), elem_area(elem)/dt) + end do !! 0.1 here comes from (2S)^{3/2}/pi^3 + do nz=nl1+1, nl-1 + Visc(nz, elem)=0.0_WP + end do + do nz=1,ul1-1 + Visc(nz, elem)=0.0_WP + end do + END DO + + allocate(aux(nl-1,myDim_nod2D+eDim_nod2D)) + aux = 0.0_WP + DO nt=1,2 + DO n=1, myDim_nod2D + nl1 = nlevels_nod2D(n) + ul1 = ulevels_nod2D(n) + !!PS DO nz=1, nlevels_nod2D(n)-1 + DO nz=ul1, nl1-1 + dz=0.0_WP + vi=0.0_WP + DO k=1, nod_in_elem2D_num(n) + elem=nod_in_elem2D(k,n) + dz=dz+elem_area(elem) + vi=vi+Visc(nz,elem)*elem_area(elem) + END DO + aux(nz,n)=vi/dz + END DO + END DO + call exchange_nod(aux, partit) + do elem=1, myDim_elem2D + elnodes=elem2D_nodes(:,elem) + nl1=nlevels(elem)-1 + ul1=ulevels(elem) + !!!PS Do nz=1, nl1 + Do nz=ul1, nl1 + Visc(nz,elem)=sum(aux(nz,elnodes))/3.0_WP + END DO + DO nz=nl1+1, nl-1 + Visc(nz,elem)=0.0_WP + END Do + DO nz=1, ul1-1 + Visc(nz,elem)=0.0_WP + END Do + end do + end do + call exchange_elem(Visc, partit) + deallocate(aux) +END subroutine h_viscosity_leith +! ======================================================================= +SUBROUTINE visc_filt_bcksct(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE o_ARRAYS USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -217,21 +634,14 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax - type(t_dyn) , intent(inout), target :: dynamics + real(kind=8), allocatable :: U_b(:,:), V_b(:,:), U_c(:,:), V_c(:,:) + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs - real(kind=WP), dimension(:,:) , pointer :: U_c, V_c, U_b, V_b + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv( :,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - U_c => dynamics%work%u_c(:,:) - V_c => dynamics%work%v_c(:,:) - U_b => dynamics%work%u_b(:,:) - V_b => dynamics%work%v_b(:,:) ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. @@ -256,12 +666,9 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) DO nz=nzmin,nzmax-1 u1=UV(1,nz,el(1))-UV(1,nz,el(2)) v1=UV(2,nz,el(1))-UV(2,nz,el(2)) - vi=dt*max(dynamics%visc_gamma0, & - max(dynamics%visc_gamma1*sqrt(u1*u1+v1*v1), & - dynamics%visc_gamma2*(u1*u1+v1*v1)) & - )*len -! vi=dt*max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(u1*u1+v1*v1), dynamics%visc_gamma2*(u1*u1+v1*v1)))*len - !here dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. + vi=dt*max(gamma0, max(gamma1*sqrt(u1*u1+v1*v1), gamma2*(u1*u1+v1*v1)))*len +! vi=dt*max(gamma0, gamma1*max(sqrt(u1*u1+v1*v1), gamma2*(u1*u1+v1*v1)))*len + !here gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. u1=u1*vi v1=v1*vi U_b(nz,el(1))=U_b(nz,el(1))-u1/elem_area(el(1)) @@ -301,46 +708,39 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) nzmax = nlevels(ed) !!PS Do nz=1, nlevels(ed)-1 Do nz=nzmin, nzmax-1 - UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+U_b(nz,ed) -dynamics%visc_easybsreturn*sum(U_c(nz,nelem))/3.0_WP - UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -dynamics%visc_easybsreturn*sum(V_c(nz,nelem))/3.0_WP + UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+U_b(nz,ed) -easy_bs_return*sum(U_c(nz,nelem))/3.0_WP + UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -easy_bs_return*sum(V_c(nz,nelem))/3.0_WP END DO end do + deallocate(V_c,U_c,V_b,U_b) end subroutine visc_filt_bcksct -! -! -!_______________________________________________________________________________ + +! =================================================================== ! Strictly energy dissipative and momentum conserving version ! Viscosity depends on velocity Laplacian, i.e., on an analog of ! the Leith viscosity (Lapl==second derivatives) ! \nu=|3u_c-u_n1-u_n2-u_n3|*sqrt(S_c)/100. There is an additional term ! in viscosity that is proportional to the velocity amplitude squared. ! The coefficient has to be selected experimentally. -SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) +SUBROUTINE visc_filt_bilapl(partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - use MOD_DYN + USE o_ARRAYS USE o_PARAM USE g_CONFIG USE g_comm_auto IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax - - type(t_dyn) , intent(inout), target :: dynamics + real(kind=8), allocatable :: U_c(:,:), V_c(:,:) + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs - real(kind=WP), dimension(:,:) , pointer :: U_c, V_c + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - U_c => dynamics%work%u_c(:,:) - V_c => dynamics%work%v_c(:,:) ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -371,10 +771,7 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) ! vi has the sense of harmonic viscosity coef. because of ! division by area in the end u1=U_c(nz,ed)**2+V_c(nz,ed)**2 - vi=max(dynamics%visc_gamma0, & - max(dynamics%visc_gamma1*sqrt(u1), & - dynamics%visc_gamma2*u1) & - )*len*dt + vi=max(gamma0, max(gamma1*sqrt(u1), gamma2*u1))*len*dt U_c(nz,ed)=-U_c(nz,ed)*vi V_c(nz,ed)=-V_c(nz,ed)*vi END DO @@ -400,40 +797,32 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) deallocate(V_c,U_c) end subroutine visc_filt_bilapl -! -! -!_______________________________________________________________________________ +! =================================================================== ! Strictly energy dissipative and momentum conserving version ! Viscosity depends on velocity differences, and is introduced symmetrically ! into both stages of biharmonic operator ! On each edge, \nu=sqrt(|u_c1-u_c2|*sqrt(S_c1+S_c2)/100) ! The effect is \nu^2 ! Quadratic in velocity term can be introduced if needed. -SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) +SUBROUTINE visc_filt_bidiff(partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - use MOD_DYN + USE o_ARRAYS USE o_PARAM USE g_CONFIG USE g_comm_auto IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax - type(t_dyn) , intent(inout), target :: dynamics + real(kind=8), allocatable :: U_c(:,:), V_c(:,:) + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs - real(kind=WP), dimension(:,:) , pointer :: U_c, V_c + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - U_c => dynamics%work%u_c(:,:) - V_c => dynamics%work%v_c(:,:) ! ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -450,11 +839,8 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) vi=u1*u1+v1*v1 - vi=sqrt(max(dynamics%visc_gamma0, & - max(dynamics%visc_gamma1*sqrt(vi), & - dynamics%visc_gamma2*vi) & - )*len) - ! vi=sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) + vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) + ! vi=sqrt(max(gamma0, gamma1*max(sqrt(vi), gamma2*vi))*len) u1=u1*vi v1=v1*vi U_c(nz,el(1))=U_c(nz,el(1))-u1 @@ -477,11 +863,8 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) vi=u1*u1+v1*v1 - vi=-dt*sqrt(max(dynamics%visc_gamma0, & - max(dynamics%visc_gamma1*sqrt(vi), & - dynamics%visc_gamma2*vi) & - )*len) - ! vi=-dt*sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) + vi=-dt*sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) + ! vi=-dt*sqrt(max(gamma0, gamma1*max(sqrt(vi), gamma2*vi))*len) u1=vi*(U_c(nz,el(1))-U_c(nz,el(2))) v1=vi*(V_c(nz,el(1))-V_c(nz,el(2))) UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) @@ -491,5 +874,370 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) END DO END DO deallocate(V_c, U_c) + end subroutine visc_filt_bidiff +! =================================================================== + + +! =================================================================== +SUBROUTINE visc_filt_dbcksc(partit, mesh) +USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +USE o_ARRAYS +USE o_PARAM +USE g_CONFIG +USE g_comm_auto +USE g_support +USE uke_update_interface +IMPLICIT NONE + +real(kind=8) :: u1, v1, le(2), len, crosslen, vi, uke1 +integer :: nz, ed, el(2) +real(kind=8), allocatable :: U_c(:,:), V_c(:,:), UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) +real(kind=8), allocatable :: uuu(:) +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + ! An analog of harmonic viscosity operator. + ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area + ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. + ! The contribution from boundary edges is neglected (free slip). + ! Filter is applied twice. + +ed=myDim_elem2D+eDim_elem2D +allocate(U_c(nl-1,ed), V_c(nl-1, ed)) +allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) +allocate(uke_d(nl-1,ed)) +allocate(uuu(ed)) + + U_c=0.0_8 + V_c=0.0_8 + UV_back=0.0_8 + UV_dis=0.0_8 + uke_d=0.0_8 + + DO ed=1, myDim_edge2D+eDim_edge2D + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + DO nz=1,minval(nlevels(el))-1 + u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) + v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) + + U_c(nz,el(1))=U_c(nz,el(1))-u1 + U_c(nz,el(2))=U_c(nz,el(2))+u1 + V_c(nz,el(1))=V_c(nz,el(1))-v1 + V_c(nz,el(2))=V_c(nz,el(2))+v1 + END DO + END DO + + + Do ed=1,myDim_elem2D + len=sqrt(elem_area(ed)) + len=dt*len/30.0_8 + Do nz=1,nlevels(ed)-1 + ! vi has the sense of harmonic viscosity coefficient because of + ! the division by area in the end + ! ==== + ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) + ! ==== + vi=max(0.2_8,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len + U_c(nz,ed)=-U_c(nz,ed)*vi + V_c(nz,ed)=-V_c(nz,ed)*vi + END DO + end do + + + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) + + DO ed=1, myDim_edge2D+eDim_edge2D + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + le=edge_dxdy(:,ed) + le(1)=le(1)*sum(elem_cos(el))*0.25_8 + len=sqrt(le(1)**2+le(2)**2)*r_earth + le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) + le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) + crosslen=sqrt(le(1)**2+le(2)**2) + DO nz=1,minval(nlevels(el))-1 + vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen + !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed + !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed + !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where + ! Visc is small and decoupling might happen + !Backscatter contribution + u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi + v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi + + !UKE diffusion + vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen + + uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi + + + UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) + UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) + UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) + UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) + + !Correct scaling for the diffusion? + uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) + uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) + + + + !Biharmonic contribution + u1=(U_c(nz,el(1))-U_c(nz,el(2))) + v1=(V_c(nz,el(1))-V_c(nz,el(2))) + + UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) + UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) + UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) + UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) + + END DO + END DO + +call exchange_elem(UV_back, partit) + +DO nz=1, nl-1 + uuu=0.0_8 + uuu=UV_back(1,nz,:) + call smooth_elem(uuu,smooth_back_tend, partit, mesh) + UV_back(1,nz,:)=uuu + uuu=0.0_8 + uuu=UV_back(2,nz,:) + call smooth_elem(uuu,smooth_back_tend, partit, mesh) + UV_back(2,nz,:)=uuu +END DO + + DO ed=1, myDim_elem2D + DO nz=1,nlevels(ed)-1 + UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) + UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) + END DO + END DO + + UV_dis_tend=UV_dis!+UV_back + UV_total_tend=UV_dis+UV_back + UV_back_tend=UV_back + uke_dif=uke_d + + call uke_update(partit, mesh) + deallocate(V_c,U_c) + deallocate(UV_dis,UV_back) + deallocate(uke_d) + deallocate(uuu) + +end subroutine visc_filt_dbcksc +!=========================================================================== + +SUBROUTINE backscatter_coef(partit, mesh) +USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +USE o_ARRAYS +USE o_PARAM +USE g_CONFIG +use g_comm_auto +IMPLICIT NONE +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +integer :: elem, nz +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + +!Potentially add the Rossby number scaling to the script... +!check if sign is right! Different in the Jansen paper +!Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) + +v_back=0.0_8 +DO elem=1, myDim_elem2D + DO nz=1,nlevels(elem)-1 +!v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) +!v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct +v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct +!Scaling by sqrt(2*elem_area) or sqrt(elem_area)? + END DO +END DO + +call exchange_elem(v_back, partit) + +end subroutine backscatter_coef +!=========================================================================== + +SUBROUTINE uke_update(partit, mesh) +USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +USE o_ARRAYS +USE o_PARAM +USE g_CONFIG +use g_comm_auto +USE g_support +USE g_rotate_grid +IMPLICIT NONE + +!I had to change uke(:) to uke(:,:) to make output and restart work!! + +!Why is it necessary to implement the length of the array? It doesn't work without! +!integer, intent(in) :: t_levels +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +real(kind=8) :: hall, h1_eta, hnz, vol +integer :: elnodes(3), nz, ed, edi, node, j, elem, q +real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) +integer :: kk, nzmax, el +real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso +real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + +!rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered +!rossby_num=2 + +ed=myDim_elem2D+eDim_elem2D +allocate(uuu(ed)) + +uke_back=0.0_8 +uke_dis=0.0_8 +DO ed=1, myDim_elem2D +DO nz=1, nlevels(ed)-1 + uke_dis(nz,ed)=(UV(1,nz,ed)*UV_dis_tend(1,nz,ed)+UV(2,nz,ed)*UV_dis_tend(2,nz,ed)) + uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) +END DO +END DO + +DO nz=1,nl-1 + uuu=0.0_8 + uuu=uke_back(nz,:) + call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? + uke_back(nz,:)=uuu +END DO + + + +!Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice +!Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic +!uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) +ed=myDim_elem2D+eDim_elem2D +allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) +allocate(work_uv(myDim_nod2D+eDim_nod2D)) +allocate(rosb_array(nl-1,ed)) +call exchange_elem(UV, partit) +rosb_array=0._8 +DO nz=1, nl-1 + work_uv=0._WP + DO node=1, myDim_nod2D + vol=0._WP + U_work(nz,node)=0._WP + V_work(nz,node)=0._WP + DO j=1, nod_in_elem2D_num(node) + elem=nod_in_elem2D(j, node) + U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) + V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) + vol=vol+elem_area(elem) + END DO + U_work(nz,node)=U_work(nz,node)/vol + V_work(nz,node)=U_work(nz,node)/vol + END DO + work_uv=U_work(nz,:) + call exchange_nod(work_uv, partit) + U_work(nz,:)=work_uv + work_uv=V_work(nz,:) + call exchange_nod(work_uv, partit) + V_work(nz,:)=work_uv +END DO + + DO el=1,myDim_elem2D + DO nz=1, nlevels(el)-1 + rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& + sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& + (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& + sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) +! hall=hall+hnz + END DO +! rosb_array(el)=rosb_array(el)/hall + END DO +DO ed=1, myDim_elem2D + scaling=1._WP + IF(uke_scaling) then + reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) + rosb=0._wp + elnodes=elem2D_nodes(:, ed) + DO kk=1,3 + c1=0._wp + nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) + !Vertical average; same scaling in the vertical + DO nz=1, nzmax-1 + c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. + END DO + c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min + !Cutoff K_GM depending on (Resolution/Rossby radius) ratio + rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) + END DO + rosb=rosb/3._8 + scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) + END IF + + DO nz=1, nlevels(ed)-1 + elnodes=elem2D_nodes(:,ed) + + !Taking out that one place where it is always weird (Pacific Southern Ocean) + !Should not really be used later on, once we fix the issue with the 1/4 degree grid + if(.not. (TRIM(which_toy)=="soufflet")) then + call elem_center(ed, ex, ey) + !a1=-104.*rad + !a2=-49.*rad + call g2r(-104.*rad, -49.*rad, a1, a2) + dist_reg(1)=ex-a1 + dist_reg(2)=ey-a2 + call trim_cyclic(dist_reg(1)) + dist_reg(1)=dist_reg(1)*elem_cos(ed) + dist_reg=dist_reg*r_earth + len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) + + + !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 + rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) + !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) + uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) + else + rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) + !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) + uke_dis(nz,ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) + end if + + END DO +END DO +deallocate(U_work, V_work) +deallocate(rosb_array) +deallocate(work_uv) +call exchange_elem(uke_dis, partit) +DO nz=1, nl-1 + uuu=uke_dis(nz,:) + call smooth_elem(uuu,smooth_dis, partit, mesh) + uke_dis(nz,:)=uuu +END DO +DO ed=1, myDim_elem2D + DO nz=1,nlevels(ed)-1 + uke_rhs_old(nz,ed)=uke_rhs(nz,ed) + uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) + uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) + END DO +END DO +call exchange_elem(uke, partit) + +deallocate(uuu) +end subroutine uke_update +! =================================================================== diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index 7db79c91e..ab12e49ae 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -1,35 +1,3 @@ -module fer_solve_interface - interface - subroutine fer_solve_Gamma(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - - subroutine fer_gamma2vel(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - end subroutine - - subroutine init_Redi_GM(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface -end module - - - !--------------------------------------------------------------------------- !Implementation of Gent & McWiliams parameterization after R. Ferrari et al., 2010 !Contains: @@ -159,13 +127,12 @@ END subroutine fer_solve_Gamma ! ! !==================================================================== -subroutine fer_gamma2vel(dynamics, partit, mesh) +subroutine fer_gamma2vel(partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN USE o_PARAM - USE o_ARRAYS, ONLY: fer_gamma + USE o_ARRAYS, ONLY: fer_gamma, fer_uv USE g_CONFIG use g_comm_auto IMPLICIT NONE @@ -173,18 +140,14 @@ subroutine fer_gamma2vel(dynamics, partit, mesh) integer :: nz, nzmax, el, elnod(3), nzmin real(kind=WP) :: zinv real(kind=WP) :: onethird=1._WP/3._WP - type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in), target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: fer_UV - real(kind=WP), dimension(:,:) , pointer :: fer_Wvel + type(t_mesh), intent(in), target :: mesh + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - fer_UV =>dynamics%fer_uv(:,:,:) - fer_Wvel =>dynamics%fer_w(:,:) - + DO el=1, myDim_elem2D elnod=elem2D_nodes(:,el) ! max. number of levels at element el diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 1a0c078a0..3576ef01f 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -23,7 +23,14 @@ MODULE o_PARAM real(kind=WP) :: C_d= 0.0025_WP ! Bottom drag coefficient real(kind=WP) :: kappa=0.4 !von Karman's constant real(kind=WP) :: mix_coeff_PP=0.01_WP ! mixing coef for PP scheme +real(kind=WP) :: gamma0=0.01! [m/s], gamma0*len*dt is the background viscosity +real(kind=WP) :: gamma1=0.1! [non dim.], or computation of the flow aware viscosity +real(kind=WP) :: gamma2=10.! [s/m], is only used in easy backscatter option +real(kind=WP) :: Div_c =1.0_WP !modified Leith viscosity weight +real(kind=WP) :: Leith_c=1.0_WP !Leith viscosity weight. It needs vorticity! +real(kind=WP) :: easy_bs_return=1.0 !backscatter option only (how much to return) real(kind=WP) :: A_ver=0.001_WP ! Vertical harm. visc. +integer :: visc_option=5 logical :: uke_scaling=.true. real(kind=WP) :: uke_scaling_factor=1._WP real(kind=WP) :: rosb_dis=1._WP @@ -74,6 +81,9 @@ MODULE o_PARAM ! elevation and divergence real(kind=WP) :: epsilon=0.1_WP ! AB2 offset ! Tracers +logical :: i_vert_visc= .true. +logical :: w_split =.false. +real(kind=WP) :: w_max_cfl=1.e-5_WP logical :: SPP=.false. @@ -89,9 +99,9 @@ MODULE o_PARAM ! Momentum -!!PS logical :: free_slip=.false. -!!PS ! false=no slip -!!PS integer :: mom_adv=2 +logical :: free_slip=.false. + ! false=no slip +integer :: mom_adv=2 ! 1 vector control volumes, p1 velocities ! 2 scalar control volumes ! 3 vector invariant @@ -151,11 +161,11 @@ MODULE o_PARAM character(20) :: which_pgf='shchepetkin' - NAMELIST /oce_dyn/ state_equation, C_d, A_ver, & - scale_area, SPP,& + NAMELIST /oce_dyn/ state_equation, C_d, A_ver, gamma0, gamma1, gamma2, Leith_c, Div_c, easy_bs_return, & + scale_area, mom_adv, free_slip, i_vert_visc, w_split, w_max_cfl, SPP,& Fer_GM, K_GM_max, K_GM_min, K_GM_bvref, K_GM_resscalorder, K_GM_rampmax, K_GM_rampmin, & scaling_Ferreira, scaling_Rossby, scaling_resolution, scaling_FESOM14, & - Redi, visc_sh_limit, mix_scheme, Ricr, concv, which_pgf, alpha, theta, use_density_ref, & + Redi, visc_sh_limit, mix_scheme, Ricr, concv, which_pgf, visc_option, alpha, theta, use_density_ref, & K_back, c_back, uke_scaling, uke_scaling_factor, smooth_back, smooth_dis, & smooth_back_tend, rosb_dis @@ -172,11 +182,16 @@ MODULE o_ARRAYS USE o_PARAM IMPLICIT NONE ! Arrays are described in subroutine array_setup +real(kind=WP), allocatable, target :: Wvel(:,:), Wvel_e(:,:), Wvel_i(:,:) +real(kind=WP), allocatable :: UV(:,:,:) +real(kind=WP), allocatable :: UV_rhs(:,:,:), UV_rhsAB(:,:,:) real(kind=WP), allocatable :: uke(:,:), v_back(:,:), uke_back(:,:), uke_dis(:,:), uke_dif(:,:) real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) real(kind=WP), allocatable :: UV_dis_posdef_b2(:,:), UV_dis_posdef(:,:), UV_back_posdef(:,:) -real(kind=WP), allocatable :: hpressure(:,:) +real(kind=WP), allocatable :: eta_n(:), d_eta(:) +real(kind=WP), allocatable :: ssh_rhs(:), hpressure(:,:) +real(kind=WP), allocatable :: CFL_z(:,:) real(kind=WP), allocatable :: stress_surf(:,:) real(kind=WP), allocatable :: stress_node_surf(:,:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_x(:) @@ -186,7 +201,7 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: water_flux(:), Ssurf(:) real(kind=WP), allocatable :: virtual_salt(:), relax_salt(:) real(kind=WP), allocatable :: Tclim(:,:), Sclim(:,:) -!!PS real(kind=WP), allocatable :: Visc(:,:) +real(kind=WP), allocatable :: Visc(:,:) real(kind=WP), allocatable :: Tsurf_t(:,:), Ssurf_t(:,:) real(kind=WP), allocatable :: tau_x_t(:,:), tau_y_t(:,:) real(kind=WP), allocatable :: heat_flux_t(:,:), heat_rel_t(:,:), heat_rel(:) @@ -200,12 +215,15 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: tr_xy(:,:,:) real(kind=WP), allocatable :: tr_z(:,:) +! Auxiliary arrays for vector-invariant form of momentum advection +real(kind=WP), allocatable,dimension(:,:) :: vorticity + !Viscosity and diff coefs real(kind=WP), allocatable,dimension(:,:) :: Av,Kv real(kind=WP), allocatable,dimension(:,:,:) :: Kv_double real(kind=WP), allocatable,dimension(:) :: Kv0 !Velocities interpolated to nodes -!!PS real(kind=WP), allocatable,dimension(:,:,:) :: Unode +real(kind=WP), allocatable,dimension(:,:,:) :: Unode ! Auxiliary arrays to store Redi-GM fields real(kind=WP), allocatable,dimension(:,:,:) :: neutral_slope @@ -241,6 +259,7 @@ MODULE o_ARRAYS !GM_stuff real(kind=WP),allocatable :: bvfreq(:,:),mixlay_dep(:),bv_ref(:) +real(kind=WP), allocatable :: fer_UV(:,:,:), fer_wvel(:,:) real(kind=WP), target, allocatable :: fer_c(:), fer_scal(:), fer_K(:,:), fer_gamma(:,:,:) real(kind=WP), allocatable :: ice_rejected_salt(:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 8309a1ca6..2842b69f0 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -11,7 +11,6 @@ subroutine oce_initial_state(tracers, partit, mesh) end subroutine end interface end module - module tracer_init_interface interface subroutine tracer_init(tracers, partit, mesh) @@ -25,58 +24,38 @@ subroutine tracer_init(tracers, partit, mesh) end subroutine end interface end module - -module dynamics_init_interface - interface - subroutine dynamics_init(dynamics, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - end subroutine - end interface -end module - module ocean_setup_interface interface - subroutine ocean_setup(dynamics, tracers, partit, mesh) + subroutine ocean_setup(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - use MOD_DYN type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(inout), target :: dynamics end subroutine end interface end module module before_oce_step_interface interface - subroutine before_oce_step(dynamics, tracers, partit, mesh) + subroutine before_oce_step(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - use MOD_DYN type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(inout), target :: dynamics end subroutine end interface end module !_______________________________________________________________________________ -subroutine ocean_setup(dynamics, tracers, partit, mesh) +subroutine ocean_setup(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER -USE MOD_DYN USE o_PARAM USE o_ARRAYS USE g_config @@ -86,17 +65,13 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) use g_cvmix_pp use g_cvmix_kpp use g_cvmix_tidal -use g_backscatter use Toy_Channel_Soufflet use oce_initial_state_interface use oce_adv_tra_fct_interfaces -use init_ale_interface -use init_thickness_ale_interface IMPLICIT NONE type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers -type(t_dyn), intent(inout), target :: dynamics integer :: n !___setup virt_salt_flux____________________________________________________ ! if the ale thinkness remain unchanged (like in 'linfs' case) the vitrual @@ -120,10 +95,7 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) write(*,*) ' --> initialise ALE arrays + sparse SSH stiff matrix' write(*,*) end if - - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_ale'//achar(27)//'[0m' - call init_ale(dynamics, partit, mesh) - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_stiff_mat_ale'//achar(27)//'[0m' + call init_ale(partit, mesh) call init_stiff_mat_ale(partit, mesh) !!PS test !___________________________________________________________________________ @@ -151,24 +123,20 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) ! initialise fesom1.4 like KPP if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_kpp_init'//achar(27)//'[0m' call oce_mixing_kpp_init(partit, mesh) ! initialise fesom1.4 like PP elseif (mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then ! initialise cvmix_KPP elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_kpp'//achar(27)//'[0m' call init_cvmix_kpp(partit, mesh) ! initialise cvmix_PP elseif (mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_pp'//achar(27)//'[0m' call init_cvmix_pp(partit, mesh) ! initialise cvmix_TKE elseif (mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_tke'//achar(27)//'[0m' call init_cvmix_tke(partit, mesh) endif @@ -176,14 +144,12 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) ! initialise additional mixing cvmix_IDEMIX --> only in combination with ! cvmix_TKE+cvmix_IDEMIX or stand alone for debbuging as cvmix_TKE if (mod(mix_scheme_nmb,10)==6) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_idemix'//achar(27)//'[0m' call init_cvmix_idemix(partit, mesh) ! initialise additional mixing cvmix_TIDAL --> only in combination with ! KPP+cvmix_TIDAL, PP+cvmix_TIDAL, cvmix_KPP+cvmix_TIDAL, cvmix_PP+cvmix_TIDAL ! or stand alone for debbuging as cvmix_TIDAL elseif (mod(mix_scheme_nmb,10)==7) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_tidal'//achar(27)//'[0m' call init_cvmix_tidal(partit, mesh) end if @@ -204,7 +170,7 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) if(partit%mype==0) write(*,*) 'Arrays are set' !if(open_boundary) call set_open_boundary !TODO - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call oce_adv_tra_fct_init'//achar(27)//'[0m' + call oce_adv_tra_fct_init(tracers%work, partit, mesh) call muscl_adv_init(tracers%work, partit, mesh) !!PS test !===================== @@ -214,11 +180,10 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) if (toy_ocean) then SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call toy_channel'//achar(27)//'[0m' if (mod(mstep, soufflet_forc_update)==0) then - call initial_state_soufflet(dynamics, tracers, partit, mesh) + call initial_state_soufflet(tracers, partit, mesh) call compute_zonal_mean_ini(partit, mesh) - call compute_zonal_mean(dynamics, tracers, partit, mesh) + call compute_zonal_mean(tracers, partit, mesh) end if END SELECT else @@ -238,20 +203,14 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) write(*,*) ' --> call init_thickness_ale' write(*,*) end if - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_thickness_ale'//achar(27)//'[0m' - call init_thickness_ale(dynamics, partit, mesh) - - !___________________________________________________________________________ - ! initialise arrays that are needed for backscatter_coef - if(dynamics%opt_visc==8) call init_backscatter(partit, mesh) - + call init_thickness_ale(partit, mesh) !___________________________________________________________________________ if(partit%mype==0) write(*,*) 'Initial state' - if (dynamics%use_wsplit .and. partit%mype==0) then + if (w_split .and. partit%mype==0) then write(*,*) '******************************************************************************' write(*,*) 'vertical velocity will be split onto explicit and implicit constitutes;' - write(*,*) 'maximum allowed CDF on explicit W is set to: ', dynamics%wsplit_maxcfl + write(*,*) 'maximum allowed CDF on explicit W is set to: ', w_max_cfl write(*,*) '******************************************************************************' end if end subroutine ocean_setup @@ -354,132 +313,6 @@ END SUBROUTINE tracer_init ! ! !_______________________________________________________________________________ -SUBROUTINE dynamics_init(dynamics, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - USE o_param - IMPLICIT NONE - integer :: elem_size, node_size - integer, save :: nm_unit = 105 ! unit to open namelist file, skip 100-102 for cray - integer :: iost - - integer :: opt_visc - real(kind=WP) :: visc_gamma0, visc_gamma1, visc_gamma2 - real(kind=WP) :: visc_easybsreturn - logical :: use_ivertvisc - integer :: momadv_opt - logical :: use_freeslip - logical :: use_wsplit - real(kind=WP) :: wsplit_maxcfl - - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - - ! define dynamics namelist parameter - namelist /dynamics_visc / opt_visc, visc_gamma0, visc_gamma1, visc_gamma2, & - use_ivertvisc, visc_easybsreturn - namelist /dynamics_general/ momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl - -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - - ! open and read namelist for I/O - open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) - if (iost == 0) then - if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' - else - if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - stop - end if - read(nm_unit, nml=dynamics_visc, iostat=iost) - read(nm_unit, nml=dynamics_general, iostat=iost) - close(nm_unit) - - !___________________________________________________________________________ - ! set parameters in derived type - dynamics%opt_visc = opt_visc - dynamics%visc_gamma0 = visc_gamma0 - dynamics%visc_gamma1 = visc_gamma1 - dynamics%visc_gamma2 = visc_gamma2 - dynamics%visc_easybsreturn = visc_easybsreturn - dynamics%use_ivertvisc = use_ivertvisc - dynamics%momadv_opt = momadv_opt - dynamics%use_freeslip = use_freeslip - dynamics%use_wsplit = use_wsplit - dynamics%wsplit_maxcfl = wsplit_maxcfl - - !___________________________________________________________________________ - ! define local vertice & elem array size - elem_size=myDim_elem2D+eDim_elem2D - node_size=myDim_nod2D+eDim_nod2D - - !___________________________________________________________________________ - ! allocate/initialise horizontal velocity arrays in derived type - allocate(dynamics%uv( 2, nl-1, elem_size)) - allocate(dynamics%uv_rhs( 2, nl-1, elem_size)) - allocate(dynamics%uv_rhsAB( 2, nl-1, elem_size)) - allocate(dynamics%uvnode( 2, nl-1, node_size)) - dynamics%uv = 0.0_WP - dynamics%uv_rhs = 0.0_WP - dynamics%uv_rhsAB = 0.0_WP - dynamics%uvnode = 0.0_WP - if (Fer_GM) then - allocate(dynamics%fer_uv(2, nl-1, elem_size)) - dynamics%fer_uv = 0.0_WP - end if - - !___________________________________________________________________________ - ! allocate/initialise vertical velocity arrays in derived type - allocate(dynamics%w( nl, node_size)) - allocate(dynamics%w_e( nl, node_size)) - allocate(dynamics%w_i( nl, node_size)) - allocate(dynamics%cfl_z( nl, node_size)) - dynamics%w = 0.0_WP - dynamics%w_e = 0.0_WP - dynamics%w_i = 0.0_WP - dynamics%cfl_z = 0.0_WP - if (Fer_GM) then - allocate(dynamics%fer_w( nl, node_size)) - dynamics%fer_w = 0.0_WP - end if - - !___________________________________________________________________________ - ! allocate/initialise ssh arrays in derived type - allocate(dynamics%eta_n( node_size)) - allocate(dynamics%d_eta( node_size)) - allocate(dynamics%ssh_rhs( node_size)) - dynamics%eta_n = 0.0_WP - dynamics%d_eta = 0.0_WP - dynamics%ssh_rhs = 0.0_WP - !!PS allocate(dynamics%ssh_rhs_old(node_size)) - !!PS dynamics%ssh_rhs_old= 0.0_WP - - !___________________________________________________________________________ - ! inititalise working arrays - allocate(dynamics%work%uvnode_rhs(2, nl-1, node_size)) - allocate(dynamics%work%u_c(nl-1, elem_size)) - allocate(dynamics%work%v_c(nl-1, elem_size)) - dynamics%work%uvnode_rhs = 0.0_WP - dynamics%work%u_c = 0.0_WP - dynamics%work%v_c = 0.0_WP - if (dynamics%opt_visc==5) then - allocate(dynamics%work%u_b(nl-1, elem_size)) - allocate(dynamics%work%v_b(nl-1, elem_size)) - dynamics%work%u_b = 0.0_WP - dynamics%work%v_b = 0.0_WP - end if - - -END SUBROUTINE dynamics_init -! -! -!_______________________________________________________________________________ SUBROUTINE arrays_init(num_tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -512,11 +345,15 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Velocities ! ================ !allocate(stress_diag(2, elem_size))!delete me -!!PS allocate(Visc(nl-1, elem_size)) +allocate(UV(2, nl-1, elem_size)) +allocate(UV_rhs(2,nl-1, elem_size)) +allocate(UV_rhsAB(2,nl-1, elem_size)) +allocate(Visc(nl-1, elem_size)) ! ================ ! elevation and its rhs ! ================ - +allocate(eta_n(node_size), d_eta(node_size)) +allocate(ssh_rhs(node_size)) ! ================ ! Monin-Obukhov ! ================ @@ -525,7 +362,9 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! ================ ! Vertical velocity and pressure ! ================ -allocate( hpressure(nl,node_size)) +allocate(Wvel(nl, node_size), hpressure(nl,node_size)) +allocate(Wvel_e(nl, node_size), Wvel_i(nl, node_size)) +allocate(CFL_z(nl, node_size)) ! vertical CFL criteria allocate(bvfreq(nl,node_size),mixlay_dep(node_size),bv_ref(node_size)) ! ================ ! Ocean forcing arrays @@ -548,6 +387,14 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) allocate(Tsurf_t(node_size,2), Ssurf_t(node_size,2)) allocate(tau_x_t(node_size,2), tau_y_t(node_size,2)) +! ================= +! All auxiliary arrays +! ================= + +!if(mom_adv==3) then +allocate(vorticity(nl-1,node_size)) +vorticity=0.0_WP +!end if ! ================= ! Visc and Diff coefs @@ -563,6 +410,35 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) !!PS call oce_mixing_kpp_init ! Setup constants, allocate arrays and construct look up table end if +! ================= +! Backscatter arrays +! ================= + +if(visc_option==8) then + +allocate(uke(nl-1,elem_size)) ! Unresolved kinetic energy for backscatter coefficient +allocate(v_back(nl-1,elem_size)) ! Backscatter viscosity +allocate(uke_dis(nl-1,elem_size), uke_back(nl-1,elem_size)) +allocate(uke_dif(nl-1,elem_size)) +allocate(uke_rhs(nl-1,elem_size), uke_rhs_old(nl-1,elem_size)) +allocate(UV_dis_tend(2,nl-1,elem_size), UV_back_tend(2,nl-1,elem_size)) +allocate(UV_total_tend(2,nl-1,elem_size)) + +uke=0.0_8 +v_back=0.0_8 +uke_dis=0.0_8 +uke_dif=0.0_8 +uke_back=0.0_8 +uke_rhs=0.0_8 +uke_rhs_old=0.0_8 +UV_dis_tend=0.0_8 +UV_back_tend=0.0_8 +UV_total_tend=0.0_8 +end if + +!Velocities at nodes +allocate(Unode(2,nl-1,node_size)) + ! tracer gradients & RHS allocate(ttrhs(nl-1,node_size)) allocate(tr_xy(2,nl-1,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) @@ -600,7 +476,10 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) if (Fer_GM) then allocate(fer_c(node_size),fer_scal(node_size), fer_gamma(2, nl, node_size), fer_K(nl, node_size)) + allocate(fer_wvel(nl, node_size), fer_UV(2, nl-1, elem_size)) fer_gamma=0.0_WP + fer_uv=0.0_WP + fer_wvel=0.0_WP fer_K=500._WP fer_c=1._WP fer_scal = 0.0_WP @@ -615,6 +494,17 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Initialize with zeros ! ================= + UV=0.0_WP + UV_rhs=0.0_WP + UV_rhsAB=0.0_WP +! + eta_n=0.0_WP + d_eta=0.0_WP + ssh_rhs=0.0_WP + Wvel=0.0_WP + Wvel_e =0.0_WP + Wvel_i =0.0_WP + CFL_z =0.0_WP hpressure=0.0_WP ! heat_flux=0.0_WP @@ -845,12 +735,11 @@ end subroutine oce_initial_state ! !========================================================================== ! Here we do things (if applicable) before the ocean timestep will be made -SUBROUTINE before_oce_step(dynamics, tracers, partit, mesh) +SUBROUTINE before_oce_step(tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER - USE MOD_DYN USE o_ARRAYS USE g_config USE Toy_Channel_Soufflet @@ -860,7 +749,6 @@ SUBROUTINE before_oce_step(dynamics, tracers, partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(inout), target :: dynamics #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -871,7 +759,7 @@ SUBROUTINE before_oce_step(dynamics, tracers, partit, mesh) SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase if (mod(mstep, soufflet_forc_update)==0) then - call compute_zonal_mean(dynamics, tracers, partit, mesh) + call compute_zonal_mean(tracers, partit, mesh) end if END SELECT end if diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 new file mode 100755 index 000000000..b81ccf727 --- /dev/null +++ b/src/oce_vel_rhs_vinv.F90 @@ -0,0 +1,335 @@ +module relative_vorticity_interface + interface + subroutine relative_vorticity(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module + +! Vector invariant momentum advection: +! (curl u+f)\times u+grad(u^2/2)+w du/dz +! +! =================================================================== +subroutine relative_vorticity(partit, mesh) + USE o_ARRAYS + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use g_comm_auto + IMPLICIT NONE + integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !!PS DO n=1,myDim_nod2D + !!PS nl1 = nlevels_nod2D(n)-1 + !!PS ul1 = ulevels_nod2D(n) + !!PS vorticity(ul1:nl1,n)=0.0_WP + !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 + !!PS !!PS vorticity(nz,n)=0.0_WP + !!PS !!PS END DO + !!PS END DO + vorticity(:,1:myDim_nod2D) = 0.0_WP + DO edge=1,myDim_edge2D + !! edge=myList_edge2D(m) + enodes=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + ul1=ulevels(el(1)) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + nl2=0 + ul2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + nl2=nlevels(el(2))-1 + ul2=ulevels(el(2)) + end if + nl12 = min(nl1,nl2) + ul12 = max(ul1,ul2) + + DO nz=ul1,ul12-1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + if (ul2>0) then + DO nz=ul2,ul12-1 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + endif + !!PS DO nz=1,min(nl1,nl2) + DO nz=ul12,nl12 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & + deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl1 + DO nz=nl12+1,nl1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl2 + DO nz=nl12+1,nl2 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + END DO + + ! vorticity = vorticity*area at this stage + ! It is correct only on myDim nodes + DO n=1,myDim_nod2D + !! n=myList_nod2D(m) + ul1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !!PS DO nz=1,nlevels_nod2D(n)-1 + DO nz=ul1,nl1-1 + vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) + END DO + END DO + + call exchange_nod(vorticity, partit) + +! Now it the relative vorticity known on neighbors too +end subroutine relative_vorticity +! ========================================================================== +subroutine compute_vel_rhs_vinv(partit, mesh) !vector invariant + USE o_PARAM + USE o_ARRAYS + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE g_CONFIG + use g_comm_auto + use relative_vorticity_interface + IMPLICIT NONE + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: n, n1, nz, elem, elnodes(3), nl1, j, nzmin,nzmax + real(kind=WP) :: a, b, c, da, db, dc, dg, ff(3), gg, eta(3), pre(3), Fx, Fy,w + real(kind=WP) :: uvert(mesh%nl,2), umean, vmean, friction + logical, save :: lfirst=.true. + real(kind=WP) :: KE_node(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv + real(kind=WP) :: density0_inv = 1./density_0 + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + w = 0.0_WP + + uvert=0.0_WP + + ! ====================== + ! Kinetic energy at nodes: + ! ====================== + + + KE_node(:,:)=0.0_WP + + DO elem=1, myDim_elem2D + !! elem=myList_elem2D(m) + elnodes=elem2D_nodes(:,elem) + nzmin = ulevels(elem) + nzmax = nlevels(elem) + DO j=1,3 !NR interchange loops => nz-loop vectorizes + !!PS DO nz=1,nlevels(elem)-1 + DO nz=nzmin,nzmax-1 + KE_node(nz,elnodes(j)) = KE_node(nz,elnodes(j))+(UV(1,nz,elem)*UV(1,nz,elem) & + +UV(2,nz,elem)*UV(2,nz,elem))*elem_area(elem) !NR/6.0_WP below + END DO + END DO + END DO + + DO n=1,myDim_nod2D + !! n=myList_nod2D(m) + nzmin = ulevels_nod2D(n) + nzmax = nlevels_nod2D(n) + !!PS DO nz=1, nlevels_nod2D(n)-1 + DO nz=nzmin, nzmax-1 + !DO nz=1, nl-1 + KE_node(nz,n)=KE_node(nz,n)/(6._WP*areasvol(nz,n)) !NR divide by 6 here + END DO + END DO + + ! Set the kinetic energy to zero at lateral walls: + DO n=1,myDim_edge2D + !! n=myList_edge2D(m) + if(myList_edge2D(n) > edge2D_in) then + elnodes(1:2)=edges(:,n) + KE_node(:,elnodes(1:2))=0.0_WP + endif + end DO + + call exchange_nod(KE_node, partit) + ! Now gradients of KE will be correct on myDim_elem2D + + ! ================== + ! AB contribution from the old time step + ! ================== + Do elem=1, myDim_elem2D !! P (a) + !! elem=myList_elem2D(m) + nzmin = ulevels(elem) + nzmax = nlevels(elem) + !!PS DO nz=1,nl-1 + DO nz=nzmin,nzmax-1 + UV_rhs(1,nz,elem)=-(0.5_WP+epsilon)*UV_rhsAB(1,nz,elem) + UV_rhs(2,nz,elem)=-(0.5_WP+epsilon)*UV_rhsAB(2,nz,elem) + END DO + END DO + + call relative_vorticity(partit, mesh) + ! ==================== + ! Sea level and pressure contribution -\nabla(g\eta +hpressure/rho_0+V^2/2) + ! and the Coriolis force (elemental part) + ! ==================== + + !DS KE_node=0. !DS + !DS vorticity=0. !DS + DO elem=1, myDim_elem2D !! P (b) elem=1,elem2D + !! elem=myList_elem2D(m) + elnodes = elem2D_nodes(:,elem) + eta = g*eta_n(elnodes) + gg = elem_area(elem) + ff = coriolis_node(elnodes) + + nzmin = ulevels(elem) + nzmax = nlevels(elem) + !!PS DO nz=1,nlevels(elem)-1 + DO nz=nzmin,nzmax-1 + pre = -(eta + hpressure(nz,elnodes)*density0_inv) + Fx = sum(gradient_sca(1:3,elem)*pre) + Fy = sum(gradient_sca(4:6,elem)*pre) + UV_rhs(1,nz,elem) = UV_rhs(1,nz,elem)+Fx*gg + UV_rhs(2,nz,elem) = UV_rhs(2,nz,elem)+Fy*gg + + pre = -KE_node(nz,elnodes) + Fx = sum(gradient_sca(1:3,elem)*pre) + Fy = sum(gradient_sca(4:6,elem)*pre) + + da = UV(2,nz,elem)*sum(ff+vorticity(nz,elnodes))/3.0_WP + db =-UV(1,nz,elem)*sum(ff+vorticity(nz,elnodes))/3.0_WP + + UV_rhsAB(1,nz,elem)=(da+Fx)*gg + UV_rhsAB(2,nz,elem)=(db+Fy)*gg + + END DO + END DO + ! ======================= + ! Compute w du/dz at elements: wdu/dz=d(wu)/dz-udw/dz + ! The central estimate of u in the flux term will correspond to energy + ! conservation + ! ======================= + + !NR precompute + DO nz=2,nl-1 + dZ_inv(nz) = 1.0_WP/(Z(nz-1)-Z(nz)) + ENDDO + DO nz=1,nl-1 + dzbar_inv(nz) = 1.0_WP/(zbar(nz)-zbar(nz+1)) + END DO + +!DO elem=1, myDim_elem2D +! !! elem=myList_elem2D(m) +! elnodes=elem2D_nodes(:,elem) +! nl1=nlevels(elem)-1 +! +! uvert(1,1:2)=0d0 +! uvert(nl1+1,1:2)=0d0 +! +! DO nz=2, nl1 +! w=sum(Wvel(nz,elnodes))/3.0_WP +! umean=0.5_WP*(UV(1,nz-1,elem)+UV(1,nz,elem)) +! vmean=0.5_WP*(UV(2,nz-1,elem)+UV(2,nz,elem)) +! uvert(nz,1)=-umean*w +! uvert(nz,2)=-vmean*w +! END DO +! DO nz=1,nl1 +! da=sum(Wvel(nz,elnodes)-Wvel(nz+1,elnodes))/3.0_WP +! UV_rhsAB(1,nz,elem) = UV_rhsAB(1,nz,elem) + (uvert(nz,1)-uvert(nz+1,1)+& +! da*UV(1,nz,elem))*elem_area(elem)*dzbar_inv(nz) !/(zbar(nz)-zbar(nz+1)) +! UV_rhsAB(2,nz,elem)=UV_rhsAB(2,nz,elem)+(uvert(nz,2)-uvert(nz+1,2)+& +! da*UV(2,nz,elem))*elem_area(elem)*dzbar_inv(nz) !/(zbar(nz)-zbar(nz+1)) +! +! END DO +!END DO + + + DO elem=1, myDim_elem2D + !! elem=myList_elem2D(m) + elnodes=elem2D_nodes(:,elem) + !!PS nl1=nlevels(elem)-1 + nzmax=nlevels(elem)-1 + nzmin=ulevels(elem) + + ! w=sum(Wvel(2, elnodes))/3.0_WP + ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) + uvert(1,1)=w*(UV(1,1,elem)-UV(1,2,elem))*dZ_inv(2)*0.5_WP + uvert(1,2)=w*(UV(2,1,elem)-UV(2,2,elem))*dZ_inv(2)*0.5_WP + + ! w=sum(Wvel(nl1, elnodes))/3.0_WP + ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) + !!PS uvert(nl1,1)=w*(UV(1,nl1-1,elem)-UV(1,nl1,elem))*dZ_inv(nl1)*0.5_WP + !!PS uvert(nl1,2)=w*(UV(2,nl1-1,elem)-UV(2,nl1,elem))*dZ_inv(nl1)*0.5_WP + uvert(nzmax,1)=w*(UV(1,nzmax-1,elem)-UV(1,nzmax,elem))*dZ_inv(nzmax)*0.5_WP + uvert(nzmax,2)=w*(UV(2,nzmax-1,elem)-UV(2,nzmax,elem))*dZ_inv(nzmax)*0.5_WP + + + !!PS DO nz=2, nl1-1 + DO nz=nzmin+1, nzmax-1 + ! w=sum(Wvel(nz,elnodes)+Wvel(nz+1,elnodes))/6.0_WP + ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) + if (w >= 0.0_WP) then + uvert(nz,1)=w*(UV(1,nz,elem)-UV(1,nz+1,elem))*dZ_inv(nz+1) + uvert(nz,2)=w*(UV(2,nz,elem)-UV(2,nz+1,elem))*dZ_inv(nz+1) + else + uvert(nz,1)=w*(UV(1,nz-1,elem)-UV(1,nz,elem))*dZ_inv(nz) + uvert(nz,2)=w*(UV(2,nz-1,elem)-UV(2,nz,elem))*dZ_inv(nz) + end if + END DO + !!PS UV_rhsAB(1,1:nl1,elem) = UV_rhsAB(1,1:nl1,elem) - uvert(1:nl1,1)*elem_area(elem) + !!PS UV_rhsAB(2,1:nl1,elem) = UV_rhsAB(2,1:nl1,elem) - uvert(1:nl1,2)*elem_area(elem) + UV_rhsAB(1,nzmin:nzmax,elem) = UV_rhsAB(1,nzmin:nzmax,elem) - uvert(nzmin:nzmax,1)*elem_area(elem) + UV_rhsAB(2,nzmin:nzmax,elem) = UV_rhsAB(2,nzmin:nzmax,elem) - uvert(nzmin:nzmax,2)*elem_area(elem) + + END DO + + ! ======================= + ! Update the rhs + ! ======================= + gg=(1.5_WP+epsilon) + if(lfirst.and.(.not.r_restart)) then + gg=1.0_WP + lfirst=.false. + end if + + DO elem=1, myDim_elem2D !! P(e) elem=1, elem2D + !! elem=myList_elem2D(m) + elem_area_inv = dt/elem_area(elem) + nzmin = ulevels(elem) + nzmax = nlevels(elem) + !!PS DO nz=1,nlevels(elem)-1 + DO nz=nzmin,nzmax-1 + UV_rhs(1,nz,elem)= (UV_rhs(1,nz,elem)+UV_rhsAB(1,nz,elem)*gg) *elem_area_inv + UV_rhs(2,nz,elem)= (UV_rhs(2,nz,elem)+UV_rhsAB(2,nz,elem)*gg) *elem_area_inv + END DO + END DO + ! U_rhs contains all contributions to velocity from old time steps +end subroutine compute_vel_rhs_vinv diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index 53052ccc5..bf355e527 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -3,7 +3,6 @@ MODULE Toy_Channel_Soufflet USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER - USE MOD_DYN USE o_ARRAYS USE o_PARAM USE g_config @@ -45,22 +44,17 @@ MODULE Toy_Channel_Soufflet ! !-------------------------------------------------------------------------------------------- ! -subroutine relax_zonal_vel(dynamics, partit, mesh) +subroutine relax_zonal_vel(partit, mesh) implicit none integer :: elem, nz, nn, nn1 real(kind=WP) :: a, yy, uzon - - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV_rhs - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV_rhs=>dynamics%uv_rhs(:,:,:) - + DO elem=1, myDim_elem2D ! ======== ! Interpolation @@ -177,20 +171,17 @@ subroutine compute_zonal_mean_ini(partit, mesh) ! no division by 0 is occurring end subroutine compute_zonal_mean_ini !========================================================================== -subroutine compute_zonal_mean(dynamics, tracers, partit, mesh) +subroutine compute_zonal_mean(tracers, partit, mesh) implicit none integer :: elem, nz, m, elnodes(3) real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics - real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) ztem=0. zvel=0. @@ -244,25 +235,22 @@ subroutine compute_zonal_mean(dynamics, tracers, partit, mesh) end subroutine compute_zonal_mean ! ==================================================================================== -subroutine initial_state_soufflet(dynamics, tracers, partit, mesh) +subroutine initial_state_soufflet(tracers, partit, mesh) ! Profiles Soufflet 2016 (OM) implicit none - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics integer :: n, nz, elnodes(3) real(kind=8) :: dst, yn, Fy, Lx ! real(kind=8) :: Ljet,rhomax,Sb, drho_No, drho_So ! real(kind=8) :: z_No, z_So,dz_No,dz_So, drhosurf_No, drhosurf_So, zsurf real(kind=8) :: d_No(mesh%nl-1), d_So(mesh%nl-1), rho_No(mesh%nl-1), rho_So(mesh%nl-1) - real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) dy=ysize/nybins/r_earth @@ -367,23 +355,18 @@ subroutine initial_state_soufflet(dynamics, tracers, partit, mesh) write(*,*) mype, 'Vel', maxval(UV(1,:,:)), minval(UV(1,:,:)) END subroutine initial_state_soufflet ! =============================================================================== -subroutine energy_out_soufflet(dynamics, partit, mesh) +subroutine energy_out_soufflet(partit, mesh) implicit none real(kind=8) :: tke(2), aux(2), ww, wwaux integer :: elem, nz, m, elnodes(3), nybins real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - -real(kind=WP), dimension(:,:,:), pointer :: UV -real(kind=WP), dimension(:,:), pointer :: Wvel + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -UV => dynamics%uv(:,:,:) -Wvel => dynamics%w(:,:) nybins=100 diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 1b84011ab..ac4d1d73f 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,48 +1,42 @@ module write_step_info_interface interface - subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) + subroutine write_step_info(istep,outfreq,tracers,partit,mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER - use MOD_DYN integer :: istep,outfreq - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module module check_blowup_interface interface - subroutine check_blowup(istep, dynamics, tracers, partit, mesh) + subroutine check_blowup(istep, tracers,partit,mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER - use MOD_DYN integer :: istep type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers - type(t_dyn) , intent(in) , target :: dynamics end subroutine end interface end module ! ! !=============================================================================== -subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) +subroutine write_step_info(istep, outfreq, tracers, partit, mesh) use g_config, only: dt, use_ice use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_TRACER - use MOD_DYN + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER use o_PARAM - use o_ARRAYS, only: water_flux, heat_flux, & - pgf_x, pgf_y, Av, Kv + use o_ARRAYS use i_ARRAYS use g_comm_auto implicit none @@ -58,24 +52,13 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av real(kind=WP) :: int_deta , int_dhbar real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt - type(t_mesh), intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics - real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode - real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z - real(kind=WP), dimension(:) , pointer :: eta_n, d_eta + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UVnode => dynamics%uvnode(:,:,:) - Wvel => dynamics%w(:,:) - CFL_z => dynamics%cfl_z(:,:) - eta_n => dynamics%eta_n(:) - d_eta => dynamics%d_eta(:) - +#include "associate_mesh_ass.h" if (mod(istep,outfreq)==0) then !_______________________________________________________________________ @@ -152,13 +135,13 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc = minval(Wvel(2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(UVnode(1,1,1:myDim_nod2D)) + loc = minval(Unode(1,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(UVnode(1,2,1:myDim_nod2D)) + loc = minval(Unode(1,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(UVnode(2,1,1:myDim_nod2D)) + loc = minval(Unode(2,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(UVnode(2,2,1:myDim_nod2D)) + loc = minval(Unode(2,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc = minval(d_eta(1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_deta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) @@ -184,13 +167,13 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc = maxval(Wvel(2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(UVnode(1,1,1:myDim_nod2D)) + loc = maxval(Unode(1,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(UVnode(1,2,1:myDim_nod2D)) + loc = maxval(Unode(1,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(UVnode(2,1,1:myDim_nod2D)) + loc = maxval(Unode(2,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(UVnode(2,2,1:myDim_nod2D)) + loc = maxval(Unode(2,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc = maxval(d_eta(1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_deta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) @@ -259,16 +242,14 @@ end subroutine write_step_info ! ! !=============================================================================== -subroutine check_blowup(istep, dynamics, tracers, partit, mesh) +subroutine check_blowup(istep, tracers, partit, mesh) use g_config, only: logfile_outfreq, which_ALE use MOD_MESH - use MOD_TRACER - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM - use o_ARRAYS, only: water_flux, stress_surf, & - heat_flux, Kv, Av + use o_ARRAYS use i_ARRAYS use g_comm_auto use io_BLOWUP @@ -278,27 +259,14 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) implicit none integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 - integer :: el, elidx - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics - real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z - real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old - real(kind=WP), dimension(:) , pointer :: eta_n, d_eta + integer :: el, elidx + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - Wvel => dynamics%w(:,:) - CFL_z => dynamics%cfl_z(:,:) - ssh_rhs => dynamics%ssh_rhs(:) - ssh_rhs_old => dynamics%ssh_rhs_old(:) - eta_n => dynamics%eta_n(:) - d_eta => dynamics%d_eta(:) - +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then ! ! if (mype==0) then @@ -541,7 +509,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) ! moment only over CPU mype==0 call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) if (found_blowup==1) then - call write_step_info(istep, 1, dynamics, tracers,partit,mesh) + call write_step_info(istep,1,tracers,partit,mesh) if (mype==0) then call sleep(1) write(*,*) @@ -561,10 +529,8 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) write(*,*) ' _____.,-#%&$@%#&#~,._____' write(*,*) end if - call blowup(istep, dynamics, tracers, partit, mesh) + call blowup(istep, tracers, partit, mesh) if (mype==0) write(*,*) ' --> finished writing blow up file' call par_ex(partit%MPI_COMM_FESOM, partit%mype) endif end subroutine - - From 761f65ad0240f620ec3a6b61d537b3e2ac582eb5 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Tue, 9 Nov 2021 14:26:17 +0100 Subject: [PATCH 466/909] Update oce_ale_tracer.F90 eDim_nod2D forgotten in one loop --- src/oce_ale_tracer.F90 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index a73719608..891831453 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -175,9 +175,13 @@ subroutine solve_tracers_ale(tracers, partit, mesh) !$OMP PARALLEL DO do node=1, myDim_nod2d tracers%work%del_ttf(:, node)=tracers%work%del_ttf(:, node)+tracers%work%del_ttf_advhoriz(:, node)+tracers%work%del_ttf_advvert(:, node) + end do +!$OMP END PARALLEL DO !___________________________________________________________________________ ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. +!$OMP PARALLEL DO + do node=1, myDim_nod2d+eDim_nod2D tracers%data(tr_num)%valuesAB(:, node)=tracers%data(tr_num)%values(:, node) !DS: check that this is the right place! end do !$OMP END PARALLEL DO From dcdb3e35d9f01498ab006e23ec49affdb81eddba Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 9 Nov 2021 16:23:35 +0100 Subject: [PATCH 467/909] add a switch to build FESOM as library --- src/CMakeLists.txt | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9500ef1ea..8cd92174b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -9,6 +9,10 @@ if(${ENABLE_OPENMP}) find_package(OpenMP REQUIRED) endif() +# option to trigger building a library version of FESOM +# we do not always build the library along with the executable to avoid having two targets here in the CMakeLists.txt +# two targets would allow e.g. setting different compiler options or preprocessor definition, which would be error prone. +option(BUILD_FESOM_AS_LIBRARY "Build a library instead of an executable" OFF) # get our source files set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt @@ -37,6 +41,7 @@ add_custom_command(OUTPUT 5303B6F4_E4F4_45B2_A6E5_8E2B9FB5CDC4 ${FESOM_GENERATED #endif() list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90) list(REMOVE_ITEM sources_C ${src_home}/fort_part.c) +list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_main.F90) # depends on the metis library #add_subdirectory(../lib/metis-5.1.0 ${PROJECT_BINARY_DIR}/metis) @@ -52,8 +57,15 @@ add_library(${PROJECT_NAME}_C ${sources_C}) target_compile_definitions(${PROJECT_NAME}_C PRIVATE PARMS USE_MPI REAL=double DBL HAS_BLAS FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 SGI LINUX UNDER_ MPI2) target_link_libraries(${PROJECT_NAME}_C parms) #metis -# create our binary (set its name to name of this project) -add_executable(${PROJECT_NAME} ${sources_Fortran}) + +# create our binary or library (set its name to name of this project) +# we do not always build the library along with the executable to avoid having two targets here in the CMakeLists.txt +# two targets would allow e.g. setting different compiler options or preprocessor definition, which would be error prone. +if(${BUILD_FESOM_AS_LIBRARY}) + add_library(${PROJECT_NAME} ${sources_Fortran}) +else() + add_executable(${PROJECT_NAME} ${sources_Fortran} ${src_home}/fvom_main.F90) +endif() target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS -DMETIS_VERSION=5 -DPART_WEIGHTED -DMETISRANDOMSEED=35243) if(${DISABLE_MULTITHREADING}) target_compile_definitions(${PROJECT_NAME} PRIVATE DISABLE_MULTITHREADING) @@ -94,7 +106,13 @@ if(${ENABLE_OPENMP}) target_link_libraries(${PROJECT_NAME} OpenMP::OpenMP_Fortran) endif() -set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") -get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) -get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) -install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) + +set(FESOM_INSTALL_PREFIX "${CMAKE_CURRENT_LIST_DIR}/.." CACHE FILEPATH "directory where FESOM will be installed to via 'make install'") +if(${BUILD_FESOM_AS_LIBRARY}) + install(TARGETS ${PROJECT_NAME} DESTINATION "${FESOM_INSTALL_PREFIX}/lib") +else() + set(FESOM_INSTALL_FILEPATH "${FESOM_INSTALL_PREFIX}/bin/fesom.x") + get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) + get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) + install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) +endif() From 969f9ccca7423376f6e0c3f883f180bc26378f37 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Tue, 9 Nov 2021 17:30:47 +0100 Subject: [PATCH 468/909] Update oce_adv_tra_driver.F90 OPENMP bug fix, a couple of variable have been forgotten to be declared as private. Dangerous stuff!!! --- src/oce_adv_tra_driver.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index b3a1bd8b1..450647e9f 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -113,7 +113,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) fct_LO(:,n) = 0.0_WP end do !$OMP END PARALLEL DO -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(e, enodes, el, nl1, nu1, nl2, nu2, nz) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(e, enodes, el, nl1, nu1, nl2, nu2, nu12, nl12, nz) do e=1, myDim_edge2D enodes=edges(:,e) el=edge_tri(:,e) From 98583111164c495796e89a0f1a9605d802b34f25 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 1 Nov 2021 22:10:20 +0100 Subject: [PATCH 469/909] add module file for derived type of dynamics --- src/MOD_DYN.F90 | 176 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 176 insertions(+) create mode 100644 src/MOD_DYN.F90 diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 new file mode 100644 index 000000000..920a77b4c --- /dev/null +++ b/src/MOD_DYN.F90 @@ -0,0 +1,176 @@ +!========================================================== +MODULE MOD_DYN +USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE + +! +! +!_______________________________________________________________________________ +TYPE T_solverinfo + integer :: maxiter=2000 + integer :: restarts=15 + integer :: fillin=3 + integer :: lutype=2 + integer :: nrhs=1 + real(kind=WP) :: droptol=1.e-7 + real(kind=WP) :: soltol =1e-10 !1.e-10 + logical :: lfirst=.true. +END TYPE T_solverinfo + +! +! +!_______________________________________________________________________________ +! set main structure for dynamicss, contains viscosity options and parameters + +! option for momentum advection +TYPE T_DYN + ! instant zonal merdional velocity & Adams-Bashfort rhs + real(kind=WP), allocatable, dimension(:,:,:):: uv, uv_rhs, uv_rhsAB + + ! instant vertical velm explicite+implicite part + real(kind=WP), allocatable, dimension(:,:) :: w, w_e, w_i, cfl_z + + real(kind=WP), allocatable, dimension(:,:,:):: uvnode, uvnode_rhs + + real(kind=WP), allocatable, dimension(:) :: eta_n, d_eta, ssh_rhs, ssh_rhs_old + + ! summarizes solver input parameter + type(t_solverinfo) :: solverinfo + + + ! visc_option=... + ! 1=Harmonic Leith parameterization; + ! 2=Laplacian+Leith+biharmonic background + ! 3=Biharmonic Leith parameterization + ! 4=Biharmonic flow aware + ! 5=Kinematic (easy) Backscatter + ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) + ! 7=Biharmonic flow aware (viscosity depends on velocity differences) + ! 8=Dynamic Backscatter + integer :: visc_opt = 5 + + ! gamma0 [m/s], backgroung viscosity= gamma0*len, it should be as small + ! as possible (keep it < 0.01 m/s). + ! gamma1 [nodim], for computation of the flow aware viscosity + ! gamma2 [s/m], is only used in easy backscatter option + real(kind=WP) :: gamma0_visc = 0.03 + real(kind=WP) :: gamma1_visc = 0.1 + real(kind=WP) :: gamma2_visc = 0.285 + + ! div_c the strength of the modified Leith viscosity, nondimensional, 0.3 -- 1.0 + ! leith the strength of the Leith viscosity + real(kind=WP) :: div_c_visc = 0.5 + real(kind=WP) :: leith_c_visc = 0.05 + + ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 + ! (easy backscatter) + real(kind=WP) :: easy_bs_return= 1.5 + + logical :: use_ivertvisc = .true. + integer :: momadv_opt = 2 + + ! Switch on free slip + logical :: use_freeslip = .false. + + ! do implicite, explicite spliting of vertical velocity + logical :: use_wsplit = .false. + ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) + ! in older FESOM it used to be w_exp_max=1.e-3 + real(kind=WP) :: wsplit_maxcfl= 1.0 + + !___________________________________________________________________________ + contains + procedure WRITE_T_DYN + procedure READ_T_DYN + generic :: write(unformatted) => WRITE_T_DYN + generic :: read(unformatted) => READ_T_DYN +END TYPE T_DYN + +contains + +! +! +!_______________________________________________________________________________ +! set unformatted writing and reading for T_DYN +subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN), intent(in) :: dynamics + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + !___________________________________________________________________________ + call write_bin_array(dynamics%uv , unit, iostat, iomsg) + call write_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) + call write_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) + call write_bin_array(dynamics%uvnode , unit, iostat, iomsg) + call write_bin_array(dynamics%uvnode_rhs, unit, iostat, iomsg) + + call write_bin_array(dynamics%w , unit, iostat, iomsg) + call write_bin_array(dynamics%w_e , unit, iostat, iomsg) + call write_bin_array(dynamics%w_i , unit, iostat, iomsg) + + call write_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt + write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%div_c_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%leith_c_visc + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%momadv_opt + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_freeslip + write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_wsplit + write(unit, iostat=iostat, iomsg=iomsg) dynamics%wsplit_maxcfl + +end subroutine WRITE_T_DYN + +subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN), intent(inout) :: dynamics + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + !___________________________________________________________________________ + call read_bin_array(dynamics%uv , unit, iostat, iomsg) + call read_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) + call read_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) + call read_bin_array(dynamics%uvnode , unit, iostat, iomsg) + call read_bin_array(dynamics%uvnode_rhs, unit, iostat, iomsg) + + call read_bin_array(dynamics%w , unit, iostat, iomsg) + call read_bin_array(dynamics%w_e , unit, iostat, iomsg) + call read_bin_array(dynamics%w_i , unit, iostat, iomsg) + + call read_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt + read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%div_c_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%leith_c_visc + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%momadv_opt + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_freeslip + read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_wsplit + read(unit, iostat=iostat, iomsg=iomsg) dynamics%wsplit_maxcfl + +end subroutine READ_T_DYN + +END MODULE MOD_DYN \ No newline at end of file From 2acab545d05ed469bc8878cec675f1ed0759ff43 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 1 Nov 2021 22:10:57 +0100 Subject: [PATCH 470/909] add initialisation for dynamics derived type --- src/oce_setup_step.F90 | 138 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 128 insertions(+), 10 deletions(-) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 2842b69f0..6469eb99f 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -11,6 +11,7 @@ subroutine oce_initial_state(tracers, partit, mesh) end subroutine end interface end module + module tracer_init_interface interface subroutine tracer_init(tracers, partit, mesh) @@ -24,6 +25,21 @@ subroutine tracer_init(tracers, partit, mesh) end subroutine end interface end module + +module dynamics_init_interface + interface + subroutine dynamics_init(dynamics, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + end subroutine + end interface +end module + module ocean_setup_interface interface subroutine ocean_setup(tracers, partit, mesh) @@ -37,6 +53,7 @@ subroutine ocean_setup(tracers, partit, mesh) end subroutine end interface end module + module before_oce_step_interface interface subroutine before_oce_step(tracers, partit, mesh) @@ -51,11 +68,12 @@ subroutine before_oce_step(tracers, partit, mesh) end interface end module !_______________________________________________________________________________ -subroutine ocean_setup(tracers, partit, mesh) +subroutine ocean_setup(dynamics, tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER +USE MOD_DYN USE o_PARAM USE o_ARRAYS USE g_config @@ -69,9 +87,10 @@ subroutine ocean_setup(tracers, partit, mesh) use oce_initial_state_interface use oce_adv_tra_fct_interfaces IMPLICIT NONE -type(t_mesh), intent(inout), target :: mesh -type(t_partit), intent(inout), target :: partit +type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers +type(t_partit), intent(inout), target :: partit +type(t_mesh) , intent(inout), target :: mesh integer :: n !___setup virt_salt_flux____________________________________________________ ! if the ale thinkness remain unchanged (like in 'linfs' case) the vitrual @@ -181,9 +200,9 @@ subroutine ocean_setup(tracers, partit, mesh) SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase if (mod(mstep, soufflet_forc_update)==0) then - call initial_state_soufflet(tracers, partit, mesh) + call initial_state_soufflet(dynamics, tracers, partit, mesh) call compute_zonal_mean_ini(partit, mesh) - call compute_zonal_mean(tracers, partit, mesh) + call compute_zonal_mean(dynamics, tracers, partit, mesh) end if END SELECT else @@ -313,6 +332,103 @@ END SUBROUTINE tracer_init ! ! !_______________________________________________________________________________ +SUBROUTINE dynamics_init(dynamics, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + IMPLICIT NONE + integer :: elem_size, node_size + integer, save :: nm_unit = 104 ! unit to open namelist file, skip 100-102 for cray + integer :: iost + + integer :: visc_opt + real(kind=WP) :: gamma0_visc, gamma1_visc, gamma2_visc + real(kind=WP) :: div_c_visc, leith_c_visc, easybackscat_return + logical :: use_ivertvisc + integer :: momadv_opt + logical :: use_freeslip + logical :: use_wsplit + real(kind=WP) :: wsplit_maxcfl + + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + + ! define dynamics namelist parameter + namelist /dynamics_visc / visc_opt, gamma0_visc, gamma1_visc, gamma2_visc, & + div_c_visc, leith_c_visc, use_ivertvisc, easy_bs_return + namelist /dynamics_general / momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + ! open and read namelist for I/O + open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) + if (iost == 0) then + if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' + else + if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + read(nm_unit, nml=dynamics_visc , iostat=iost) + read(nm_unit, nml=dynamics_general, iostat=iost) + close(nm_unit) + + ! define local vertice & elem array size + elem_size=myDim_elem2D+eDim_elem2D + node_size=myDim_nod2D+eDim_nod2D + + ! allocate data arrays in derived type + allocate(dynamics%uv( 2, nl-1, elem_size)) + allocate(dynamics%uv_rhs( 2, nl-1, elem_size)) + allocate(dynamics%uv_rhsAB( 2, nl-1, elem_size)) + allocate(dynamics%uvnode( 2, nl-1, node_size)) + allocate(dynamics%uvnode_rhs(2, nl-1, node_size)) + dynamics%uv = 0.0_WP + dynamics%uv_rhs = 0.0_WP + dynamics%uv_rhsAB = 0.0_WP + dynamics%uvnode = 0.0_WP + dynamics%uvnode_rhs = 0.0_WP + + allocate(dynamics%w( nl, node_size)) + allocate(dynamics%w_e( nl, node_size)) + allocate(dynamics%w_i( nl, node_size)) + allocate(dynamics%cfl_z( nl, node_size)) + dynamics%w = 0.0_WP + dynamics%w_e = 0.0_WP + dynamics%w_i = 0.0_WP + dynamics%cfl_z = 0.0_WP + + allocate(dynamics%eta_n( node_size)) + allocate(dynamics%d_eta( node_size)) + allocate(dynamics%ssh_rhs( node_size)) + allocate(dynamics%ssh_rhs_old(node_size)) + dynamics%eta_n = 0.0_WP + dynamics%d_eta = 0.0_WP + dynamics%ssh_rhs = 0.0_WP + dynamics%ssh_rhs_old= 0.0_WP + + ! set parameters in derived type + dynamics%visc_opt = visc_opt + dynamics%gamma0_visc = gamma0_visc + dynamics%gamma1_visc = gamma1_visc + dynamics%gamma2_visc = gamma2_visc + dynamics%div_c_visc = div_c_visc + dynamics%leith_c_visc = leith_c_visc + dynamics%use_ivertvisc = use_ivertvisc + dynamics%momadv_opt = momadv_opt + dynamics%use_freeslip = use_freeslip + dynamics%use_wsplit = use_wsplit + dynamics%wsplit_maxcfl = wsplit_maxcfl + +END SUBROUTINE dynamics_init +! +! +!_______________________________________________________________________________ SUBROUTINE arrays_init(num_tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -345,7 +461,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Velocities ! ================ !allocate(stress_diag(2, elem_size))!delete me -allocate(UV(2, nl-1, elem_size)) +!!PS allocate(UV(2, nl-1, elem_size)) allocate(UV_rhs(2,nl-1, elem_size)) allocate(UV_rhsAB(2,nl-1, elem_size)) allocate(Visc(nl-1, elem_size)) @@ -494,7 +610,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Initialize with zeros ! ================= - UV=0.0_WP +!!PS UV=0.0_WP UV_rhs=0.0_WP UV_rhsAB=0.0_WP ! @@ -735,20 +851,22 @@ end subroutine oce_initial_state ! !========================================================================== ! Here we do things (if applicable) before the ocean timestep will be made -SUBROUTINE before_oce_step(tracers, partit, mesh) +SUBROUTINE before_oce_step(dynamics, tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER + use MOD_DYN USE o_ARRAYS USE g_config USE Toy_Channel_Soufflet implicit none integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -759,7 +877,7 @@ SUBROUTINE before_oce_step(tracers, partit, mesh) SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase if (mod(mstep, soufflet_forc_update)==0) then - call compute_zonal_mean(tracers, partit, mesh) + call compute_zonal_mean(dynamics, tracers, partit, mesh) end if END SELECT end if From f35fd4d27974fefe1ba587f0de6a7199e43a2d91 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 1 Nov 2021 22:12:20 +0100 Subject: [PATCH 471/909] exchange UV with dynamics derived type dynamics%uv --- src/cavity_param.F90 | 12 +- src/fvom_main.F90 | 26 ++-- src/gen_modules_diag.F90 | 50 ++++---- src/ice_oce_coupling.F90 | 41 ++++--- src/io_blowup.F90 | 33 ++--- src/io_meandata.F90 | 26 ++-- src/io_restart.F90 | 23 ++-- src/oce_ale.F90 | 70 +++++++---- src/oce_ale_tracer.F90 | 34 ++++-- src/oce_ale_vel_rhs.F90 | 31 +++-- src/oce_dyn.F90 | 226 +++++++++++++++++++++++------------ src/oce_modules.F90 | 2 +- src/oce_vel_rhs_vinv.F90 | 38 ++++-- src/toy_channel_soufflet.F90 | 31 +++-- src/write_step_info.F90 | 66 ++++++---- 15 files changed, 455 insertions(+), 254 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index eb8591754..3c3236c39 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -382,25 +382,29 @@ end subroutine cavity_heat_water_fluxes_2eq !_______________________________________________________________________________ ! Compute the momentum fluxes under ice cavity ! Moved to this separated routine by Qiang, 20.1.2012 -subroutine cavity_momentum_fluxes(partit, mesh) +subroutine cavity_momentum_fluxes(dynamics, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use o_PARAM , only: density_0, C_d, WP - use o_ARRAYS, only: UV, Unode, stress_surf, stress_node_surf + use o_ARRAYS, only: Unode, stress_surf, stress_node_surf use i_ARRAYS, only: u_w, v_w implicit none !___________________________________________________________________________ + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh integer :: elem, elnodes(3), nzmin, node real(kind=WP) :: aux - + real(kind=WP), dimension(:,:,:), pointer :: UV + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) !___________________________________________________________________________ do elem=1,myDim_elem2D diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index a9cb7142f..e9f4cb699 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -8,9 +8,10 @@ program main USE MOD_MESH -USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP +USE MOD_TRACER +USE MOD_DYN USE o_ARRAYS USE o_PARAM USE i_PARAM @@ -56,10 +57,10 @@ program main real(kind=real32) :: runtime_alltimesteps -type(t_mesh), target, save :: mesh -type(t_tracer), target, save :: tracers -type(t_partit), target, save :: partit - +type(t_mesh) , target, save :: mesh +type(t_partit), target, save :: partit +type(t_tracer), target, save :: tracers +type(t_dyn) , target, save :: dynamics character(LEN=256) :: dump_dir, dump_filename logical :: L_EXISTS @@ -122,6 +123,7 @@ program main if (mype==0) t2=MPI_Wtime() call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") + call dynamics_init(dynamics, partit, mesh) ! allocate array of ocean dynamics (derived type "t_tracer") call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) call ocean_setup(tracers, partit, mesh) @@ -139,7 +141,7 @@ program main if (mype==0) write(*,*) 'EVP scheme option=', whichEVP endif if (mype==0) t5=MPI_Wtime() - call compute_diagnostics(0, tracers, partit, mesh) ! allocate arrays for diagnostic + call compute_diagnostics(0, dynamics, tracers, partit, mesh) ! allocate arrays for diagnostic #if defined (__oasis) call cpl_oasis3mct_define_unstr(partit, mesh) if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv @@ -164,7 +166,7 @@ program main ! if l_write is TRUE the restart will be forced ! if l_read the restart will be read ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, tracers, partit, mesh) ! istep, l_write, l_read + call restart(0, .false., r_restart, dynamics, tracers, partit, mesh) ! istep, l_write, l_read if (mype==0) t7=MPI_Wtime() ! store grid information into netcdf file if (.not. r_restart) call write_mesh_info(partit, mesh) @@ -272,7 +274,7 @@ program main if(use_ice) then !___compute fluxes from ocean to ice________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(tracers, partit, mesh) + call ocean2ice(dynamics, tracers, partit, mesh) !___compute update of atmospheric forcing____________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' @@ -291,7 +293,7 @@ program main if (ice_update) call ice_timestep(n, partit, mesh) !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(partit, mesh) ! momentum only + call oce_fluxes_mom(dynamics, partit, mesh) ! momentum only call oce_fluxes(tracers, partit, mesh) end if call before_oce_step(tracers, partit, mesh) ! prepare the things if required @@ -304,15 +306,15 @@ program main t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, tracers, partit, mesh) + call compute_diagnostics(1, dynamics, tracers, partit, mesh) t4 = MPI_Wtime() !___prepare output______________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, tracers, partit, mesh) + call output (n, dynamics, tracers, partit, mesh) t5 = MPI_Wtime() - call restart(n, .false., .false., tracers, partit, mesh) + call restart(n, .false., .false., dynamics, tracers, partit, mesh) t6 = MPI_Wtime() rtime_fullice = rtime_fullice + t2 - t1 diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 231345f2d..5a0e47f90 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -4,7 +4,8 @@ module diagnostics use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer + use MOD_TRACER + use MOD_DYN use g_clock use g_comm_auto use o_ARRAYS @@ -149,19 +150,21 @@ subroutine diag_curl_stress_surf(mode, partit, mesh) end subroutine diag_curl_stress_surf ! ============================================================== !3D curl(velocity) -subroutine diag_curl_vel3(mode, partit, mesh) +subroutine diag_curl_vel3(mode, dynamics, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh integer, intent(in) :: mode logical, save :: firstcall=.true. integer :: enodes(2), el(2), ed, n, nz, nl1, nl2, nl12, nu1, nu2, nu12 real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) !===================== if (firstcall) then !allocate the stuff at the first call @@ -229,21 +232,24 @@ subroutine diag_curl_vel3(mode, partit, mesh) end subroutine diag_curl_vel3 ! ============================================================== !energy budget -subroutine diag_energy(mode, partit, mesh) +subroutine diag_energy(mode, dynamics, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh integer, intent(in) :: mode logical, save :: firstcall=.true. integer :: n, nz, k, i, elem, nzmax, nzmin, elnodes(3) integer :: iup, ilo real(kind=WP) :: ux, vx, uy, vy, tvol, rval(2) real(kind=WP) :: geo_grad_x(3), geo_grad_y(3), geo_u(3), geo_v(3) - + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + !===================== if (firstcall) then !allocate the stuff at the first call allocate(wrhof(nl, myDim_nod2D), rhof(nl, myDim_nod2D)) @@ -401,12 +407,13 @@ subroutine diag_energy(mode, partit, mesh) END DO end subroutine diag_energy ! ============================================================== -subroutine diag_densMOC(mode, tracers, partit, mesh) +subroutine diag_densMOC(mode, dynamics, tracers, partit, mesh) implicit none integer, intent(in) :: mode - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics integer :: nz, snz, elem, nzmax, nzmin, elnodes(3), is, ie, pos integer :: e, edge, enodes(2), eelems(2) real(kind=WP) :: div, deltaX, deltaY, locz @@ -417,10 +424,12 @@ subroutine diag_densMOC(mode, tracers, partit, mesh) real(kind=WP), save, allocatable :: std_dens_w(:,:), std_dens_VOL1(:,:), std_dens_VOL2(:,:) logical, save :: firstcall_s=.true., firstcall_e=.true. real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) @@ -657,20 +666,21 @@ subroutine diag_densMOC(mode, tracers, partit, mesh) end subroutine diag_densMOC ! ============================================================== -subroutine compute_diagnostics(mode, tracers, partit, mesh) +subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) real(kind=WP) :: val !1. solver diagnostic if (ldiag_solver) call diag_solver(mode, partit, mesh) !2. compute curl(stress_surf) if (lcurt_stress_surf) call diag_curl_stress_surf(mode, partit, mesh) !3. compute curl(velocity) - if (ldiag_curl_vel3) call diag_curl_vel3(mode, partit, mesh) + if (ldiag_curl_vel3) call diag_curl_vel3(mode, dynamics, partit, mesh) !4. compute energy budget - if (ldiag_energy) call diag_energy(mode, partit, mesh) + if (ldiag_energy) call diag_energy(mode, dynamics, partit, mesh) !5. print integrated temperature if (ldiag_salt3d) then if (mod(mstep,logfile_outfreq)==0) then @@ -681,7 +691,7 @@ subroutine compute_diagnostics(mode, tracers, partit, mesh) end if end if !6. MOC in density coordinate - if (ldiag_dMOC) call diag_densMOC(mode, tracers, partit, mesh) + if (ldiag_dMOC) call diag_densMOC(mode, dynamics, tracers, partit, mesh) end subroutine compute_diagnostics diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 30dedc505..a0aa70ceb 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -1,13 +1,16 @@ module ocean2ice_interface interface - subroutine ocean2ice(tracers, partit, mesh) + subroutine ocean2ice(dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in) , target :: tracers type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module @@ -20,8 +23,8 @@ subroutine oce_fluxes(tracers, partit, mesh) USE MOD_PARSUP use mod_tracer type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_tracer), intent(in) , target :: tracers end subroutine end interface end module @@ -29,7 +32,7 @@ subroutine oce_fluxes(tracers, partit, mesh) ! ! !_______________________________________________________________________________ -subroutine oce_fluxes_mom(partit, mesh) +subroutine oce_fluxes_mom(dynamics, partit, mesh) ! transmits the relevant fields from the ice to the ocean model ! use o_PARAM @@ -37,6 +40,7 @@ subroutine oce_fluxes_mom(partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use i_ARRAYS use i_PARAM USE g_CONFIG @@ -50,8 +54,9 @@ subroutine oce_fluxes_mom(partit, mesh) integer :: n, elem, elnodes(3),n1 real(kind=WP) :: aux, aux1 + type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -105,39 +110,43 @@ subroutine oce_fluxes_mom(partit, mesh) END DO !___________________________________________________________________________ - if (use_cavity) call cavity_momentum_fluxes(partit, mesh) + if (use_cavity) call cavity_momentum_fluxes(dynamics, partit, mesh) end subroutine oce_fluxes_mom ! ! !_______________________________________________________________________________ -subroutine ocean2ice(tracers, partit, mesh) +subroutine ocean2ice(dynamics, tracers, partit, mesh) ! transmits the relevant fields from the ocean to the ice model use o_PARAM - use o_ARRAYS use i_ARRAYS use MOD_MESH + use MOD_DYN use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP USE g_CONFIG use g_comm_auto implicit none - + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in) , target :: tracers type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + + integer :: n, elem, k real(kind=WP) :: uw, vw, vol - real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP), dimension(:,:) , pointer :: temp, salt + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - temp=>tracers%data(1)%values(:,:) - salt=>tracers%data(2)%values(:,:) + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + UV => dynamics%uv(:,:,:) ! the arrays in the ice model are renamed diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index a4bbae11d..9ad2146c8 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -2,10 +2,11 @@ MODULE io_BLOWUP use g_config use g_clock use g_comm_auto - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_TRACER + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE MOD_DYN use o_arrays use i_arrays implicit none @@ -64,12 +65,13 @@ MODULE io_BLOWUP !_______________________________________________________________________________ ! ini_ocean_io initializes bid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! - subroutine ini_blowup_io(year, tracers, partit, mesh) + subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) implicit none integer, intent(in) :: year - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics integer :: ncid, j integer :: varid character(500) :: longname @@ -108,8 +110,8 @@ subroutine ini_blowup_io(year, tracers, partit, mesh) !___Define the netCDF variables for 3D fields_______________________________ call def_variable(bid, 'hnode' , (/nl-1, nod2D/) , 'ALE stuff', '?', hnode); call def_variable(bid, 'helem' , (/nl-1, elem2D/) , 'Element layer thickness', 'm/s', helem(:,:)); - call def_variable(bid, 'u' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV(1,:,:)); - call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV(2,:,:)); + call def_variable(bid, 'u' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); + call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV_rhs(1,:,:)); call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV_rhs(2,:,:)); call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); @@ -173,15 +175,16 @@ end subroutine ini_blowup_io ! ! !_______________________________________________________________________________ - subroutine blowup(istep, tracers, partit, mesh) + subroutine blowup(istep, dynamics, tracers, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics integer :: istep ctime=timeold+(dayold-1.)*86400 - call ini_blowup_io(yearnew, tracers, partit, mesh) + call ini_blowup_io(yearnew, dynamics, tracers, partit, mesh) if(partit%mype==0) write(*,*)'Do output (netCDF, blowup) ...' if(partit%mype==0) write(*,*)' --> call assoc_ids(bid)' call assoc_ids(bid, partit) ; call was_error(bid, partit) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 59c6a6fbf..969bbb69a 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -93,11 +93,12 @@ subroutine destructor(this) end subroutine -subroutine ini_mean_io(tracers, partit, mesh) +subroutine ini_mean_io(dynamics, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_cvmix_tke use g_cvmix_idemix use g_cvmix_kpp @@ -111,9 +112,10 @@ subroutine ini_mean_io(tracers, partit, mesh) integer,dimension(15) :: sel_forcvar=0 character(len=10) :: id_string - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics namelist /nml_listsize/ io_listsize namelist /nml_list / io_list @@ -315,9 +317,9 @@ subroutine ini_mean_io(tracers, partit, mesh) CASE ('Kv ') call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', dynamics.uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('v ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics.uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('w ') call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Av ') @@ -429,8 +431,8 @@ subroutine ini_mean_io(tracers, partit, mesh) call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', Unode(1,:,:), 1, 'm', i_real8, partit, mesh) call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', Unode(2,:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', uv(1,:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', uv(2,:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', Wvel(:,:), 1, 'm', i_real8, partit, mesh) call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) @@ -805,11 +807,12 @@ subroutine update_means ! !-------------------------------------------------------------------------------------------- ! -subroutine output(istep, tracers, partit, mesh) +subroutine output(istep, dynamics, tracers, partit, mesh) use g_clock use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + use MOD_DYN use mod_tracer use io_gather_module #if defined (__icepack) @@ -821,15 +824,16 @@ subroutine output(istep, tracers, partit, mesh) integer :: n, k logical :: do_output type(Meandata), pointer :: entry - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics character(:), allocatable :: filepath real(real64) :: rtime !timestamp of the record ctime=timeold+(dayold-1.)*86400 if (lfirst) then - call ini_mean_io(tracers, partit, mesh) + call ini_mean_io(dynamics, tracers, partit, mesh) #if defined (__icepack) call init_io_icepack(mesh) !icapack has its copy of p_partit => partit #endif diff --git a/src/io_restart.F90 b/src/io_restart.F90 index a9d2aac22..4d8a4d722 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -6,6 +6,7 @@ MODULE io_RESTART USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN use o_arrays use i_arrays use g_cvmix_tke @@ -79,7 +80,7 @@ MODULE io_RESTART !-------------------------------------------------------------------------------------------- ! ini_ocean_io initializes oid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ocean_io(year, tracers, partit, mesh) +subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) implicit none integer, intent(in) :: year @@ -89,9 +90,10 @@ subroutine ini_ocean_io(year, tracers, partit, mesh) character(500) :: filename character(500) :: trname, units character(4) :: cyear - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -120,8 +122,8 @@ subroutine ini_ocean_io(year, tracers, partit, mesh) call def_variable(oid, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); !___Define the netCDF variables for 3D fields_______________________________ - call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', UV(1,:,:)); - call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', UV(2,:,:)); + call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', dynamics.uv(1,:,:)); + call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', dynamics.uv(2,:,:)); call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); @@ -207,7 +209,7 @@ end subroutine ini_ice_io ! !-------------------------------------------------------------------------------------------- ! -subroutine restart(istep, l_write, l_read, tracers, partit, mesh) +subroutine restart(istep, l_write, l_read, dynamics, tracers, partit, mesh) #if defined(__icepack) use icedrv_main, only: init_restart_icepack @@ -222,18 +224,19 @@ subroutine restart(istep, l_write, l_read, tracers, partit, mesh) logical :: l_write, l_read logical :: is_restart integer :: mpierr - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then - call ini_ocean_io(yearnew, tracers, partit, mesh) + call ini_ocean_io(yearnew, dynamics, tracers, partit, mesh) if (use_ice) call ini_ice_io (yearnew, partit, mesh) #if defined(__icepack) if (use_ice) call init_restart_icepack(yearnew, mesh) !icapack has its copy of p_partit => partit #endif else - call ini_ocean_io(yearold, tracers, partit, mesh) + call ini_ocean_io(yearold, dynamics, tracers, partit, mesh) if (use_ice) call ini_ice_io (yearold, partit, mesh) #if defined(__icepack) if (use_ice) call init_restart_icepack(yearold, mesh) !icapack has its copy of p_partit => partit diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index fb4ee6336..7caab848a 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -64,20 +64,25 @@ subroutine solve_ssh_ale(partit, mesh) type(t_partit), intent(inout), target :: partit end subroutine - subroutine compute_hbar_ale(partit, mesh) + subroutine compute_hbar_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine - subroutine vert_vel_ale(partit, mesh) + subroutine vert_vel_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh end subroutine subroutine update_thickness_ale(partit, mesh) @@ -1599,13 +1604,14 @@ end subroutine update_stiff_mat_ale !"FESOM2: from finite elements to finite volumes" ! ! ssh_rhs = alpha * grad[ int_hbot^hbar(n+0.5)( u^n+deltau)dz + W(n+0.5) ] -subroutine compute_ssh_rhs_ale(partit, mesh) +subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use g_config,only: which_ALE,dt use MOD_MESH - use o_ARRAYS + use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, UV_rhs, water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_comm_auto implicit none @@ -1615,14 +1621,16 @@ subroutine compute_ssh_rhs_ale(partit, mesh) integer :: ed, el(2), enodes(2), nz, n, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 real(kind=WP) :: dumc1_1, dumc1_2, dumc2_1, dumc2_2 !!PS - type(t_mesh), intent(inout), target :: mesh + type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) ssh_rhs=0.0_WP !___________________________________________________________________________ @@ -1712,13 +1720,14 @@ end subroutine compute_ssh_rhs_ale ! hbar(n+0.5) = hbar(n-0.5) - tau*ssh_rhs_old ! ! in S. Danilov et al.: "FESOM2: from finite elements to finite volumes" -subroutine compute_hbar_ale(partit, mesh) +subroutine compute_hbar_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, use_cavity use MOD_MESH - use o_ARRAYS + use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_comm_auto implicit none @@ -1730,13 +1739,16 @@ subroutine compute_hbar_ale(partit, mesh) integer :: ed, el(2), enodes(2), nz,n, elnodes(3), elem, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 - type(t_mesh), intent(inout), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh) , intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) !___________________________________________________________________________ ! compute the rhs @@ -1824,13 +1836,15 @@ end subroutine compute_hbar_ale ! > for zlevel: dh_k/dt_k=1 != 0 ! > for zstar : dh_k/dt_k=1...kbot-1 != 0 ! -subroutine vert_vel_ale(partit, mesh) +subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS + use o_ARRAYS, only: Wvel, fer_Wvel, fer_UV, CFL_z, water_flux, ssh_rhs, & + ssh_rhs_old, eta_n, d_eta, Wvel_e, Wvel_i use o_PARAM USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_comm_auto use io_RESTART !!PS use i_arrays !!PS @@ -1845,13 +1859,16 @@ subroutine vert_vel_ale(partit, mesh) real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int !PS real(kind=WP), dimension(:), allocatable :: max_dhbar2distr,cumsum_maxdhbar,distrib_dhbar integer , dimension(:), allocatable :: idx - type(t_mesh), intent(inout), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) !___________________________________________________________________________ ! Contributions from levels in divergence @@ -2495,27 +2512,30 @@ end subroutine solve_ssh_ale ! ! !=============================================================================== -subroutine impl_vert_visc_ale(partit, mesh) +subroutine impl_vert_visc_ale(dynamics, partit, mesh) USE MOD_MESH USE o_PARAM USE o_ARRAYS USE MOD_PARTIT USE MOD_PARSUP +USE MOD_DYN USE g_CONFIG,only: dt IMPLICIT NONE -type(t_mesh), intent(inout), target :: mesh +type(t_mesh) , intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit +type(t_dyn) , intent(inout), target :: dynamics real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) integer :: nz, elem, nzmax, nzmin, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd - +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +UV=>dynamics%uv(:,:,:) DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -2674,10 +2694,11 @@ end subroutine impl_vert_visc_ale ! ! !=============================================================================== -subroutine oce_timestep_ale(n, tracers, partit, mesh) +subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use g_config use MOD_MESH use MOD_TRACER + use MOD_DYN use o_ARRAYS use o_PARAM USE MOD_PARTIT @@ -2700,9 +2721,10 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) use write_step_info_interface use check_blowup_interface IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: n, node @@ -2861,7 +2883,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) t30=MPI_Wtime() call solve_ssh_ale(partit, mesh) - if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(partit, mesh) + if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(dynamics, partit, mesh) t3=MPI_Wtime() ! estimate new horizontal velocity u^(n+1) @@ -2874,7 +2896,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) ! Update to hbar(n+3/2) and compute dhe to be used on the next step if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_hbar_ale'//achar(27)//'[0m' - call compute_hbar_ale(partit, mesh) + call compute_hbar_ale(dynamics, partit, mesh) !___________________________________________________________________________ ! - Current dynamic elevation alpha*hbar(n+1/2)+(1-alpha)*hbar(n-1/2) @@ -2909,7 +2931,7 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) ! The main step of ALE procedure --> this is were the magic happens --> here ! is decided how change in hbar is distributed over the vertical layers if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call vert_vel_ale'//achar(27)//'[0m' - call vert_vel_ale(partit, mesh) + call vert_vel_ale(dynamics, partit, mesh) t7=MPI_Wtime() !___________________________________________________________________________ @@ -2925,11 +2947,11 @@ subroutine oce_timestep_ale(n, tracers, partit, mesh) t9=MPI_Wtime() !___________________________________________________________________________ ! write out global fields for debugging - call write_step_info(n,logfile_outfreq, tracers, partit, mesh) + call write_step_info(n,logfile_outfreq, dynamics, tracers, partit, mesh) ! check model for blowup --> ! write_step_info and check_blowup require ! togeather around 2.5% of model runtime - call check_blowup(n, tracers, partit, mesh) + call check_blowup(n, dynamics, tracers, partit, mesh) t10=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index ed5145ec2..336817503 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -127,13 +127,14 @@ subroutine solve_tracers_ale(tracers, partit, mesh) ! !=============================================================================== ! Driving routine Here with ALE changes!!! -subroutine solve_tracers_ale(tracers, partit, mesh) +subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use g_config use o_PARAM, only: SPP, Fer_GM - use o_arrays + use o_arrays, only: Wvel, Wvel_e, fer_Wvel, fer_UV use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use mod_tracer use g_comm_auto use o_tracers @@ -142,15 +143,19 @@ subroutine solve_tracers_ale(tracers, partit, mesh) use diff_tracers_ale_interface implicit none + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit integer :: tr_num, node, nzmax, nzmin - + real(kind=WP), dimension(:,:,:), pointer :: UV + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + !___________________________________________________________________________ if (SPP) call cal_rejected_salt(partit, mesh) if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) @@ -218,12 +223,13 @@ end subroutine solve_tracers_ale ! ! !=============================================================================== -subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) +subroutine adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) use g_config, only: flag_debug use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer + use MOD_TRACER + use MOD_DYN use o_arrays use diagnostics, only: ldiag_DVD, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd @@ -234,9 +240,10 @@ subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) real(kind=WP), intent(in), target :: dt integer :: node, nz integer, intent(in) :: tr_num - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics ! del_ttf ... initialised and setted to zero in call init_tracers_AB(tr_num) ! --> del_ttf ... equivalent to R_T^n in Danilov etal FESOM2: "from finite element ! to finite volume". At the end R_T^n should contain all advection therms and @@ -256,7 +263,7 @@ subroutine adv_tracers_ale(dt, tr_num, tracers, partit, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) + call do_oce_adv_tra(dt, dynamics%uv, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -1146,29 +1153,34 @@ end subroutine diff_part_hor_redi ! ! !=============================================================================== -SUBROUTINE diff_part_bh(tr_num, tracers, partit, mesh) - use o_ARRAYS +SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) + use o_ARRAYS, only: use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_DYN use o_param use g_config use g_comm_auto IMPLICIT NONE integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP) :: u1, v1, len, vi, tt, ww integer :: nz, ed, el(2), en(2), k, elem, nl1, ul1 real(kind=WP), allocatable :: temporary_ttf(:,:) real(kind=WP), pointer :: ttf(:,:) + real(kind=WP), dimension(:,:,:), pointer :: UV + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ttf => tracers%data(tr_num)%values ed=myDim_nod2D+eDim_nod2D diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 98c730732..66a6cdbfb 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -1,11 +1,14 @@ module momentum_adv_scalar_interface interface - subroutine momentum_adv_scalar(partit, mesh) + subroutine momentum_adv_scalar(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module @@ -13,11 +16,12 @@ subroutine momentum_adv_scalar(partit, mesh) ! ! !_______________________________________________________________________________ -subroutine compute_vel_rhs(partit, mesh) +subroutine compute_vel_rhs(dynamics, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - use o_ARRAYS + USE MOD_DYN + use o_ARRAYS, only: UV_rhs, UV_rhsAB, eta_n, coriolis, ssh_gp, pgf_x, pgf_y use i_ARRAYS use i_therm_param use o_PARAM @@ -29,8 +33,9 @@ subroutine compute_vel_rhs(partit, mesh) use momentum_adv_scalar_interface implicit none - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh integer :: elem, elnodes(3), nz, nzmax, nzmin real(kind=WP) :: ff, mm real(kind=WP) :: Fx, Fy, pre(3) @@ -38,10 +43,13 @@ subroutine compute_vel_rhs(partit, mesh) real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice + real(kind=WP), dimension(:,:,:), pointer :: UV + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) t1=MPI_Wtime() use_pice=0 @@ -121,7 +129,7 @@ subroutine compute_vel_rhs(partit, mesh) if (mype==0) write(*,*) 'in moment not adapted mom_adv advection typ for ALE, check your namelist' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) elseif (mom_adv==2) then - call momentum_adv_scalar(partit, mesh) + call momentum_adv_scalar(dynamics, partit, mesh) end if t3=MPI_Wtime() @@ -158,27 +166,32 @@ END SUBROUTINE compute_vel_rhs ! Momentum advection on scalar control volumes with ALE adaption--> exchange zinv(nz) ! against hnode(nz,node) !_______________________________________________________________________________ -subroutine momentum_adv_scalar(partit, mesh) +subroutine momentum_adv_scalar(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP -USE o_ARRAYS +use MOD_DYN +USE o_ARRAYS, only: Wvel_e, UV_rhsAB USE o_PARAM use g_comm_auto IMPLICIT NONE -type(t_mesh), intent(in), target :: mesh +type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit +type(t_mesh) , intent(in) , target :: mesh + integer :: n, nz, el1, el2 integer :: nl1, nl2, ul1, ul2, nod(2), el, ed, k, nle, ule real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) real(kind=WP) :: Unode_rhs(2,mesh%nl-1,partit%myDim_nod2d+partit%eDim_nod2D) +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 36b9f6d04..2a2ea25d8 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -13,112 +13,142 @@ ! 5. Leith_c=? (need to be adjusted) module h_viscosity_leith_interface interface - subroutine h_viscosity_leith(partit, mesh) + subroutine h_viscosity_leith(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_harmon_interface interface - subroutine visc_filt_harmon(partit, mesh) + subroutine visc_filt_harmon(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_hbhmix_interface interface - subroutine visc_filt_hbhmix(partit, mesh) + subroutine visc_filt_hbhmix(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_biharm_interface interface - subroutine visc_filt_biharm(option, partit, mesh) + subroutine visc_filt_biharm(option, dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN integer :: option - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_bcksct_interface interface - subroutine visc_filt_bcksct(partit, mesh) + subroutine visc_filt_bcksct(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_bilapl_interface interface - subroutine visc_filt_bilapl(partit, mesh) + subroutine visc_filt_bilapl(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_bidiff_interface interface - subroutine visc_filt_bidiff(partit, mesh) + subroutine visc_filt_bidiff(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module visc_filt_dbcksc_interface interface - subroutine visc_filt_dbcksc(partit, mesh) + subroutine visc_filt_dbcksc(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module backscatter_coef_interface interface - subroutine backscatter_coef(partit, mesh) + subroutine backscatter_coef(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module module uke_update_interface interface - subroutine uke_update(partit, mesh) + subroutine uke_update(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module @@ -128,11 +158,13 @@ subroutine uke_update(partit, mesh) ! Contains routines needed for computations of dynamics. ! includes: update_vel, compute_vel_nodes ! =================================================================== -SUBROUTINE update_vel(partit, mesh) +SUBROUTINE update_vel(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + USE MOD_DYN + + USE o_ARRAYS, only: d_eta, eta_n, UV_rhs USE o_PARAM USE g_CONFIG use g_comm_auto @@ -140,13 +172,16 @@ SUBROUTINE update_vel(partit, mesh) integer :: elem, elnodes(3), nz, m, nzmax, nzmin real(kind=WP) :: eta(3) real(kind=WP) :: Fx, Fy - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -165,23 +200,27 @@ SUBROUTINE update_vel(partit, mesh) call exchange_elem(UV, partit) end subroutine update_vel !========================================================================== -subroutine compute_vel_nodes(partit, mesh) +subroutine compute_vel_nodes(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN USE o_PARAM USE o_ARRAYS use g_comm_auto IMPLICIT NONE integer :: n, nz, k, elem, nln, uln, nle, ule real(kind=WP) :: tx, ty, tvol - type(t_mesh), intent(in), target :: mesh + + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) DO n=1, myDim_nod2D uln = ulevels_nod2D(n) @@ -208,11 +247,12 @@ subroutine compute_vel_nodes(partit, mesh) call exchange_nod(Unode, partit) end subroutine compute_vel_nodes !=========================================================================== -subroutine viscosity_filter(option, partit, mesh) +subroutine viscosity_filter(option, dynamics, partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP +use MOD_DYN use h_viscosity_leith_interface use visc_filt_harmon_interface use visc_filt_hbhmix_interface @@ -224,7 +264,8 @@ subroutine viscosity_filter(option, partit, mesh) use backscatter_coef_interface IMPLICIT NONE integer :: option -type(t_mesh), intent(in), target :: mesh +type(t_dyn) , intent(inout), target :: dynamics +type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit ! Driving routine @@ -241,34 +282,34 @@ subroutine viscosity_filter(option, partit, mesh) ! ==== ! Harmonic Leith parameterization ! ==== - call h_viscosity_leith(partit, mesh) - call visc_filt_harmon(partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) + call visc_filt_harmon(dynamics, partit, mesh) CASE (2) ! === ! Laplacian+Leith+biharmonic background ! === - call h_viscosity_leith(partit, mesh) - call visc_filt_hbhmix(partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) + call visc_filt_hbhmix(dynamics, partit, mesh) CASE (3) ! === ! Biharmonic Leith parameterization ! === - call h_viscosity_leith(partit, mesh) - call visc_filt_biharm(2, partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) + call visc_filt_biharm(2, dynamics, partit, mesh) CASE (4) ! === ! Biharmonic+upwind-type ! === - call visc_filt_biharm(1, partit, mesh) + call visc_filt_biharm(1, dynamics, partit, mesh) CASE (5) - call visc_filt_bcksct(partit, mesh) + call visc_filt_bcksct(dynamics, partit, mesh) CASE (6) - call visc_filt_bilapl(partit, mesh) + call visc_filt_bilapl(dynamics, partit, mesh) CASE (7) - call visc_filt_bidiff(partit, mesh) + call visc_filt_bidiff(dynamics, partit, mesh) CASE (8) - call backscatter_coef(partit, mesh) - call visc_filt_dbcksc(partit, mesh) + call backscatter_coef(dynamics, partit, mesh) + call visc_filt_dbcksc(dynamics, partit, mesh) CASE DEFAULT if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' call par_ex(partit%MPI_COMM_FESOM, partit%mype) @@ -276,24 +317,27 @@ subroutine viscosity_filter(option, partit, mesh) END SELECT end subroutine viscosity_filter ! =================================================================== -SUBROUTINE visc_filt_harmon(partit, mesh) +SUBROUTINE visc_filt_harmon(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP -USE o_ARRAYS +USE MOD_DYN +USE o_ARRAYS, only: Visc, UV_rhs USE o_PARAM USE g_CONFIG IMPLICIT NONE real(kind=WP) :: u1, v1, le(2), len, vi integer :: nz, ed, el(2) , nzmin,nzmax -type(t_mesh), intent(in), target :: mesh +type(t_dyn) , intent(inout), target :: dynamics +type(t_mesh) , intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +UV => dynamics%uv(:,:,:) ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area @@ -320,11 +364,12 @@ SUBROUTINE visc_filt_harmon(partit, mesh) END DO end subroutine visc_filt_harmon ! =================================================================== -SUBROUTINE visc_filt_biharm(option, partit, mesh) +SUBROUTINE visc_filt_biharm(option, dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + use MOD_DYN + USE o_ARRAYS, only: Visc, UV_rhs USE o_PARAM USE g_CONFIG use g_comm_auto @@ -335,13 +380,15 @@ SUBROUTINE visc_filt_biharm(option, partit, mesh) real(kind=WP) :: u1, v1, vi, len integer :: ed, el(2), nz, option, nzmin, nzmax real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ! Filter is applied twice. ed=myDim_elem2D+eDim_elem2D @@ -426,11 +473,12 @@ SUBROUTINE visc_filt_biharm(option, partit, mesh) end subroutine visc_filt_biharm ! =================================================================== -SUBROUTINE visc_filt_hbhmix(partit, mesh) +SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + use MOD_DYN + USE o_ARRAYS, only: Visc, UV_rhs USE o_PARAM USE g_CONFIG use g_comm_auto @@ -443,13 +491,15 @@ SUBROUTINE visc_filt_hbhmix(partit, mesh) real(kind=WP) :: u1, v1, vi, len, crosslen, le(2) integer :: ed, el(2), nz, nzmin, nzmax real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn), intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh), intent(in), target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ! Filter is applied twice. ed=myDim_elem2D+eDim_elem2D @@ -516,12 +566,13 @@ SUBROUTINE visc_filt_hbhmix(partit, mesh) end subroutine visc_filt_hbhmix ! =================================================================== -SUBROUTINE h_viscosity_leith(partit, mesh) +SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) ! ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + use MOD_DYN USE o_ARRAYS USE o_PARAM USE g_CONFIG @@ -531,9 +582,10 @@ SUBROUTINE h_viscosity_leith(partit, mesh) integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 real(kind=WP) :: leithx, leithy real(kind=WP), allocatable :: aux(:,:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -622,11 +674,12 @@ SUBROUTINE h_viscosity_leith(partit, mesh) deallocate(aux) END subroutine h_viscosity_leith ! ======================================================================= -SUBROUTINE visc_filt_bcksct(partit, mesh) +SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + use MOD_DYN + USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -635,13 +688,15 @@ SUBROUTINE visc_filt_bcksct(partit, mesh) real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax real(kind=8), allocatable :: U_b(:,:), V_b(:,:), U_c(:,:), V_c(:,:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. @@ -722,11 +777,12 @@ end subroutine visc_filt_bcksct ! \nu=|3u_c-u_n1-u_n2-u_n3|*sqrt(S_c)/100. There is an additional term ! in viscosity that is proportional to the velocity amplitude squared. ! The coefficient has to be selected experimentally. -SUBROUTINE visc_filt_bilapl(partit, mesh) +SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + use MOD_DYN + USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -734,13 +790,17 @@ SUBROUTINE visc_filt_bilapl(partit, mesh) real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax real(kind=8), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in), target :: mesh + + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -804,11 +864,12 @@ end subroutine visc_filt_bilapl ! On each edge, \nu=sqrt(|u_c1-u_c2|*sqrt(S_c1+S_c2)/100) ! The effect is \nu^2 ! Quadratic in velocity term can be introduced if needed. -SUBROUTINE visc_filt_bidiff(partit, mesh) +SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - USE o_ARRAYS + use MOD_DYN + USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -816,13 +877,16 @@ SUBROUTINE visc_filt_bidiff(partit, mesh) real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax real(kind=8), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ! ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -880,11 +944,13 @@ end subroutine visc_filt_bidiff ! =================================================================== -SUBROUTINE visc_filt_dbcksc(partit, mesh) +SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP -USE o_ARRAYS +use MOD_DYN +USE o_ARRAYS, only: UV_rhs, v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & + uke, uke_dif USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -896,12 +962,15 @@ SUBROUTINE visc_filt_dbcksc(partit, mesh) integer :: nz, ed, el(2) real(kind=8), allocatable :: U_c(:,:), V_c(:,:), UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) real(kind=8), allocatable :: uuu(:) -type(t_mesh), intent(in), target :: mesh +type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit +type(t_mesh) , intent(in) , target :: mesh +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +UV => dynamics%uv(:,:,:) ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area @@ -1028,7 +1097,7 @@ SUBROUTINE visc_filt_dbcksc(partit, mesh) UV_back_tend=UV_back uke_dif=uke_d - call uke_update(partit, mesh) + call uke_update(dynamics, partit, mesh) deallocate(V_c,U_c) deallocate(UV_dis,UV_back) deallocate(uke_d) @@ -1073,11 +1142,13 @@ SUBROUTINE backscatter_coef(partit, mesh) end subroutine backscatter_coef !=========================================================================== -SUBROUTINE uke_update(partit, mesh) +SUBROUTINE uke_update(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP -USE o_ARRAYS +use MOD_DYN +USE o_ARRAYS, only: uke_rhs, uke_dif, uke_back, uke_dis, uke, UV_dis_tend, uv_back_tend, uke_rhs_old, & + bvfreq, coriolis_node USE o_PARAM USE g_CONFIG use g_comm_auto @@ -1089,18 +1160,22 @@ SUBROUTINE uke_update(partit, mesh) !Why is it necessary to implement the length of the array? It doesn't work without! !integer, intent(in) :: t_levels -type(t_mesh), intent(in), target :: mesh +type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit +type(t_mesh) , intent(in) , target :: mesh + real(kind=8) :: hall, h1_eta, hnz, vol integer :: elnodes(3), nz, ed, edi, node, j, elem, q real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) integer :: kk, nzmax, el real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" +UV => dynamics%uv(:,:,:) !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered !rossby_num=2 @@ -1241,3 +1316,4 @@ SUBROUTINE uke_update(partit, mesh) end subroutine uke_update ! =================================================================== + diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 3576ef01f..e34b07cf4 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -183,7 +183,7 @@ MODULE o_ARRAYS IMPLICIT NONE ! Arrays are described in subroutine array_setup real(kind=WP), allocatable, target :: Wvel(:,:), Wvel_e(:,:), Wvel_i(:,:) -real(kind=WP), allocatable :: UV(:,:,:) +!!PS real(kind=WP), allocatable :: UV(:,:,:) real(kind=WP), allocatable :: UV_rhs(:,:,:), UV_rhsAB(:,:,:) real(kind=WP), allocatable :: uke(:,:), v_back(:,:), uke_back(:,:), uke_dis(:,:), uke_dif(:,:) real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index b81ccf727..f7bf16720 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -1,11 +1,14 @@ module relative_vorticity_interface interface - subroutine relative_vorticity(partit, mesh) + subroutine relative_vorticity(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module @@ -14,21 +17,26 @@ subroutine relative_vorticity(partit, mesh) ! (curl u+f)\times u+grad(u^2/2)+w du/dz ! ! =================================================================== -subroutine relative_vorticity(partit, mesh) - USE o_ARRAYS +subroutine relative_vorticity(dynamics, partit, mesh) + USE o_ARRAYS, only: vorticity USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_comm_auto IMPLICIT NONE integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - type(t_mesh), intent(in), target :: mesh + + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) !!PS DO n=1,myDim_nod2D !!PS nl1 = nlevels_nod2D(n)-1 @@ -108,18 +116,23 @@ subroutine relative_vorticity(partit, mesh) ! Now it the relative vorticity known on neighbors too end subroutine relative_vorticity ! ========================================================================== -subroutine compute_vel_rhs_vinv(partit, mesh) !vector invariant +subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant USE o_PARAM - USE o_ARRAYS + USE o_ARRAYS, only: UV_rhs, UV_rhsAB, eta_n, coriolis_node, hpressure, vorticity + USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + use MOD_DYN USE g_CONFIG use g_comm_auto use relative_vorticity_interface IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh + + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + integer :: n, n1, nz, elem, elnodes(3), nl1, j, nzmin,nzmax real(kind=WP) :: a, b, c, da, db, dc, dg, ff(3), gg, eta(3), pre(3), Fx, Fy,w real(kind=WP) :: uvert(mesh%nl,2), umean, vmean, friction @@ -127,11 +140,12 @@ subroutine compute_vel_rhs_vinv(partit, mesh) !vector invariant real(kind=WP) :: KE_node(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv real(kind=WP) :: density0_inv = 1./density_0 - + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) w = 0.0_WP @@ -195,7 +209,7 @@ subroutine compute_vel_rhs_vinv(partit, mesh) !vector invariant END DO END DO - call relative_vorticity(partit, mesh) + call relative_vorticity(dynamics, partit, mesh) ! ==================== ! Sea level and pressure contribution -\nabla(g\eta +hpressure/rho_0+V^2/2) ! and the Coriolis force (elemental part) diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index bf355e527..cf34e60ee 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -3,6 +3,7 @@ MODULE Toy_Channel_Soufflet USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER + USE MOD_DYN USE o_ARRAYS USE o_PARAM USE g_config @@ -44,12 +45,15 @@ MODULE Toy_Channel_Soufflet ! !-------------------------------------------------------------------------------------------- ! -subroutine relax_zonal_vel(partit, mesh) +subroutine relax_zonal_vel(dynamics, partit, mesh) implicit none integer :: elem, nz, nn, nn1 real(kind=WP) :: a, yy, uzon - type(t_mesh), intent(in), target :: mesh + + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -171,17 +175,20 @@ subroutine compute_zonal_mean_ini(partit, mesh) ! no division by 0 is occurring end subroutine compute_zonal_mean_ini !========================================================================== -subroutine compute_zonal_mean(tracers, partit, mesh) +subroutine compute_zonal_mean(dynamics, tracers, partit, mesh) implicit none integer :: elem, nz, m, elnodes(3) real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) ztem=0. zvel=0. @@ -235,22 +242,25 @@ subroutine compute_zonal_mean(tracers, partit, mesh) end subroutine compute_zonal_mean ! ==================================================================================== -subroutine initial_state_soufflet(tracers, partit, mesh) +subroutine initial_state_soufflet(dynamics, tracers, partit, mesh) ! Profiles Soufflet 2016 (OM) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics integer :: n, nz, elnodes(3) real(kind=8) :: dst, yn, Fy, Lx ! real(kind=8) :: Ljet,rhomax,Sb, drho_No, drho_So ! real(kind=8) :: z_No, z_So,dz_No,dz_So, drhosurf_No, drhosurf_So, zsurf real(kind=8) :: d_No(mesh%nl-1), d_So(mesh%nl-1), rho_No(mesh%nl-1), rho_So(mesh%nl-1) + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) dy=ysize/nybins/r_earth @@ -355,18 +365,21 @@ subroutine initial_state_soufflet(tracers, partit, mesh) write(*,*) mype, 'Vel', maxval(UV(1,:,:)), minval(UV(1,:,:)) END subroutine initial_state_soufflet ! =============================================================================== -subroutine energy_out_soufflet(partit, mesh) +subroutine energy_out_soufflet(dynamics, partit, mesh) implicit none real(kind=8) :: tke(2), aux(2), ww, wwaux integer :: elem, nz, m, elnodes(3), nybins real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + +real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +UV => dynamics%uv(:,:,:) nybins=100 diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index ac4d1d73f..c83e5ec8f 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,42 +1,48 @@ module write_step_info_interface interface - subroutine write_step_info(istep,outfreq,tracers,partit,mesh) + subroutine write_step_info(istep,outfreq,dynamics, tracers,partit,mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_DYN integer :: istep,outfreq - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics end subroutine end interface end module module check_blowup_interface interface - subroutine check_blowup(istep, tracers,partit,mesh) + subroutine check_blowup(istep, dynamics, tracers,partit,mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_DYN integer :: istep type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers + type(t_dyn) , intent(in) , target :: dynamics end subroutine end interface end module ! ! !=============================================================================== -subroutine write_step_info(istep, outfreq, tracers, partit, mesh) +subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use g_config, only: dt, use_ice use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_DYN use o_PARAM - use o_ARRAYS + use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, Wvel, Unode, CFL_z, & + pgf_x, pgf_y, Av, Kv use i_ARRAYS use g_comm_auto implicit none @@ -52,13 +58,17 @@ subroutine write_step_info(istep, outfreq, tracers, partit, mesh) max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av real(kind=WP) :: int_deta , int_dhbar real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + if (mod(istep,outfreq)==0) then !_______________________________________________________________________ @@ -242,14 +252,16 @@ end subroutine write_step_info ! ! !=============================================================================== -subroutine check_blowup(istep, tracers, partit, mesh) +subroutine check_blowup(istep, dynamics, tracers, partit, mesh) use g_config, only: logfile_outfreq, which_ALE use MOD_MESH - use MOD_TRACER - USE MOD_PARTIT - USE MOD_PARSUP + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN use o_PARAM - use o_ARRAYS + use o_ARRAYS, only: eta_n, d_eta, ssh_rhs, ssh_rhs_old, water_flux, stress_surf, & + Wvel, CFL_z, heat_flux, Kv, Av use i_ARRAYS use g_comm_auto use io_BLOWUP @@ -259,14 +271,18 @@ subroutine check_blowup(istep, tracers, partit, mesh) implicit none integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 - integer :: el, elidx - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + integer :: el, elidx + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then ! ! if (mype==0) then @@ -509,7 +525,7 @@ subroutine check_blowup(istep, tracers, partit, mesh) ! moment only over CPU mype==0 call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) if (found_blowup==1) then - call write_step_info(istep,1,tracers,partit,mesh) + call write_step_info(istep, 1, dynamics, tracers,partit,mesh) if (mype==0) then call sleep(1) write(*,*) @@ -529,7 +545,7 @@ subroutine check_blowup(istep, tracers, partit, mesh) write(*,*) ' _____.,-#%&$@%#&#~,._____' write(*,*) end if - call blowup(istep, tracers, partit, mesh) + call blowup(istep, dynamics, tracers, partit, mesh) if (mype==0) write(*,*) ' --> finished writing blow up file' call par_ex(partit%MPI_COMM_FESOM, partit%mype) endif From 0f17f32efe6b29e97ed6fb35b33d12d992d716c7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 1 Nov 2021 14:41:34 +0100 Subject: [PATCH 472/909] be able to write transposed 3D output files via preprocessor definition (as in 3bcd313, but intentionally without a switch to turn it off) --- src/io_meandata.F90 | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 969bbb69a..0d91521fa 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -639,8 +639,7 @@ subroutine create_new_file(entry, partit, mesh) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'axis', len_trim('T'), trim('T')), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'stored_direction', len_trim('increasing'), trim('increasing')), __LINE__) - call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, & - (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) + call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(entry%ndim:1:-1), entry%recID/), entry%varID), __LINE__) !CHUNKING stuff (netcdf libraries not always compited with it) !if (entry%ndim==2) then ! call assert_nf( nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)), __LINE__); @@ -750,7 +749,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r8, 1), __LINE__) + call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r8, 1), __LINE__) end if end if end do @@ -770,7 +769,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r4, 1), __LINE__) + call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__) end if end if end do From fa2054c4c6f5399471aa28034958559f9a33fea0 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 1 Nov 2021 22:19:06 +0100 Subject: [PATCH 473/909] fix bug --- src/io_restart.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 4d8a4d722..e198507ab 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -122,8 +122,8 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) call def_variable(oid, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); !___Define the netCDF variables for 3D fields_______________________________ - call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', dynamics.uv(1,:,:)); - call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', dynamics.uv(2,:,:)); + call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); + call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); From 15738ffa4b0efcb868535825b393e3c2f92def29 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 1 Nov 2021 22:23:27 +0100 Subject: [PATCH 474/909] fix bug --- src/io_meandata.F90 | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 0d91521fa..4acbd17eb 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -317,9 +317,9 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('Kv ') call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', dynamics.uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', dynamics%uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('v ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics.uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics%uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('w ') call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Av ') @@ -639,7 +639,8 @@ subroutine create_new_file(entry, partit, mesh) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'axis', len_trim('T'), trim('T')), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'stored_direction', len_trim('increasing'), trim('increasing')), __LINE__) - call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(entry%ndim:1:-1), entry%recID/), entry%varID), __LINE__) + call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, & + (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) !CHUNKING stuff (netcdf libraries not always compited with it) !if (entry%ndim==2) then ! call assert_nf( nf_def_var_chunking(entry%ncid, entry%varID, NF_CHUNKED, (/1, entry%glsize(1)/)), __LINE__); @@ -749,7 +750,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r8, 1), __LINE__) + call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r8, 1), __LINE__) end if end if end do @@ -769,7 +770,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__) + call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r4, 1), __LINE__) end if end if end do From fe078b4439652674181cc61bc13cc2d4d641a112 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 11:30:07 +0100 Subject: [PATCH 475/909] fix remaining compiler error for the derived dyamics%uv --- src/fvom_main.F90 | 8 +++-- src/oce_ale.F90 | 35 ++++++++++-------- src/oce_ale_tracer.F90 | 36 ++++++++++++------- src/oce_setup_step.F90 | 82 +++++++++++++++++++++++++----------------- 4 files changed, 97 insertions(+), 64 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index e9f4cb699..46ecfa650 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -125,7 +125,7 @@ program main call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") call dynamics_init(dynamics, partit, mesh) ! allocate array of ocean dynamics (derived type "t_tracer") call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) - call ocean_setup(tracers, partit, mesh) + call ocean_setup(dynamics, tracers, partit, mesh) if (mype==0) then write(*,*) 'FESOM ocean_setup... complete' @@ -294,14 +294,16 @@ program main !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' call oce_fluxes_mom(dynamics, partit, mesh) ! momentum only + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes...'//achar(27)//'[0m' call oce_fluxes(tracers, partit, mesh) end if - call before_oce_step(tracers, partit, mesh) ! prepare the things if required + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call before_oce_step...'//achar(27)//'[0m' + call before_oce_step(dynamics, tracers, partit, mesh) ! prepare the things if required t2 = MPI_Wtime() !___model ocean step____________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - call oce_timestep_ale(n, tracers, partit, mesh) + call oce_timestep_ale(n, dynamics, tracers, partit, mesh) t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 7caab848a..48388545c 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -32,12 +32,14 @@ subroutine init_surface_node_depth(partit, mesh) type(t_partit), intent(inout), target :: partit end subroutine - subroutine impl_vert_visc_ale(partit, mesh) + subroutine impl_vert_visc_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics end subroutine subroutine update_stiff_mat_ale(partit, mesh) @@ -48,12 +50,14 @@ subroutine update_stiff_mat_ale(partit, mesh) type(t_partit), intent(inout), target :: partit end subroutine - subroutine compute_ssh_rhs_ale(partit, mesh) + subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh + USE MOD_DYN + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics end subroutine subroutine solve_ssh_ale(partit, mesh) @@ -97,15 +101,17 @@ subroutine update_thickness_ale(partit, mesh) module oce_timestep_ale_interface interface - subroutine oce_timestep_ale(n, tracers, partit, mesh) + subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer - integer, intent(in) :: n - type(t_mesh), intent(in), target :: mesh + use MOD_TRACER + use MOD_DYN + integer, intent(in) :: n + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics end subroutine end interface end module @@ -1625,7 +1631,6 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -2855,17 +2860,17 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !!PS if (any(abs(Wvel_e)>1.0e20)) write(*,*) n, mype,' --> found Inf Wvel_e before compute_vel_rhs' if(mom_adv/=3) then - call compute_vel_rhs(partit, mesh) + call compute_vel_rhs(dynamics, partit, mesh) else call compute_vel_rhs_vinv(partit, mesh) end if !___________________________________________________________________________ - call viscosity_filter(visc_option, partit, mesh) + call viscosity_filter(visc_option, dynamics, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call impl_vert_visc_ale'//achar(27)//'[0m' - if(i_vert_visc) call impl_vert_visc_ale(partit, mesh) + if(i_vert_visc) call impl_vert_visc_ale(dynamics, partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ @@ -2877,7 +2882,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_ssh_rhs_ale'//achar(27)//'[0m' ! ssh_rhs=-alpha*\nabla\int(U_n+U_rhs)dz-(1-alpha)*... ! see "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (18) rhs - call compute_ssh_rhs_ale(partit, mesh) + call compute_ssh_rhs_ale(dynamics, partit, mesh) ! Take updated ssh matrix and solve --> new ssh! t30=MPI_Wtime() @@ -2889,7 +2894,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! estimate new horizontal velocity u^(n+1) ! u^(n+1) = u* + [-g * tau * theta * grad(eta^(n+1)-eta^(n)) ] if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call update_vel'//achar(27)//'[0m' - call update_vel(partit, mesh) + call update_vel(partit, dynamics, mesh) ! --> eta_(n) --> eta_(n+1) = eta_(n) + deta = eta_(n) + (eta_(n+1) + eta_(n)) t4=MPI_Wtime() @@ -2937,7 +2942,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! solve tracer equation if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call solve_tracers_ale'//achar(27)//'[0m' - call solve_tracers_ale(tracers, partit, mesh) + call solve_tracers_ale(dynamics, tracers, partit, mesh) t8=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 336817503..c962697bb 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -14,15 +14,17 @@ subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) end module module adv_tracers_ale_interface interface - subroutine adv_tracers_ale(dt, tr_num, tracer, partit, mesh) + subroutine adv_tracers_ale(dt, tr_num, dynamics, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN real(kind=WP), intent(in), target :: dt integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface @@ -71,14 +73,16 @@ subroutine diff_ver_part_impl_ale(tr_num, tracer, partit, mesh) end module module diff_tracers_ale_interface interface - subroutine diff_tracers_ale(tr_num, tracer, partit, mesh) + subroutine diff_tracers_ale(tr_num, dynamics, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface @@ -98,28 +102,32 @@ function bc_surface(n, id, sval, partit) end module module diff_part_bh_interface interface - subroutine diff_part_bh(tr_num, tracer, partit, mesh) + subroutine diff_part_bh(tr_num, dynamics, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit end subroutine end interface end module module solve_tracers_ale_interface interface - subroutine solve_tracers_ale(tracers, partit, mesh) + subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer + use mod_tracer + use MOD_DYN type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics end subroutine end interface end module @@ -177,10 +185,10 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) call init_tracers_AB(tr_num, tracers, partit, mesh) ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' - call adv_tracers_ale(dt, tr_num, tracers, partit, mesh) + call adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' - call diff_tracers_ale(tr_num, tracers, partit, mesh) + call diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' ! if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then @@ -280,11 +288,12 @@ end subroutine adv_tracers_ale ! ! !=============================================================================== -subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) +subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN use o_arrays use o_tracers use diff_part_hor_redi_interface @@ -296,6 +305,7 @@ subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) integer :: n, nzmax, nzmin integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -357,7 +367,7 @@ subroutine diff_tracers_ale(tr_num, tracers, partit, mesh) !init_tracers will set it to zero for the next timestep !init_tracers will set it to zero for the next timestep if (tracers%smooth_bh_tra) then - call diff_part_bh(tr_num, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) + call diff_part_bh(tr_num, dynamics, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) end if end subroutine diff_tracers_ale ! diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 6469eb99f..c54297598 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -42,28 +42,32 @@ subroutine dynamics_init(dynamics, partit, mesh) module ocean_setup_interface interface - subroutine ocean_setup(tracers, partit, mesh) + subroutine ocean_setup(dynamics, tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - type(t_mesh), intent(in), target :: mesh + use MOD_DYN + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics end subroutine end interface end module module before_oce_step_interface interface - subroutine before_oce_step(tracers, partit, mesh) + subroutine before_oce_step(dynamics, tracers, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - type(t_mesh), intent(in), target :: mesh + use MOD_DYN + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics end subroutine end interface end module @@ -354,29 +358,28 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics - - ! define dynamics namelist parameter - namelist /dynamics_visc / visc_opt, gamma0_visc, gamma1_visc, gamma2_visc, & - div_c_visc, leith_c_visc, use_ivertvisc, easy_bs_return - namelist /dynamics_general / momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - ! open and read namelist for I/O - open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) - if (iost == 0) then - if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' - else - if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - stop - end if - read(nm_unit, nml=dynamics_visc , iostat=iost) - read(nm_unit, nml=dynamics_general, iostat=iost) - close(nm_unit) +!!PS ! define dynamics namelist parameter +!!PS namelist /dynamics_visc / visc_opt, gamma0_visc, gamma1_visc, gamma2_visc, & +!!PS div_c_visc, leith_c_visc, use_ivertvisc, easy_bs_return +!!PS namelist /dynamics_general / momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl +!!PS +!!PS ! open and read namelist for I/O +!!PS open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) +!!PS if (iost == 0) then +!!PS if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' +!!PS else +!!PS if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost +!!PS call par_ex(partit%MPI_COMM_FESOM, partit%mype) +!!PS stop +!!PS end if +!!PS read(nm_unit, nml=dynamics_visc , iostat=iost) +!!PS read(nm_unit, nml=dynamics_general, iostat=iost) +!!PS close(nm_unit) ! define local vertice & elem array size elem_size=myDim_elem2D+eDim_elem2D @@ -413,17 +416,30 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%ssh_rhs_old= 0.0_WP ! set parameters in derived type - dynamics%visc_opt = visc_opt - dynamics%gamma0_visc = gamma0_visc - dynamics%gamma1_visc = gamma1_visc - dynamics%gamma2_visc = gamma2_visc - dynamics%div_c_visc = div_c_visc - dynamics%leith_c_visc = leith_c_visc - dynamics%use_ivertvisc = use_ivertvisc - dynamics%momadv_opt = momadv_opt - dynamics%use_freeslip = use_freeslip - dynamics%use_wsplit = use_wsplit - dynamics%wsplit_maxcfl = wsplit_maxcfl +!!PS dynamics%visc_opt = visc_opt +!!PS dynamics%gamma0_visc = gamma0_visc +!!PS dynamics%gamma1_visc = gamma1_visc +!!PS dynamics%gamma2_visc = gamma2_visc +!!PS dynamics%div_c_visc = div_c_visc +!!PS dynamics%leith_c_visc = leith_c_visc +!!PS dynamics%use_ivertvisc = use_ivertvisc +!!PS dynamics%momadv_opt = momadv_opt +!!PS dynamics%use_freeslip = use_freeslip +!!PS dynamics%use_wsplit = use_wsplit +!!PS dynamics%wsplit_maxcfl = wsplit_maxcfl + + dynamics%visc_opt = visc_option + dynamics%gamma0_visc = gamma0 + dynamics%gamma1_visc = gamma1 + dynamics%gamma2_visc = gamma2 + dynamics%div_c_visc = Div_c + dynamics%leith_c_visc = Leith_c + dynamics%use_ivertvisc = i_vert_visc + dynamics%momadv_opt = mom_adv + dynamics%use_freeslip = free_slip + dynamics%use_wsplit = w_split + dynamics%wsplit_maxcfl = w_max_cfl + END SUBROUTINE dynamics_init ! From 1b3507a46121d357c815a080347f2ce4f147a041 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 11:57:57 +0100 Subject: [PATCH 476/909] set flag_debug=.true. --- src/gen_modules_config.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index f265ea898..b9d3d1807 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -107,7 +107,7 @@ module g_config real(kind=WP) :: cavity_partial_cell_thresh=0.0_WP ! same as partial_cell_tresh but for surface logical :: toy_ocean=.false. ! Ersatz forcing has to be supplied character(100) :: which_toy="soufflet" - logical :: flag_debug=.false. ! prints name of actual subroutine he is in + logical :: flag_debug=.true. ! prints name of actual subroutine he is in logical :: flag_warn_cflz=.true. ! switches off cflz warning namelist /run_config/ use_ice,use_floatice, use_sw_pene, use_cavity, & use_cavity_partial_cell, cavity_partial_cell_thresh, toy_ocean, which_toy, flag_debug, flag_warn_cflz From a0f933ab58710bdde70eeb480dac42119dcb21c6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 17:14:44 +0100 Subject: [PATCH 477/909] fix bug in fvom_main.F90 and oce_dyn.F90 --- src/fvom_main.F90 | 3 ++- src/oce_dyn.F90 | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 46ecfa650..020860ff4 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -25,6 +25,7 @@ program main use io_mesh_info use diagnostics use mo_tidal +use dynamics_init_interface use tracer_init_interface use ocean_setup_interface use ice_setup_interface @@ -268,7 +269,7 @@ program main #endif call clock !___compute horizontal velocity on nodes (originaly on elements)________ - call compute_vel_nodes(partit, mesh) + call compute_vel_nodes(partit, dynamics, mesh) !___model sea-ice step__________________________________________________ t1 = MPI_Wtime() if(use_ice) then diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 2a2ea25d8..403239a12 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -206,7 +206,7 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) USE MOD_PARSUP USE MOD_DYN USE o_PARAM - USE o_ARRAYS + USE o_ARRAYS, only: Unode use g_comm_auto IMPLICIT NONE integer :: n, nz, k, elem, nln, uln, nle, ule From f9d6fd5f701f4e94b69e60c4c289f7be37584d0d Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 20:33:52 +0100 Subject: [PATCH 478/909] fix bug in fvom_main.F90 --- src/fvom_main.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 020860ff4..9c618fc3a 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -269,7 +269,7 @@ program main #endif call clock !___compute horizontal velocity on nodes (originaly on elements)________ - call compute_vel_nodes(partit, dynamics, mesh) + call compute_vel_nodes(dynamics, partit, mesh) !___model sea-ice step__________________________________________________ t1 = MPI_Wtime() if(use_ice) then From b4ebe4eb77171178e4a19b731947730503299604 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 20:46:36 +0100 Subject: [PATCH 479/909] fix bug oce_ale.F90 --- src/fvom_main.F90 | 1 + src/oce_ale.F90 | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 9c618fc3a..a5c2c16de 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -270,6 +270,7 @@ program main call clock !___compute horizontal velocity on nodes (originaly on elements)________ call compute_vel_nodes(dynamics, partit, mesh) + !___model sea-ice step__________________________________________________ t1 = MPI_Wtime() if(use_ice) then diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 48388545c..3b1486925 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2894,7 +2894,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! estimate new horizontal velocity u^(n+1) ! u^(n+1) = u* + [-g * tau * theta * grad(eta^(n+1)-eta^(n)) ] if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call update_vel'//achar(27)//'[0m' - call update_vel(partit, dynamics, mesh) + call update_vel(dynamics, partit, mesh) ! --> eta_(n) --> eta_(n+1) = eta_(n) + deta = eta_(n) + (eta_(n+1) + eta_(n)) t4=MPI_Wtime() From 6915f4513493a45b013af6b72f0ce8611b99eff6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 2 Nov 2021 21:03:17 +0100 Subject: [PATCH 480/909] fix bug in src/oce_ale_vel_rhs.F90 src/oce_ale.F90 --- src/oce_ale.F90 | 2 ++ src/oce_ale_vel_rhs.F90 | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 3b1486925..a356ad406 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2723,6 +2723,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use pressure_force_4_linfs_interface use pressure_force_4_zxxxx_interface use solve_tracers_ale_interface + use compute_vel_rhs_interface use write_step_info_interface use check_blowup_interface IMPLICIT NONE @@ -2866,6 +2867,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) end if !___________________________________________________________________________ + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call viscosity_filter'//achar(27)//'[0m' call viscosity_filter(visc_option, dynamics, partit, mesh) !___________________________________________________________________________ diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 66a6cdbfb..b9e9385fe 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -13,6 +13,20 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) end interface end module +module compute_vel_rhs_interface + interface + subroutine compute_vel_rhs(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + + end subroutine + end interface +end module ! ! !_______________________________________________________________________________ From 201ed65fc529708d207a80e2d703ceafb1981b9c Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 2 Nov 2021 22:15:52 +0100 Subject: [PATCH 481/909] fix bug and compiler issue --- src/fvom_main.F90 | 16 ++++++------- src/oce_ale.F90 | 52 ++++++++++++++++++++--------------------- src/oce_ale_vel_rhs.F90 | 1 + src/oce_dyn.F90 | 13 ++++------- src/oce_setup_step.F90 | 23 ++++++++---------- src/write_step_info.F90 | 4 ++-- 6 files changed, 51 insertions(+), 58 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index a5c2c16de..c23cb5f76 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -8,9 +8,9 @@ program main USE MOD_MESH +USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP -USE MOD_TRACER USE MOD_DYN USE o_ARRAYS USE o_PARAM @@ -58,10 +58,11 @@ program main real(kind=real32) :: runtime_alltimesteps -type(t_mesh) , target, save :: mesh -type(t_partit), target, save :: partit -type(t_tracer), target, save :: tracers -type(t_dyn) , target, save :: dynamics +type(t_mesh) , target, save :: mesh +type(t_tracer), target, save :: tracers +type(t_partit), target, save :: partit +type(t_dyn) , target, save :: dynamics + character(LEN=256) :: dump_dir, dump_filename logical :: L_EXISTS @@ -123,8 +124,8 @@ program main call check_mesh_consistency(partit, mesh) if (mype==0) t2=MPI_Wtime() + call dynamics_init(dynamics, partit, mesh) call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") - call dynamics_init(dynamics, partit, mesh) ! allocate array of ocean dynamics (derived type "t_tracer") call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) call ocean_setup(dynamics, tracers, partit, mesh) @@ -269,6 +270,7 @@ program main #endif call clock !___compute horizontal velocity on nodes (originaly on elements)________ + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_vel_nodes'//achar(27)//'[0m' call compute_vel_nodes(dynamics, partit, mesh) !___model sea-ice step__________________________________________________ @@ -296,10 +298,8 @@ program main !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' call oce_fluxes_mom(dynamics, partit, mesh) ! momentum only - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes...'//achar(27)//'[0m' call oce_fluxes(tracers, partit, mesh) end if - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call before_oce_step...'//achar(27)//'[0m' call before_oce_step(dynamics, tracers, partit, mesh) ! prepare the things if required t2 = MPI_Wtime() !___model ocean step____________________________________________________ diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index a356ad406..171fbdd49 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -37,9 +37,9 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine update_stiff_mat_ale(partit, mesh) @@ -54,10 +54,10 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN - type(t_mesh) , intent(in) , target :: mesh + use MOD_DYN + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine solve_ssh_ale(partit, mesh) @@ -73,10 +73,9 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - + type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine vert_vel_ale(dynamics, partit, mesh) @@ -84,9 +83,9 @@ subroutine vert_vel_ale(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - type(t_dyn) , intent(in) , target :: dynamics + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh + type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine update_thickness_ale(partit, mesh) @@ -105,13 +104,13 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use MOD_TRACER + use mod_tracer use MOD_DYN - integer, intent(in) :: n - type(t_mesh) , intent(in) , target :: mesh + integer, intent(in) :: n + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics end subroutine end interface end module @@ -1627,9 +1626,9 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) integer :: ed, el(2), enodes(2), nz, n, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 real(kind=WP) :: dumc1_1, dumc1_2, dumc2_1, dumc2_2 !!PS - type(t_dyn) , intent(in) , target :: dynamics + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh + type(t_dyn), intent(inout), target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -1744,9 +1743,9 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) integer :: ed, el(2), enodes(2), nz,n, elnodes(3), elem, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 - type(t_dyn) , intent(inout), target :: dynamics - type(t_mesh) , intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" @@ -1865,10 +1864,9 @@ subroutine vert_vel_ale(dynamics, partit, mesh) real(kind=WP), dimension(:), allocatable :: max_dhbar2distr,cumsum_maxdhbar,distrib_dhbar integer , dimension(:), allocatable :: idx type(t_dyn) , intent(inout), target :: dynamics + type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(inout), target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -2520,14 +2518,14 @@ end subroutine solve_ssh_ale subroutine impl_vert_visc_ale(dynamics, partit, mesh) USE MOD_MESH USE o_PARAM -USE o_ARRAYS +USE o_ARRAYS, only: UV_rhs, Av, Wvel_i, stress_surf USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN USE g_CONFIG,only: dt IMPLICIT NONE -type(t_mesh) , intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics @@ -2699,7 +2697,7 @@ end subroutine impl_vert_visc_ale ! ! !=============================================================================== -subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) +subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use g_config use MOD_MESH use MOD_TRACER @@ -2722,15 +2720,15 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use pressure_bv_interface use pressure_force_4_linfs_interface use pressure_force_4_zxxxx_interface - use solve_tracers_ale_interface use compute_vel_rhs_interface + use solve_tracers_ale_interface use write_step_info_interface use check_blowup_interface IMPLICIT NONE - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: n, node @@ -2872,7 +2870,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call impl_vert_visc_ale'//achar(27)//'[0m' - if(i_vert_visc) call impl_vert_visc_ale(dynamics, partit, mesh) + if(i_vert_visc) call impl_vert_visc_ale(dynamics,partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index b9e9385fe..b2b5d0c38 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -27,6 +27,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) end subroutine end interface end module + ! ! !_______________________________________________________________________________ diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 403239a12..784cc9e62 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -13,12 +13,10 @@ ! 5. Leith_c=? (need to be adjusted) module h_viscosity_leith_interface interface - subroutine h_viscosity_leith(dynamics, partit, mesh) + subroutine h_viscosity_leith(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh @@ -282,19 +280,19 @@ subroutine viscosity_filter(option, dynamics, partit, mesh) ! ==== ! Harmonic Leith parameterization ! ==== - call h_viscosity_leith(dynamics, partit, mesh) + call h_viscosity_leith(partit, mesh) call visc_filt_harmon(dynamics, partit, mesh) CASE (2) ! === ! Laplacian+Leith+biharmonic background ! === - call h_viscosity_leith(dynamics, partit, mesh) + call h_viscosity_leith(partit, mesh) call visc_filt_hbhmix(dynamics, partit, mesh) CASE (3) ! === ! Biharmonic Leith parameterization ! === - call h_viscosity_leith(dynamics, partit, mesh) + call h_viscosity_leith(partit, mesh) call visc_filt_biharm(2, dynamics, partit, mesh) CASE (4) ! === @@ -566,7 +564,7 @@ SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) end subroutine visc_filt_hbhmix ! =================================================================== -SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) +SUBROUTINE h_viscosity_leith(partit, mesh) ! ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) USE MOD_MESH @@ -582,7 +580,6 @@ SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 real(kind=WP) :: leithx, leithy real(kind=WP), allocatable :: aux(:,:) - type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index c54297598..544b5db87 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -48,14 +48,13 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) USE MOD_PARSUP use mod_tracer use MOD_DYN - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics end subroutine end interface end module - module before_oce_step_interface interface subroutine before_oce_step(dynamics, tracers, partit, mesh) @@ -64,10 +63,10 @@ subroutine before_oce_step(dynamics, tracers, partit, mesh) USE MOD_PARSUP use mod_tracer use MOD_DYN - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics end subroutine end interface end module @@ -91,10 +90,10 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) use oce_initial_state_interface use oce_adv_tra_fct_interfaces IMPLICIT NONE -type(t_dyn) , intent(inout), target :: dynamics -type(t_tracer), intent(inout), target :: tracers +type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit -type(t_mesh) , intent(inout), target :: mesh +type(t_tracer), intent(inout), target :: tracers +type(t_dyn), intent(inout), target :: dynamics integer :: n !___setup virt_salt_flux____________________________________________________ ! if the ale thinkness remain unchanged (like in 'linfs' case) the vitrual @@ -439,8 +438,6 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%use_freeslip = free_slip dynamics%use_wsplit = w_split dynamics%wsplit_maxcfl = w_max_cfl - - END SUBROUTINE dynamics_init ! ! @@ -872,17 +869,17 @@ SUBROUTINE before_oce_step(dynamics, tracers, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER - use MOD_DYN + USE MOD_DYN USE o_ARRAYS USE g_config USE Toy_Channel_Soufflet implicit none integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics + type(t_dyn), intent(inout), target :: dynamics #include "associate_part_def.h" #include "associate_mesh_def.h" diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index c83e5ec8f..5c68ff9af 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,6 +1,6 @@ module write_step_info_interface interface - subroutine write_step_info(istep,outfreq,dynamics, tracers,partit,mesh) + subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -16,7 +16,7 @@ subroutine write_step_info(istep,outfreq,dynamics, tracers,partit,mesh) end module module check_blowup_interface interface - subroutine check_blowup(istep, dynamics, tracers,partit,mesh) + subroutine check_blowup(istep, dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP From c7df56bed5eeb68803c1efe5b8ebd651cdd6269d Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 2 Nov 2021 22:40:19 +0100 Subject: [PATCH 482/909] exchange UV_rhsAB against derived type dynamics%uv_rhsAB --- src/io_blowup.F90 | 4 ++-- src/io_restart.F90 | 4 ++-- src/oce_ale_vel_rhs.F90 | 24 ++++++++++++++---------- src/oce_modules.F90 | 3 ++- src/oce_setup_step.F90 | 4 ++-- 5 files changed, 22 insertions(+), 17 deletions(-) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 9ad2146c8..ea0e063db 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -114,8 +114,8 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV_rhs(1,:,:)); call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV_rhs(2,:,:)); - call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); - call def_variable(bid, 'vrhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); + call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:)); + call def_variable(bid, 'vrhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:)); call def_variable(bid, 'zbar_n_bot' , (/nod2D/) , 'node bottom depth', 'm', zbar_n_bot); call def_variable(bid, 'zbar_e_bot' , (/elem2d/) , 'elem bottom depth', 'm', zbar_e_bot); call def_variable(bid, 'bottom_node_thickness' , (/nod2D/) , 'node bottom thickness', 'm', bottom_node_thickness); diff --git a/src/io_restart.F90 b/src/io_restart.F90 index e198507ab..fdd5512ca 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -124,8 +124,8 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) !___Define the netCDF variables for 3D fields_______________________________ call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); - call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); - call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); + call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:)); + call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:)); !___Save restart variables for TKE and IDEMIX_________________________________ if (trim(mix_scheme)=='cvmix_TKE' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index b2b5d0c38..fd3faa7bd 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -1,6 +1,7 @@ -module momentum_adv_scalar_interface + +module compute_vel_rhs_interface interface - subroutine momentum_adv_scalar(dynamics, partit, mesh) + subroutine compute_vel_rhs(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP @@ -13,9 +14,9 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) end interface end module -module compute_vel_rhs_interface +module momentum_adv_scalar_interface interface - subroutine compute_vel_rhs(dynamics, partit, mesh) + subroutine momentum_adv_scalar(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP @@ -28,6 +29,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) end interface end module + ! ! !_______________________________________________________________________________ @@ -36,7 +38,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - use o_ARRAYS, only: UV_rhs, UV_rhsAB, eta_n, coriolis, ssh_gp, pgf_x, pgf_y + use o_ARRAYS, only: UV_rhs, eta_n, coriolis, ssh_gp, pgf_x, pgf_y use i_ARRAYS use i_therm_param use o_PARAM @@ -58,13 +60,14 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) + UV =>dynamics%uv(:,:,:) + UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) t1=MPI_Wtime() use_pice=0 @@ -186,7 +189,7 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN -USE o_ARRAYS, only: Wvel_e, UV_rhsAB +USE o_ARRAYS, only: Wvel_e USE o_PARAM use g_comm_auto IMPLICIT NONE @@ -200,13 +203,14 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) real(kind=WP) :: Unode_rhs(2,mesh%nl-1,partit%myDim_nod2d+partit%eDim_nod2D) -real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) + UV =>dynamics%uv(:,:,:) + UV_rhsAB=>dynamics%uv_rhsAB(:,:,:) !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index e34b07cf4..7d791d5a4 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -184,7 +184,8 @@ MODULE o_ARRAYS ! Arrays are described in subroutine array_setup real(kind=WP), allocatable, target :: Wvel(:,:), Wvel_e(:,:), Wvel_i(:,:) !!PS real(kind=WP), allocatable :: UV(:,:,:) -real(kind=WP), allocatable :: UV_rhs(:,:,:), UV_rhsAB(:,:,:) +real(kind=WP), allocatable :: UV_rhs(:,:,:) +!!PS real(kind=WP), allocatable :: UV_rhsAB(:,:,:) real(kind=WP), allocatable :: uke(:,:), v_back(:,:), uke_back(:,:), uke_dis(:,:), uke_dif(:,:) real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 544b5db87..b0445da47 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -476,7 +476,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) !allocate(stress_diag(2, elem_size))!delete me !!PS allocate(UV(2, nl-1, elem_size)) allocate(UV_rhs(2,nl-1, elem_size)) -allocate(UV_rhsAB(2,nl-1, elem_size)) +!!PS allocate(UV_rhsAB(2,nl-1, elem_size)) allocate(Visc(nl-1, elem_size)) ! ================ ! elevation and its rhs @@ -625,7 +625,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) !!PS UV=0.0_WP UV_rhs=0.0_WP - UV_rhsAB=0.0_WP +!!PS UV_rhsAB=0.0_WP ! eta_n=0.0_WP d_eta=0.0_WP From 040659bd2da4a06316fe060c9262babb4c51e4c1 Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 2 Nov 2021 22:47:26 +0100 Subject: [PATCH 483/909] fix bug in ../src/oce_vel_rhs_vinv.F90 --- src/oce_vel_rhs_vinv.F90 | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index f7bf16720..d9cf24c99 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -118,7 +118,7 @@ end subroutine relative_vorticity ! ========================================================================== subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant USE o_PARAM - USE o_ARRAYS, only: UV_rhs, UV_rhsAB, eta_n, coriolis_node, hpressure, vorticity + USE o_ARRAYS, only: UV_rhs, eta_n, coriolis_node, hpressure, vorticity USE MOD_MESH USE MOD_PARTIT @@ -140,12 +140,13 @@ subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant real(kind=WP) :: KE_node(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv real(kind=WP) :: density0_inv = 1./density_0 - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) + UV_rhsAB => dynamics%uv_rhsAB(:,:,:) w = 0.0_WP From 0df4e4f165c2b06a8f73bf919731faf9dcd6117a Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 2 Nov 2021 23:32:44 +0100 Subject: [PATCH 484/909] exchange Wvel, Wvel_e, Wvel_i with coresponding derived types --- src/gen_modules_diag.F90 | 4 +++- src/io_blowup.F90 | 6 +++--- src/io_meandata.F90 | 6 +++--- src/io_restart.F90 | 6 +++--- src/oce_ale.F90 | 18 +++++++++++++----- src/oce_ale_tracer.F90 | 27 ++++++++++++++++++--------- src/oce_ale_vel_rhs.F90 | 3 ++- src/oce_dyn.F90 | 18 +++++++++++------- src/oce_setup_step.F90 | 8 +------- src/toy_channel_soufflet.F90 | 2 ++ src/write_step_info.F90 | 11 ++++++++--- 11 files changed, 67 insertions(+), 42 deletions(-) diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 5a0e47f90..0870a7052 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -244,11 +244,13 @@ subroutine diag_energy(mode, dynamics, partit, mesh) real(kind=WP) :: ux, vx, uy, vy, tvol, rval(2) real(kind=WP) :: geo_grad_x(3), geo_grad_y(3), geo_u(3), geo_v(3) real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) !===================== if (firstcall) then !allocate the stuff at the first call diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index ea0e063db..c88e367c1 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -143,9 +143,9 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) !!PS longname=trim(longname)//', Adams–Bashforth' !!PS call def_variable(bid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%valuesAB(:,:)(:,:)); end do - call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel); - call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel_e); - call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel_i); + call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w); + call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_e); + call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_i); call def_variable(bid, 'cfl_z' , (/nl-1, nod2D/) , 'vertical CFL criteria', '', CFL_z); !_____________________________________________________________________________ diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 4acbd17eb..5c6e0636f 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -157,7 +157,7 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('ssh ') call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('vve_5 ') - call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', Wvel(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', dynamics%w(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh_rhs ') call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) @@ -321,7 +321,7 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('v ') call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics%uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('w ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', dynamics%w(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Av ') call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u_dis_tend') @@ -433,7 +433,7 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', Wvel(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', dynamics%w(:,:), 1, 'm', i_real8, partit, mesh) call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) call def_stream(elem2D, myDim_elem2D, 'utau_bott', '(u, tau) at the bottom', 'N/(m s)', utau_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index fdd5512ca..dd2f5307d 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -158,9 +158,9 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) longname=trim(longname)//', Adams–Bashforth' call def_variable(oid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%valuesAB(:,:)); end do - call def_variable(oid, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel); - call def_variable(oid, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e); - call def_variable(oid, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_i); + call def_variable(oid, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', dynamics%w); + call def_variable(oid, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', dynamics%w_e); + call def_variable(oid, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', dynamics%w_i); end subroutine ini_ocean_io ! !-------------------------------------------------------------------------------------------- diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 171fbdd49..0d20a8f43 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1843,8 +1843,8 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: Wvel, fer_Wvel, fer_UV, CFL_z, water_flux, ssh_rhs, & - ssh_rhs_old, eta_n, d_eta, Wvel_e, Wvel_i + use o_ARRAYS, only: fer_Wvel, fer_UV, CFL_z, water_flux, ssh_rhs, & + ssh_rhs_old, eta_n, d_eta use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1867,11 +1867,15 @@ subroutine vert_vel_ale(dynamics, partit, mesh) type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) + UV =>dynamics%uv(:,:,:) + Wvel =>dynamics%w(:,:) + Wvel_e=>dynamics%w_e(:,:) + Wvel_i=>dynamics%w_i(:,:) !___________________________________________________________________________ ! Contributions from levels in divergence @@ -2518,7 +2522,7 @@ end subroutine solve_ssh_ale subroutine impl_vert_visc_ale(dynamics, partit, mesh) USE MOD_MESH USE o_PARAM -USE o_ARRAYS, only: UV_rhs, Av, Wvel_i, stress_surf +USE o_ARRAYS, only: UV_rhs, Av, stress_surf USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN @@ -2534,11 +2538,13 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) integer :: nz, elem, nzmax, nzmin, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:) , pointer :: Wvel_i #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -UV=>dynamics%uv(:,:,:) +UV =>dynamics%uv(:,:,:) +Wvel_i=>dynamics%w_i(:,:) DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -2952,10 +2958,12 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) t9=MPI_Wtime() !___________________________________________________________________________ ! write out global fields for debugging + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call write_step_info'//achar(27)//'[0m' call write_step_info(n,logfile_outfreq, dynamics, tracers, partit, mesh) ! check model for blowup --> ! write_step_info and check_blowup require ! togeather around 2.5% of model runtime + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call check_blowup'//achar(27)//'[0m' call check_blowup(n, dynamics, tracers, partit, mesh) t10=MPI_Wtime() diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index c962697bb..5335528d6 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -59,12 +59,14 @@ subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) end module module diff_ver_part_impl_ale_interface interface - subroutine diff_ver_part_impl_ale(tr_num, tracer, partit, mesh) + subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN integer, intent(in), target :: tr_num + type(t_dyn), intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -138,7 +140,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use g_config use o_PARAM, only: SPP, Fer_GM - use o_arrays, only: Wvel, Wvel_e, fer_Wvel, fer_UV + use o_arrays, only: fer_Wvel, fer_UV use mod_mesh USE MOD_PARTIT USE MOD_PARSUP @@ -157,12 +159,15 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit integer :: tr_num, node, nzmax, nzmin real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) + Wvel_e => dynamics%w_e(:,:) !___________________________________________________________________________ if (SPP) call cal_rejected_salt(partit, mesh) @@ -271,7 +276,7 @@ subroutine adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(dt, dynamics%uv, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) + call do_oce_adv_tra(dt, dynamics%uv, dynamics%w, dynamics%w_i, dynamics%w_e, tr_num, tracers, partit, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -360,7 +365,7 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (tracers%i_vert_diff) then ! do vertical diffusion: implicite - call diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) + call diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) end if !We DO not set del_ttf to zero because it will not be used in this timestep anymore @@ -454,13 +459,14 @@ end subroutine diff_ver_part_expl_ale ! !=============================================================================== ! vertical diffusivity augmented with Redi contribution [vertical flux of K(3,3)*d_zT] -subroutine diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) +subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_DYN use o_PARAM - use o_ARRAYS + use o_ARRAYS, only: Ki, Kv, heat_flux, water_flux, slope_tapered use i_ARRAYS USE MOD_PARTIT USE MOD_PARSUP @@ -472,8 +478,9 @@ subroutine diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) implicit none integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) @@ -485,12 +492,14 @@ subroutine diff_ver_part_impl_ale(tr_num, tracers, partit, mesh) logical :: do_wimpl=.true. real(kind=WP), dimension(:,:), pointer :: trarr + real(kind=WP), dimension(:,:), pointer :: Wvel_i #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - trarr=>tracers%data(tr_num)%values(:,:) + trarr => tracers%data(tr_num)%values(:,:) + Wvel_i => dynamics%w_i(:,:) !___________________________________________________________________________ if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index fd3faa7bd..926dda43a 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -189,7 +189,6 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN -USE o_ARRAYS, only: Wvel_e USE o_PARAM use g_comm_auto IMPLICIT NONE @@ -204,6 +203,7 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) real(kind=WP) :: Unode_rhs(2,mesh%nl-1,partit%myDim_nod2d+partit%eDim_nod2D) real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB +real(kind=WP), dimension(:,:), pointer :: Wvel_e #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -211,6 +211,7 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) #include "associate_mesh_ass.h" UV =>dynamics%uv(:,:,:) UV_rhsAB=>dynamics%uv_rhsAB(:,:,:) + Wvel_e =>dynamics%w_e(:,:) !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 784cc9e62..65dcf69a1 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -13,10 +13,12 @@ ! 5. Leith_c=? (need to be adjusted) module h_viscosity_leith_interface interface - subroutine h_viscosity_leith(partit, mesh) + subroutine h_viscosity_leith(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN + type(t_dyn), intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh @@ -280,19 +282,19 @@ subroutine viscosity_filter(option, dynamics, partit, mesh) ! ==== ! Harmonic Leith parameterization ! ==== - call h_viscosity_leith(partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) call visc_filt_harmon(dynamics, partit, mesh) CASE (2) ! === ! Laplacian+Leith+biharmonic background ! === - call h_viscosity_leith(partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) call visc_filt_hbhmix(dynamics, partit, mesh) CASE (3) ! === ! Biharmonic Leith parameterization ! === - call h_viscosity_leith(partit, mesh) + call h_viscosity_leith(dynamics, partit, mesh) call visc_filt_biharm(2, dynamics, partit, mesh) CASE (4) ! === @@ -564,14 +566,14 @@ SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) end subroutine visc_filt_hbhmix ! =================================================================== -SUBROUTINE h_viscosity_leith(partit, mesh) +SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) ! ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS + USE o_ARRAYS, only: Visc, vorticity USE o_PARAM USE g_CONFIG use g_comm_auto @@ -580,13 +582,15 @@ SUBROUTINE h_viscosity_leith(partit, mesh) integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 real(kind=WP) :: leithx, leithy real(kind=WP), allocatable :: aux(:,:) + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - + real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + Wvel =>dynamics%w(:,:) ! if(mom_adv<4) call relative_vorticity(partit, mesh) !!! vorticity array should be allocated ! Fill in viscosity: diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index b0445da47..aead52a12 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -474,9 +474,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Velocities ! ================ !allocate(stress_diag(2, elem_size))!delete me -!!PS allocate(UV(2, nl-1, elem_size)) allocate(UV_rhs(2,nl-1, elem_size)) -!!PS allocate(UV_rhsAB(2,nl-1, elem_size)) allocate(Visc(nl-1, elem_size)) ! ================ ! elevation and its rhs @@ -491,8 +489,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! ================ ! Vertical velocity and pressure ! ================ -allocate(Wvel(nl, node_size), hpressure(nl,node_size)) -allocate(Wvel_e(nl, node_size), Wvel_i(nl, node_size)) +allocate( hpressure(nl,node_size)) allocate(CFL_z(nl, node_size)) ! vertical CFL criteria allocate(bvfreq(nl,node_size),mixlay_dep(node_size),bv_ref(node_size)) ! ================ @@ -630,9 +627,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) eta_n=0.0_WP d_eta=0.0_WP ssh_rhs=0.0_WP - Wvel=0.0_WP - Wvel_e =0.0_WP - Wvel_i =0.0_WP CFL_z =0.0_WP hpressure=0.0_WP ! diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index cf34e60ee..8d05a0e73 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -375,11 +375,13 @@ subroutine energy_out_soufflet(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) +Wvel => dynamics%w(:,:) nybins=100 diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 5c68ff9af..369e3aec1 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -41,7 +41,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use MOD_TRACER use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, Wvel, Unode, CFL_z, & + use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, Unode, CFL_z, & pgf_x, pgf_y, Av, Kv use i_ARRAYS use g_comm_auto @@ -63,11 +63,13 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) if (mod(istep,outfreq)==0) then @@ -261,7 +263,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) use MOD_DYN use o_PARAM use o_ARRAYS, only: eta_n, d_eta, ssh_rhs, ssh_rhs_old, water_flux, stress_surf, & - Wvel, CFL_z, heat_flux, Kv, Av + CFL_z, heat_flux, Kv, Av use i_ARRAYS use g_comm_auto use io_BLOWUP @@ -277,11 +279,13 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then @@ -550,3 +554,4 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) call par_ex(partit%MPI_COMM_FESOM, partit%mype) endif end subroutine + From 1d3d86ff65530e54cc5893b59834079f2d651300 Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 2 Nov 2021 23:55:07 +0100 Subject: [PATCH 485/909] exchange UV_rhs against derived type --- src/io_blowup.F90 | 4 ++-- src/oce_ale.F90 | 14 +++++++------ src/oce_ale_vel_rhs.F90 | 5 +++-- src/oce_dyn.F90 | 38 +++++++++++++++++++++--------------- src/oce_modules.F90 | 4 ---- src/oce_setup_step.F90 | 5 ----- src/oce_vel_rhs_vinv.F90 | 5 +++-- src/toy_channel_soufflet.F90 | 4 +++- 8 files changed, 41 insertions(+), 38 deletions(-) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index c88e367c1..39eda3107 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -112,8 +112,8 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) call def_variable(bid, 'helem' , (/nl-1, elem2D/) , 'Element layer thickness', 'm/s', helem(:,:)); call def_variable(bid, 'u' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); - call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV_rhs(1,:,:)); - call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV_rhs(2,:,:)); + call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', dynamics%uv_rhs(1,:,:)); + call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv_rhs(2,:,:)); call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:)); call def_variable(bid, 'vrhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:)); call def_variable(bid, 'zbar_n_bot' , (/nod2D/) , 'node bottom depth', 'm', zbar_n_bot); diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 0d20a8f43..025827b07 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1612,7 +1612,7 @@ end subroutine update_stiff_mat_ale subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use g_config,only: which_ALE,dt use MOD_MESH - use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, UV_rhs, water_flux + use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1629,12 +1629,13 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn), intent(inout), target :: dynamics - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) + UV_rhs=>dynamics%uv_rhs(:,:,:) ssh_rhs=0.0_WP !___________________________________________________________________________ @@ -2522,7 +2523,7 @@ end subroutine solve_ssh_ale subroutine impl_vert_visc_ale(dynamics, partit, mesh) USE MOD_MESH USE o_PARAM -USE o_ARRAYS, only: UV_rhs, Av, stress_surf +USE o_ARRAYS, only: Av, stress_surf USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN @@ -2537,14 +2538,15 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) integer :: nz, elem, nzmax, nzmin, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd -real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs real(kind=WP), dimension(:,:) , pointer :: Wvel_i #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -UV =>dynamics%uv(:,:,:) -Wvel_i=>dynamics%w_i(:,:) +UV =>dynamics%uv(:,:,:) +UV_rhs =>dynamics%uv_rhs(:,:,:) +Wvel_i =>dynamics%w_i(:,:) DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 926dda43a..799094bdf 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -38,7 +38,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - use o_ARRAYS, only: UV_rhs, eta_n, coriolis, ssh_gp, pgf_x, pgf_y + use o_ARRAYS, only: eta_n, coriolis, ssh_gp, pgf_x, pgf_y use i_ARRAYS use i_therm_param use o_PARAM @@ -60,13 +60,14 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV =>dynamics%uv(:,:,:) + UV_rhs =>dynamics%uv_rhs(:,:,:) UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) t1=MPI_Wtime() diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 65dcf69a1..be2a1de72 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -164,7 +164,7 @@ SUBROUTINE update_vel(dynamics, partit, mesh) USE MOD_PARSUP USE MOD_DYN - USE o_ARRAYS, only: d_eta, eta_n, UV_rhs + USE o_ARRAYS, only: d_eta, eta_n USE o_PARAM USE g_CONFIG use g_comm_auto @@ -175,13 +175,14 @@ SUBROUTINE update_vel(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) + UV_rhs=>dynamics%uv_rhs(:,:,:) DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -322,7 +323,7 @@ SUBROUTINE visc_filt_harmon(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN -USE o_ARRAYS, only: Visc, UV_rhs +USE o_ARRAYS, only: Visc USE o_PARAM USE g_CONFIG IMPLICIT NONE @@ -332,12 +333,13 @@ SUBROUTINE visc_filt_harmon(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_mesh) , intent(in), target :: mesh type(t_partit), intent(inout), target :: partit -real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) +UV_rhs => dynamics%uv_rhs(:,:,:) ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area @@ -369,7 +371,7 @@ SUBROUTINE visc_filt_biharm(option, dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: Visc, UV_rhs + USE o_ARRAYS, only: Visc USE o_PARAM USE g_CONFIG use g_comm_auto @@ -383,12 +385,13 @@ SUBROUTINE visc_filt_biharm(option, dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) ! Filter is applied twice. ed=myDim_elem2D+eDim_elem2D @@ -478,7 +481,7 @@ SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: Visc, UV_rhs + USE o_ARRAYS, only: Visc USE o_PARAM USE g_CONFIG use g_comm_auto @@ -494,12 +497,13 @@ SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) type(t_dyn), intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) ! Filter is applied twice. ed=myDim_elem2D+eDim_elem2D @@ -680,7 +684,6 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -692,12 +695,13 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. @@ -783,7 +787,6 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -796,12 +799,13 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -870,7 +874,6 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: UV_rhs USE o_PARAM USE g_CONFIG USE g_comm_auto @@ -882,12 +885,13 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) ! ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -950,7 +954,7 @@ SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN -USE o_ARRAYS, only: UV_rhs, v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & +USE o_ARRAYS, only: v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & uke, uke_dif USE o_PARAM USE g_CONFIG @@ -966,12 +970,13 @@ SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh -real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) +UV_rhs => dynamics%uv_rhs(:,:,:) ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area @@ -1318,3 +1323,4 @@ end subroutine uke_update ! =================================================================== + diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 7d791d5a4..52abf3e95 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -182,10 +182,6 @@ MODULE o_ARRAYS USE o_PARAM IMPLICIT NONE ! Arrays are described in subroutine array_setup -real(kind=WP), allocatable, target :: Wvel(:,:), Wvel_e(:,:), Wvel_i(:,:) -!!PS real(kind=WP), allocatable :: UV(:,:,:) -real(kind=WP), allocatable :: UV_rhs(:,:,:) -!!PS real(kind=WP), allocatable :: UV_rhsAB(:,:,:) real(kind=WP), allocatable :: uke(:,:), v_back(:,:), uke_back(:,:), uke_dis(:,:), uke_dif(:,:) real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index aead52a12..26311d642 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -474,7 +474,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Velocities ! ================ !allocate(stress_diag(2, elem_size))!delete me -allocate(UV_rhs(2,nl-1, elem_size)) allocate(Visc(nl-1, elem_size)) ! ================ ! elevation and its rhs @@ -620,10 +619,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Initialize with zeros ! ================= -!!PS UV=0.0_WP - UV_rhs=0.0_WP -!!PS UV_rhsAB=0.0_WP -! eta_n=0.0_WP d_eta=0.0_WP ssh_rhs=0.0_WP diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index d9cf24c99..f1b87d68f 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -118,7 +118,7 @@ end subroutine relative_vorticity ! ========================================================================== subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant USE o_PARAM - USE o_ARRAYS, only: UV_rhs, eta_n, coriolis_node, hpressure, vorticity + USE o_ARRAYS, only: eta_n, coriolis_node, hpressure, vorticity USE MOD_MESH USE MOD_PARTIT @@ -140,12 +140,13 @@ subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant real(kind=WP) :: KE_node(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv real(kind=WP) :: density0_inv = 1./density_0 - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs, UV_rhsAB #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) UV_rhsAB => dynamics%uv_rhsAB(:,:,:) w = 0.0_WP diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index 8d05a0e73..53052ccc5 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -53,12 +53,14 @@ subroutine relax_zonal_vel(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + UV_rhs=>dynamics%uv_rhs(:,:,:) + DO elem=1, myDim_elem2D ! ======== ! Interpolation From bb06c5128ae1a152602d7669988978c326d9c281 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 00:26:29 +0100 Subject: [PATCH 486/909] exchange Unode against derived type dynamics%uvnode --- src/cavity_param.F90 | 22 +++++++++++++--------- src/gen_modules_cvmix_kpp.F90 | 31 ++++++++++++++++++------------- src/gen_modules_cvmix_pp.F90 | 12 ++++++++---- src/gen_modules_cvmix_tke.F90 | 16 ++++++++++------ src/gen_modules_diag.F90 | 21 +++++++++++---------- src/io_meandata.F90 | 4 ++-- src/oce_ale.F90 | 10 +++++----- src/oce_ale_mixing_kpp.F90 | 27 ++++++++++++++++----------- src/oce_ale_mixing_pp.F90 | 13 +++++++++---- src/oce_dyn.F90 | 10 +++++----- src/oce_modules.F90 | 2 +- src/oce_setup_step.F90 | 2 +- src/write_step_info.F90 | 21 +++++++++++---------- 13 files changed, 110 insertions(+), 81 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 3c3236c39..35ed3bdf5 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -137,19 +137,21 @@ end subroutine compute_nrst_pnt2cavline ! adjusted for use in FESOM by Ralph Timmermann, 16.02.2011 ! Reviewed by ? ! adapted by P. SCholz for FESOM2.0 -subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) +subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_DYN use o_PARAM , only: density_0, WP - use o_ARRAYS, only: heat_flux, water_flux, Unode, density_m_rho0,density_ref + use o_ARRAYS, only: heat_flux, water_flux, density_m_rho0, density_ref use i_ARRAYS, only: net_heat_flux, fresh_wa_flux implicit none !___________________________________________________________________________ type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(in), target :: tracers + type(t_dyn), intent(in), target :: dynamics real (kind=WP) :: temp,sal,tin,zice real (kind=WP) :: rhow, rhor, rho real (kind=WP) :: gats1, gats2, gas, gat @@ -187,11 +189,12 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) ! hemw= 4.02*14. ! oomw= -30. ! oofw= -2.5 - + real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) !___________________________________________________________________________ do node=1,myDim_nod2D !+eDim_nod2D @@ -216,7 +219,7 @@ subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) ! if(vt1.eq.0.) vt1=0.001 !rt re = Hz_r(i,j,N)*ds/un !Reynolds number - vt1 = sqrt(Unode(1,nzmin,node)*Unode(1,nzmin,node)+Unode(2,nzmin,node)*Unode(2,nzmin,node)) + vt1 = sqrt(UVnode(1,nzmin,node)*UVnode(1,nzmin,node)+UVnode(2,nzmin,node)*UVnode(2,nzmin,node)) vt1 = max(vt1,0.001_WP) !vt1 = max(vt1,0.005) ! CW re = 10._WP/un !vt1*re (=velocity times length scale over kinematic viscosity) is the Reynolds number @@ -388,7 +391,7 @@ subroutine cavity_momentum_fluxes(dynamics, partit, mesh) USE MOD_PARSUP USE MOD_DYN use o_PARAM , only: density_0, C_d, WP - use o_ARRAYS, only: Unode, stress_surf, stress_node_surf + use o_ARRAYS, only: stress_surf, stress_node_surf use i_ARRAYS, only: u_w, v_w implicit none @@ -398,13 +401,14 @@ subroutine cavity_momentum_fluxes(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh integer :: elem, elnodes(3), nzmin, node real(kind=WP) :: aux - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) + UVnode=>dynamics%uvnode(:,:,:) !___________________________________________________________________________ do elem=1,myDim_elem2D @@ -428,9 +432,9 @@ subroutine cavity_momentum_fluxes(dynamics, partit, mesh) ! momentum stress: ! need to check the sensitivity to the drag coefficient ! here I use the bottom stress coefficient, which is 3e-3, for this FO2 work. - aux=sqrt(Unode(1,nzmin,node)**2+Unode(2,nzmin,node)**2)*density_0*C_d - stress_node_surf(1,node)=-aux*Unode(1,nzmin,node) - stress_node_surf(2,node)=-aux*Unode(2,nzmin,node) + aux=sqrt(UVnode(1,nzmin,node)**2+UVnode(2,nzmin,node)**2)*density_0*C_d + stress_node_surf(1,node)=-aux*UVnode(1,nzmin,node) + stress_node_surf(2,node)=-aux*UVnode(2,nzmin,node) end do end subroutine cavity_momentum_fluxes ! diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 81c35cfdd..33c587016 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -26,6 +26,7 @@ module g_cvmix_kpp USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_DYN use o_arrays use g_comm_auto use i_arrays @@ -347,10 +348,11 @@ end subroutine init_cvmix_kpp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_kpp(tracers, partit, mesh) + subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers + type(t_dyn) , intent(in), target :: dynamics integer :: node, elem, nz, nln, nun, elnodes(3), aux_nz real(kind=WP) :: vshear2, dz2, aux, aux_wm(mesh%nl), aux_ws(mesh%nl) real(kind=WP) :: aux_coeff, sigma, stable @@ -361,12 +363,15 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 real(kind=WP) :: sfc_rhopot, sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2 real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) + UVnode=>dynamics%uvnode(:,:,:) + !_______________________________________________________________________ kpp_Av = 0.0_WP kpp_Kv = 0.0_WP @@ -402,15 +407,15 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) !___________________________________________________________ ! calculate squared velocity shear referenced to the surface ! --> cvmix wants to have it with respect to the midlevel rather than full levels - !!PS kpp_dvsurf2(nz) = ((Unode(1,nz-1,node)+Unode(1,nz,node))*0.5 - Unode( 1,1,node) )**2 + & - !!PS ((Unode(2,nz-1,node)+Unode(2,nz,node))*0.5 - Unode( 2,1,node) )**2 - kpp_dvsurf2(nz) = ((Unode(1,nz-1,node)+Unode(1,nz,node))*0.5 - Unode( 1,nun,node) )**2 + & - ((Unode(2,nz-1,node)+Unode(2,nz,node))*0.5 - Unode( 2,nun,node) )**2 + !!PS kpp_dvsurf2(nz) = ((UVnode(1,nz-1,node)+UVnode(1,nz,node))*0.5 - UVnode( 1,1,node) )**2 + & + !!PS ((UVnode(2,nz-1,node)+UVnode(2,nz,node))*0.5 - UVnode( 2,1,node) )**2 + kpp_dvsurf2(nz) = ((UVnode(1,nz-1,node)+UVnode(1,nz,node))*0.5 - UVnode( 1,nun,node) )**2 + & + ((UVnode(2,nz-1,node)+UVnode(2,nz,node))*0.5 - UVnode( 2,nun,node) )**2 !___________________________________________________________ ! calculate shear Richardson number Ri = N^2/(du/dz)^2 dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 + & - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 + & + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 vshear2 = vshear2/dz2 kpp_shearRi(nz) = max(bvfreq(nz,node),0.0_WP)/(vshear2+kpp_epsln) @@ -457,8 +462,8 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) htot = htot+delh sfc_temp = sfc_temp + temp(nztmp,node)*delh sfc_salt = sfc_salt + salt(nztmp,node)*delh - sfc_u = sfc_u + Unode(1,nztmp,node) *delh - sfc_v = sfc_v + Unode(2,nztmp,node) *delh + sfc_u = sfc_u + UVnode(1,nztmp,node) *delh + sfc_v = sfc_v + UVnode(2,nztmp,node) *delh end do sfc_temp = sfc_temp/htot sfc_salt = sfc_salt/htot @@ -468,8 +473,8 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) !___________________________________________________________ ! calculate vertical shear between present layer and surface ! averaged sfc_u and sfc_v - kpp_dvsurf2(nz) = (Unode(1,nz,node)-sfc_u)**2 + & - (Unode(2,nz,node)-sfc_v)**2 + kpp_dvsurf2(nz) = (UVnode(1,nz,node)-sfc_u)**2 + & + (UVnode(2,nz,node)-sfc_v)**2 !___________________________________________________________ ! calculate buoyancy difference between the surface averaged @@ -492,8 +497,8 @@ subroutine calc_cvmix_kpp(tracers, partit, mesh) ! calculate shear Richardson number Ri = N^2/(du/dz)^2 for ! mixing parameterisation below ocean boundary layer dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 + & - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 + & + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 vshear2 = vshear2/dz2 kpp_shearRi(nz) = max(bvfreq(nz,node),0.0_WP)/(vshear2+kpp_epsln) end do ! --> do nz=1, nln diff --git a/src/gen_modules_cvmix_pp.F90 b/src/gen_modules_cvmix_pp.F90 index 39dfa5673..58e9f2104 100644 --- a/src/gen_modules_cvmix_pp.F90 +++ b/src/gen_modules_cvmix_pp.F90 @@ -27,6 +27,7 @@ module g_cvmix_pp use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use o_arrays use g_comm_auto use i_arrays @@ -66,7 +67,6 @@ module g_cvmix_pp ! allocate and initialize CVMIX PP variables --> call initialisation ! routine from cvmix library subroutine init_cvmix_pp(partit, mesh) - use MOD_MESH implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -162,17 +162,21 @@ end subroutine init_cvmix_pp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_pp(partit, mesh) + subroutine calc_cvmix_pp(dynamics, partit, mesh) use MOD_MESH + implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn), intent(inout), target :: dynamics integer :: node, elem, nz, nln, nun, elnodes(3), windnl=2, node_size real(kind=WP) :: vshear2, dz2, Kvb + real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) node_size = myDim_nod2D !_______________________________________________________________________ do node = 1,node_size @@ -186,8 +190,8 @@ subroutine calc_cvmix_pp(partit, mesh) !!PS do nz=2,nln do nz=nun+1,nln dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 +& - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 +& + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 vshear2 = vshear2/dz2 ! WIKIPEDIA: The Richardson number is always ! considered positive. A negative value of N² (i.e. complex N) diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index c286cf5f4..aa1deae21 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -28,6 +28,7 @@ module g_cvmix_tke use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use o_arrays use g_comm_auto implicit none @@ -249,20 +250,23 @@ end subroutine init_cvmix_tke ! !=========================================================================== ! calculate TKE vertical mixing coefficients from CVMIX library - subroutine calc_cvmix_tke(partit, mesh) + subroutine calc_cvmix_tke(dynamics, partit, mesh) implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn), intent(inout), target :: dynamics integer :: node, elem, nelem, nz, nln, nun, elnodes(3), node_size real(kind=WP) :: tvol real(kind=WP) :: dz_trr(mesh%nl), bvfreq2(mesh%nl), vshear2(mesh%nl) real(kind=WP) :: tke_Av_old(mesh%nl), tke_Kv_old(mesh%nl), tke_old(mesh%nl) - + real(kind=WP), dimension(:,:,:), pointer :: UVnode + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + UVnode=>dynamics%uvnode(:,:,:) + node_size = myDim_nod2D !_______________________________________________________________________ ! calculate all neccessary forcing for TKE @@ -297,8 +301,8 @@ subroutine calc_cvmix_tke(partit, mesh) ! calculate for TKE 3D vertical velocity shear vshear2=0.0_WP do nz=nun+1,nln - vshear2(nz)=(( Unode(1, nz-1, node) - Unode(1, nz, node))**2 + & - ( Unode(2, nz-1, node) - Unode(2, nz, node))**2)/ & + vshear2(nz)=(( UVnode(1, nz-1, node) - UVnode(1, nz, node))**2 + & + ( UVnode(2, nz-1, node) - UVnode(2, nz, node))**2)/ & ((Z_3d_n(nz-1,node)-Z_3d_n(nz,node))**2) end do @@ -399,4 +403,4 @@ subroutine calc_cvmix_tke(partit, mesh) end do end do end subroutine calc_cvmix_tke -end module g_cvmix_tke +end module g_cvmix_tke \ No newline at end of file diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 0870a7052..5015da263 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -243,14 +243,15 @@ subroutine diag_energy(mode, dynamics, partit, mesh) integer :: iup, ilo real(kind=WP) :: ux, vx, uy, vy, tvol, rval(2) real(kind=WP) :: geo_grad_x(3), geo_grad_y(3), geo_u(3), geo_v(3) - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - Wvel => dynamics%w(:,:) + UV => dynamics%uv(:,:,:) + UVnode=> dynamics%uvnode(:,:,:) + Wvel => dynamics%w(:,:) !===================== if (firstcall) then !allocate the stuff at the first call @@ -291,9 +292,9 @@ subroutine diag_energy(mode, dynamics, partit, mesh) if (mode==0) return end if - u_x_u=Unode(1,1:nl-1,1:myDim_nod2D)*Unode(1,1:nl-1,1:myDim_nod2D) - u_x_v=Unode(1,1:nl-1,1:myDim_nod2D)*Unode(2,1:nl-1,1:myDim_nod2D) - v_x_v=Unode(2,1:nl-1,1:myDim_nod2D)*Unode(2,1:nl-1,1:myDim_nod2D) + u_x_u=UVnode(1,1:nl-1,1:myDim_nod2D)*UVnode(1,1:nl-1,1:myDim_nod2D) + u_x_v=UVnode(1,1:nl-1,1:myDim_nod2D)*UVnode(2,1:nl-1,1:myDim_nod2D) + v_x_v=UVnode(2,1:nl-1,1:myDim_nod2D)*UVnode(2,1:nl-1,1:myDim_nod2D) ! this loop might be very expensive DO n=1, myDim_elem2D nzmax = nlevels(n) @@ -396,10 +397,10 @@ subroutine diag_energy(mode, dynamics, partit, mesh) if (nlevels(elem)-1 < nz) cycle elnodes=elem2D_nodes(:, elem) tvol=tvol+elem_area(elem) - ux=ux+sum(gradient_sca(1:3,elem)*Unode(1,nz,elnodes))*elem_area(elem) !accumulate tensor of velocity derivatives - vx=vx+sum(gradient_sca(1:3,elem)*Unode(2,nz,elnodes))*elem_area(elem) - uy=uy+sum(gradient_sca(4:6,elem)*Unode(1,nz,elnodes))*elem_area(elem) - vy=vy+sum(gradient_sca(4:6,elem)*Unode(2,nz,elnodes))*elem_area(elem) + ux=ux+sum(gradient_sca(1:3,elem)*UVnode(1,nz,elnodes))*elem_area(elem) !accumulate tensor of velocity derivatives + vx=vx+sum(gradient_sca(1:3,elem)*UVnode(2,nz,elnodes))*elem_area(elem) + uy=uy+sum(gradient_sca(4:6,elem)*UVnode(1,nz,elnodes))*elem_area(elem) + vy=vy+sum(gradient_sca(4:6,elem)*UVnode(2,nz,elnodes))*elem_area(elem) END DO dudx(nz,n)=ux/tvol!/area(nz, n)/3. dvdx(nz,n)=vx/tvol diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 5c6e0636f..4cf2f09cd 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -428,8 +428,8 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dvdz', 'int(Av * dv/dz)', 'm3/s2', av_dvdz(:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz_sq', 'Av * (du/dz)^2', 'm^2/s^3', av_dudz_sq(:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'Vertical mixing A', 'm2/s', Av(:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', Unode(1,:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', Unode(2,:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', dynamics%uvnode(1,:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', dynamics%uvnode(2,:,:), 1, 'm', i_real8, partit, mesh) call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 025827b07..d30938e10 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2808,7 +2808,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! use FESOM2.0 tuned k-profile parameterization for vertical mixing if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_KPP'//achar(27)//'[0m' - call oce_mixing_KPP(Av, Kv_double, tracers, partit, mesh) + call oce_mixing_KPP(Av, Kv_double, dynamics, tracers, partit, mesh) Kv=Kv_double(:,:,1) call mo_convect(partit, mesh) @@ -2816,13 +2816,13 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! mixing else if(mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_PP'//achar(27)//'[0m' - call oce_mixing_PP(partit, mesh) + call oce_mixing_PP(dynamics, partit, mesh) call mo_convect(partit, mesh) ! use CVMIX KPP (Large at al. 1994) else if(mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_kpp'//achar(27)//'[0m' - call calc_cvmix_kpp(tracers, partit, mesh) + call calc_cvmix_kpp(dynamics, tracers, partit, mesh) call mo_convect(partit, mesh) ! use CVMIX PP (Pacanowski and Philander 1981) parameterisation for mixing @@ -2830,7 +2830,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! N^2 and vertical horizontal velocity shear dui/dz else if(mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_pp'//achar(27)//'[0m' - call calc_cvmix_pp(partit, mesh) + call calc_cvmix_pp(dynamics, partit, mesh) call mo_convect(partit, mesh) ! use CVMIX TKE (turbulent kinetic energy closure) parameterisation for @@ -2839,7 +2839,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! Model for the diapycnal diffusivity induced by internal gravity waves" else if(mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_tke'//achar(27)//'[0m' - call calc_cvmix_tke(partit, mesh) + call calc_cvmix_tke(dynamics, partit, mesh) call mo_convect(partit, mesh) end if diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 5c62871e1..cc2a54890 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -11,6 +11,7 @@ MODULE o_mixing_KPP_mod USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER + USE MOD_DYN USE o_ARRAYS USE g_config USE i_arrays @@ -242,7 +243,7 @@ end subroutine oce_mixing_kpp_init ! diffK = diffusion coefficient (m^2/s) ! !--------------------------------------------------------------- - subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) + subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) IMPLICIT NONE @@ -253,6 +254,7 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers + type(t_dyn) , intent(in), target :: dynamics integer :: node, kn, elem, elnodes(3) integer :: nz, ns, j, q, lay, lay_mi, nzmin, nzmax real(KIND=WP) :: smftu, smftv, aux, vol @@ -265,11 +267,12 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) real(KIND=WP), dimension(mesh%nl, partit%myDim_elem2D+partit%eDim_elem2D), intent(inout) :: viscAE!for momentum (elements) real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D +partit%eDim_nod2D) :: viscA !for momentum (nodes) real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D +partit%eDim_nod2D, tracers%num_tracers), intent(inout) :: diffK !for T and S - + real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) ViscA=0.0_WP DO node=1, myDim_nod2D !+eDim_nod2D @@ -299,15 +302,15 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) dbsfc(nzmin,node) = 0.0_WP ! Surface velocity - usurf = Unode(1,nzmin,node) - vsurf = Unode(2,nzmin,node) + usurf = UVnode(1,nzmin,node) + vsurf = UVnode(2,nzmin,node) !!PS DO nz=2, nlevels_nod2d(node)-1 DO nz=nzmin+1, nzmax-1 ! Squared velocity shear referenced to surface (@ Z) - u_loc = 0.5_WP * ( Unode(1,nz-1,node) + Unode(1,nz,node) ) - v_loc = 0.5_WP * ( Unode(2,nz-1,node) + Unode(2,nz,node) ) + u_loc = 0.5_WP * ( UVnode(1,nz-1,node) + UVnode(1,nz,node) ) + v_loc = 0.5_WP * ( UVnode(2,nz-1,node) + UVnode(2,nz,node) ) dVsq(nz,node) = ( usurf - u_loc )**2 + ( vsurf - v_loc )**2 @@ -347,7 +350,7 @@ subroutine oce_mixing_KPP(viscAE, diffK, tracers, partit, mesh) ! compute interior mixing coefficients everywhere, due to constant ! internal wave activity, static instability, and local shear ! instability. - CALL ri_iwmix(viscA, diffK, tracers, partit, mesh) + CALL ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) ! add double diffusion IF (double_diffusion) then CALL ddmix(diffK, tracers, partit, mesh) @@ -729,11 +732,12 @@ END SUBROUTINE wscale ! visc = viscosity coefficient (m**2/s) ! diff = diffusion coefficient (m**2/s) ! - subroutine ri_iwmix(viscA, diffK, tracers, partit, mesh) + subroutine ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) IMPLICIT NONE type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers + type(t_dyn), intent(in), target :: dynamics integer :: node, nz, mr, nzmin, nzmax real(KIND=WP) , parameter :: Riinfty = 0.8_WP ! local Richardson Number limit for shear instability (LMD 1994 uses 0.7) real(KIND=WP) :: ri_prev, tmp @@ -746,11 +750,12 @@ subroutine ri_iwmix(viscA, diffK, tracers, partit, mesh) ! Put them under the namelist.oce logical :: smooth_richardson_number = .false. integer :: num_smoothings = 1 ! for vertical smoothing of Richardson number - + real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) ! Compute Richardson number and store it as diffK to save memory DO node=1, myDim_nod2D! +eDim_nod2D @@ -759,8 +764,8 @@ subroutine ri_iwmix(viscA, diffK, tracers, partit, mesh) !!PS DO nz=2,nlevels_nod2d(node)-1 DO nz=nzmin+1,nzmax-1 dz_inv = 1.0_WP / (Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) ! > 0 - shear = ( Unode(1, nz-1, node) - Unode(1, nz, node) )**2 + & - ( Unode(2, nz-1, node) - Unode(2, nz, node) )**2 + shear = ( UVnode(1, nz-1, node) - UVnode(1, nz, node) )**2 + & + ( UVnode(2, nz-1, node) - UVnode(2, nz, node) )**2 shear = shear * dz_inv * dz_inv diffK(nz,node,1) = MAX( bvfreq(nz,node), 0.0_WP ) / (shear + epsln) ! To avoid NaNs at start END DO ! minimum Richardson number is 0 diff --git a/src/oce_ale_mixing_pp.F90 b/src/oce_ale_mixing_pp.F90 index b4c7958d2..36cf7d519 100644 --- a/src/oce_ale_mixing_pp.F90 +++ b/src/oce_ale_mixing_pp.F90 @@ -1,5 +1,5 @@ !======================================================================= -subroutine oce_mixing_pp(partit, mesh) +subroutine oce_mixing_pp(dynamics, partit, mesh) ! Compute Richardson number dependent Av and Kv following ! Pacanowski and Philander, 1981 ! Av = Avmax * factor**2 + Av0, @@ -18,6 +18,7 @@ subroutine oce_mixing_pp(partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP +USE MOD_DYN USE o_PARAM USE o_ARRAYS USE g_config @@ -26,13 +27,17 @@ subroutine oce_mixing_pp(partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit +type(t_dyn), intent(inout), target :: dynamics real(kind=WP) :: dz_inv, bv, shear, a, rho_up, rho_dn, t, s, Kv0_b integer :: node, nz, nzmax, nzmin, elem, elnodes(3), i - +real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +UVnode=>dynamics%uvnode(:,:,:) + + !___________________________________________________________________________ do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2d(node) @@ -43,8 +48,8 @@ subroutine oce_mixing_pp(partit, mesh) !!PS do nz=2,nlevels_nod2d(node)-1 do nz=nzmin+1,nzmax-1 dz_inv=1.0_WP/(Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) - shear = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 +& - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + shear = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 +& + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 shear = shear*dz_inv*dz_inv Kv(nz,node) = shear/(shear+5._WP*max(bvfreq(nz,node),0.0_WP)+1.0e-14) ! To avoid NaNs at start end do diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index be2a1de72..0bff6401b 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -207,7 +207,6 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) USE MOD_PARSUP USE MOD_DYN USE o_PARAM - USE o_ARRAYS, only: Unode use g_comm_auto IMPLICIT NONE integer :: n, nz, k, elem, nln, uln, nle, ule @@ -216,12 +215,13 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) + UVnode=>dynamics%uvnode(:,:,:) DO n=1, myDim_nod2D uln = ulevels_nod2D(n) @@ -241,11 +241,11 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) tx=tx+UV(1,nz,elem)*elem_area(elem) ty=ty+UV(2,nz,elem)*elem_area(elem) END DO - Unode(1,nz,n)=tx/tvol - Unode(2,nz,n)=ty/tvol + UVnode(1,nz,n)=tx/tvol + UVnode(2,nz,n)=ty/tvol END DO END DO - call exchange_nod(Unode, partit) + call exchange_nod(UVnode, partit) end subroutine compute_vel_nodes !=========================================================================== subroutine viscosity_filter(option, dynamics, partit, mesh) diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 52abf3e95..9045b44e3 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -220,7 +220,7 @@ MODULE o_ARRAYS real(kind=WP), allocatable,dimension(:,:,:) :: Kv_double real(kind=WP), allocatable,dimension(:) :: Kv0 !Velocities interpolated to nodes -real(kind=WP), allocatable,dimension(:,:,:) :: Unode +!!PS real(kind=WP), allocatable,dimension(:,:,:) :: Unode ! Auxiliary arrays to store Redi-GM fields real(kind=WP), allocatable,dimension(:,:,:) :: neutral_slope diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 26311d642..4c6951f9a 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -562,7 +562,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) end if !Velocities at nodes -allocate(Unode(2,nl-1,node_size)) +!!PS allocate(Unode(2,nl-1,node_size)) ! tracer gradients & RHS allocate(ttrhs(nl-1,node_size)) diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 369e3aec1..d4ad1dbba 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -41,7 +41,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use MOD_TRACER use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, Unode, CFL_z, & + use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, CFL_z, & pgf_x, pgf_y, Av, Kv use i_ARRAYS use g_comm_auto @@ -62,13 +62,14 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode real(kind=WP), dimension(:,:), pointer :: Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) + UVnode => dynamics%uvnode(:,:,:) Wvel => dynamics%w(:,:) if (mod(istep,outfreq)==0) then @@ -147,13 +148,13 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc = minval(Wvel(2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(1,1,1:myDim_nod2D)) + loc = minval(UVnode(1,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(1,2,1:myDim_nod2D)) + loc = minval(UVnode(1,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(2,1,1:myDim_nod2D)) + loc = minval(UVnode(2,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(2,2,1:myDim_nod2D)) + loc = minval(UVnode(2,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc = minval(d_eta(1:myDim_nod2D)) call MPI_AllREDUCE(loc , min_deta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) @@ -179,13 +180,13 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc = maxval(Wvel(2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(1,1,1:myDim_nod2D)) + loc = maxval(UVnode(1,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(1,2,1:myDim_nod2D)) + loc = maxval(UVnode(1,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(2,1,1:myDim_nod2D)) + loc = maxval(UVnode(2,1,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(2,2,1:myDim_nod2D)) + loc = maxval(UVnode(2,2,1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc = maxval(d_eta(1:myDim_nod2D)) call MPI_AllREDUCE(loc , max_deta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) From 75a8c82d69112a76609300792b50b1c296588bf8 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 00:35:19 +0100 Subject: [PATCH 487/909] exchange CFL_z with dynamics derived type --- src/io_blowup.F90 | 2 +- src/oce_ale.F90 | 5 +++-- src/oce_modules.F90 | 1 - src/oce_setup_step.F90 | 2 -- src/write_step_info.F90 | 10 ++++++---- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 39eda3107..d5cccd6e2 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -146,7 +146,7 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w); call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_e); call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_i); - call def_variable(bid, 'cfl_z' , (/nl-1, nod2D/) , 'vertical CFL criteria', '', CFL_z); + call def_variable(bid, 'cfl_z' , (/nl-1, nod2D/) , 'vertical CFL criteria', '', dynamics%cfl_z); !_____________________________________________________________________________ ! write snapshot ice variables to blowup file diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index d30938e10..e577e4d4a 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1844,7 +1844,7 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: fer_Wvel, fer_UV, CFL_z, water_flux, ssh_rhs, & + use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux, ssh_rhs, & ssh_rhs_old, eta_n, d_eta use o_PARAM USE MOD_PARTIT @@ -1868,7 +1868,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1877,6 +1877,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) Wvel =>dynamics%w(:,:) Wvel_e=>dynamics%w_e(:,:) Wvel_i=>dynamics%w_i(:,:) + CFL_z =>dynamics%cfl_z(:,:) !___________________________________________________________________________ ! Contributions from levels in divergence diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 9045b44e3..992dcd78f 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -188,7 +188,6 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: UV_dis_posdef_b2(:,:), UV_dis_posdef(:,:), UV_back_posdef(:,:) real(kind=WP), allocatable :: eta_n(:), d_eta(:) real(kind=WP), allocatable :: ssh_rhs(:), hpressure(:,:) -real(kind=WP), allocatable :: CFL_z(:,:) real(kind=WP), allocatable :: stress_surf(:,:) real(kind=WP), allocatable :: stress_node_surf(:,:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_x(:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 4c6951f9a..f122880c4 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -489,7 +489,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Vertical velocity and pressure ! ================ allocate( hpressure(nl,node_size)) -allocate(CFL_z(nl, node_size)) ! vertical CFL criteria allocate(bvfreq(nl,node_size),mixlay_dep(node_size),bv_ref(node_size)) ! ================ ! Ocean forcing arrays @@ -622,7 +621,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) eta_n=0.0_WP d_eta=0.0_WP ssh_rhs=0.0_WP - CFL_z =0.0_WP hpressure=0.0_WP ! heat_flux=0.0_WP diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index d4ad1dbba..a989d61da 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -41,7 +41,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use MOD_TRACER use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, CFL_z, & + use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, & pgf_x, pgf_y, Av, Kv use i_ARRAYS use g_comm_auto @@ -63,7 +63,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode - real(kind=WP), dimension(:,:), pointer :: Wvel + real(kind=WP), dimension(:,:), pointer :: Wvel, CFL_z #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -71,6 +71,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) UV => dynamics%uv(:,:,:) UVnode => dynamics%uvnode(:,:,:) Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) if (mod(istep,outfreq)==0) then @@ -264,7 +265,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) use MOD_DYN use o_PARAM use o_ARRAYS, only: eta_n, d_eta, ssh_rhs, ssh_rhs_old, water_flux, stress_surf, & - CFL_z, heat_flux, Kv, Av + heat_flux, Kv, Av use i_ARRAYS use g_comm_auto use io_BLOWUP @@ -280,13 +281,14 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:), pointer :: Wvel + real(kind=WP), dimension(:,:), pointer :: Wvel, CFL_z #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then From f576c60a69345bb05ce5490ac8ba0a6a9234555d Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 14:49:17 +0100 Subject: [PATCH 488/909] exchange ssh_rhs and ssh_rhs_old with dynamics derived type --- src/fvom_main.F90 | 5 +++ src/io_blowup.F90 | 4 +- src/io_meandata.F90 | 4 +- src/oce_ale.F90 | 83 ++++++++++++++++++++++++++++++++--------- src/oce_modules.F90 | 2 +- src/oce_setup_step.F90 | 25 +++++++++---- src/write_step_info.F90 | 8 +++- 7 files changed, 100 insertions(+), 31 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index c23cb5f76..1dc4a6abb 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -112,6 +112,7 @@ program main call setup_model(partit) ! Read Namelists, always before clock_init call clock_init(partit) ! read the clock file call get_run_steps(nsteps, partit) + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call mesh_setup'//achar(27)//'[0m' call mesh_setup(partit, mesh) if (mype==0) write(*,*) 'FESOM mesh_setup... complete' @@ -121,12 +122,16 @@ program main ! and additional arrays needed for ! fancy advection etc. !===================== + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call check_mesh_consistency'//achar(27)//'[0m' call check_mesh_consistency(partit, mesh) if (mype==0) t2=MPI_Wtime() + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call xxxx_init'//achar(27)//'[0m' call dynamics_init(dynamics, partit, mesh) call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) + + if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean_setup'//achar(27)//'[0m' call ocean_setup(dynamics, tracers, partit, mesh) if (mype==0) then diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index d5cccd6e2..4b594499b 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -105,8 +105,8 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) !___ALE related fields______________________________________________________ call def_variable(bid, 'hbar' , (/nod2D/) , 'ALE surface elevation hbar_n+0.5', 'm', hbar); !!PS call def_variable(bid, 'hbar_old' , (/nod2D/) , 'ALE surface elevation hbar_n-0.5', 'm', hbar_old); - call def_variable(bid, 'ssh_rhs' , (/nod2D/) , 'RHS for the elevation', '?', ssh_rhs); - call def_variable(bid, 'ssh_rhs_old', (/nod2D/) , 'RHS for the elevation', '?', ssh_rhs_old); + call def_variable(bid, 'ssh_rhs' , (/nod2D/) , 'RHS for the elevation', '?', dynamics%ssh_rhs); + call def_variable(bid, 'ssh_rhs_old', (/nod2D/) , 'RHS for the elevation', '?', dynamics%ssh_rhs_old); !___Define the netCDF variables for 3D fields_______________________________ call def_variable(bid, 'hnode' , (/nl-1, nod2D/) , 'ALE stuff', '?', hnode); call def_variable(bid, 'helem' , (/nl-1, elem2D/) , 'Element layer thickness', 'm/s', helem(:,:)); diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 4cf2f09cd..5b499e624 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -160,9 +160,9 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', dynamics%w(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh_rhs ') - call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', dynamics%ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh_rhs_old ') - call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', dynamics%ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) !___________________________________________________________________________________________________________________________________ ! output sea ice diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index e577e4d4a..bdd1d3b34 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -60,12 +60,14 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) type(t_dyn), intent(inout), target :: dynamics end subroutine - subroutine solve_ssh_ale(partit, mesh) + subroutine solve_ssh_ale(dynamics, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn), intent(inout), target :: dynamics end subroutine subroutine compute_hbar_ale(dynamics, partit, mesh) @@ -98,6 +100,34 @@ subroutine update_thickness_ale(partit, mesh) end interface end module +module init_ale_interface + interface + subroutine init_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + end subroutine + end interface +end module + +module init_thickness_ale_interface + interface + subroutine init_thickness_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + end subroutine + end interface +end module + module oce_timestep_ale_interface interface subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) @@ -136,11 +166,12 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! !=============================================================================== ! allocate & initialise arrays for Arbitrary-Langrangian-Eularian (ALE) method -subroutine init_ale(partit, mesh) +subroutine init_ale(dynamics, partit, mesh) USE o_PARAM USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN USE o_ARRAYS USE g_config, only: which_ale, use_cavity, use_partial_cell USE g_forcing_param, only: use_virt_salt @@ -150,6 +181,7 @@ subroutine init_ale(partit, mesh) integer :: n, nzmax, nzmin, elnodes(3), elem type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -160,7 +192,8 @@ subroutine init_ale(partit, mesh) allocate(mesh%hnode_new(1:nl-1, myDim_nod2D+eDim_nod2D)) ! ssh_rhs_old: auxiliary array to store an intermediate part of the rhs computations. - allocate(ssh_rhs_old(myDim_nod2D+eDim_nod2D)) + allocate(dynamics%ssh_rhs_old(myDim_nod2D+eDim_nod2D)) + dynamics%ssh_rhs_old = 0.0_WP ! hbar, hbar_old: correspond to the elevation, but on semi-integer time steps. allocate(mesh%hbar(myDim_nod2D+eDim_nod2D)) @@ -647,7 +680,7 @@ end subroutine init_surface_node_depth ! !=============================================================================== ! initialize thickness arrays based on the current hbar -subroutine init_thickness_ale(partit, mesh) +subroutine init_thickness_ale(dynamics, partit, mesh) ! For z-star case: we stretch scalar thicknesses (nodal) ! through nlevels_nod2D_min -2 layers. Layer nlevels_nod2D_min-1 ! should not be touched if partial cell is implemented (it is). @@ -658,17 +691,20 @@ subroutine init_thickness_ale(partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - use o_ARRAYS + USE MOD_DYN + use o_ARRAYS, only: eta_n implicit none integer :: n, nz, elem, elnodes(3), nzmin, nzmax real(kind=WP) :: dd type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - + type(t_dyn), intent(inout), target :: dynamics + real(kind=WP), dimension(:), pointer :: ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + ssh_rhs_old=>dynamics%ssh_rhs_old(:) if(mype==0) then write(*,*) '____________________________________________________________' @@ -1612,7 +1648,7 @@ end subroutine update_stiff_mat_ale subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use g_config,only: which_ALE,dt use MOD_MESH - use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, water_flux + use o_ARRAYS, only: water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1630,12 +1666,15 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_dyn), intent(inout), target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:), pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) UV_rhs=>dynamics%uv_rhs(:,:,:) + ssh_rhs=>dynamics%ssh_rhs(:) + ssh_rhs_old=>dynamics%ssh_rhs_old(:) ssh_rhs=0.0_WP !___________________________________________________________________________ @@ -1728,7 +1767,7 @@ end subroutine compute_ssh_rhs_ale subroutine compute_hbar_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, use_cavity use MOD_MESH - use o_ARRAYS, only: ssh_rhs, ssh_rhs_old, water_flux + use o_ARRAYS, only: water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1748,12 +1787,15 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:), pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) + ssh_rhs=>dynamics%ssh_rhs(:) + ssh_rhs_old=>dynamics%ssh_rhs_old(:) !___________________________________________________________________________ ! compute the rhs @@ -1844,8 +1886,8 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux, ssh_rhs, & - ssh_rhs_old, eta_n, d_eta + use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux, & + eta_n, d_eta use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1869,6 +1911,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit real(kind=WP), dimension(:,:,:), pointer :: UV real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1878,6 +1921,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) Wvel_e=>dynamics%w_e(:,:) Wvel_i=>dynamics%w_i(:,:) CFL_z =>dynamics%cfl_z(:,:) + ssh_rhs =>dynamics%ssh_rhs(:) + ssh_rhs_old =>dynamics%ssh_rhs_old(:) !___________________________________________________________________________ ! Contributions from levels in divergence @@ -2372,12 +2417,13 @@ end subroutine vert_vel_ale !=============================================================================== ! solve eq.18 in S. Danilov et al. : FESOM2: from finite elements to finite volumes. ! for (eta^(n+1)-eta^n) = d_eta -subroutine solve_ssh_ale(partit, mesh) +subroutine solve_ssh_ale(dynamics, partit, mesh) use o_PARAM use MOD_MESH use o_ARRAYS USE MOD_PARTIT USE MOD_PARSUP +USE MOD_DYN use g_comm_auto use g_config, only: which_ale ! @@ -2401,14 +2447,16 @@ subroutine solve_ssh_ale(partit, mesh) real(kind=WP), allocatable :: arr_nod2D(:),arr_nod2D2(:,:),arr_nod2D3(:) real(kind=WP) :: cssh1,cssh2,crhs integer :: i -type(t_mesh), intent(inout), target :: mesh +type(t_mesh) , intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - +type(t_dyn) , intent(inout), target :: dynamics +!!PS real(kind=WP), dimension(:), pointer :: ssh_rhs #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +!!PS ssh_rhs=>dynamics%ssh_rhs(:) Pmode = PET_BLOCKP+PET_SOLVE + PET_BICGSTAB +PET_REPORT + PET_QUIET+ PET_RCM+PET_PCBJ if (lfirst) then @@ -2422,7 +2470,7 @@ subroutine solve_ssh_ale(partit, mesh) droptol, & soltol, & part, ssh_stiff%rowptr, ssh_stiff%colind, ssh_stiff%values, & - ssh_rhs, d_eta, & + dynamics%ssh_rhs, d_eta, & rinfo, MPI_COMM_FESOM, mesh) ! ! @@ -2438,8 +2486,9 @@ subroutine solve_ssh_ale(partit, mesh) integer(kind=C_INT) :: maxiter, restart, lutype, fillin real(kind=C_DOUBLE) :: droptol, soltol integer :: n -type(t_mesh), intent(inout), target :: mesh +type(t_mesh) , intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit +type(t_dyn) , intent(inout), target :: dynamics interface @@ -2509,7 +2558,7 @@ end subroutine psolve ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) lfirst=.false. end if - call psolve(ident, ssh_rhs, ssh_stiff%values, d_eta, new_values) + call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, d_eta, new_values) #endif ! @@ -2895,7 +2944,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! Take updated ssh matrix and solve --> new ssh! t30=MPI_Wtime() - call solve_ssh_ale(partit, mesh) + call solve_ssh_ale(dynamics, partit, mesh) if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(dynamics, partit, mesh) t3=MPI_Wtime() diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 992dcd78f..b4a261c15 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -187,7 +187,7 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) real(kind=WP), allocatable :: UV_dis_posdef_b2(:,:), UV_dis_posdef(:,:), UV_back_posdef(:,:) real(kind=WP), allocatable :: eta_n(:), d_eta(:) -real(kind=WP), allocatable :: ssh_rhs(:), hpressure(:,:) +real(kind=WP), allocatable :: hpressure(:,:) real(kind=WP), allocatable :: stress_surf(:,:) real(kind=WP), allocatable :: stress_node_surf(:,:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_x(:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index f122880c4..f6e80d60b 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -89,6 +89,8 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) use Toy_Channel_Soufflet use oce_initial_state_interface use oce_adv_tra_fct_interfaces +use init_ale_interface +use init_thickness_ale_interface IMPLICIT NONE type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit @@ -117,7 +119,10 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) write(*,*) ' --> initialise ALE arrays + sparse SSH stiff matrix' write(*,*) end if - call init_ale(partit, mesh) + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_ale'//achar(27)//'[0m' + call init_ale(dynamics, partit, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_stiff_mat_ale'//achar(27)//'[0m' call init_stiff_mat_ale(partit, mesh) !!PS test !___________________________________________________________________________ @@ -145,20 +150,24 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) ! initialise fesom1.4 like KPP if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_kpp_init'//achar(27)//'[0m' call oce_mixing_kpp_init(partit, mesh) ! initialise fesom1.4 like PP elseif (mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then ! initialise cvmix_KPP elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_kpp'//achar(27)//'[0m' call init_cvmix_kpp(partit, mesh) ! initialise cvmix_PP elseif (mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_pp'//achar(27)//'[0m' call init_cvmix_pp(partit, mesh) ! initialise cvmix_TKE elseif (mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_tke'//achar(27)//'[0m' call init_cvmix_tke(partit, mesh) endif @@ -166,12 +175,14 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) ! initialise additional mixing cvmix_IDEMIX --> only in combination with ! cvmix_TKE+cvmix_IDEMIX or stand alone for debbuging as cvmix_TKE if (mod(mix_scheme_nmb,10)==6) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_idemix'//achar(27)//'[0m' call init_cvmix_idemix(partit, mesh) ! initialise additional mixing cvmix_TIDAL --> only in combination with ! KPP+cvmix_TIDAL, PP+cvmix_TIDAL, cvmix_KPP+cvmix_TIDAL, cvmix_PP+cvmix_TIDAL ! or stand alone for debbuging as cvmix_TIDAL elseif (mod(mix_scheme_nmb,10)==7) then + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_tidal'//achar(27)//'[0m' call init_cvmix_tidal(partit, mesh) end if @@ -192,7 +203,7 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) if(partit%mype==0) write(*,*) 'Arrays are set' !if(open_boundary) call set_open_boundary !TODO - + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call oce_adv_tra_fct_init'//achar(27)//'[0m' call oce_adv_tra_fct_init(tracers%work, partit, mesh) call muscl_adv_init(tracers%work, partit, mesh) !!PS test !===================== @@ -202,6 +213,7 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) if (toy_ocean) then SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call toy_channel'//achar(27)//'[0m' if (mod(mstep, soufflet_forc_update)==0) then call initial_state_soufflet(dynamics, tracers, partit, mesh) call compute_zonal_mean_ini(partit, mesh) @@ -225,7 +237,8 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) write(*,*) ' --> call init_thickness_ale' write(*,*) end if - call init_thickness_ale(partit, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_thickness_ale'//achar(27)//'[0m' + call init_thickness_ale(dynamics, partit, mesh) !___________________________________________________________________________ if(partit%mype==0) write(*,*) 'Initial state' @@ -408,11 +421,11 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) allocate(dynamics%eta_n( node_size)) allocate(dynamics%d_eta( node_size)) allocate(dynamics%ssh_rhs( node_size)) - allocate(dynamics%ssh_rhs_old(node_size)) + !!PS allocate(dynamics%ssh_rhs_old(node_size)) dynamics%eta_n = 0.0_WP dynamics%d_eta = 0.0_WP dynamics%ssh_rhs = 0.0_WP - dynamics%ssh_rhs_old= 0.0_WP +!!PS dynamics%ssh_rhs_old= 0.0_WP ! set parameters in derived type !!PS dynamics%visc_opt = visc_opt @@ -479,7 +492,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! elevation and its rhs ! ================ allocate(eta_n(node_size), d_eta(node_size)) -allocate(ssh_rhs(node_size)) ! ================ ! Monin-Obukhov ! ================ @@ -620,7 +632,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) eta_n=0.0_WP d_eta=0.0_WP - ssh_rhs=0.0_WP hpressure=0.0_WP ! heat_flux=0.0_WP diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index a989d61da..e03c5c475 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -264,7 +264,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) USE MOD_PARSUP use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, ssh_rhs, ssh_rhs_old, water_flux, stress_surf, & + use o_ARRAYS, only: eta_n, d_eta, water_flux, stress_surf, & heat_flux, Kv, Av use i_ARRAYS use g_comm_auto @@ -281,7 +281,8 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:), pointer :: Wvel, CFL_z + real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -289,6 +290,8 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) UV => dynamics%uv(:,:,:) Wvel => dynamics%w(:,:) CFL_z => dynamics%cfl_z(:,:) + ssh_rhs => dynamics%ssh_rhs(:) + ssh_rhs_old => dynamics%ssh_rhs_old(:) !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then @@ -558,3 +561,4 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) endif end subroutine + From 33ee1af41163693794be07973a3745d595bc946a Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 15:17:08 +0100 Subject: [PATCH 489/909] exchange eta_n and d_eta with dynamics derived type --- src/gen_modules_diag.F90 | 9 ++++++--- src/io_blowup.F90 | 4 ++-- src/io_meandata.F90 | 2 +- src/io_restart.F90 | 4 ++-- src/oce_ale.F90 | 20 ++++++++++++-------- src/oce_ale_vel_rhs.F90 | 4 +++- src/oce_dyn.F90 | 7 ++++--- src/oce_modules.F90 | 1 - src/oce_setup_step.F90 | 4 +--- src/oce_vel_rhs_vinv.F90 | 4 +++- src/write_step_info.F90 | 26 ++++++++++++++++---------- 11 files changed, 50 insertions(+), 35 deletions(-) diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 5015da263..a2a7fed17 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -76,17 +76,20 @@ module diagnostics ! ============================================================== !rhs_diag=ssh_rhs? -subroutine diag_solver(mode, partit, mesh) +subroutine diag_solver(mode, dynamics, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics integer, intent(in) :: mode integer :: n, is, ie logical, save :: firstcall=.true. + real(kind=WP), dimension(:) , pointer :: d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + d_eta =>dynamics%d_eta(:) !===================== if (firstcall) then !allocate the stuff at the first call @@ -677,7 +680,7 @@ subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) real(kind=WP) :: val !1. solver diagnostic - if (ldiag_solver) call diag_solver(mode, partit, mesh) + if (ldiag_solver) call diag_solver(mode, dynamics, partit, mesh) !2. compute curl(stress_surf) if (lcurt_stress_surf) call diag_curl_stress_surf(mode, partit, mesh) !3. compute curl(velocity) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 4b594499b..52b83d251 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -100,8 +100,8 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(bid, 'eta_n' , (/nod2D/) , 'sea surface elevation', 'm', eta_n); - call def_variable(bid, 'd_eta' , (/nod2D/) , 'change in ssh from solver', 'm', d_eta); + call def_variable(bid, 'eta_n' , (/nod2D/) , 'sea surface elevation', 'm', dynamics%eta_n); + call def_variable(bid, 'd_eta' , (/nod2D/) , 'change in ssh from solver', 'm', dynamics%d_eta); !___ALE related fields______________________________________________________ call def_variable(bid, 'hbar' , (/nod2D/) , 'ALE surface elevation hbar_n+0.5', 'm', hbar); !!PS call def_variable(bid, 'hbar_old' , (/nod2D/) , 'ALE surface elevation hbar_n-0.5', 'm', hbar_old); diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 5b499e624..9e53f6f1f 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -155,7 +155,7 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('sss ') call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tracers%data(2)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh ') - call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', dynamics%eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('vve_5 ') call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', dynamics%w(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index dd2f5307d..e5a3f6b5b 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -114,11 +114,11 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(oid, 'ssh', (/nod2D/), 'sea surface elevation', 'm', eta_n); + call def_variable(oid, 'ssh', (/nod2D/), 'sea surface elevation', 'm', dynamics%eta_n); !___ALE related fields______________________________________________________ call def_variable(oid, 'hbar', (/nod2D/), 'ALE surface elevation', 'm', hbar); !!PS call def_variable(oid, 'ssh_rhs', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs); - call def_variable(oid, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs_old); + call def_variable(oid, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', dynamics%ssh_rhs_old); call def_variable(oid, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); !___Define the netCDF variables for 3D fields_______________________________ diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index bdd1d3b34..9e6f0edeb 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -692,19 +692,19 @@ subroutine init_thickness_ale(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - use o_ARRAYS, only: eta_n implicit none integer :: n, nz, elem, elnodes(3), nzmin, nzmax real(kind=WP) :: dd type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn), intent(inout), target :: dynamics - real(kind=WP), dimension(:), pointer :: ssh_rhs_old + real(kind=WP), dimension(:), pointer :: ssh_rhs_old, eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" ssh_rhs_old=>dynamics%ssh_rhs_old(:) + eta_n =>dynamics%eta_n(:) if(mype==0) then write(*,*) '____________________________________________________________' @@ -1886,8 +1886,7 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux, & - eta_n, d_eta + use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1912,6 +1911,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) real(kind=WP), dimension(:,:,:), pointer :: UV real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1923,6 +1923,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) CFL_z =>dynamics%cfl_z(:,:) ssh_rhs =>dynamics%ssh_rhs(:) ssh_rhs_old =>dynamics%ssh_rhs_old(:) + eta_n =>dynamics%eta_n(:) + d_eta =>dynamics%d_eta(:) !___________________________________________________________________________ ! Contributions from levels in divergence @@ -2558,13 +2560,13 @@ end subroutine psolve ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) lfirst=.false. end if - call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, d_eta, new_values) + call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, dynamics%d_eta, new_values) #endif ! ! !___________________________________________________________________________ -call exchange_nod(d_eta, partit) !is this required after calling psolve ? +call exchange_nod(dynamics%d_eta, partit) !is this required after calling psolve ? end subroutine solve_ssh_ale ! @@ -2790,12 +2792,13 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: n, node - + real(kind=WP), dimension(:), pointer :: eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + eta_n => dynamics%eta_n(:) + t0=MPI_Wtime() ! water_flux = 0.0_WP @@ -3049,3 +3052,4 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) write(*,*) end if end subroutine oce_timestep_ale + diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 799094bdf..cabaa7fe0 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -38,7 +38,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - use o_ARRAYS, only: eta_n, coriolis, ssh_gp, pgf_x, pgf_y + use o_ARRAYS, only: coriolis, ssh_gp, pgf_x, pgf_y use i_ARRAYS use i_therm_param use o_PARAM @@ -61,6 +61,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UV_rhs + real(kind=WP), dimension(:) , pointer :: eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -69,6 +70,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) UV =>dynamics%uv(:,:,:) UV_rhs =>dynamics%uv_rhs(:,:,:) UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) + eta_n =>dynamics%eta_n(:) t1=MPI_Wtime() use_pice=0 diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 0bff6401b..8fce89659 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -163,8 +163,6 @@ SUBROUTINE update_vel(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - - USE o_ARRAYS, only: d_eta, eta_n USE o_PARAM USE g_CONFIG use g_comm_auto @@ -176,6 +174,7 @@ SUBROUTINE update_vel(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:), pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -183,7 +182,9 @@ SUBROUTINE update_vel(dynamics, partit, mesh) #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) UV_rhs=>dynamics%uv_rhs(:,:,:) - + eta_n=>dynamics%eta_n(:) + d_eta=>dynamics%d_eta(:) + DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) eta=-g*theta*dt*d_eta(elnodes) diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index b4a261c15..2173ce1d0 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -186,7 +186,6 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) real(kind=WP), allocatable :: UV_dis_posdef_b2(:,:), UV_dis_posdef(:,:), UV_back_posdef(:,:) -real(kind=WP), allocatable :: eta_n(:), d_eta(:) real(kind=WP), allocatable :: hpressure(:,:) real(kind=WP), allocatable :: stress_surf(:,:) real(kind=WP), allocatable :: stress_node_surf(:,:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index f6e80d60b..0116baf5f 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -491,7 +491,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! ================ ! elevation and its rhs ! ================ -allocate(eta_n(node_size), d_eta(node_size)) + ! ================ ! Monin-Obukhov ! ================ @@ -630,8 +630,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Initialize with zeros ! ================= - eta_n=0.0_WP - d_eta=0.0_WP hpressure=0.0_WP ! heat_flux=0.0_WP diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 index f1b87d68f..1ba0a34ee 100755 --- a/src/oce_vel_rhs_vinv.F90 +++ b/src/oce_vel_rhs_vinv.F90 @@ -118,7 +118,7 @@ end subroutine relative_vorticity ! ========================================================================== subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant USE o_PARAM - USE o_ARRAYS, only: eta_n, coriolis_node, hpressure, vorticity + USE o_ARRAYS, only: coriolis_node, hpressure, vorticity USE MOD_MESH USE MOD_PARTIT @@ -141,6 +141,7 @@ subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv real(kind=WP) :: density0_inv = 1./density_0 real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs, UV_rhsAB + real(kind=WP), dimension(:) , pointer :: eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -148,6 +149,7 @@ subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) UV_rhsAB => dynamics%uv_rhsAB(:,:,:) + eta_n =>dynamics%eta_n(:) w = 0.0_WP diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index e03c5c475..1b84011ab 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -41,7 +41,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) use MOD_TRACER use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, water_flux, heat_flux, & + use o_ARRAYS, only: water_flux, heat_flux, & pgf_x, pgf_y, Av, Kv use i_ARRAYS use g_comm_auto @@ -63,15 +63,18 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode - real(kind=WP), dimension(:,:), pointer :: Wvel, CFL_z + real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) UVnode => dynamics%uvnode(:,:,:) - Wvel => dynamics%w(:,:) - CFL_z => dynamics%cfl_z(:,:) + Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) if (mod(istep,outfreq)==0) then @@ -264,7 +267,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) USE MOD_PARSUP use MOD_DYN use o_PARAM - use o_ARRAYS, only: eta_n, d_eta, water_flux, stress_surf, & + use o_ARRAYS, only: water_flux, stress_surf, & heat_flux, Kv, Av use i_ARRAYS use g_comm_auto @@ -283,15 +286,18 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) real(kind=WP), dimension(:,:,:), pointer :: UV real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - Wvel => dynamics%w(:,:) - CFL_z => dynamics%cfl_z(:,:) - ssh_rhs => dynamics%ssh_rhs(:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) + ssh_rhs => dynamics%ssh_rhs(:) ssh_rhs_old => dynamics%ssh_rhs_old(:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) !___________________________________________________________________________ ! ! if (mod(istep,logfile_outfreq)==0) then From e34d5d50cd1fc75e4e307122da9d45c765009977 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 16:16:55 +0100 Subject: [PATCH 490/909] exchange fer_UV and fer_Wvel with dynamics derived type --- src/MOD_DYN.F90 | 22 ++++++++++++++----- src/gen_modules_diag.F90 | 10 ++++----- src/io_meandata.F90 | 6 ++--- src/oce_ale.F90 | 14 +++++++----- src/oce_ale_tracer.F90 | 9 +++++--- src/oce_fer_gm.F90 | 47 +++++++++++++++++++++++++++++++++++----- src/oce_modules.F90 | 1 - src/oce_setup_step.F90 | 12 +++++++--- 8 files changed, 90 insertions(+), 31 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index 920a77b4c..ddaad66a8 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -28,13 +28,15 @@ MODULE MOD_DYN ! option for momentum advection TYPE T_DYN ! instant zonal merdional velocity & Adams-Bashfort rhs - real(kind=WP), allocatable, dimension(:,:,:):: uv, uv_rhs, uv_rhsAB + real(kind=WP), allocatable, dimension(:,:,:):: uv, uv_rhs, uv_rhsAB, fer_uv - ! instant vertical velm explicite+implicite part - real(kind=WP), allocatable, dimension(:,:) :: w, w_e, w_i, cfl_z - + ! horizontal velocities at nodes real(kind=WP), allocatable, dimension(:,:,:):: uvnode, uvnode_rhs + ! instant vertical vel arrays + real(kind=WP), allocatable, dimension(:,:) :: w, w_e, w_i, cfl_z, fer_w + + ! sea surface height arrays real(kind=WP), allocatable, dimension(:) :: eta_n, d_eta, ssh_rhs, ssh_rhs_old ! summarizes solver input parameter @@ -112,9 +114,13 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) call write_bin_array(dynamics%w , unit, iostat, iomsg) call write_bin_array(dynamics%w_e , unit, iostat, iomsg) call write_bin_array(dynamics%w_i , unit, iostat, iomsg) - call write_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + if (Fer_GM) then + call write_bin_array(dynamics%fer_w , unit, iostat, iomsg) + call write_bin_array(dynamics%fer_uv , unit, iostat, iomsg) + end if + !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc @@ -151,9 +157,13 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) call read_bin_array(dynamics%w , unit, iostat, iomsg) call read_bin_array(dynamics%w_e , unit, iostat, iomsg) call read_bin_array(dynamics%w_i , unit, iostat, iomsg) - call read_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + if (Fer_GM) then + call read_bin_array(dynamics%fer_w , unit, iostat, iomsg) + call read_bin_array(dynamics%fer_uv , unit, iostat, iomsg) + end if + !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index a2a7fed17..ddb845731 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -430,15 +430,15 @@ subroutine diag_densMOC(mode, dynamics, tracers, partit, mesh) real(kind=WP), save, allocatable :: std_dens_w(:,:), std_dens_VOL1(:,:), std_dens_VOL2(:,:) logical, save :: firstcall_s=.true., firstcall_e=.true. real(kind=WP), dimension(:,:), pointer :: temp, salt - real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - - temp=>tracers%data(1)%values(:,:) - salt=>tracers%data(2)%values(:,:) + UV => dynamics%uv(:,:,:) + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + fer_UV => dynamics%fer_uv(:,:,:) if (firstcall_s) then !allocate the stuff at the first call allocate(std_dens_UVDZ(2,std_dens_N, myDim_elem2D)) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 9e53f6f1f..0b1e3c7ee 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -352,15 +352,15 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) ! output Ferrari/GM parameterisation CASE ('bolus_u ') if (Fer_GM) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_u', 'GM bolus velocity U','m/s', fer_uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_u', 'GM bolus velocity U','m/s', dynamics%fer_uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('bolus_v ') if (Fer_GM) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_v', 'GM bolus velocity V','m/s', fer_uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_v', 'GM bolus velocity V','m/s', dynamics%fer_uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('bolus_w ') if (Fer_GM) then - call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'bolus_w', 'GM bolus velocity W','m/s', fer_Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'bolus_w', 'GM bolus velocity W','m/s', dynamics%fer_w(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('fer_K ') if (Fer_GM) then diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 9e6f0edeb..b97726074 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1886,7 +1886,7 @@ end subroutine compute_hbar_ale subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use o_ARRAYS, only: fer_Wvel, fer_UV, water_flux + use o_ARRAYS, only: water_flux use o_PARAM USE MOD_PARTIT USE MOD_PARSUP @@ -1908,8 +1908,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z + real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z, fer_Wvel real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old real(kind=WP), dimension(:) , pointer :: eta_n, d_eta #include "associate_part_def.h" @@ -1925,7 +1925,10 @@ subroutine vert_vel_ale(dynamics, partit, mesh) ssh_rhs_old =>dynamics%ssh_rhs_old(:) eta_n =>dynamics%eta_n(:) d_eta =>dynamics%d_eta(:) - + if (Fer_GM) then + fer_UV =>dynamics%fer_uv(:,:,:) + fer_Wvel =>dynamics%fer_w(:,:) + end if !___________________________________________________________________________ ! Contributions from levels in divergence Wvel=0.0_WP @@ -2784,6 +2787,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use solve_tracers_ale_interface use write_step_info_interface use check_blowup_interface + use fer_solve_interface IMPLICIT NONE type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -2990,7 +2994,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) if (Fer_GM) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call fer_solve_Gamma'//achar(27)//'[0m' call fer_solve_Gamma(partit, mesh) - call fer_gamma2vel(partit, mesh) + call fer_gamma2vel(dynamics, partit, mesh) end if t6=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 5335528d6..78a8e5e94 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -140,7 +140,6 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use g_config use o_PARAM, only: SPP, Fer_GM - use o_arrays, only: fer_Wvel, fer_UV use mod_mesh USE MOD_PARTIT USE MOD_PARSUP @@ -158,8 +157,8 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit integer :: tr_num, node, nzmax, nzmin - real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e + real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, fer_Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -168,6 +167,10 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) UV => dynamics%uv(:,:,:) Wvel => dynamics%w(:,:) Wvel_e => dynamics%w_e(:,:) + if (Fer_GM) then + fer_UV => dynamics%fer_uv(:,:,:) + fer_Wvel => dynamics%fer_w(:,:) + end if !___________________________________________________________________________ if (SPP) call cal_rejected_salt(partit, mesh) diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index ab12e49ae..7db79c91e 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -1,3 +1,35 @@ +module fer_solve_interface + interface + subroutine fer_solve_Gamma(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + + subroutine fer_gamma2vel(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + end subroutine + + subroutine init_Redi_GM(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module + + + !--------------------------------------------------------------------------- !Implementation of Gent & McWiliams parameterization after R. Ferrari et al., 2010 !Contains: @@ -127,12 +159,13 @@ END subroutine fer_solve_Gamma ! ! !==================================================================== -subroutine fer_gamma2vel(partit, mesh) +subroutine fer_gamma2vel(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN USE o_PARAM - USE o_ARRAYS, ONLY: fer_gamma, fer_uv + USE o_ARRAYS, ONLY: fer_gamma USE g_CONFIG use g_comm_auto IMPLICIT NONE @@ -140,14 +173,18 @@ subroutine fer_gamma2vel(partit, mesh) integer :: nz, nzmax, el, elnod(3), nzmin real(kind=WP) :: zinv real(kind=WP) :: onethird=1._WP/3._WP + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - + type(t_mesh) , intent(in), target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: fer_UV + real(kind=WP), dimension(:,:) , pointer :: fer_Wvel #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + fer_UV =>dynamics%fer_uv(:,:,:) + fer_Wvel =>dynamics%fer_w(:,:) + DO el=1, myDim_elem2D elnod=elem2D_nodes(:,el) ! max. number of levels at element el diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 2173ce1d0..5f613cb82 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -254,7 +254,6 @@ MODULE o_ARRAYS !GM_stuff real(kind=WP),allocatable :: bvfreq(:,:),mixlay_dep(:),bv_ref(:) -real(kind=WP), allocatable :: fer_UV(:,:,:), fer_wvel(:,:) real(kind=WP), target, allocatable :: fer_c(:), fer_scal(:), fer_K(:,:), fer_gamma(:,:,:) real(kind=WP), allocatable :: ice_rejected_salt(:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 0116baf5f..6a0aa8e05 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -353,6 +353,7 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN + USE o_param IMPLICIT NONE integer :: elem_size, node_size integer, save :: nm_unit = 104 ! unit to open namelist file, skip 100-102 for cray @@ -425,6 +426,14 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%eta_n = 0.0_WP dynamics%d_eta = 0.0_WP dynamics%ssh_rhs = 0.0_WP + + if (Fer_GM) then + allocate(dynamics%fer_uv(2, nl-1, elem_size)) + allocate(dynamics%fer_w( nl, node_size)) + dynamics%fer_uv = 0.0_WP + dynamics%fer_w = 0.0_WP + end if + !!PS dynamics%ssh_rhs_old= 0.0_WP ! set parameters in derived type @@ -612,10 +621,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) if (Fer_GM) then allocate(fer_c(node_size),fer_scal(node_size), fer_gamma(2, nl, node_size), fer_K(nl, node_size)) - allocate(fer_wvel(nl, node_size), fer_UV(2, nl-1, elem_size)) fer_gamma=0.0_WP - fer_uv=0.0_WP - fer_wvel=0.0_WP fer_K=500._WP fer_c=1._WP fer_scal = 0.0_WP From 5e98b2cfb9b15a03ce3614d921f19b0c7d6a1f31 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 16:25:30 +0100 Subject: [PATCH 491/909] exchange Unode_rhs with dynamics derived type --- src/oce_ale_vel_rhs.F90 | 50 ++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index cabaa7fe0..71e0487c2 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -204,17 +204,17 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) integer :: nl1, nl2, ul1, ul2, nod(2), el, ed, k, nle, ule real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) -real(kind=WP) :: Unode_rhs(2,mesh%nl-1,partit%myDim_nod2d+partit%eDim_nod2D) -real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB +real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UVnode_rhs real(kind=WP), dimension(:,:), pointer :: Wvel_e #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV =>dynamics%uv(:,:,:) - UV_rhsAB=>dynamics%uv_rhsAB(:,:,:) - Wvel_e =>dynamics%w_e(:,:) + UV =>dynamics%uv(:,:,:) + UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) + UVnode_rhs=>dynamics%uvnode_rhs(:,:,:) + Wvel_e =>dynamics%w_e(:,:) !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz @@ -261,15 +261,15 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) !!PS if (ul1>1) write(*,*) mype, wu(ul1:nl1) ! Here 1/3 because 1/3 of the area is related to the node --> comes from ! averaging the elemental velocities - Unode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) - Unode_rhs(2,nz,n) = - (wv(nz) - wv(nz+1) ) / (3._WP*hnode(nz,n)) + UVnode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) + UVnode_rhs(2,nz,n) = - (wv(nz) - wv(nz+1) ) / (3._WP*hnode(nz,n)) enddo !_______________________________________________________________________ ! To get a clean checksum, set the remaining values to zero - Unode_rhs(1:2,nl1+1:nl-1,n) = 0._WP - Unode_rhs(1:2,1:ul1-1 ,n) = 0._WP + UVnode_rhs(1:2,nl1+1:nl-1,n) = 0._WP + UVnode_rhs(1:2,1:ul1-1 ,n) = 0._WP end do @@ -327,8 +327,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(1) <= myDim_nod2d) then do nz=min(ul1,ul2), max(nl1,nl2) ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) - Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) + UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) + UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) end do endif @@ -336,8 +336,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(2) <= myDim_nod2d) then do nz=min(ul1,ul2), max(nl1,nl2) ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) - Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) + UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) + UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) end do endif @@ -346,8 +346,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(1) <= myDim_nod2d) then do nz=ul1, nl1 ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) - Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) end do ! --> do nz=ul1, nl1 endif @@ -356,8 +356,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) !!PS do nz=1, nl1 do nz=ul1, nl1 ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) - Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) + UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) + UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) end do ! --> do nz=ul1, nl1 endif endif ! --> if (el2>0) then @@ -368,14 +368,14 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) do n=1,myDim_nod2d nl1 = nlevels_nod2D(n)-1 ul1 = ulevels_nod2D(n) -!!PS Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity -!!PS Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity - Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) - Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) +!!PS UVnode_rhs(1,ul1:nl1,n) = UVnode_rhs(1,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity +!!PS UVnode_rhs(2,ul1:nl1,n) = UVnode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity + UVnode_rhs(1,ul1:nl1,n) = UVnode_rhs(1,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) + UVnode_rhs(2,ul1:nl1,n) = UVnode_rhs(2,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) end do !-->do n=1,myDim_nod2d !___________________________________________________________________________ - call exchange_nod(Unode_rhs, partit) + call exchange_nod(UVnode_rhs, partit) !___________________________________________________________________________ ! convert total nodal advection from vertice --> elements @@ -383,9 +383,9 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) nl1 = nlevels(el)-1 ul1 = ulevels(el) UV_rhsAB(1:2,ul1:nl1,el) = UV_rhsAB(1:2,ul1:nl1,el) & - + elem_area(el)*(Unode_rhs(1:2,ul1:nl1,elem2D_nodes(1,el)) & - + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(2,el)) & - + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP + + elem_area(el)*(UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(1,el)) & + + UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(2,el)) & + + UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP end do ! --> do el=1, myDim_elem2D end subroutine momentum_adv_scalar From ab3dd15af8a4872782e0014ea7dadc11bf5d2827 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 17:20:48 +0100 Subject: [PATCH 492/909] exchange flags of solver with flags from dynamics drived type --- src/MOD_DYN.F90 | 16 ++-- src/oce_ale.F90 | 215 ++++++++++++++++++------------------------------ 2 files changed, 88 insertions(+), 143 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index ddaad66a8..39bf61d0f 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -11,14 +11,14 @@ MODULE MOD_DYN ! !_______________________________________________________________________________ TYPE T_solverinfo - integer :: maxiter=2000 - integer :: restarts=15 - integer :: fillin=3 - integer :: lutype=2 - integer :: nrhs=1 - real(kind=WP) :: droptol=1.e-7 - real(kind=WP) :: soltol =1e-10 !1.e-10 - logical :: lfirst=.true. + integer :: ident = 1 + integer :: maxiter = 2000 + integer :: restart = 15 + integer :: fillin = 3 + integer :: lutype = 2 + real(kind=WP) :: droptol=1.e-8 + real(kind=WP) :: soltol =1e-10 !1.e-10 + END TYPE T_solverinfo ! diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index b97726074..88a751677 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2423,153 +2423,98 @@ end subroutine vert_vel_ale ! solve eq.18 in S. Danilov et al. : FESOM2: from finite elements to finite volumes. ! for (eta^(n+1)-eta^n) = d_eta subroutine solve_ssh_ale(dynamics, partit, mesh) -use o_PARAM -use MOD_MESH -use o_ARRAYS -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_DYN -use g_comm_auto -use g_config, only: which_ale - ! - ! - !___USE PETSC SOLVER________________________________________________________ - ! this is not longer used but is still kept in the code -#ifdef PETSC -implicit none -#include "petscf.h" -integer :: myrows -integer :: Pmode -real(kind=WP) :: rinfo(20,20) -integer :: maxiter=2000 -integer :: restarts=15 -integer :: fillin=3 -integer :: lutype=2 -integer :: nrhs=1 -real(kind=WP) :: droptol=1.e-7 -real(kind=WP) :: soltol =1e-10 !1.e-10 -logical, save :: lfirst=.true. -real(kind=WP), allocatable :: arr_nod2D(:),arr_nod2D2(:,:),arr_nod2D3(:) -real(kind=WP) :: cssh1,cssh2,crhs -integer :: i -type(t_mesh) , intent(inout), target :: mesh -type(t_partit), intent(inout), target :: partit -type(t_dyn) , intent(inout), target :: dynamics -!!PS real(kind=WP), dimension(:), pointer :: ssh_rhs - -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" -!!PS ssh_rhs=>dynamics%ssh_rhs(:) - -Pmode = PET_BLOCKP+PET_SOLVE + PET_BICGSTAB +PET_REPORT + PET_QUIET+ PET_RCM+PET_PCBJ -if (lfirst) then - Pmode = Pmode+PET_STRUCT+PET_PMVALS + PET_PCASM+PET_OVL_2 !+PET_PCBJ+PET_ILU - lfirst=.false. -end if -call PETSC_S(Pmode, 1, ssh_stiff%dim, ssh_stiff%nza, myrows, & - maxiter, & - restarts, & - fillin, & - droptol, & - soltol, & - part, ssh_stiff%rowptr, ssh_stiff%colind, ssh_stiff%values, & - dynamics%ssh_rhs, d_eta, & - rinfo, MPI_COMM_FESOM, mesh) - ! - ! - !___USE PARMS SOLVER (recommended)__________________________________________ -#elif defined(PARMS) - - use iso_c_binding, only: C_INT, C_DOUBLE - implicit none + use o_PARAM + use MOD_MESH + use o_ARRAYS + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + use g_comm_auto + use g_config, only: which_ale + use iso_c_binding, only: C_INT, C_DOUBLE + implicit none #include "fparms.h" -logical, save :: lfirst=.true. -integer(kind=C_INT) :: ident -integer(kind=C_INT) :: n3, reuse, new_values -integer(kind=C_INT) :: maxiter, restart, lutype, fillin -real(kind=C_DOUBLE) :: droptol, soltol -integer :: n -type(t_mesh) , intent(inout), target :: mesh -type(t_partit), intent(inout), target :: partit -type(t_dyn) , intent(inout), target :: dynamics - - -interface - subroutine psolver_init(ident, SOL, PCGLOB, PCLOC, lutype, & - fillin, droptol, maxiter, restart, soltol, & - part, rowptr, colind, values, reuse, MPI_COMM) bind(C) - use iso_c_binding, only: C_INT, C_DOUBLE - integer(kind=C_INT) :: ident, SOL, PCGLOB, PCLOC, lutype, & - fillin, maxiter, restart, & - part(*), rowptr(*), colind(*), reuse, MPI_COMM - real(kind=C_DOUBLE) :: droptol, soltol, values(*) - end subroutine psolver_init -end interface -interface - subroutine psolve(ident, ssh_rhs, values, d_eta, newvalues) bind(C) - - use iso_c_binding, only: C_INT, C_DOUBLE - integer(kind=C_INT) :: ident, newvalues - real(kind=C_DOUBLE) :: values(*), ssh_rhs(*), d_eta(*) - - end subroutine psolve -end interface + logical, save :: lfirst=.true. + integer(kind=C_INT) :: n3, reuse, new_values + integer :: n + type(t_mesh) , intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + real(kind=C_DOUBLE), pointer :: droptol, soltol + integer(kind=C_INT), pointer :: maxiter, restart, lutype, fillin, ident + + interface + subroutine psolver_init(ident, SOL, PCGLOB, PCLOC, lutype, & + fillin, droptol, maxiter, restart, soltol, & + part, rowptr, colind, values, reuse, MPI_COMM) bind(C) + use iso_c_binding, only: C_INT, C_DOUBLE + integer(kind=C_INT) :: ident, SOL, PCGLOB, PCLOC, lutype, & + fillin, maxiter, restart, & + part(*), rowptr(*), colind(*), reuse, MPI_COMM + real(kind=C_DOUBLE) :: droptol, soltol, values(*) + end subroutine psolver_init + end interface + interface + subroutine psolve(ident, ssh_rhs, values, d_eta, newvalues) bind(C) + use iso_c_binding, only: C_INT, C_DOUBLE + integer(kind=C_INT) :: ident, newvalues + real(kind=C_DOUBLE) :: values(*), ssh_rhs(*), d_eta(*) + end subroutine psolve + end interface #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + ident => dynamics%solverinfo%ident + maxiter => dynamics%solverinfo%maxiter + restart => dynamics%solverinfo%restart + lutype => dynamics%solverinfo%lutype + fillin => dynamics%solverinfo%fillin + droptol => dynamics%solverinfo%droptol + soltol => dynamics%solverinfo%soltol + + if (trim(which_ale)=='linfs') then + reuse=0 + new_values=0 + else + reuse=1 ! For varying coefficients, set reuse=1 + new_values=1 !PS 1 ! and new_values=1, as soon as the coefficients have changed + end if -ident=1 -maxiter=2000 -restart=15 -fillin=3 -lutype=2 -droptol=1.e-8 -soltol=1.e-10 - -if (trim(which_ale)=='linfs') then - reuse=0 - new_values=0 -else - reuse=1 ! For varying coefficients, set reuse=1 - new_values=1 !PS 1 ! and new_values=1, as soon as the coefficients have changed -end if - -! reuse=0: matrix remains static -! reuse=1: keeps a copy of the matrix structure to apply scaling of the matrix fast - -! new_values=0: matrix coefficients unchanged (compared to the last call of psolve) -! new_values=1: replaces the matrix values (keeps the structure and the preconditioner) -! new_values=2: replaces the matrix values and recomputes the preconditioner (keeps the structure) - -! new_values>0 requires reuse=1 in psolver_init! - -if (lfirst) then - ! Set SOLCG for CG solver (symmetric, positiv definit matrices only, no precond available!!) - ! SOLBICGS for BiCGstab solver (arbitrary matrices) - ! SOLBICGS_RAS for BiCGstab solver (arbitrary matrices) with integrated RAS - the global - ! preconditioner setting is ignored! It saves a 4 vector copies per iteration - ! compared to SOLBICGS + PCRAS. - ! SOLPBICGS for pipelined BiCGstab solver (arbitrary matrices) - ! Should scale better than SOLBICGS, but be careful, it is still experimental. - ! SOLPBICGS_RAS is SOLPBICGS with integrated RAS (global preconditioner setting is ignored!) - ! for even better scalability, well, in the end, it does not matter much. - call psolver_init(ident, SOLBICGS_RAS, PCRAS, PCILUK, lutype, & - fillin, droptol, maxiter, restart, soltol, & - part-1, ssh_stiff%rowptr(:)-ssh_stiff%rowptr(1), & - ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) - lfirst=.false. -end if + ! reuse=0: matrix remains static + ! reuse=1: keeps a copy of the matrix structure to apply scaling of the matrix fast + + ! new_values=0: matrix coefficients unchanged (compared to the last call of psolve) + ! new_values=1: replaces the matrix values (keeps the structure and the preconditioner) + ! new_values=2: replaces the matrix values and recomputes the preconditioner (keeps the structure) + + ! new_values>0 requires reuse=1 in psolver_init! + + if (lfirst) then + ! Set SOLCG for CG solver (symmetric, positiv definit matrices only, no precond available!!) + ! SOLBICGS for BiCGstab solver (arbitrary matrices) + ! SOLBICGS_RAS for BiCGstab solver (arbitrary matrices) with integrated RAS - the global + ! preconditioner setting is ignored! It saves a 4 vector copies per iteration + ! compared to SOLBICGS + PCRAS. + ! SOLPBICGS for pipelined BiCGstab solver (arbitrary matrices) + ! Should scale better than SOLBICGS, but be careful, it is still experimental. + ! SOLPBICGS_RAS is SOLPBICGS with integrated RAS (global preconditioner setting is ignored!) + ! for even better scalability, well, in the end, it does not matter much. + call psolver_init(ident, SOLBICGS_RAS, PCRAS, PCILUK, lutype, & + fillin, droptol, maxiter, restart, soltol, & + part-1, ssh_stiff%rowptr(:)-ssh_stiff%rowptr(1), & + ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) + lfirst=.false. + end if + call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, dynamics%d_eta, new_values) -#endif + ! ! !___________________________________________________________________________ -call exchange_nod(dynamics%d_eta, partit) !is this required after calling psolve ? + call exchange_nod(dynamics%d_eta, partit) !is this required after calling psolve ? end subroutine solve_ssh_ale ! From 1563e44ed2269d26fdb9b2f9e749613ce6423c7b Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 17:30:12 +0100 Subject: [PATCH 493/909] update layout --- src/MOD_DYN.F90 | 5 ++--- src/oce_ale.F90 | 7 ++++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index 39bf61d0f..a17fd0651 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -16,9 +16,8 @@ MODULE MOD_DYN integer :: restart = 15 integer :: fillin = 3 integer :: lutype = 2 - real(kind=WP) :: droptol=1.e-8 - real(kind=WP) :: soltol =1e-10 !1.e-10 - + real(kind=WP) :: droptol = 1.e-8 + real(kind=WP) :: soltol = 1e-10 !1.e-10 END TYPE T_solverinfo ! diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 88a751677..693bc5047 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2491,6 +2491,8 @@ end subroutine psolve ! new_values>0 requires reuse=1 in psolver_init! + ! + !___________________________________________________________________________ if (lfirst) then ! Set SOLCG for CG solver (symmetric, positiv definit matrices only, no precond available!!) ! SOLBICGS for BiCGstab solver (arbitrary matrices) @@ -2507,11 +2509,10 @@ end subroutine psolve ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) lfirst=.false. end if - + ! + !___________________________________________________________________________ call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, dynamics%d_eta, new_values) - - ! ! !___________________________________________________________________________ call exchange_nod(dynamics%d_eta, partit) !is this required after calling psolve ? From 46f414ce8e55bcd6f4ea4bf55d0581a9fa1a29d2 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 17:37:54 +0100 Subject: [PATCH 494/909] move subroutine relative_vorticity to oce_dyn.F90 and delete vector invariant momentum advection../src/oce_vel_rhs_vinv.F90 --- src/oce_ale.F90 | 13 +- src/oce_dyn.F90 | 119 ++++++++++++- src/oce_vel_rhs_vinv.F90 | 353 --------------------------------------- 3 files changed, 117 insertions(+), 368 deletions(-) delete mode 100755 src/oce_vel_rhs_vinv.F90 diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 693bc5047..561d5c2b2 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2862,18 +2862,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_vel_rhs'//achar(27)//'[0m' - -!!PS if (any(UV_rhs/=UV_rhs)) write(*,*) n, mype,' --> found NaN UV_rhs before compute_vel_rhs' -!!PS if (any(UV/=UV)) write(*,*) n, mype,' --> found NaN UV before compute_vel_rhs' -!!PS if (any(ssh_rhs/=ssh_rhs)) write(*,*) n, mype,' --> found NaN ssh_rhs before compute_vel_rhs' -!!PS if (any(ssh_rhs_old/=ssh_rhs_old)) write(*,*) n, mype,' --> found NaN ssh_rhs_old before compute_vel_rhs' -!!PS if (any(abs(Wvel_e)>1.0e20)) write(*,*) n, mype,' --> found Inf Wvel_e before compute_vel_rhs' - - if(mom_adv/=3) then - call compute_vel_rhs(dynamics, partit, mesh) - else - call compute_vel_rhs_vinv(partit, mesh) - end if + call compute_vel_rhs(dynamics, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call viscosity_filter'//achar(27)//'[0m' diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 8fce89659..a02c03d4a 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -153,6 +153,20 @@ subroutine uke_update(dynamics, partit, mesh) end interface end module +module relative_vorticity_interface + interface + subroutine relative_vorticity(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + + end subroutine + end interface +end module ! =================================================================== ! Contains routines needed for computations of dynamics. @@ -582,6 +596,7 @@ SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) USE o_PARAM USE g_CONFIG use g_comm_auto + use relative_vorticity_interface IMPLICIT NONE real(kind=WP) :: dz, div_elem(3), xe, ye, vi integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 @@ -597,7 +612,7 @@ SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) #include "associate_mesh_ass.h" Wvel =>dynamics%w(:,:) ! - if(mom_adv<4) call relative_vorticity(partit, mesh) !!! vorticity array should be allocated + if(mom_adv<4) call relative_vorticity(dynamics, partit, mesh) !!! vorticity array should be allocated ! Fill in viscosity: Visc = 0.0_WP DO elem=1, myDim_elem2D !! m=1, myDim_elem2D @@ -1321,7 +1336,105 @@ SUBROUTINE uke_update(dynamics, partit, mesh) deallocate(uuu) end subroutine uke_update +! +! +!_______________________________________________________________________________ +subroutine relative_vorticity(dynamics, partit, mesh) + USE o_ARRAYS, only: vorticity + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + use g_comm_auto + IMPLICIT NONE + integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 + + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) -! =================================================================== - + !!PS DO n=1,myDim_nod2D + !!PS nl1 = nlevels_nod2D(n)-1 + !!PS ul1 = ulevels_nod2D(n) + !!PS vorticity(ul1:nl1,n)=0.0_WP + !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 + !!PS !!PS vorticity(nz,n)=0.0_WP + !!PS !!PS END DO + !!PS END DO + vorticity(:,1:myDim_nod2D) = 0.0_WP + DO edge=1,myDim_edge2D + !! edge=myList_edge2D(m) + enodes=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + ul1=ulevels(el(1)) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + nl2=0 + ul2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + nl2=nlevels(el(2))-1 + ul2=ulevels(el(2)) + end if + nl12 = min(nl1,nl2) + ul12 = max(ul1,ul2) + + DO nz=ul1,ul12-1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + if (ul2>0) then + DO nz=ul2,ul12-1 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + endif + !!PS DO nz=1,min(nl1,nl2) + DO nz=ul12,nl12 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & + deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl1 + DO nz=nl12+1,nl1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl2 + DO nz=nl12+1,nl2 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + END DO + + ! vorticity = vorticity*area at this stage + ! It is correct only on myDim nodes + DO n=1,myDim_nod2D + !! n=myList_nod2D(m) + ul1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !!PS DO nz=1,nlevels_nod2D(n)-1 + DO nz=ul1,nl1-1 + vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) + END DO + END DO + + call exchange_nod(vorticity, partit) + +! Now it the relative vorticity known on neighbors too +end subroutine relative_vorticity diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 deleted file mode 100755 index 1ba0a34ee..000000000 --- a/src/oce_vel_rhs_vinv.F90 +++ /dev/null @@ -1,353 +0,0 @@ -module relative_vorticity_interface - interface - subroutine relative_vorticity(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module - -! Vector invariant momentum advection: -! (curl u+f)\times u+grad(u^2/2)+w du/dz -! -! =================================================================== -subroutine relative_vorticity(dynamics, partit, mesh) - USE o_ARRAYS, only: vorticity - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - use g_comm_auto - IMPLICIT NONE - integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 - real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - - !!PS DO n=1,myDim_nod2D - !!PS nl1 = nlevels_nod2D(n)-1 - !!PS ul1 = ulevels_nod2D(n) - !!PS vorticity(ul1:nl1,n)=0.0_WP - !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 - !!PS !!PS vorticity(nz,n)=0.0_WP - !!PS !!PS END DO - !!PS END DO - vorticity(:,1:myDim_nod2D) = 0.0_WP - DO edge=1,myDim_edge2D - !! edge=myList_edge2D(m) - enodes=edges(:,edge) - el=edge_tri(:,edge) - nl1=nlevels(el(1))-1 - ul1=ulevels(el(1)) - deltaX1=edge_cross_dxdy(1,edge) - deltaY1=edge_cross_dxdy(2,edge) - nl2=0 - ul2=0 - if(el(2)>0) then - deltaX2=edge_cross_dxdy(3,edge) - deltaY2=edge_cross_dxdy(4,edge) - nl2=nlevels(el(2))-1 - ul2=ulevels(el(2)) - end if - nl12 = min(nl1,nl2) - ul12 = max(ul1,ul2) - - DO nz=ul1,ul12-1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - if (ul2>0) then - DO nz=ul2,ul12-1 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - endif - !!PS DO nz=1,min(nl1,nl2) - DO nz=ul12,nl12 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & - deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl1 - DO nz=nl12+1,nl1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl2 - DO nz=nl12+1,nl2 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - END DO - - ! vorticity = vorticity*area at this stage - ! It is correct only on myDim nodes - DO n=1,myDim_nod2D - !! n=myList_nod2D(m) - ul1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - !!PS DO nz=1,nlevels_nod2D(n)-1 - DO nz=ul1,nl1-1 - vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) - END DO - END DO - - call exchange_nod(vorticity, partit) - -! Now it the relative vorticity known on neighbors too -end subroutine relative_vorticity -! ========================================================================== -subroutine compute_vel_rhs_vinv(dynamics, partit, mesh) !vector invariant - USE o_PARAM - USE o_ARRAYS, only: coriolis_node, hpressure, vorticity - - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - USE g_CONFIG - use g_comm_auto - use relative_vorticity_interface - IMPLICIT NONE - - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - integer :: n, n1, nz, elem, elnodes(3), nl1, j, nzmin,nzmax - real(kind=WP) :: a, b, c, da, db, dc, dg, ff(3), gg, eta(3), pre(3), Fx, Fy,w - real(kind=WP) :: uvert(mesh%nl,2), umean, vmean, friction - logical, save :: lfirst=.true. - real(kind=WP) :: KE_node(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv - real(kind=WP) :: density0_inv = 1./density_0 - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs, UV_rhsAB - real(kind=WP), dimension(:) , pointer :: eta_n -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - UV_rhsAB => dynamics%uv_rhsAB(:,:,:) - eta_n =>dynamics%eta_n(:) - - w = 0.0_WP - - uvert=0.0_WP - - ! ====================== - ! Kinetic energy at nodes: - ! ====================== - - - KE_node(:,:)=0.0_WP - - DO elem=1, myDim_elem2D - !! elem=myList_elem2D(m) - elnodes=elem2D_nodes(:,elem) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - DO j=1,3 !NR interchange loops => nz-loop vectorizes - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - KE_node(nz,elnodes(j)) = KE_node(nz,elnodes(j))+(UV(1,nz,elem)*UV(1,nz,elem) & - +UV(2,nz,elem)*UV(2,nz,elem))*elem_area(elem) !NR/6.0_WP below - END DO - END DO - END DO - - DO n=1,myDim_nod2D - !! n=myList_nod2D(m) - nzmin = ulevels_nod2D(n) - nzmax = nlevels_nod2D(n) - !!PS DO nz=1, nlevels_nod2D(n)-1 - DO nz=nzmin, nzmax-1 - !DO nz=1, nl-1 - KE_node(nz,n)=KE_node(nz,n)/(6._WP*areasvol(nz,n)) !NR divide by 6 here - END DO - END DO - - ! Set the kinetic energy to zero at lateral walls: - DO n=1,myDim_edge2D - !! n=myList_edge2D(m) - if(myList_edge2D(n) > edge2D_in) then - elnodes(1:2)=edges(:,n) - KE_node(:,elnodes(1:2))=0.0_WP - endif - end DO - - call exchange_nod(KE_node, partit) - ! Now gradients of KE will be correct on myDim_elem2D - - ! ================== - ! AB contribution from the old time step - ! ================== - Do elem=1, myDim_elem2D !! P (a) - !! elem=myList_elem2D(m) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nl-1 - DO nz=nzmin,nzmax-1 - UV_rhs(1,nz,elem)=-(0.5_WP+epsilon)*UV_rhsAB(1,nz,elem) - UV_rhs(2,nz,elem)=-(0.5_WP+epsilon)*UV_rhsAB(2,nz,elem) - END DO - END DO - - call relative_vorticity(dynamics, partit, mesh) - ! ==================== - ! Sea level and pressure contribution -\nabla(g\eta +hpressure/rho_0+V^2/2) - ! and the Coriolis force (elemental part) - ! ==================== - - !DS KE_node=0. !DS - !DS vorticity=0. !DS - DO elem=1, myDim_elem2D !! P (b) elem=1,elem2D - !! elem=myList_elem2D(m) - elnodes = elem2D_nodes(:,elem) - eta = g*eta_n(elnodes) - gg = elem_area(elem) - ff = coriolis_node(elnodes) - - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - pre = -(eta + hpressure(nz,elnodes)*density0_inv) - Fx = sum(gradient_sca(1:3,elem)*pre) - Fy = sum(gradient_sca(4:6,elem)*pre) - UV_rhs(1,nz,elem) = UV_rhs(1,nz,elem)+Fx*gg - UV_rhs(2,nz,elem) = UV_rhs(2,nz,elem)+Fy*gg - - pre = -KE_node(nz,elnodes) - Fx = sum(gradient_sca(1:3,elem)*pre) - Fy = sum(gradient_sca(4:6,elem)*pre) - - da = UV(2,nz,elem)*sum(ff+vorticity(nz,elnodes))/3.0_WP - db =-UV(1,nz,elem)*sum(ff+vorticity(nz,elnodes))/3.0_WP - - UV_rhsAB(1,nz,elem)=(da+Fx)*gg - UV_rhsAB(2,nz,elem)=(db+Fy)*gg - - END DO - END DO - ! ======================= - ! Compute w du/dz at elements: wdu/dz=d(wu)/dz-udw/dz - ! The central estimate of u in the flux term will correspond to energy - ! conservation - ! ======================= - - !NR precompute - DO nz=2,nl-1 - dZ_inv(nz) = 1.0_WP/(Z(nz-1)-Z(nz)) - ENDDO - DO nz=1,nl-1 - dzbar_inv(nz) = 1.0_WP/(zbar(nz)-zbar(nz+1)) - END DO - -!DO elem=1, myDim_elem2D -! !! elem=myList_elem2D(m) -! elnodes=elem2D_nodes(:,elem) -! nl1=nlevels(elem)-1 -! -! uvert(1,1:2)=0d0 -! uvert(nl1+1,1:2)=0d0 -! -! DO nz=2, nl1 -! w=sum(Wvel(nz,elnodes))/3.0_WP -! umean=0.5_WP*(UV(1,nz-1,elem)+UV(1,nz,elem)) -! vmean=0.5_WP*(UV(2,nz-1,elem)+UV(2,nz,elem)) -! uvert(nz,1)=-umean*w -! uvert(nz,2)=-vmean*w -! END DO -! DO nz=1,nl1 -! da=sum(Wvel(nz,elnodes)-Wvel(nz+1,elnodes))/3.0_WP -! UV_rhsAB(1,nz,elem) = UV_rhsAB(1,nz,elem) + (uvert(nz,1)-uvert(nz+1,1)+& -! da*UV(1,nz,elem))*elem_area(elem)*dzbar_inv(nz) !/(zbar(nz)-zbar(nz+1)) -! UV_rhsAB(2,nz,elem)=UV_rhsAB(2,nz,elem)+(uvert(nz,2)-uvert(nz+1,2)+& -! da*UV(2,nz,elem))*elem_area(elem)*dzbar_inv(nz) !/(zbar(nz)-zbar(nz+1)) -! -! END DO -!END DO - - - DO elem=1, myDim_elem2D - !! elem=myList_elem2D(m) - elnodes=elem2D_nodes(:,elem) - !!PS nl1=nlevels(elem)-1 - nzmax=nlevels(elem)-1 - nzmin=ulevels(elem) - - ! w=sum(Wvel(2, elnodes))/3.0_WP - ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) - uvert(1,1)=w*(UV(1,1,elem)-UV(1,2,elem))*dZ_inv(2)*0.5_WP - uvert(1,2)=w*(UV(2,1,elem)-UV(2,2,elem))*dZ_inv(2)*0.5_WP - - ! w=sum(Wvel(nl1, elnodes))/3.0_WP - ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) - !!PS uvert(nl1,1)=w*(UV(1,nl1-1,elem)-UV(1,nl1,elem))*dZ_inv(nl1)*0.5_WP - !!PS uvert(nl1,2)=w*(UV(2,nl1-1,elem)-UV(2,nl1,elem))*dZ_inv(nl1)*0.5_WP - uvert(nzmax,1)=w*(UV(1,nzmax-1,elem)-UV(1,nzmax,elem))*dZ_inv(nzmax)*0.5_WP - uvert(nzmax,2)=w*(UV(2,nzmax-1,elem)-UV(2,nzmax,elem))*dZ_inv(nzmax)*0.5_WP - - - !!PS DO nz=2, nl1-1 - DO nz=nzmin+1, nzmax-1 - ! w=sum(Wvel(nz,elnodes)+Wvel(nz+1,elnodes))/6.0_WP - ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) - if (w >= 0.0_WP) then - uvert(nz,1)=w*(UV(1,nz,elem)-UV(1,nz+1,elem))*dZ_inv(nz+1) - uvert(nz,2)=w*(UV(2,nz,elem)-UV(2,nz+1,elem))*dZ_inv(nz+1) - else - uvert(nz,1)=w*(UV(1,nz-1,elem)-UV(1,nz,elem))*dZ_inv(nz) - uvert(nz,2)=w*(UV(2,nz-1,elem)-UV(2,nz,elem))*dZ_inv(nz) - end if - END DO - !!PS UV_rhsAB(1,1:nl1,elem) = UV_rhsAB(1,1:nl1,elem) - uvert(1:nl1,1)*elem_area(elem) - !!PS UV_rhsAB(2,1:nl1,elem) = UV_rhsAB(2,1:nl1,elem) - uvert(1:nl1,2)*elem_area(elem) - UV_rhsAB(1,nzmin:nzmax,elem) = UV_rhsAB(1,nzmin:nzmax,elem) - uvert(nzmin:nzmax,1)*elem_area(elem) - UV_rhsAB(2,nzmin:nzmax,elem) = UV_rhsAB(2,nzmin:nzmax,elem) - uvert(nzmin:nzmax,2)*elem_area(elem) - - END DO - - ! ======================= - ! Update the rhs - ! ======================= - gg=(1.5_WP+epsilon) - if(lfirst.and.(.not.r_restart)) then - gg=1.0_WP - lfirst=.false. - end if - - DO elem=1, myDim_elem2D !! P(e) elem=1, elem2D - !! elem=myList_elem2D(m) - elem_area_inv = dt/elem_area(elem) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - UV_rhs(1,nz,elem)= (UV_rhs(1,nz,elem)+UV_rhsAB(1,nz,elem)*gg) *elem_area_inv - UV_rhs(2,nz,elem)= (UV_rhs(2,nz,elem)+UV_rhsAB(2,nz,elem)*gg) *elem_area_inv - END DO - END DO - ! U_rhs contains all contributions to velocity from old time steps -end subroutine compute_vel_rhs_vinv From 32b92343b7a1b78a4185a4464395552017d51571 Mon Sep 17 00:00:00 2001 From: a270042 Date: Wed, 3 Nov 2021 17:59:05 +0100 Subject: [PATCH 495/909] remove viscosity_option=1,2,3,4 and associated parameters Div_c, Leith_c, Visc from oce_dyn.F90 --- src/MOD_DYN.F90 | 13 - src/oce_dyn.F90 | 556 +++++------------------------------------ src/oce_modules.F90 | 2 +- src/oce_setup_step.F90 | 6 +- 4 files changed, 62 insertions(+), 515 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index a17fd0651..18ab6f46b 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -43,10 +43,6 @@ MODULE MOD_DYN ! visc_option=... - ! 1=Harmonic Leith parameterization; - ! 2=Laplacian+Leith+biharmonic background - ! 3=Biharmonic Leith parameterization - ! 4=Biharmonic flow aware ! 5=Kinematic (easy) Backscatter ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) ! 7=Biharmonic flow aware (viscosity depends on velocity differences) @@ -61,11 +57,6 @@ MODULE MOD_DYN real(kind=WP) :: gamma1_visc = 0.1 real(kind=WP) :: gamma2_visc = 0.285 - ! div_c the strength of the modified Leith viscosity, nondimensional, 0.3 -- 1.0 - ! leith the strength of the Leith viscosity - real(kind=WP) :: div_c_visc = 0.5 - real(kind=WP) :: leith_c_visc = 0.05 - ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 ! (easy backscatter) real(kind=WP) :: easy_bs_return= 1.5 @@ -125,8 +116,6 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc - write(unit, iostat=iostat, iomsg=iomsg) dynamics%div_c_visc - write(unit, iostat=iostat, iomsg=iomsg) dynamics%leith_c_visc !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc @@ -168,8 +157,6 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc - read(unit, iostat=iostat, iomsg=iomsg) dynamics%div_c_visc - read(unit, iostat=iostat, iomsg=iomsg) dynamics%leith_c_visc !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index a02c03d4a..b045b28b2 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -11,63 +11,7 @@ ! (5) visc_filt_bcksct, (6) visc_filt_bilapl, (7) visc_filt_bidiff ! 4. Div_c =1. should be default ! 5. Leith_c=? (need to be adjusted) -module h_viscosity_leith_interface - interface - subroutine h_viscosity_leith(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn), intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module -module visc_filt_harmon_interface - interface - subroutine visc_filt_harmon(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module -module visc_filt_hbhmix_interface - interface - subroutine visc_filt_hbhmix(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module -module visc_filt_biharm_interface - interface - subroutine visc_filt_biharm(option, dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - integer :: option - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module + module visc_filt_bcksct_interface interface subroutine visc_filt_bcksct(dynamics, partit, mesh) @@ -168,10 +112,10 @@ subroutine relative_vorticity(dynamics, partit, mesh) end interface end module -! =================================================================== +! ! Contains routines needed for computations of dynamics. ! includes: update_vel, compute_vel_nodes -! =================================================================== +!_______________________________________________________________________________ SUBROUTINE update_vel(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -215,7 +159,9 @@ SUBROUTINE update_vel(dynamics, partit, mesh) eta_n=eta_n+d_eta call exchange_elem(UV, partit) end subroutine update_vel -!========================================================================== +! +! +!_______________________________________________________________________________ subroutine compute_vel_nodes(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -262,439 +208,53 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) END DO call exchange_nod(UVnode, partit) end subroutine compute_vel_nodes -!=========================================================================== +! +! +!_______________________________________________________________________________ subroutine viscosity_filter(option, dynamics, partit, mesh) -use o_PARAM -use MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -use MOD_DYN -use h_viscosity_leith_interface -use visc_filt_harmon_interface -use visc_filt_hbhmix_interface -use visc_filt_biharm_interface -use visc_filt_bcksct_interface -use visc_filt_bilapl_interface -use visc_filt_bidiff_interface -use visc_filt_dbcksc_interface -use backscatter_coef_interface -IMPLICIT NONE -integer :: option -type(t_dyn) , intent(inout), target :: dynamics -type(t_mesh) , intent(in) , target :: mesh -type(t_partit), intent(inout), target :: partit - -! Driving routine -! Background viscosity is selected in terms of Vl, where V is -! background velocity scale and l is the resolution. V is 0.005 -! or 0.01, perhaps it would be better to pass it as a parameter. - -! h_viscosity_leiht needs vorticity, so vorticity array should be -! allocated. At present, there are two rounds of smoothing in -! h_viscosity. - -SELECT CASE (option) -CASE (1) - ! ==== - ! Harmonic Leith parameterization - ! ==== - call h_viscosity_leith(dynamics, partit, mesh) - call visc_filt_harmon(dynamics, partit, mesh) -CASE (2) - ! === - ! Laplacian+Leith+biharmonic background - ! === - call h_viscosity_leith(dynamics, partit, mesh) - call visc_filt_hbhmix(dynamics, partit, mesh) -CASE (3) - ! === - ! Biharmonic Leith parameterization - ! === - call h_viscosity_leith(dynamics, partit, mesh) - call visc_filt_biharm(2, dynamics, partit, mesh) -CASE (4) - ! === - ! Biharmonic+upwind-type - ! === - call visc_filt_biharm(1, dynamics, partit, mesh) -CASE (5) - call visc_filt_bcksct(dynamics, partit, mesh) -CASE (6) - call visc_filt_bilapl(dynamics, partit, mesh) -CASE (7) - call visc_filt_bidiff(dynamics, partit, mesh) -CASE (8) - call backscatter_coef(dynamics, partit, mesh) - call visc_filt_dbcksc(dynamics, partit, mesh) -CASE DEFAULT - if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - stop -END SELECT -end subroutine viscosity_filter -! =================================================================== -SUBROUTINE visc_filt_harmon(dynamics, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_DYN -USE o_ARRAYS, only: Visc -USE o_PARAM -USE g_CONFIG -IMPLICIT NONE - -real(kind=WP) :: u1, v1, le(2), len, vi -integer :: nz, ed, el(2) , nzmin,nzmax -type(t_dyn) , intent(inout), target :: dynamics -type(t_mesh) , intent(in), target :: mesh -type(t_partit), intent(inout), target :: partit -real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" -UV => dynamics%uv(:,:,:) -UV_rhs => dynamics%uv_rhs(:,:,:) - - ! An analog of harmonic viscosity operator. - ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area - ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. - ! The contribution from boundary edges is neglected (free slip). - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - len=sqrt(sum(elem_area(el(1:2)))) - nzmax = minval(nlevels(el)) - nzmin = maxval(ulevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - vi=0.5_WP*(Visc(nz,el(1))+Visc(nz,el(2))) - vi=max(vi, gamma0*len)*dt ! limited from below by backgroung - u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi - v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi - - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO -end subroutine visc_filt_harmon -! =================================================================== -SUBROUTINE visc_filt_biharm(option, dynamics, partit, mesh) - USE MOD_MESH + use o_PARAM + use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - USE o_ARRAYS, only: Visc - USE o_PARAM - USE g_CONFIG - use g_comm_auto - IMPLICIT NONE - ! An energy conserving version - ! Also, we use the Leith viscosity - ! - real(kind=WP) :: u1, v1, vi, len - integer :: ed, el(2), nz, option, nzmin, nzmax - real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) + use visc_filt_bcksct_interface + use visc_filt_bilapl_interface + use visc_filt_bidiff_interface + use visc_filt_dbcksc_interface + use backscatter_coef_interface + IMPLICIT NONE + integer :: option type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - - ! Filter is applied twice. - ed=myDim_elem2D+eDim_elem2D - allocate(U_c(nl-1,ed), V_c(nl-1, ed)) - U_c=0.0_WP - V_c=0.0_WP - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmax = minval(nlevels(el)) - nzmin = maxval(ulevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO - END DO - - if(option==1) then - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - nzmin = ulevels(ed) - nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 - Do nz=nzmin,nzmax-1 - ! vi has the sense of harmonic viscosity coefficient because of - ! the division by area in the end - ! ==== - ! Case 1 -- an analog to the third-order upwind (vi=gamma1 * |u| * l) - ! ==== - vi=max(gamma0, gamma1*sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len*dt - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - end if - - if(option==2) then - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - nzmin = ulevels(ed) - nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 - Do nz=nzmin,nzmax-1 - ! vi has the sense of harmonic viscosity coefficient because of - ! the division by area in the end - ! === - ! Case 2 -- Leith +background (do not forget to call h_viscosity_leith before using this option) - ! === - vi=max(Visc(nz,ed), gamma0*len)*dt ! limited from below by backgroung - ! - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - end if - - call exchange_elem(U_c, partit) - call exchange_elem(V_c, partit) - DO ed=1, myDim_edge2D+eDim_edge2D - ! check if its a boudnary edge - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmin = maxval(ulevels(el)) - nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO - - deallocate(V_c,U_c) - -end subroutine visc_filt_biharm -! =================================================================== -SUBROUTINE visc_filt_hbhmix(dynamics, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - USE o_ARRAYS, only: Visc - USE o_PARAM - USE g_CONFIG - use g_comm_auto - IMPLICIT NONE - - ! An energy and momentum conserving version. - ! We use the harmonic Leith viscosity + biharmonic background viscosity - ! - - real(kind=WP) :: u1, v1, vi, len, crosslen, le(2) - integer :: ed, el(2), nz, nzmin, nzmax - real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) - type(t_dyn), intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - ! Filter is applied twice. - ed=myDim_elem2D+eDim_elem2D - allocate(U_c(nl-1,ed), V_c(nl-1, ed)) - U_c=0.0_WP - V_c=0.0_WP - DO ed=1, myDim_edge2D+eDim_edge2D - ! check if its a boudnary edge - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmin = maxval(ulevels(el)) - nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - vi=dt*0.5_WP*(Visc(nz,el(1))+Visc(nz,el(2))) - ! backgroung is added later (biharmonically) - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - u1=u1*vi - v1=v1*vi - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO - - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - nzmin = ulevels(ed) - nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 - Do nz=nzmin,nzmax-1 - vi=dt*gamma0*len ! add biharmonic backgroung - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - call exchange_elem(U_c, partit) - call exchange_elem(V_c, partit) - DO ed=1, myDim_edge2D+eDim_edge2D - ! check if its a boudnary edge - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmin = maxval(ulevels(el)) - nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO - - deallocate(V_c,U_c) - -end subroutine visc_filt_hbhmix - -! =================================================================== -SUBROUTINE h_viscosity_leith(dynamics, partit, mesh) - ! - ! Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - USE o_ARRAYS, only: Visc, vorticity - USE o_PARAM - USE g_CONFIG - use g_comm_auto - use relative_vorticity_interface - IMPLICIT NONE - real(kind=WP) :: dz, div_elem(3), xe, ye, vi - integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 - real(kind=WP) :: leithx, leithy - real(kind=WP), allocatable :: aux(:,:) - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:), pointer :: Wvel -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - Wvel =>dynamics%w(:,:) - ! - if(mom_adv<4) call relative_vorticity(dynamics, partit, mesh) !!! vorticity array should be allocated - ! Fill in viscosity: - Visc = 0.0_WP - DO elem=1, myDim_elem2D !! m=1, myDim_elem2D - !! elem=myList_elem2D(m) - !_______________________________________________________________________ - ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because - ! they run over elements here - nl1 =nlevels(elem)-1 - ul1 =ulevels(elem) - - zbar_n=0.0_WP - ! in case of partial cells zbar_n(nzmax) is not any more at zbar(nzmax), - ! zbar_n(nzmax) is now zbar_e_bot(elem), - zbar_n(nl1+1)=zbar_e_bot(elem) - !!PS do nz=nl1,2,-1 - do nz=nl1,ul1+1,-1 - zbar_n(nz) = zbar_n(nz+1) + helem(nz,elem) - end do - !!PS zbar_n(1) = zbar_n(2) + helem(1,elem) - zbar_n(ul1) = zbar_n(ul1+1) + helem(ul1,elem) - - !_______________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - !!PS do nz=1,nl1 - do nz=ul1,nl1 - dz=zbar_n(nz)-zbar_n(nz+1) - div_elem=(Wvel(nz,elnodes)-Wvel(nz+1,elnodes))/dz - xe=sum(gradient_sca(1:3,elem)*div_elem) - ye=sum(gradient_sca(4:6,elem)*div_elem) - div_elem=vorticity(nz,elnodes) - leithx=sum(gradient_sca(1:3,elem)*div_elem) - leithy=sum(gradient_sca(4:6,elem)*div_elem) - Visc(nz,elem)=min(gamma1*elem_area(elem)*sqrt((Div_c*(xe**2+ye**2) & - + Leith_c*(leithx**2+leithy**2))*elem_area(elem)), elem_area(elem)/dt) - end do !! 0.1 here comes from (2S)^{3/2}/pi^3 - do nz=nl1+1, nl-1 - Visc(nz, elem)=0.0_WP - end do - do nz=1,ul1-1 - Visc(nz, elem)=0.0_WP - end do - END DO - - allocate(aux(nl-1,myDim_nod2D+eDim_nod2D)) - aux = 0.0_WP - DO nt=1,2 - DO n=1, myDim_nod2D - nl1 = nlevels_nod2D(n) - ul1 = ulevels_nod2D(n) - !!PS DO nz=1, nlevels_nod2D(n)-1 - DO nz=ul1, nl1-1 - dz=0.0_WP - vi=0.0_WP - DO k=1, nod_in_elem2D_num(n) - elem=nod_in_elem2D(k,n) - dz=dz+elem_area(elem) - vi=vi+Visc(nz,elem)*elem_area(elem) - END DO - aux(nz,n)=vi/dz - END DO - END DO - call exchange_nod(aux, partit) - do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - nl1=nlevels(elem)-1 - ul1=ulevels(elem) - !!!PS Do nz=1, nl1 - Do nz=ul1, nl1 - Visc(nz,elem)=sum(aux(nz,elnodes))/3.0_WP - END DO - DO nz=nl1+1, nl-1 - Visc(nz,elem)=0.0_WP - END Do - DO nz=1, ul1-1 - Visc(nz,elem)=0.0_WP - END Do - end do - end do - call exchange_elem(Visc, partit) - deallocate(aux) -END subroutine h_viscosity_leith -! ======================================================================= + ! Driving routine + ! Background viscosity is selected in terms of Vl, where V is + ! background velocity scale and l is the resolution. V is 0.005 + ! or 0.01, perhaps it would be better to pass it as a parameter. + + ! h_viscosity_leiht needs vorticity, so vorticity array should be + ! allocated. At present, there are two rounds of smoothing in + ! h_viscosity. + SELECT CASE (option) + CASE (5) + call visc_filt_bcksct(dynamics, partit, mesh) + CASE (6) + call visc_filt_bilapl(dynamics, partit, mesh) + CASE (7) + call visc_filt_bidiff(dynamics, partit, mesh) + CASE (8) + call backscatter_coef(dynamics, partit, mesh) + call visc_filt_dbcksc(dynamics, partit, mesh) + CASE DEFAULT + if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + END SELECT +end subroutine viscosity_filter +! +! +!_______________________________________________________________________________ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -790,8 +350,9 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) end do deallocate(V_c,U_c,V_b,U_b) end subroutine visc_filt_bcksct - -! =================================================================== +! +! +!_______________________________________________________________________________ ! Strictly energy dissipative and momentum conserving version ! Viscosity depends on velocity Laplacian, i.e., on an analog of ! the Leith viscosity (Lapl==second derivatives) @@ -878,7 +439,9 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) deallocate(V_c,U_c) end subroutine visc_filt_bilapl -! =================================================================== +! +! +!_______________________________________________________________________________ ! Strictly energy dissipative and momentum conserving version ! Viscosity depends on velocity differences, and is introduced symmetrically ! into both stages of biharmonic operator @@ -961,10 +524,9 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) deallocate(V_c, U_c) end subroutine visc_filt_bidiff -! =================================================================== - - -! =================================================================== +! +! +!_______________________________________________________________________________ SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -1126,8 +688,9 @@ SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) deallocate(uuu) end subroutine visc_filt_dbcksc -!=========================================================================== - +! +! +!_______________________________________________________________________________ SUBROUTINE backscatter_coef(partit, mesh) USE MOD_MESH USE MOD_PARTIT @@ -1162,8 +725,9 @@ SUBROUTINE backscatter_coef(partit, mesh) call exchange_elem(v_back, partit) end subroutine backscatter_coef -!=========================================================================== - +! +! +!_______________________________________________________________________________ SUBROUTINE uke_update(dynamics, partit, mesh) USE MOD_MESH USE MOD_PARTIT diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 5f613cb82..013495860 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -196,7 +196,7 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: water_flux(:), Ssurf(:) real(kind=WP), allocatable :: virtual_salt(:), relax_salt(:) real(kind=WP), allocatable :: Tclim(:,:), Sclim(:,:) -real(kind=WP), allocatable :: Visc(:,:) +!!PS real(kind=WP), allocatable :: Visc(:,:) real(kind=WP), allocatable :: Tsurf_t(:,:), Ssurf_t(:,:) real(kind=WP), allocatable :: tau_x_t(:,:), tau_y_t(:,:) real(kind=WP), allocatable :: heat_flux_t(:,:), heat_rel_t(:,:), heat_rel(:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 6a0aa8e05..4a2bfbc60 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -441,8 +441,6 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) !!PS dynamics%gamma0_visc = gamma0_visc !!PS dynamics%gamma1_visc = gamma1_visc !!PS dynamics%gamma2_visc = gamma2_visc -!!PS dynamics%div_c_visc = div_c_visc -!!PS dynamics%leith_c_visc = leith_c_visc !!PS dynamics%use_ivertvisc = use_ivertvisc !!PS dynamics%momadv_opt = momadv_opt !!PS dynamics%use_freeslip = use_freeslip @@ -453,8 +451,6 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%gamma0_visc = gamma0 dynamics%gamma1_visc = gamma1 dynamics%gamma2_visc = gamma2 - dynamics%div_c_visc = Div_c - dynamics%leith_c_visc = Leith_c dynamics%use_ivertvisc = i_vert_visc dynamics%momadv_opt = mom_adv dynamics%use_freeslip = free_slip @@ -496,7 +492,7 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Velocities ! ================ !allocate(stress_diag(2, elem_size))!delete me -allocate(Visc(nl-1, elem_size)) +!!PS allocate(Visc(nl-1, elem_size)) ! ================ ! elevation and its rhs ! ================ From 1e2be43a522329bc57e15866dd497007eddf1387 Mon Sep 17 00:00:00 2001 From: a270042 Date: Thu, 4 Nov 2021 10:54:29 +0100 Subject: [PATCH 496/909] add derived type for dynamics working arrays --- src/MOD_DYN.F90 | 111 +++++++++++++++++++++++++++++++++++++--- src/oce_ale_vel_rhs.F90 | 2 +- src/oce_dyn.F90 | 31 +++++++---- src/oce_setup_step.F90 | 30 +++++------ 4 files changed, 141 insertions(+), 33 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index 18ab6f46b..86b6cbfea 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -10,7 +10,7 @@ MODULE MOD_DYN ! ! !_______________________________________________________________________________ -TYPE T_solverinfo +TYPE T_SOLVERINFO integer :: ident = 1 integer :: maxiter = 2000 integer :: restart = 15 @@ -18,19 +18,37 @@ MODULE MOD_DYN integer :: lutype = 2 real(kind=WP) :: droptol = 1.e-8 real(kind=WP) :: soltol = 1e-10 !1.e-10 -END TYPE T_solverinfo - + contains + procedure WRITE_T_SOLVERINFO + procedure READ_T_SOLVERINFO + generic :: write(unformatted) => WRITE_T_SOLVERINFO + generic :: read(unformatted) => READ_T_SOLVERINFO +END TYPE T_SOLVERINFO +! +! +!_______________________________________________________________________________ +TYPE T_DYN_WORK + real(kind=WP), allocatable, dimension(:,:,:) :: uvnode_rhs + real(kind=WP), allocatable, dimension(:,:) :: u_c, v_c + real(kind=WP), allocatable, dimension(:,:) :: u_b, v_b + contains + procedure WRITE_T_DYN_WORK + procedure READ_T_DYN_WORK + generic :: write(unformatted) => WRITE_T_DYN_WORK + generic :: read(unformatted) => READ_T_DYN_WORK +END TYPE T_DYN_WORK ! ! !_______________________________________________________________________________ ! set main structure for dynamicss, contains viscosity options and parameters + ! option for momentum advection TYPE T_DYN +!___________________________________________________________________________ ! instant zonal merdional velocity & Adams-Bashfort rhs real(kind=WP), allocatable, dimension(:,:,:):: uv, uv_rhs, uv_rhsAB, fer_uv ! horizontal velocities at nodes - real(kind=WP), allocatable, dimension(:,:,:):: uvnode, uvnode_rhs + real(kind=WP), allocatable, dimension(:,:,:):: uvnode ! instant vertical vel arrays real(kind=WP), allocatable, dimension(:,:) :: w, w_e, w_i, cfl_z, fer_w @@ -38,10 +56,15 @@ MODULE MOD_DYN ! sea surface height arrays real(kind=WP), allocatable, dimension(:) :: eta_n, d_eta, ssh_rhs, ssh_rhs_old + !___________________________________________________________________________ ! summarizes solver input parameter type(t_solverinfo) :: solverinfo + !___________________________________________________________________________ + ! put dynmiacs working arrays + type(t_dyn_work) :: work + !___________________________________________________________________________ ! visc_option=... ! 5=Kinematic (easy) Backscatter ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) @@ -83,6 +106,71 @@ MODULE MOD_DYN contains +! +! +!_______________________________________________________________________________ +! set unformatted writing and reading for T_DYN_WORK +subroutine WRITE_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) + IMPLICIT NONE + class(T_SOLVERINFO), intent(in) :: tsolverinfo + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%ident + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%maxiter + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%restart + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%fillin + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%lutype + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%droptol + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%soltol +end subroutine WRITE_T_SOLVERINFO + +subroutine READ_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) + IMPLICIT NONE + class(T_SOLVERINFO), intent(inout) :: tsolverinfo + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%ident + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%maxiter + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%restart + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%fillin + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%lutype + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%droptol + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%soltol +end subroutine READ_T_SOLVERINFO + +! +! +!_______________________________________________________________________________ +! set unformatted writing and reading for T_DYN_WORK +subroutine WRITE_T_DYN_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN_WORK), intent(in) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call write_bin_array(twork%uvnode_rhs, unit, iostat, iomsg) + call write_bin_array(twork%u_c, unit, iostat, iomsg) + call write_bin_array(twork%v_c, unit, iostat, iomsg) + call write_bin_array(twork%u_b, unit, iostat, iomsg) + call write_bin_array(twork%v_b, unit, iostat, iomsg) +end subroutine WRITE_T_DYN_WORK + +subroutine READ_T_DYN_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN_WORK), intent(inout) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call read_bin_array(twork%uvnode_rhs, unit, iostat, iomsg) + call read_bin_array(twork%u_c, unit, iostat, iomsg) + call read_bin_array(twork%v_c, unit, iostat, iomsg) + call read_bin_array(twork%u_b, unit, iostat, iomsg) + call read_bin_array(twork%v_b, unit, iostat, iomsg) +end subroutine READ_T_DYN_WORK + ! ! !_______________________________________________________________________________ @@ -99,7 +187,6 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) call write_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) call write_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) call write_bin_array(dynamics%uvnode , unit, iostat, iomsg) - call write_bin_array(dynamics%uvnode_rhs, unit, iostat, iomsg) call write_bin_array(dynamics%w , unit, iostat, iomsg) call write_bin_array(dynamics%w_e , unit, iostat, iomsg) @@ -107,10 +194,16 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) call write_bin_array(dynamics%cfl_z , unit, iostat, iomsg) if (Fer_GM) then - call write_bin_array(dynamics%fer_w , unit, iostat, iomsg) - call write_bin_array(dynamics%fer_uv , unit, iostat, iomsg) + call write_bin_array(dynamics%fer_w , unit, iostat, iomsg) + call write_bin_array(dynamics%fer_uv, unit, iostat, iomsg) end if + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%work + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%solverinfo + !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc @@ -140,7 +233,6 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) call read_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) call read_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) call read_bin_array(dynamics%uvnode , unit, iostat, iomsg) - call read_bin_array(dynamics%uvnode_rhs, unit, iostat, iomsg) call read_bin_array(dynamics%w , unit, iostat, iomsg) call read_bin_array(dynamics%w_e , unit, iostat, iomsg) @@ -152,6 +244,9 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) call read_bin_array(dynamics%fer_uv , unit, iostat, iomsg) end if + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%work + !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 71e0487c2..bee2c0b7d 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -213,7 +213,7 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) #include "associate_mesh_ass.h" UV =>dynamics%uv(:,:,:) UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) - UVnode_rhs=>dynamics%uvnode_rhs(:,:,:) + UVnode_rhs=>dynamics%work%uvnode_rhs(:,:,:) Wvel_e =>dynamics%w_e(:,:) !___________________________________________________________________________ diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index b045b28b2..036aaefea 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -11,7 +11,6 @@ ! (5) visc_filt_bcksct, (6) visc_filt_bilapl, (7) visc_filt_bidiff ! 4. Div_c =1. should be default ! 5. Leith_c=? (need to be adjusted) - module visc_filt_bcksct_interface interface subroutine visc_filt_bcksct(dynamics, partit, mesh) @@ -267,17 +266,21 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax - real(kind=8), allocatable :: U_b(:,:), V_b(:,:), U_c(:,:), V_c(:,:) +!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) + real(kind=8), allocatable :: U_b(:,:), V_b(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. @@ -370,19 +373,22 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax - real(kind=8), allocatable :: U_c(:,:), V_c(:,:) +!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -459,18 +465,21 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax - real(kind=8), allocatable :: U_c(:,:), V_c(:,:) +!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) ! ed=myDim_elem2D+eDim_elem2D allocate(U_c(nl-1,ed), V_c(nl-1, ed)) @@ -543,18 +552,22 @@ SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) real(kind=8) :: u1, v1, le(2), len, crosslen, vi, uke1 integer :: nz, ed, el(2) -real(kind=8), allocatable :: U_c(:,:), V_c(:,:), UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) -real(kind=8), allocatable :: uuu(:) +!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) +real(kind=8) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) +real(kind=8) , allocatable :: uuu(:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh -real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs +real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs +real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -UV => dynamics%uv(:,:,:) +UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) +U_c => dynamics%work%u_c(:,:) +V_c => dynamics%work%v_c(:,:) ! An analog of harmonic viscosity operator. ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 4a2bfbc60..cfe11d459 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -403,35 +403,35 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) allocate(dynamics%uv_rhs( 2, nl-1, elem_size)) allocate(dynamics%uv_rhsAB( 2, nl-1, elem_size)) allocate(dynamics%uvnode( 2, nl-1, node_size)) - allocate(dynamics%uvnode_rhs(2, nl-1, node_size)) - dynamics%uv = 0.0_WP - dynamics%uv_rhs = 0.0_WP - dynamics%uv_rhsAB = 0.0_WP - dynamics%uvnode = 0.0_WP - dynamics%uvnode_rhs = 0.0_WP + allocate(dynamics%work%uvnode_rhs(2, nl-1, node_size)) + dynamics%uv = 0.0_WP + dynamics%uv_rhs = 0.0_WP + dynamics%uv_rhsAB = 0.0_WP + dynamics%uvnode = 0.0_WP + dynamics%work%uvnode_rhs = 0.0_WP allocate(dynamics%w( nl, node_size)) allocate(dynamics%w_e( nl, node_size)) allocate(dynamics%w_i( nl, node_size)) allocate(dynamics%cfl_z( nl, node_size)) - dynamics%w = 0.0_WP - dynamics%w_e = 0.0_WP - dynamics%w_i = 0.0_WP - dynamics%cfl_z = 0.0_WP + dynamics%w = 0.0_WP + dynamics%w_e = 0.0_WP + dynamics%w_i = 0.0_WP + dynamics%cfl_z = 0.0_WP allocate(dynamics%eta_n( node_size)) allocate(dynamics%d_eta( node_size)) allocate(dynamics%ssh_rhs( node_size)) !!PS allocate(dynamics%ssh_rhs_old(node_size)) - dynamics%eta_n = 0.0_WP - dynamics%d_eta = 0.0_WP - dynamics%ssh_rhs = 0.0_WP + dynamics%eta_n = 0.0_WP + dynamics%d_eta = 0.0_WP + dynamics%ssh_rhs = 0.0_WP if (Fer_GM) then allocate(dynamics%fer_uv(2, nl-1, elem_size)) allocate(dynamics%fer_w( nl, node_size)) - dynamics%fer_uv = 0.0_WP - dynamics%fer_w = 0.0_WP + dynamics%fer_uv = 0.0_WP + dynamics%fer_w = 0.0_WP end if !!PS dynamics%ssh_rhs_old= 0.0_WP From c84303631c06482d09e5a3145d8895041cacc2fb Mon Sep 17 00:00:00 2001 From: a270042 Date: Fri, 5 Nov 2021 16:56:06 +0100 Subject: [PATCH 497/909] exchange arrays U_b, V_b against derived type for dynamics working arrays dynamics%work%u_b ... --- src/MOD_DYN.F90 | 2 + src/oce_dyn.F90 | 11 ++---- src/oce_setup_step.F90 | 88 ++++++++++++++++++++++++++---------------- 3 files changed, 61 insertions(+), 40 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index 86b6cbfea..a3b570afc 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -30,6 +30,8 @@ MODULE MOD_DYN TYPE T_DYN_WORK real(kind=WP), allocatable, dimension(:,:,:) :: uvnode_rhs real(kind=WP), allocatable, dimension(:,:) :: u_c, v_c + + ! easy backscatter contribution real(kind=WP), allocatable, dimension(:,:) :: u_b, v_b contains procedure WRITE_T_DYN_WORK diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 036aaefea..f2834253d 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -266,21 +266,21 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax -!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) - real(kind=8), allocatable :: U_b(:,:), V_b(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs - real(kind=WP), dimension(:,:) , pointer :: U_c, V_c + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c, U_b, V_b #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv( :,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) U_c => dynamics%work%u_c(:,:) V_c => dynamics%work%v_c(:,:) + U_b => dynamics%work%u_b(:,:) + V_b => dynamics%work%v_b(:,:) ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. @@ -351,7 +351,6 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -easy_bs_return*sum(V_c(nz,nelem))/3.0_WP END DO end do - deallocate(V_c,U_c,V_b,U_b) end subroutine visc_filt_bcksct ! ! @@ -373,7 +372,6 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax -!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit @@ -465,7 +463,6 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) IMPLICIT NONE real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), nz, nzmin, nzmax -!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index cfe11d459..7d142e043 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -394,22 +394,50 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) !!PS read(nm_unit, nml=dynamics_general, iostat=iost) !!PS close(nm_unit) + !___________________________________________________________________________ + ! set parameters in derived type +!!PS dynamics%visc_opt = visc_opt +!!PS dynamics%gamma0_visc = gamma0_visc +!!PS dynamics%gamma1_visc = gamma1_visc +!!PS dynamics%gamma2_visc = gamma2_visc +!!PS dynamics%use_ivertvisc = use_ivertvisc +!!PS dynamics%momadv_opt = momadv_opt +!!PS dynamics%use_freeslip = use_freeslip +!!PS dynamics%use_wsplit = use_wsplit +!!PS dynamics%wsplit_maxcfl = wsplit_maxcfl + + dynamics%visc_opt = visc_option + dynamics%gamma0_visc = gamma0 + dynamics%gamma1_visc = gamma1 + dynamics%gamma2_visc = gamma2 + dynamics%use_ivertvisc = i_vert_visc + dynamics%momadv_opt = mom_adv + dynamics%use_freeslip = free_slip + dynamics%use_wsplit = w_split + dynamics%wsplit_maxcfl = w_max_cfl + + !___________________________________________________________________________ ! define local vertice & elem array size elem_size=myDim_elem2D+eDim_elem2D node_size=myDim_nod2D+eDim_nod2D - - ! allocate data arrays in derived type + + !___________________________________________________________________________ + ! allocate/initialise horizontal velocity arrays in derived type allocate(dynamics%uv( 2, nl-1, elem_size)) allocate(dynamics%uv_rhs( 2, nl-1, elem_size)) allocate(dynamics%uv_rhsAB( 2, nl-1, elem_size)) allocate(dynamics%uvnode( 2, nl-1, node_size)) - allocate(dynamics%work%uvnode_rhs(2, nl-1, node_size)) dynamics%uv = 0.0_WP dynamics%uv_rhs = 0.0_WP dynamics%uv_rhsAB = 0.0_WP dynamics%uvnode = 0.0_WP - dynamics%work%uvnode_rhs = 0.0_WP + if (Fer_GM) then + allocate(dynamics%fer_uv(2, nl-1, elem_size)) + dynamics%fer_uv = 0.0_WP + end if + !___________________________________________________________________________ + ! allocate/initialise vertical velocity arrays in derived type allocate(dynamics%w( nl, node_size)) allocate(dynamics%w_e( nl, node_size)) allocate(dynamics%w_i( nl, node_size)) @@ -418,44 +446,38 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%w_e = 0.0_WP dynamics%w_i = 0.0_WP dynamics%cfl_z = 0.0_WP + if (Fer_GM) then + allocate(dynamics%fer_w( nl, node_size)) + dynamics%fer_w = 0.0_WP + end if + !___________________________________________________________________________ + ! allocate/initialise ssh arrays in derived type allocate(dynamics%eta_n( node_size)) allocate(dynamics%d_eta( node_size)) allocate(dynamics%ssh_rhs( node_size)) - !!PS allocate(dynamics%ssh_rhs_old(node_size)) dynamics%eta_n = 0.0_WP dynamics%d_eta = 0.0_WP dynamics%ssh_rhs = 0.0_WP - - if (Fer_GM) then - allocate(dynamics%fer_uv(2, nl-1, elem_size)) - allocate(dynamics%fer_w( nl, node_size)) - dynamics%fer_uv = 0.0_WP - dynamics%fer_w = 0.0_WP + !!PS allocate(dynamics%ssh_rhs_old(node_size)) + !!PS dynamics%ssh_rhs_old= 0.0_WP + + !___________________________________________________________________________ + ! inititalise working arrays + allocate(dynamics%work%uvnode_rhs(2, nl-1, node_size)) + allocate(dynamics%work%u_c(nl-1, elem_size)) + allocate(dynamics%work%v_c(nl-1, elem_size)) + dynamics%work%uvnode_rhs = 0.0_WP + dynamics%work%u_c = 0.0_WP + dynamics%work%v_c = 0.0_WP + if (dynamics%visc_opt==5) then + allocate(dynamics%work%u_b(nl-1, elem_size)) + allocate(dynamics%work%v_b(nl-1, elem_size)) + dynamics%work%u_b = 0.0_WP + dynamics%work%v_b = 0.0_WP end if - -!!PS dynamics%ssh_rhs_old= 0.0_WP - - ! set parameters in derived type -!!PS dynamics%visc_opt = visc_opt -!!PS dynamics%gamma0_visc = gamma0_visc -!!PS dynamics%gamma1_visc = gamma1_visc -!!PS dynamics%gamma2_visc = gamma2_visc -!!PS dynamics%use_ivertvisc = use_ivertvisc -!!PS dynamics%momadv_opt = momadv_opt -!!PS dynamics%use_freeslip = use_freeslip -!!PS dynamics%use_wsplit = use_wsplit -!!PS dynamics%wsplit_maxcfl = wsplit_maxcfl - dynamics%visc_opt = visc_option - dynamics%gamma0_visc = gamma0 - dynamics%gamma1_visc = gamma1 - dynamics%gamma2_visc = gamma2 - dynamics%use_ivertvisc = i_vert_visc - dynamics%momadv_opt = mom_adv - dynamics%use_freeslip = free_slip - dynamics%use_wsplit = w_split - dynamics%wsplit_maxcfl = w_max_cfl + END SUBROUTINE dynamics_init ! ! From 5df0ebd7f6f9cf7f8651ed224adddb59f45996ea Mon Sep 17 00:00:00 2001 From: a270042 Date: Sat, 6 Nov 2021 23:14:22 +0100 Subject: [PATCH 498/909] outsource stochastic backscatter into an own module src/gen_modules_backscatter.F90, where backscatter varaibles are declared and initialse by subroutine init_backscatter --- src/gen_modules_backscatter.F90 | 411 +++++++++++++++ src/oce_dyn.F90 | 856 ++++++++++++++++---------------- src/oce_setup_step.F90 | 48 +- 3 files changed, 869 insertions(+), 446 deletions(-) create mode 100644 src/gen_modules_backscatter.F90 diff --git a/src/gen_modules_backscatter.F90 b/src/gen_modules_backscatter.F90 new file mode 100644 index 000000000..f602c39c0 --- /dev/null +++ b/src/gen_modules_backscatter.F90 @@ -0,0 +1,411 @@ +module g_backscatter + + !___________________________________________________________________________ + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + + !___________________________________________________________________________ + USE o_ARRAYS, only: bvfreq, coriolis_node + + !___________________________________________________________________________ + USE o_param + USE g_CONFIG + USE g_comm_auto + USE g_support + USE g_rotate_grid + IMPLICIT NONE + + !___________________________________________________________________________ + ! allocate backscatter arrays + real(kind=WP), allocatable, dimension(:,:) :: v_back + real(kind=WP), allocatable, dimension(:,:) :: uke, uke_back, uke_dis, uke_dif + real(kind=WP), allocatable, dimension(:,:) :: uke_rhs, uke_rhs_old + real(kind=WP), allocatable, dimension(:,:) :: UV_dis_posdef_b2, UV_dis_posdef, UV_back_posdef + real(kind=WP), allocatable, dimension(:,:,:):: UV_back, UV_dis + real(kind=WP), allocatable, dimension(:,:,:):: UV_dis_tend, UV_total_tend, UV_back_tend + + contains + ! + ! + !___________________________________________________________________________ + ! allocate/initialise backscatter arrays + subroutine init_backscatter(partit, mesh) + implicit none + integer :: elem_size + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + elem_size = myDim_elem2D + eDim_elem2D + allocate(v_back( nl-1, elem_size)) ! Backscatter viscosity + allocate(uke( nl-1, elem_size)) ! Unresolved kinetic energy for backscatter coefficient + allocate(uke_dis( nl-1, elem_size)) + allocate(uke_back( nl-1, elem_size)) + allocate(uke_dif( nl-1, elem_size)) + allocate(uke_rhs( nl-1, elem_size)) + allocate(uke_rhs_old( nl-1, elem_size)) + allocate(UV_dis( 2, nl-1, elem_size)) + allocate(UV_back( 2, nl-1, elem_size)) + allocate(UV_dis_tend( 2, nl-1, elem_size)) + allocate(UV_back_tend( 2, nl-1, elem_size)) + allocate(UV_total_tend(2, nl-1, elem_size)) + uke = 0.0_WP + v_back = 0.0_WP + uke_dis = 0.0_WP + uke_dif = 0.0_WP + uke_back = 0.0_WP + uke_rhs = 0.0_WP + uke_rhs_old = 0.0_WP + UV_dis = 0.0_WP + UV_dis_tend = 0.0_WP + UV_back = 0.0_WP + UV_back_tend = 0.0_WP + UV_total_tend = 0.0_WP + + end subroutine init_backscatter + + ! + ! + !_______________________________________________________________________________ + subroutine visc_filt_dbcksc(dynamics, partit, mesh) + IMPLICIT NONE + + real(kind=WP) :: u1, v1, le(2), len, crosslen, vi, uke1 + integer :: nz, ed, el(2) + real(kind=WP) , allocatable :: uke_d(:,:) + !!PS real(kind=WP) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:) + real(kind=WP) , allocatable :: uuu(:) + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) + + ! An analog of harmonic viscosity operator. + ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area + ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. + ! The contribution from boundary edges is neglected (free slip). + ! Filter is applied twice. + ed=myDim_elem2D+eDim_elem2D + !!PS allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) + allocate(uke_d(nl-1,ed)) + allocate(uuu(ed)) + UV_back= 0.0_WP + UV_dis = 0.0_WP + uke_d = 0.0_WP + U_c = 0.0_WP + V_c = 0.0_WP + + DO ed=1, myDim_edge2D+eDim_edge2D + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + DO nz=1,minval(nlevels(el))-1 + u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) + v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) + + U_c(nz,el(1))=U_c(nz,el(1))-u1 + U_c(nz,el(2))=U_c(nz,el(2))+u1 + V_c(nz,el(1))=V_c(nz,el(1))-v1 + V_c(nz,el(2))=V_c(nz,el(2))+v1 + END DO + END DO + + Do ed=1,myDim_elem2D + len=sqrt(elem_area(ed)) + len=dt*len/30.0_WP + Do nz=1,nlevels(ed)-1 + ! vi has the sense of harmonic viscosity coefficient because of + ! the division by area in the end + ! ==== + ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) + ! ==== + vi=max(0.2_WP,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len + U_c(nz,ed)=-U_c(nz,ed)*vi + V_c(nz,ed)=-V_c(nz,ed)*vi + END DO + end do + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) + + DO ed=1, myDim_edge2D+eDim_edge2D + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + le=edge_dxdy(:,ed) + le(1)=le(1)*sum(elem_cos(el))*0.25_WP + len=sqrt(le(1)**2+le(2)**2)*r_earth + le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) + le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) + crosslen=sqrt(le(1)**2+le(2)**2) + + DO nz=1,minval(nlevels(el))-1 + vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen + !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed + !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed + !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where + ! Visc is small and decoupling might happen + !Backscatter contribution + u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi + v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi + + !UKE diffusion + vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen + uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi + + UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) + UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) + UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) + UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) + + !Correct scaling for the diffusion? + uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) + uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) + + !Biharmonic contribution + u1=(U_c(nz,el(1))-U_c(nz,el(2))) + v1=(V_c(nz,el(1))-V_c(nz,el(2))) + + UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) + UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) + UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) + UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) + + END DO + END DO + call exchange_elem(UV_back, partit) + + DO nz=1, nl-1 + uuu=0.0_WP + uuu=UV_back(1,nz,:) + call smooth_elem(uuu,smooth_back_tend, partit, mesh) + UV_back(1,nz,:)=uuu + uuu=0.0_WP + uuu=UV_back(2,nz,:) + call smooth_elem(uuu,smooth_back_tend, partit, mesh) + UV_back(2,nz,:)=uuu + END DO + + DO ed=1, myDim_elem2D + DO nz=1,nlevels(ed)-1 + UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) + UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) + END DO + END DO + + UV_dis_tend=UV_dis!+UV_back + UV_total_tend=UV_dis+UV_back + UV_back_tend=UV_back + uke_dif=uke_d + + call uke_update(dynamics, partit, mesh) + + !!PS deallocate(UV_dis,UV_back) + deallocate(uke_d) + deallocate(uuu) + end subroutine visc_filt_dbcksc + + ! + ! + !_______________________________________________________________________________ + subroutine backscatter_coef(partit, mesh) + IMPLICIT NONE + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, nz +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !Potentially add the Rossby number scaling to the script... + !check if sign is right! Different in the Jansen paper + !Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) + v_back=0.0_WP + DO elem=1, myDim_elem2D + DO nz=1,nlevels(elem)-1 + !v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) + !v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct + v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct + !Scaling by sqrt(2*elem_area) or sqrt(elem_area)? + END DO + END DO + call exchange_elem(v_back, partit) + end subroutine backscatter_coef + ! + ! + !_______________________________________________________________________________ + subroutine uke_update(dynamics, partit, mesh) + IMPLICIT NONE + + !I had to change uke(:) to uke(:,:) to make output and restart work!! + !Why is it necessary to implement the length of the array? It doesn't work without! + !integer, intent(in) :: t_levels + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + + real(kind=WP) :: hall, h1_eta, hnz, vol + integer :: elnodes(3), nz, ed, edi, node, j, elem, q + real(kind=WP), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) + integer :: kk, nzmax, el + real(kind=WP) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso + real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? + real(kind=WP), dimension(:,:,:), pointer :: UV +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + + !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered + !rossby_num=2 + + ed=myDim_elem2D+eDim_elem2D + allocate(uuu(ed)) + + uke_back=0.0_WP + uke_dis=0.0_WP + DO ed=1, myDim_elem2D + DO nz=1, nlevels(ed)-1 + uke_dis(nz,ed) =(UV(1,nz,ed)*UV_dis_tend( 1,nz,ed)+UV(2,nz,ed)*UV_dis_tend( 2,nz,ed)) + uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) + END DO + END DO + + DO nz=1,nl-1 + uuu=0.0_8 + uuu=uke_back(nz,:) + call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? + uke_back(nz,:)=uuu + END DO + + !Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice + !Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic + !uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) + ed=myDim_elem2D+eDim_elem2D + allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) + allocate(work_uv(myDim_nod2D+eDim_nod2D)) + allocate(rosb_array(nl-1,ed)) + call exchange_elem(UV, partit) + rosb_array=0._WP + DO nz=1, nl-1 + work_uv=0._WP + DO node=1, myDim_nod2D + vol=0._WP + U_work(nz,node)=0._WP + V_work(nz,node)=0._WP + DO j=1, nod_in_elem2D_num(node) + elem=nod_in_elem2D(j, node) + U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) + V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) + vol=vol+elem_area(elem) + END DO + U_work(nz,node)=U_work(nz,node)/vol + V_work(nz,node)=U_work(nz,node)/vol + END DO + work_uv=U_work(nz,:) + call exchange_nod(work_uv, partit) + U_work(nz,:)=work_uv + work_uv=V_work(nz,:) + call exchange_nod(work_uv, partit) + V_work(nz,:)=work_uv + END DO + + DO el=1,myDim_elem2D + DO nz=1, nlevels(el)-1 + rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& + sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& + (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& + sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) + ! hall=hall+hnz + END DO + ! rosb_array(el)=rosb_array(el)/hall + END DO + + DO ed=1, myDim_elem2D + scaling=1._WP + IF(uke_scaling) then + reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) + rosb=0._wp + elnodes=elem2D_nodes(:, ed) + DO kk=1,3 + c1=0._wp + nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) + !Vertical average; same scaling in the vertical + DO nz=1, nzmax-1 + c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. + END DO + c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min + !Cutoff K_GM depending on (Resolution/Rossby radius) ratio + rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) + END DO + rosb=rosb/3._WP + scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) + END IF + + DO nz=1, nlevels(ed)-1 + elnodes=elem2D_nodes(:,ed) + + !Taking out that one place where it is always weird (Pacific Southern Ocean) + !Should not really be used later on, once we fix the issue with the 1/4 degree grid + if(.not. (TRIM(which_toy)=="soufflet")) then + call elem_center(ed, ex, ey) + !a1=-104.*rad + !a2=-49.*rad + call g2r(-104.*rad, -49.*rad, a1, a2) + dist_reg(1)=ex-a1 + dist_reg(2)=ey-a2 + call trim_cyclic(dist_reg(1)) + dist_reg(1)=dist_reg(1)*elem_cos(ed) + dist_reg=dist_reg*r_earth + len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) + + !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 + rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) + !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) + uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._WP/(1._WP+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) + else + rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) + !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) + uke_dis(nz,ed)=scaling*1._WP/(1._WP+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) + end if + END DO + END DO + + deallocate(U_work, V_work) + deallocate(rosb_array) + deallocate(work_uv) + + call exchange_elem(uke_dis, partit) + DO nz=1, nl-1 + uuu=uke_dis(nz,:) + call smooth_elem(uuu,smooth_dis, partit, mesh) + uke_dis(nz,:)=uuu + END DO + DO ed=1, myDim_elem2D + DO nz=1,nlevels(ed)-1 + uke_rhs_old(nz,ed)=uke_rhs(nz,ed) + uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) + uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) + END DO + END DO + + call exchange_elem(uke, partit) + deallocate(uuu) + + end subroutine uke_update +end module g_backscatter + diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index f2834253d..cd78ac3c9 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -53,48 +53,48 @@ subroutine visc_filt_bidiff(dynamics, partit, mesh) end subroutine end interface end module -module visc_filt_dbcksc_interface - interface - subroutine visc_filt_dbcksc(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module -module backscatter_coef_interface - interface - subroutine backscatter_coef(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module -module uke_update_interface - interface - subroutine uke_update(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module +!!PS module visc_filt_dbcksc_interface +!!PS interface +!!PS subroutine visc_filt_dbcksc(dynamics, partit, mesh) +!!PS use mod_mesh +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS USE MOD_DYN +!!PS type(t_dyn) , intent(inout), target :: dynamics +!!PS type(t_partit), intent(inout), target :: partit +!!PS type(t_mesh) , intent(in) , target :: mesh +!!PS +!!PS end subroutine +!!PS end interface +!!PS end module +!!PS module backscatter_coef_interface +!!PS interface +!!PS subroutine backscatter_coef(dynamics, partit, mesh) +!!PS use mod_mesh +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS USE MOD_DYN +!!PS type(t_dyn) , intent(inout), target :: dynamics +!!PS type(t_partit), intent(inout), target :: partit +!!PS type(t_mesh) , intent(in) , target :: mesh +!!PS +!!PS end subroutine +!!PS end interface +!!PS end module +!!PS module uke_update_interface +!!PS interface +!!PS subroutine uke_update(dynamics, partit, mesh) +!!PS use mod_mesh +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS USE MOD_DYN +!!PS type(t_dyn) , intent(inout), target :: dynamics +!!PS type(t_partit), intent(inout), target :: partit +!!PS type(t_mesh) , intent(in) , target :: mesh +!!PS +!!PS end subroutine +!!PS end interface +!!PS end module module relative_vorticity_interface interface @@ -219,8 +219,9 @@ subroutine viscosity_filter(option, dynamics, partit, mesh) use visc_filt_bcksct_interface use visc_filt_bilapl_interface use visc_filt_bidiff_interface - use visc_filt_dbcksc_interface - use backscatter_coef_interface +!!PS use visc_filt_dbcksc_interface +!!PS use backscatter_coef_interface + use g_backscatter IMPLICIT NONE integer :: option type(t_dyn) , intent(inout), target :: dynamics @@ -237,13 +238,18 @@ subroutine viscosity_filter(option, dynamics, partit, mesh) ! h_viscosity. SELECT CASE (option) CASE (5) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bcksct'//achar(27)//'[0m' call visc_filt_bcksct(dynamics, partit, mesh) CASE (6) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bilapl'//achar(27)//'[0m' call visc_filt_bilapl(dynamics, partit, mesh) CASE (7) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bidiff'//achar(27)//'[0m' call visc_filt_bidiff(dynamics, partit, mesh) CASE (8) - call backscatter_coef(dynamics, partit, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call backscatter_coef'//achar(27)//'[0m' + call backscatter_coef(partit, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_dbcksc'//achar(27)//'[0m' call visc_filt_dbcksc(dynamics, partit, mesh) CASE DEFAULT if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' @@ -530,386 +536,386 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) deallocate(V_c, U_c) end subroutine visc_filt_bidiff -! -! -!_______________________________________________________________________________ -SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -use MOD_DYN -USE o_ARRAYS, only: v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & - uke, uke_dif -USE o_PARAM -USE g_CONFIG -USE g_comm_auto -USE g_support -USE uke_update_interface -IMPLICIT NONE - -real(kind=8) :: u1, v1, le(2), len, crosslen, vi, uke1 -integer :: nz, ed, el(2) -!!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) -real(kind=8) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) -real(kind=8) , allocatable :: uuu(:) -type(t_dyn) , intent(inout), target :: dynamics -type(t_partit), intent(inout), target :: partit -type(t_mesh) , intent(in) , target :: mesh -real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs -real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" -UV => dynamics%uv(:,:,:) -UV_rhs => dynamics%uv_rhs(:,:,:) -U_c => dynamics%work%u_c(:,:) -V_c => dynamics%work%v_c(:,:) - - ! An analog of harmonic viscosity operator. - ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area - ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. - ! The contribution from boundary edges is neglected (free slip). - ! Filter is applied twice. - -ed=myDim_elem2D+eDim_elem2D -allocate(U_c(nl-1,ed), V_c(nl-1, ed)) -allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) -allocate(uke_d(nl-1,ed)) -allocate(uuu(ed)) - - U_c=0.0_8 - V_c=0.0_8 - UV_back=0.0_8 - UV_dis=0.0_8 - uke_d=0.0_8 - - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - DO nz=1,minval(nlevels(el))-1 - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO - END DO - - - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - len=dt*len/30.0_8 - Do nz=1,nlevels(ed)-1 - ! vi has the sense of harmonic viscosity coefficient because of - ! the division by area in the end - ! ==== - ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) - ! ==== - vi=max(0.2_8,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - - - call exchange_elem(U_c, partit) - call exchange_elem(V_c, partit) - - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - le=edge_dxdy(:,ed) - le(1)=le(1)*sum(elem_cos(el))*0.25_8 - len=sqrt(le(1)**2+le(2)**2)*r_earth - le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) - le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) - crosslen=sqrt(le(1)**2+le(2)**2) - DO nz=1,minval(nlevels(el))-1 - vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen - !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed - !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed - !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where - ! Visc is small and decoupling might happen - !Backscatter contribution - u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi - v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi - - !UKE diffusion - vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen - - uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi - - - UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) - UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) - UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) - UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) - - !Correct scaling for the diffusion? - uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) - uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) - - - - !Biharmonic contribution - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - - UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) - UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) - UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) - UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) - - END DO - END DO - -call exchange_elem(UV_back, partit) - -DO nz=1, nl-1 - uuu=0.0_8 - uuu=UV_back(1,nz,:) - call smooth_elem(uuu,smooth_back_tend, partit, mesh) - UV_back(1,nz,:)=uuu - uuu=0.0_8 - uuu=UV_back(2,nz,:) - call smooth_elem(uuu,smooth_back_tend, partit, mesh) - UV_back(2,nz,:)=uuu -END DO - - DO ed=1, myDim_elem2D - DO nz=1,nlevels(ed)-1 - UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) - UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) - END DO - END DO - - UV_dis_tend=UV_dis!+UV_back - UV_total_tend=UV_dis+UV_back - UV_back_tend=UV_back - uke_dif=uke_d - - call uke_update(dynamics, partit, mesh) - deallocate(V_c,U_c) - deallocate(UV_dis,UV_back) - deallocate(uke_d) - deallocate(uuu) - -end subroutine visc_filt_dbcksc -! -! -!_______________________________________________________________________________ -SUBROUTINE backscatter_coef(partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -USE o_ARRAYS -USE o_PARAM -USE g_CONFIG -use g_comm_auto -IMPLICIT NONE -type(t_mesh), intent(in), target :: mesh -type(t_partit), intent(inout), target :: partit -integer :: elem, nz -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - -!Potentially add the Rossby number scaling to the script... -!check if sign is right! Different in the Jansen paper -!Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) - -v_back=0.0_8 -DO elem=1, myDim_elem2D - DO nz=1,nlevels(elem)-1 -!v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) -!v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct -v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct -!Scaling by sqrt(2*elem_area) or sqrt(elem_area)? - END DO -END DO - -call exchange_elem(v_back, partit) - -end subroutine backscatter_coef -! -! -!_______________________________________________________________________________ -SUBROUTINE uke_update(dynamics, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -use MOD_DYN -USE o_ARRAYS, only: uke_rhs, uke_dif, uke_back, uke_dis, uke, UV_dis_tend, uv_back_tend, uke_rhs_old, & - bvfreq, coriolis_node -USE o_PARAM -USE g_CONFIG -use g_comm_auto -USE g_support -USE g_rotate_grid -IMPLICIT NONE - -!I had to change uke(:) to uke(:,:) to make output and restart work!! - -!Why is it necessary to implement the length of the array? It doesn't work without! -!integer, intent(in) :: t_levels -type(t_dyn) , intent(inout), target :: dynamics -type(t_partit), intent(inout), target :: partit -type(t_mesh) , intent(in) , target :: mesh - -real(kind=8) :: hall, h1_eta, hnz, vol -integer :: elnodes(3), nz, ed, edi, node, j, elem, q -real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) -integer :: kk, nzmax, el -real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso -real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? -real(kind=WP), dimension(:,:,:), pointer :: UV -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" -UV => dynamics%uv(:,:,:) - -!rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered -!rossby_num=2 - -ed=myDim_elem2D+eDim_elem2D -allocate(uuu(ed)) - -uke_back=0.0_8 -uke_dis=0.0_8 -DO ed=1, myDim_elem2D -DO nz=1, nlevels(ed)-1 - uke_dis(nz,ed)=(UV(1,nz,ed)*UV_dis_tend(1,nz,ed)+UV(2,nz,ed)*UV_dis_tend(2,nz,ed)) - uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) -END DO -END DO - -DO nz=1,nl-1 - uuu=0.0_8 - uuu=uke_back(nz,:) - call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? - uke_back(nz,:)=uuu -END DO - - - -!Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice -!Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic -!uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) -ed=myDim_elem2D+eDim_elem2D -allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) -allocate(work_uv(myDim_nod2D+eDim_nod2D)) -allocate(rosb_array(nl-1,ed)) -call exchange_elem(UV, partit) -rosb_array=0._8 -DO nz=1, nl-1 - work_uv=0._WP - DO node=1, myDim_nod2D - vol=0._WP - U_work(nz,node)=0._WP - V_work(nz,node)=0._WP - DO j=1, nod_in_elem2D_num(node) - elem=nod_in_elem2D(j, node) - U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) - V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) - vol=vol+elem_area(elem) - END DO - U_work(nz,node)=U_work(nz,node)/vol - V_work(nz,node)=U_work(nz,node)/vol - END DO - work_uv=U_work(nz,:) - call exchange_nod(work_uv, partit) - U_work(nz,:)=work_uv - work_uv=V_work(nz,:) - call exchange_nod(work_uv, partit) - V_work(nz,:)=work_uv -END DO - - DO el=1,myDim_elem2D - DO nz=1, nlevels(el)-1 - rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& - sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& - (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& - sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) -! hall=hall+hnz - END DO -! rosb_array(el)=rosb_array(el)/hall - END DO -DO ed=1, myDim_elem2D - scaling=1._WP - IF(uke_scaling) then - reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) - rosb=0._wp - elnodes=elem2D_nodes(:, ed) - DO kk=1,3 - c1=0._wp - nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) - !Vertical average; same scaling in the vertical - DO nz=1, nzmax-1 - c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. - END DO - c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min - !Cutoff K_GM depending on (Resolution/Rossby radius) ratio - rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) - END DO - rosb=rosb/3._8 - scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) - END IF - - DO nz=1, nlevels(ed)-1 - elnodes=elem2D_nodes(:,ed) - - !Taking out that one place where it is always weird (Pacific Southern Ocean) - !Should not really be used later on, once we fix the issue with the 1/4 degree grid - if(.not. (TRIM(which_toy)=="soufflet")) then - call elem_center(ed, ex, ey) - !a1=-104.*rad - !a2=-49.*rad - call g2r(-104.*rad, -49.*rad, a1, a2) - dist_reg(1)=ex-a1 - dist_reg(2)=ey-a2 - call trim_cyclic(dist_reg(1)) - dist_reg(1)=dist_reg(1)*elem_cos(ed) - dist_reg=dist_reg*r_earth - len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) - - - !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 - rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) - !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) - uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) - else - rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) - !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) - uke_dis(nz,ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) - end if - - END DO -END DO -deallocate(U_work, V_work) -deallocate(rosb_array) -deallocate(work_uv) -call exchange_elem(uke_dis, partit) -DO nz=1, nl-1 - uuu=uke_dis(nz,:) - call smooth_elem(uuu,smooth_dis, partit, mesh) - uke_dis(nz,:)=uuu -END DO -DO ed=1, myDim_elem2D - DO nz=1,nlevels(ed)-1 - uke_rhs_old(nz,ed)=uke_rhs(nz,ed) - uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) - uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) - END DO -END DO -call exchange_elem(uke, partit) - -deallocate(uuu) -end subroutine uke_update +!!PS ! +!!PS ! +!!PS !_______________________________________________________________________________ +!!PS SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) +!!PS USE MOD_MESH +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS use MOD_DYN +!!PS USE o_ARRAYS, only: v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & +!!PS uke, uke_dif +!!PS USE o_PARAM +!!PS USE g_CONFIG +!!PS USE g_comm_auto +!!PS USE g_support +!!PS USE uke_update_interface +!!PS IMPLICIT NONE +!!PS +!!PS real(kind=8) :: u1, v1, le(2), len, crosslen, vi, uke1 +!!PS integer :: nz, ed, el(2) +!!PS !!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) +!!PS real(kind=8) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) +!!PS real(kind=8) , allocatable :: uuu(:) +!!PS type(t_dyn) , intent(inout), target :: dynamics +!!PS type(t_partit), intent(inout), target :: partit +!!PS type(t_mesh) , intent(in) , target :: mesh +!!PS real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs +!!PS real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c +!!PS #include "associate_part_def.h" +!!PS #include "associate_mesh_def.h" +!!PS #include "associate_part_ass.h" +!!PS #include "associate_mesh_ass.h" +!!PS UV => dynamics%uv(:,:,:) +!!PS UV_rhs => dynamics%uv_rhs(:,:,:) +!!PS U_c => dynamics%work%u_c(:,:) +!!PS V_c => dynamics%work%v_c(:,:) +!!PS +!!PS ! An analog of harmonic viscosity operator. +!!PS ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area +!!PS ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. +!!PS ! The contribution from boundary edges is neglected (free slip). +!!PS ! Filter is applied twice. +!!PS +!!PS ed=myDim_elem2D+eDim_elem2D +!!PS allocate(U_c(nl-1,ed), V_c(nl-1, ed)) +!!PS allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) +!!PS allocate(uke_d(nl-1,ed)) +!!PS allocate(uuu(ed)) +!!PS +!!PS U_c=0.0_8 +!!PS V_c=0.0_8 +!!PS UV_back=0.0_8 +!!PS UV_dis=0.0_8 +!!PS uke_d=0.0_8 +!!PS +!!PS DO ed=1, myDim_edge2D+eDim_edge2D +!!PS if(myList_edge2D(ed)>edge2D_in) cycle +!!PS el=edge_tri(:,ed) +!!PS DO nz=1,minval(nlevels(el))-1 +!!PS u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) +!!PS v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) +!!PS +!!PS U_c(nz,el(1))=U_c(nz,el(1))-u1 +!!PS U_c(nz,el(2))=U_c(nz,el(2))+u1 +!!PS V_c(nz,el(1))=V_c(nz,el(1))-v1 +!!PS V_c(nz,el(2))=V_c(nz,el(2))+v1 +!!PS END DO +!!PS END DO +!!PS +!!PS +!!PS Do ed=1,myDim_elem2D +!!PS len=sqrt(elem_area(ed)) +!!PS len=dt*len/30.0_8 +!!PS Do nz=1,nlevels(ed)-1 +!!PS ! vi has the sense of harmonic viscosity coefficient because of +!!PS ! the division by area in the end +!!PS ! ==== +!!PS ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) +!!PS ! ==== +!!PS vi=max(0.2_8,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len +!!PS U_c(nz,ed)=-U_c(nz,ed)*vi +!!PS V_c(nz,ed)=-V_c(nz,ed)*vi +!!PS END DO +!!PS end do +!!PS +!!PS +!!PS call exchange_elem(U_c, partit) +!!PS call exchange_elem(V_c, partit) +!!PS +!!PS DO ed=1, myDim_edge2D+eDim_edge2D +!!PS if(myList_edge2D(ed)>edge2D_in) cycle +!!PS el=edge_tri(:,ed) +!!PS le=edge_dxdy(:,ed) +!!PS le(1)=le(1)*sum(elem_cos(el))*0.25_8 +!!PS len=sqrt(le(1)**2+le(2)**2)*r_earth +!!PS le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) +!!PS le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) +!!PS crosslen=sqrt(le(1)**2+le(2)**2) +!!PS DO nz=1,minval(nlevels(el))-1 +!!PS vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen +!!PS !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed +!!PS !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed +!!PS !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where +!!PS ! Visc is small and decoupling might happen +!!PS !Backscatter contribution +!!PS u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi +!!PS v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi +!!PS +!!PS !UKE diffusion +!!PS vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen +!!PS +!!PS uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi +!!PS +!!PS +!!PS UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) +!!PS UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) +!!PS UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) +!!PS UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) +!!PS +!!PS !Correct scaling for the diffusion? +!!PS uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) +!!PS uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) +!!PS +!!PS +!!PS +!!PS !Biharmonic contribution +!!PS u1=(U_c(nz,el(1))-U_c(nz,el(2))) +!!PS v1=(V_c(nz,el(1))-V_c(nz,el(2))) +!!PS +!!PS UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) +!!PS UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) +!!PS UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) +!!PS UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) +!!PS +!!PS END DO +!!PS END DO +!!PS +!!PS call exchange_elem(UV_back, partit) +!!PS +!!PS DO nz=1, nl-1 +!!PS uuu=0.0_8 +!!PS uuu=UV_back(1,nz,:) +!!PS call smooth_elem(uuu,smooth_back_tend, partit, mesh) +!!PS UV_back(1,nz,:)=uuu +!!PS uuu=0.0_8 +!!PS uuu=UV_back(2,nz,:) +!!PS call smooth_elem(uuu,smooth_back_tend, partit, mesh) +!!PS UV_back(2,nz,:)=uuu +!!PS END DO +!!PS +!!PS DO ed=1, myDim_elem2D +!!PS DO nz=1,nlevels(ed)-1 +!!PS UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) +!!PS UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) +!!PS END DO +!!PS END DO +!!PS +!!PS UV_dis_tend=UV_dis!+UV_back +!!PS UV_total_tend=UV_dis+UV_back +!!PS UV_back_tend=UV_back +!!PS uke_dif=uke_d +!!PS +!!PS call uke_update(dynamics, partit, mesh) +!!PS deallocate(V_c,U_c) +!!PS deallocate(UV_dis,UV_back) +!!PS deallocate(uke_d) +!!PS deallocate(uuu) +!!PS +!!PS end subroutine visc_filt_dbcksc +!!PS ! +!!PS ! +!!PS !_______________________________________________________________________________ +!!PS SUBROUTINE backscatter_coef(partit, mesh) +!!PS USE MOD_MESH +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS USE o_ARRAYS +!!PS USE o_PARAM +!!PS USE g_CONFIG +!!PS use g_comm_auto +!!PS IMPLICIT NONE +!!PS type(t_mesh), intent(in), target :: mesh +!!PS type(t_partit), intent(inout), target :: partit +!!PS integer :: elem, nz +!!PS #include "associate_part_def.h" +!!PS #include "associate_mesh_def.h" +!!PS #include "associate_part_ass.h" +!!PS #include "associate_mesh_ass.h" +!!PS +!!PS !Potentially add the Rossby number scaling to the script... +!!PS !check if sign is right! Different in the Jansen paper +!!PS !Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) +!!PS +!!PS v_back=0.0_8 +!!PS DO elem=1, myDim_elem2D +!!PS DO nz=1,nlevels(elem)-1 +!!PS !v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) +!!PS !v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct +!!PS v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct +!!PS !Scaling by sqrt(2*elem_area) or sqrt(elem_area)? +!!PS END DO +!!PS END DO +!!PS +!!PS call exchange_elem(v_back, partit) +!!PS +!!PS end subroutine backscatter_coef +!!PS ! +!!PS ! +!!PS !_______________________________________________________________________________ +!!PS SUBROUTINE uke_update(dynamics, partit, mesh) +!!PS USE MOD_MESH +!!PS USE MOD_PARTIT +!!PS USE MOD_PARSUP +!!PS use MOD_DYN +!!PS USE o_ARRAYS, only: uke_rhs, uke_dif, uke_back, uke_dis, uke, UV_dis_tend, uv_back_tend, uke_rhs_old, & +!!PS bvfreq, coriolis_node +!!PS USE o_PARAM +!!PS USE g_CONFIG +!!PS use g_comm_auto +!!PS USE g_support +!!PS USE g_rotate_grid +!!PS IMPLICIT NONE +!!PS +!!PS !I had to change uke(:) to uke(:,:) to make output and restart work!! +!!PS +!!PS !Why is it necessary to implement the length of the array? It doesn't work without! +!!PS !integer, intent(in) :: t_levels +!!PS type(t_dyn) , intent(inout), target :: dynamics +!!PS type(t_partit), intent(inout), target :: partit +!!PS type(t_mesh) , intent(in) , target :: mesh +!!PS +!!PS real(kind=8) :: hall, h1_eta, hnz, vol +!!PS integer :: elnodes(3), nz, ed, edi, node, j, elem, q +!!PS real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) +!!PS integer :: kk, nzmax, el +!!PS real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso +!!PS real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? +!!PS real(kind=WP), dimension(:,:,:), pointer :: UV +!!PS #include "associate_part_def.h" +!!PS #include "associate_mesh_def.h" +!!PS #include "associate_part_ass.h" +!!PS #include "associate_mesh_ass.h" +!!PS UV => dynamics%uv(:,:,:) +!!PS +!!PS !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered +!!PS !rossby_num=2 +!!PS +!!PS ed=myDim_elem2D+eDim_elem2D +!!PS allocate(uuu(ed)) +!!PS +!!PS uke_back=0.0_8 +!!PS uke_dis=0.0_8 +!!PS DO ed=1, myDim_elem2D +!!PS DO nz=1, nlevels(ed)-1 +!!PS uke_dis(nz,ed)=(UV(1,nz,ed)*UV_dis_tend(1,nz,ed)+UV(2,nz,ed)*UV_dis_tend(2,nz,ed)) +!!PS uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) +!!PS END DO +!!PS END DO +!!PS +!!PS DO nz=1,nl-1 +!!PS uuu=0.0_8 +!!PS uuu=uke_back(nz,:) +!!PS call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? +!!PS uke_back(nz,:)=uuu +!!PS END DO +!!PS +!!PS +!!PS +!!PS !Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice +!!PS !Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic +!!PS !uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) +!!PS ed=myDim_elem2D+eDim_elem2D +!!PS allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) +!!PS allocate(work_uv(myDim_nod2D+eDim_nod2D)) +!!PS allocate(rosb_array(nl-1,ed)) +!!PS call exchange_elem(UV, partit) +!!PS rosb_array=0._8 +!!PS DO nz=1, nl-1 +!!PS work_uv=0._WP +!!PS DO node=1, myDim_nod2D +!!PS vol=0._WP +!!PS U_work(nz,node)=0._WP +!!PS V_work(nz,node)=0._WP +!!PS DO j=1, nod_in_elem2D_num(node) +!!PS elem=nod_in_elem2D(j, node) +!!PS U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) +!!PS V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) +!!PS vol=vol+elem_area(elem) +!!PS END DO +!!PS U_work(nz,node)=U_work(nz,node)/vol +!!PS V_work(nz,node)=U_work(nz,node)/vol +!!PS END DO +!!PS work_uv=U_work(nz,:) +!!PS call exchange_nod(work_uv, partit) +!!PS U_work(nz,:)=work_uv +!!PS work_uv=V_work(nz,:) +!!PS call exchange_nod(work_uv, partit) +!!PS V_work(nz,:)=work_uv +!!PS END DO +!!PS +!!PS DO el=1,myDim_elem2D +!!PS DO nz=1, nlevels(el)-1 +!!PS rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& +!!PS sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& +!!PS (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& +!!PS sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) +!!PS ! hall=hall+hnz +!!PS END DO +!!PS ! rosb_array(el)=rosb_array(el)/hall +!!PS END DO +!!PS DO ed=1, myDim_elem2D +!!PS scaling=1._WP +!!PS IF(uke_scaling) then +!!PS reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) +!!PS rosb=0._wp +!!PS elnodes=elem2D_nodes(:, ed) +!!PS DO kk=1,3 +!!PS c1=0._wp +!!PS nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) +!!PS !Vertical average; same scaling in the vertical +!!PS DO nz=1, nzmax-1 +!!PS c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. +!!PS END DO +!!PS c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min +!!PS !Cutoff K_GM depending on (Resolution/Rossby radius) ratio +!!PS rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) +!!PS END DO +!!PS rosb=rosb/3._8 +!!PS scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) +!!PS END IF +!!PS +!!PS DO nz=1, nlevels(ed)-1 +!!PS elnodes=elem2D_nodes(:,ed) +!!PS +!!PS !Taking out that one place where it is always weird (Pacific Southern Ocean) +!!PS !Should not really be used later on, once we fix the issue with the 1/4 degree grid +!!PS if(.not. (TRIM(which_toy)=="soufflet")) then +!!PS call elem_center(ed, ex, ey) +!!PS !a1=-104.*rad +!!PS !a2=-49.*rad +!!PS call g2r(-104.*rad, -49.*rad, a1, a2) +!!PS dist_reg(1)=ex-a1 +!!PS dist_reg(2)=ey-a2 +!!PS call trim_cyclic(dist_reg(1)) +!!PS dist_reg(1)=dist_reg(1)*elem_cos(ed) +!!PS dist_reg=dist_reg*r_earth +!!PS len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) +!!PS +!!PS +!!PS !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 +!!PS rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) +!!PS !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) +!!PS uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) +!!PS else +!!PS rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) +!!PS !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) +!!PS uke_dis(nz,ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) +!!PS end if +!!PS +!!PS END DO +!!PS END DO +!!PS deallocate(U_work, V_work) +!!PS deallocate(rosb_array) +!!PS deallocate(work_uv) +!!PS call exchange_elem(uke_dis, partit) +!!PS DO nz=1, nl-1 +!!PS uuu=uke_dis(nz,:) +!!PS call smooth_elem(uuu,smooth_dis, partit, mesh) +!!PS uke_dis(nz,:)=uuu +!!PS END DO +!!PS DO ed=1, myDim_elem2D +!!PS DO nz=1,nlevels(ed)-1 +!!PS uke_rhs_old(nz,ed)=uke_rhs(nz,ed) +!!PS uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) +!!PS uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) +!!PS END DO +!!PS END DO +!!PS call exchange_elem(uke, partit) +!!PS +!!PS deallocate(uuu) +!!PS end subroutine uke_update ! ! !_______________________________________________________________________________ diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 7d142e043..071627d84 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -86,6 +86,7 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) use g_cvmix_pp use g_cvmix_kpp use g_cvmix_tidal +use g_backscatter use Toy_Channel_Soufflet use oce_initial_state_interface use oce_adv_tra_fct_interfaces @@ -240,6 +241,11 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_thickness_ale'//achar(27)//'[0m' call init_thickness_ale(dynamics, partit, mesh) + !___________________________________________________________________________ + ! initialise arrays that are needed for backscatter_coef + if(dynamics%visc_opt==8) call init_backscatter(partit, mesh) + + !___________________________________________________________________________ if(partit%mype==0) write(*,*) 'Initial state' if (w_split .and. partit%mype==0) then @@ -577,27 +583,27 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) ! Backscatter arrays ! ================= -if(visc_option==8) then - -allocate(uke(nl-1,elem_size)) ! Unresolved kinetic energy for backscatter coefficient -allocate(v_back(nl-1,elem_size)) ! Backscatter viscosity -allocate(uke_dis(nl-1,elem_size), uke_back(nl-1,elem_size)) -allocate(uke_dif(nl-1,elem_size)) -allocate(uke_rhs(nl-1,elem_size), uke_rhs_old(nl-1,elem_size)) -allocate(UV_dis_tend(2,nl-1,elem_size), UV_back_tend(2,nl-1,elem_size)) -allocate(UV_total_tend(2,nl-1,elem_size)) - -uke=0.0_8 -v_back=0.0_8 -uke_dis=0.0_8 -uke_dif=0.0_8 -uke_back=0.0_8 -uke_rhs=0.0_8 -uke_rhs_old=0.0_8 -UV_dis_tend=0.0_8 -UV_back_tend=0.0_8 -UV_total_tend=0.0_8 -end if +!!PS if(visc_option==8) then +!!PS +!!PS allocate(uke(nl-1,elem_size)) ! Unresolved kinetic energy for backscatter coefficient +!!PS allocate(v_back(nl-1,elem_size)) ! Backscatter viscosity +!!PS allocate(uke_dis(nl-1,elem_size), uke_back(nl-1,elem_size)) +!!PS allocate(uke_dif(nl-1,elem_size)) +!!PS allocate(uke_rhs(nl-1,elem_size), uke_rhs_old(nl-1,elem_size)) +!!PS allocate(UV_dis_tend(2,nl-1,elem_size), UV_back_tend(2,nl-1,elem_size)) +!!PS allocate(UV_total_tend(2,nl-1,elem_size)) +!!PS +!!PS uke=0.0_8 +!!PS v_back=0.0_8 +!!PS uke_dis=0.0_8 +!!PS uke_dif=0.0_8 +!!PS uke_back=0.0_8 +!!PS uke_rhs=0.0_8 +!!PS uke_rhs_old=0.0_8 +!!PS UV_dis_tend=0.0_8 +!!PS UV_back_tend=0.0_8 +!!PS UV_total_tend=0.0_8 +!!PS end if !Velocities at nodes !!PS allocate(Unode(2,nl-1,node_size)) From 54d95bfe702fd3f70005333cfb3b97ca603f0d2a Mon Sep 17 00:00:00 2001 From: a270042 Date: Sun, 7 Nov 2021 16:56:42 +0100 Subject: [PATCH 499/909] set back flag_debug=.false. --- src/gen_modules_config.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index b9d3d1807..f265ea898 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -107,7 +107,7 @@ module g_config real(kind=WP) :: cavity_partial_cell_thresh=0.0_WP ! same as partial_cell_tresh but for surface logical :: toy_ocean=.false. ! Ersatz forcing has to be supplied character(100) :: which_toy="soufflet" - logical :: flag_debug=.true. ! prints name of actual subroutine he is in + logical :: flag_debug=.false. ! prints name of actual subroutine he is in logical :: flag_warn_cflz=.true. ! switches off cflz warning namelist /run_config/ use_ice,use_floatice, use_sw_pene, use_cavity, & use_cavity_partial_cell, cavity_partial_cell_thresh, toy_ocean, which_toy, flag_debug, flag_warn_cflz From e6c8372b4fc62fc23df23124a2f09c8055d8e7f5 Mon Sep 17 00:00:00 2001 From: a270042 Date: Mon, 8 Nov 2021 16:11:38 +0100 Subject: [PATCH 500/909] add new namelist for dynamics derived type --> namelist.dyn. exchange namelist and derived type parameters throughout the code. Since new namelist is introduced github testcase will fail for the moment --- config/namelist.dyn | 27 ++ src/MOD_DYN.F90 | 32 +- src/gen_modules_diag.F90 | 128 +++++++- src/io_meandata.F90 | 34 ++- src/io_restart.F90 | 2 +- src/oce_adv_tra_driver.F90 | 16 +- src/oce_ale.F90 | 18 +- src/oce_ale_tracer.F90 | 14 +- src/oce_ale_vel_rhs.F90 | 4 +- src/oce_dyn.F90 | 583 ++----------------------------------- src/oce_modules.F90 | 25 +- src/oce_setup_step.F90 | 121 +++----- 12 files changed, 288 insertions(+), 716 deletions(-) create mode 100644 config/namelist.dyn diff --git a/config/namelist.dyn b/config/namelist.dyn new file mode 100644 index 000000000..c729acea1 --- /dev/null +++ b/config/namelist.dyn @@ -0,0 +1,27 @@ +&dynamics_visc +visc_gamma0 = 0.003 ! [m/s], backgroung viscosity= gamma0*len, it should be as small a s possible (keep it < 0.01 m/s). +visc_gamma1 = 0.1 ! [nodim], for computation of the flow aware viscosity +visc_gamma2 = 0.285 ! [s/m], is only used in easy backscatter option +visc_easybsreturn= 1.5 + +opt_visc = 5 +! 5=Kinematic (easy) Backscatter +! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) +! 7=Biharmonic flow aware (viscosity depends on velocity differences) +! 8=Dynamic Backscatter + +use_ivertvisc= .true. +/ + +&dynamics_general +momadv_opt = 2 ! option for momentum advection in moment only =2 +use_freeslip = .false. ! Switch on free slip +use_wsplit = .false. ! Switch for implicite/explicte splitting of vert. velocity +wsplit_maxcfl= 1.0 ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) + ! in older FESOM it used to be w_exp_max=1.e-3 +/ + +!&dynamics_phys +!A_ver = 1.e-4 ! Vertical viscosity, m^2/s +!scale_area = 5.8e9 ! Visc. and diffus. are for an element with scale_area +!/ \ No newline at end of file diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index a3b570afc..77438f64b 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -67,24 +67,24 @@ MODULE MOD_DYN type(t_dyn_work) :: work !___________________________________________________________________________ - ! visc_option=... + ! opt_visc=... ! 5=Kinematic (easy) Backscatter ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) ! 7=Biharmonic flow aware (viscosity depends on velocity differences) ! 8=Dynamic Backscatter - integer :: visc_opt = 5 + integer :: opt_visc = 5 ! gamma0 [m/s], backgroung viscosity= gamma0*len, it should be as small ! as possible (keep it < 0.01 m/s). ! gamma1 [nodim], for computation of the flow aware viscosity ! gamma2 [s/m], is only used in easy backscatter option - real(kind=WP) :: gamma0_visc = 0.03 - real(kind=WP) :: gamma1_visc = 0.1 - real(kind=WP) :: gamma2_visc = 0.285 + real(kind=WP) :: visc_gamma0 = 0.03 + real(kind=WP) :: visc_gamma1 = 0.1 + real(kind=WP) :: visc_gamma2 = 0.285 - ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 + ! coefficient for returned sub-gridscale energy, to be used with opt_visc=5 ! (easy backscatter) - real(kind=WP) :: easy_bs_return= 1.5 + real(kind=WP) :: visc_easybsreturn = 1.5 logical :: use_ivertvisc = .true. integer :: momadv_opt = 2 @@ -207,10 +207,11 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) dynamics%solverinfo !___________________________________________________________________________ - write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt - write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc - write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc - write(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%opt_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma0 + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma1 + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma2 + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_easybsreturn !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc @@ -250,10 +251,11 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) dynamics%work !___________________________________________________________________________ - read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_opt - read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma0_visc - read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma1_visc - read(unit, iostat=iostat, iomsg=iomsg) dynamics%gamma2_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%opt_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma0 + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma1 + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma2 + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_easybsreturn !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index ddb845731..036afca1d 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -17,11 +17,15 @@ module diagnostics implicit none private - public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, & - compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & - u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, v_surf, u_bott, v_bott, & - std_dens_min, std_dens_max, std_dens_N, std_dens, std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, dens_flux_e, & - compute_diag_dvd_2ndmoment_klingbeil_etal_2014, compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd + public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, & + ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, ldiag_vorticity, & + compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & + u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, & + utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, & + v_surf, u_bott, v_bott, std_dens_min, std_dens_max, std_dens_N, std_dens, & + std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, & + dens_flux_e, vorticity, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & + compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd ! Arrays used for diagnostics, some shall be accessible to the I/O ! 1. solver diagnostics: A*x=rhs? ! A=ssh_stiff, x=d_eta, rhs=ssh_rhs; rhs_diag=A*x; @@ -33,6 +37,7 @@ module diagnostics real(kind=WP), save, allocatable, target :: dudx(:,:), dudy(:,:), dvdx(:,:), dvdy(:,:), dudz(:,:), dvdz(:,:), av_dudz(:,:), av_dvdz(:,:), av_dudz_sq(:,:) real(kind=WP), save, allocatable, target :: utau_surf(:), utau_bott(:) real(kind=WP), save, allocatable, target :: stress_bott(:,:), u_bott(:), v_bott(:), u_surf(:), v_surf(:) + real(kind=WP), save, allocatable, target :: vorticity(:,:) ! defining a set of standard density bins which will be used for computing densMOC ! integer, parameter :: std_dens_N = 100 @@ -69,8 +74,10 @@ module diagnostics logical :: ldiag_forc =.false. + logical :: ldiag_vorticity =.false. + namelist /diag_list/ ldiag_solver, lcurt_stress_surf, ldiag_curl_vel3, ldiag_energy, & - ldiag_dMOC, ldiag_DVD, ldiag_salt3D, ldiag_forc + ldiag_dMOC, ldiag_DVD, ldiag_salt3D, ldiag_forc, ldiag_vorticity contains @@ -670,8 +677,112 @@ subroutine diag_densMOC(mode, dynamics, tracers, partit, mesh) std_dens_VOL1=std_dens_VOL2 firstcall_e=.false. end subroutine diag_densMOC -! ============================================================== +! +! +!_______________________________________________________________________________ +subroutine relative_vorticity(mode, dynamics, partit, mesh) + IMPLICIT NONE + integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 + integer, intent(in) :: mode + logical, save :: firstcall=.true. + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + + !___________________________________________________________________________ + if (firstcall) then !allocate the stuff at the first call + allocate(vorticity(nl-1, myDim_nod2D+eDim_nod2D)) + firstcall=.false. + if (mode==0) return + end if + !!PS DO n=1,myDim_nod2D + !!PS nl1 = nlevels_nod2D(n)-1 + !!PS ul1 = ulevels_nod2D(n) + !!PS vorticity(ul1:nl1,n)=0.0_WP + !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 + !!PS !!PS vorticity(nz,n)=0.0_WP + !!PS !!PS END DO + !!PS END DO + vorticity = 0.0_WP + DO edge=1,myDim_edge2D + !! edge=myList_edge2D(m) + enodes=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + ul1=ulevels(el(1)) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + nl2=0 + ul2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + nl2=nlevels(el(2))-1 + ul2=ulevels(el(2)) + end if + nl12 = min(nl1,nl2) + ul12 = max(ul1,ul2) + + DO nz=ul1,ul12-1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + if (ul2>0) then + DO nz=ul2,ul12-1 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + endif + !!PS DO nz=1,min(nl1,nl2) + DO nz=ul12,nl12 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & + deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl1 + DO nz=nl12+1,nl1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl2 + DO nz=nl12+1,nl2 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + END DO + + ! vorticity = vorticity*area at this stage + ! It is correct only on myDim nodes + DO n=1,myDim_nod2D + !! n=myList_nod2D(m) + ul1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !!PS DO nz=1,nlevels_nod2D(n)-1 + DO nz=ul1,nl1-1 + vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) + END DO + END DO + + call exchange_nod(vorticity, partit) + +! Now it the relative vorticity known on neighbors too +end subroutine relative_vorticity + + +! ============================================================== subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) implicit none type(t_mesh) , intent(in) , target :: mesh @@ -698,6 +809,9 @@ subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) end if !6. MOC in density coordinate if (ldiag_dMOC) call diag_densMOC(mode, dynamics, tracers, partit, mesh) + + ! compute relative vorticity + if (ldiag_vorticity) call relative_vorticity(mode, dynamics, partit, mesh) end subroutine compute_diagnostics diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 0b1e3c7ee..f32b52e43 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -325,27 +325,27 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) CASE ('Av ') call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u_dis_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_dis_tend', 'horizontal velocity viscosity tendency', 'm/s', UV_dis_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_dis_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_dis_tend', 'meridional velocity viscosity tendency', 'm/s', UV_dis_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('u_back_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_back_tend', 'horizontal velocity backscatter tendency', 'm2/s2', UV_back_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_back_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_back_tend', 'meridional velocity backscatter tendency', 'm2/s2', UV_back_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('u_total_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_total_tend', 'horizontal velocity total viscosity tendency', 'm/s', UV_total_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_total_tend') - if(visc_option==8) then + if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_total_tend', 'meridional velocity total viscosity tendency', 'm/s', UV_total_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ @@ -582,11 +582,12 @@ function mesh_dimname_from_dimsize(size, partit, mesh) result(name) ! !-------------------------------------------------------------------------------------------- ! -subroutine create_new_file(entry, partit, mesh) +subroutine create_new_file(entry, dynamics, partit, mesh) use g_clock use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use fesom_version_info_module use g_config use i_PARAM @@ -596,6 +597,7 @@ subroutine create_new_file(entry, partit, mesh) character(2000) :: att_text type(t_mesh) , intent(in) :: mesh type(t_partit), intent(in) :: partit + type(t_dyn) , intent(in) :: dynamics type(Meandata), intent(inout) :: entry character(len=*), parameter :: global_attributes_prefix = "FESOM_" @@ -665,15 +667,15 @@ subroutine create_new_file(entry, partit, mesh) ! call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_lim', len_trim(tra_adv_lim), trim(tra_adv_lim)), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell', NF_INT, 1, use_partial_cell), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'force_rotation', NF_INT, 1, force_rotation), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell' , NF_INT, 1, use_partial_cell), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'force_rotation' , NF_INT, 1, force_rotation), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'include_fleapyear', NF_INT, 1, include_fleapyear), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice', NF_INT, 1, use_floatice), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP', NF_INT, 1, whichEVP), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps', NF_INT, 1, evp_rheol_steps), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'visc_option', NF_INT, 1, visc_option), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'w_split', NF_INT, 1, w_split), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell', NF_INT, 1, use_partial_cell), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice' , NF_INT, 1, use_floatice), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP' , NF_INT, 1, whichEVP), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps' , NF_INT, 1, evp_rheol_steps), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'opt_visc' , NF_INT, 1, dynamics%opt_visc), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_wsplit' , NF_INT, 1, dynamics%use_wsplit), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell' , NF_INT, 1, use_partial_cell), __LINE__) @@ -881,7 +883,7 @@ subroutine output(istep, dynamics, tracers, partit, mesh) entry%filename = filepath ! use any existing file with this name or create a new one if( nf_open(entry%filename, nf_write, entry%ncid) /= nf_noerr ) then - call create_new_file(entry, partit, mesh) + call create_new_file(entry, dynamics, partit, mesh) call assert_nf( nf_open(entry%filename, nf_write, entry%ncid), __LINE__) end if call assoc_ids(entry) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index e5a3f6b5b..c5112b7f2 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -134,7 +134,7 @@ subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) if (trim(mix_scheme)=='cvmix_IDEMIX' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then call def_variable(oid, 'iwe', (/nl, nod2d/), 'Internal Wave eneryy', 'm2/s2', tke(:,:)); endif - if (visc_option==8) then + if (dynamics%opt_visc==8) then call def_variable(oid, 'uke', (/nl-1, elem2D/), 'unresolved kinetic energy', 'm2/s2', uke(:,:)); call def_variable(oid, 'uke_rhs', (/nl-1, elem2D/), 'unresolved kinetic energy rhs', 'm2/s2', uke_rhs(:,:)); endif diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 54d9603c7..916133a3d 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -1,15 +1,17 @@ module oce_adv_tra_driver_interfaces interface - subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) + subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) @@ -41,11 +43,12 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, ! ! !=============================================================================== -subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) +subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN use g_comm_auto use oce_adv_tra_hor_interfaces use oce_adv_tra_ver_interfaces @@ -54,9 +57,10 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) implicit none real(kind=WP), intent(in), target :: dt integer, intent(in) :: tr_num - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) @@ -143,8 +147,8 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n) end do end do - if (w_split) then !wvel/=wvel_e - ! update for implicit contribution (w_split option) + if (dynamics%use_wsplit) then !wvel/=wvel_e + ! update for implicit contribution (use_wsplit option) call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 561d5c2b2..8652312f5 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2396,19 +2396,19 @@ subroutine vert_vel_ale(dynamics, partit, mesh) !___________________________________________________________________________ ! Split implicit vertical velocity onto implicit and explicit components using CFL criteria: - ! w_max_cfl constrains the allowed explicit w according to the CFL at this place - ! w_max_cfl=1 means w_exp is cut at at the maximum of allowed CFL - ! w_max_cfl=0 means w_exp is zero (everything computed implicitly) - ! w_max_cfl=inf menas w_impl is zero (everything computed explicitly) - ! a guess for optimal choice of w_max_cfl would be 0.95 + ! wsplit_maxcfl constrains the allowed explicit w according to the CFL at this place + ! wsplit_maxcfl=1 means w_exp is cut at at the maximum of allowed CFL + ! wsplit_maxcfl=0 means w_exp is zero (everything computed implicitly) + ! wsplit_maxcfl=inf menas w_impl is zero (everything computed explicitly) + ! a guess for optimal choice of wsplit_maxcfl would be 0.95 do n=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) do nz=nzmin,nzmax c1=1.0_WP c2=0.0_WP - if (w_split .and. (CFL_z(nz, n) > w_max_cfl)) then - dd=max((CFL_z(nz, n)-w_max_cfl), 0.0_WP)/max(w_max_cfl, 1.e-12) + if (dynamics%use_wsplit .and. (CFL_z(nz, n) > dynamics%wsplit_maxcfl)) then + dd=max((CFL_z(nz, n)-dynamics%wsplit_maxcfl), 0.0_WP)/max(dynamics%wsplit_maxcfl, 1.e-12) c1=1.0_WP/(1.0_WP+dd) !explicit part =1. if dd=0. c2=dd /(1.0_WP+dd) !implicit part =1. if dd=inf end if @@ -2866,11 +2866,11 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call viscosity_filter'//achar(27)//'[0m' - call viscosity_filter(visc_option, dynamics, partit, mesh) + call viscosity_filter(dynamics%opt_visc, dynamics, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call impl_vert_visc_ale'//achar(27)//'[0m' - if(i_vert_visc) call impl_vert_visc_ale(dynamics,partit, mesh) + if(dynamics%use_ivertvisc) call impl_vert_visc_ale(dynamics,partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 78a8e5e94..6a941cf91 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -279,7 +279,7 @@ subroutine adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... tracers%work%del_ttf_advhoriz = 0.0_WP tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(dt, dynamics%uv, dynamics%w, dynamics%w_i, dynamics%w_e, tr_num, tracers, partit, mesh) + call do_oce_adv_tra(dt, dynamics%uv, dynamics%w, dynamics%w_i, dynamics%w_e, tr_num, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -504,7 +504,7 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) trarr => tracers%data(tr_num)%values(:,:) Wvel_i => dynamics%w_i(:,:) !___________________________________________________________________________ - if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. + if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. dynamics%use_wsplit)) do_wimpl=.false. if (Redi) isredi=1._WP dt_inv=1.0_WP/dt @@ -1221,7 +1221,10 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 tt=ttf(nz,en(1))-ttf(nz,en(2)) - vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) + vi=sqrt(max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(vi), & + dynamics%visc_gamma2*vi) & + )*len) !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 10m^2/s for 10 km (0.04 h/50) !vi=sqrt(10.*le) tt=tt*vi @@ -1245,7 +1248,10 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 tt=temporary_ttf(nz,en(1))-temporary_ttf(nz,en(2)) - vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) + vi=sqrt(max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(vi), & + dynamics%visc_gamma2*vi) & + )*len) !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 10m^2/s for 10 km (0.04 h/50) !vi=sqrt(10.*le) tt=-tt*vi*dt diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index bee2c0b7d..ea77166ed 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -146,10 +146,10 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ ! advection - if (mom_adv==1) then + if (dynamics%momadv_opt==1) then if (mype==0) write(*,*) 'in moment not adapted mom_adv advection typ for ALE, check your namelist' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) - elseif (mom_adv==2) then + elseif (dynamics%momadv_opt==2) then call momentum_adv_scalar(dynamics, partit, mesh) end if t3=MPI_Wtime() diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index cd78ac3c9..5b80ccf7a 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -1,13 +1,13 @@ ! A set of routines for computing the horizonlal viscosity ! the control parameters (their default values) are: -! gamma0 (0.01 [m/s]), gamma1 (0.1 [no dim.]), gamma2 (10.[s/m]), Div_c [1.], Leith_c[1.?] -! 1. gamma0 has the dimension of velocity. It should be as small as possible, but in any case smaller than 0.01 m/s. +! dynamics%visc_gamma0 (0.01 [m/s]), dynamics%visc_gamma1 (0.1 [no dim.]), dynamics%visc_gamma2 (10.[s/m]), Div_c [1.], Leith_c[1.?] +! 1. dynamics%visc_gamma0 has the dimension of velocity. It should be as small as possible, but in any case smaller than 0.01 m/s. ! All major ocean circulation models are stable with harmonic viscosity 0.01*len. -! 2. gamma1 is nondimensional. In commonly used Leith or Smagorinsky parameterizations it is C/pi^2=0.1 (C is about 1). +! 2. dynamics%visc_gamma1 is nondimensional. In commonly used Leith or Smagorinsky parameterizations it is C/pi^2=0.1 (C is about 1). ! We therefore try to follow this, allowing some adjustments (because our mesh is triangular, our resolution is different, etc.). -! We however, try to keep gamma1<0.1 -! 3. gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. It is only used in: +! We however, try to keep dynamics%visc_gamma1<0.1 +! 3. dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. It is only used in: ! (5) visc_filt_bcksct, (6) visc_filt_bilapl, (7) visc_filt_bidiff ! 4. Div_c =1. should be default ! 5. Leith_c=? (need to be adjusted) @@ -25,6 +25,7 @@ subroutine visc_filt_bcksct(dynamics, partit, mesh) end subroutine end interface end module + module visc_filt_bilapl_interface interface subroutine visc_filt_bilapl(dynamics, partit, mesh) @@ -39,6 +40,7 @@ subroutine visc_filt_bilapl(dynamics, partit, mesh) end subroutine end interface end module + module visc_filt_bidiff_interface interface subroutine visc_filt_bidiff(dynamics, partit, mesh) @@ -53,63 +55,6 @@ subroutine visc_filt_bidiff(dynamics, partit, mesh) end subroutine end interface end module -!!PS module visc_filt_dbcksc_interface -!!PS interface -!!PS subroutine visc_filt_dbcksc(dynamics, partit, mesh) -!!PS use mod_mesh -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS USE MOD_DYN -!!PS type(t_dyn) , intent(inout), target :: dynamics -!!PS type(t_partit), intent(inout), target :: partit -!!PS type(t_mesh) , intent(in) , target :: mesh -!!PS -!!PS end subroutine -!!PS end interface -!!PS end module -!!PS module backscatter_coef_interface -!!PS interface -!!PS subroutine backscatter_coef(dynamics, partit, mesh) -!!PS use mod_mesh -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS USE MOD_DYN -!!PS type(t_dyn) , intent(inout), target :: dynamics -!!PS type(t_partit), intent(inout), target :: partit -!!PS type(t_mesh) , intent(in) , target :: mesh -!!PS -!!PS end subroutine -!!PS end interface -!!PS end module -!!PS module uke_update_interface -!!PS interface -!!PS subroutine uke_update(dynamics, partit, mesh) -!!PS use mod_mesh -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS USE MOD_DYN -!!PS type(t_dyn) , intent(inout), target :: dynamics -!!PS type(t_partit), intent(inout), target :: partit -!!PS type(t_mesh) , intent(in) , target :: mesh -!!PS -!!PS end subroutine -!!PS end interface -!!PS end module - -module relative_vorticity_interface - interface - subroutine relative_vorticity(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface -end module ! ! Contains routines needed for computations of dynamics. @@ -311,9 +256,12 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) DO nz=nzmin,nzmax-1 u1=UV(1,nz,el(1))-UV(1,nz,el(2)) v1=UV(2,nz,el(1))-UV(2,nz,el(2)) - vi=dt*max(gamma0, max(gamma1*sqrt(u1*u1+v1*v1), gamma2*(u1*u1+v1*v1)))*len -! vi=dt*max(gamma0, gamma1*max(sqrt(u1*u1+v1*v1), gamma2*(u1*u1+v1*v1)))*len - !here gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. + vi=dt*max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(u1*u1+v1*v1), & + dynamics%visc_gamma2*(u1*u1+v1*v1)) & + )*len +! vi=dt*max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(u1*u1+v1*v1), dynamics%visc_gamma2*(u1*u1+v1*v1)))*len + !here dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. u1=u1*vi v1=v1*vi U_b(nz,el(1))=U_b(nz,el(1))-u1/elem_area(el(1)) @@ -353,8 +301,8 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) nzmax = nlevels(ed) !!PS Do nz=1, nlevels(ed)-1 Do nz=nzmin, nzmax-1 - UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+U_b(nz,ed) -easy_bs_return*sum(U_c(nz,nelem))/3.0_WP - UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -easy_bs_return*sum(V_c(nz,nelem))/3.0_WP + UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+U_b(nz,ed) -dynamics%visc_easybsreturn*sum(U_c(nz,nelem))/3.0_WP + UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -dynamics%visc_easybsreturn*sum(V_c(nz,nelem))/3.0_WP END DO end do end subroutine visc_filt_bcksct @@ -423,7 +371,10 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) ! vi has the sense of harmonic viscosity coef. because of ! division by area in the end u1=U_c(nz,ed)**2+V_c(nz,ed)**2 - vi=max(gamma0, max(gamma1*sqrt(u1), gamma2*u1))*len*dt + vi=max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(u1), & + dynamics%visc_gamma2*u1) & + )*len*dt U_c(nz,ed)=-U_c(nz,ed)*vi V_c(nz,ed)=-V_c(nz,ed)*vi END DO @@ -499,8 +450,11 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) vi=u1*u1+v1*v1 - vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) - ! vi=sqrt(max(gamma0, gamma1*max(sqrt(vi), gamma2*vi))*len) + vi=sqrt(max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(vi), & + dynamics%visc_gamma2*vi) & + )*len) + ! vi=sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) u1=u1*vi v1=v1*vi U_c(nz,el(1))=U_c(nz,el(1))-u1 @@ -523,8 +477,11 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) vi=u1*u1+v1*v1 - vi=-dt*sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) - ! vi=-dt*sqrt(max(gamma0, gamma1*max(sqrt(vi), gamma2*vi))*len) + vi=-dt*sqrt(max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(vi), & + dynamics%visc_gamma2*vi) & + )*len) + ! vi=-dt*sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) u1=vi*(U_c(nz,el(1))-U_c(nz,el(2))) v1=vi*(V_c(nz,el(1))-V_c(nz,el(2))) UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) @@ -534,487 +491,5 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) END DO END DO deallocate(V_c, U_c) - end subroutine visc_filt_bidiff -!!PS ! -!!PS ! -!!PS !_______________________________________________________________________________ -!!PS SUBROUTINE visc_filt_dbcksc(dynamics, partit, mesh) -!!PS USE MOD_MESH -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS use MOD_DYN -!!PS USE o_ARRAYS, only: v_back, UV_dis_tend, UV_total_tend, UV_back_tend, & -!!PS uke, uke_dif -!!PS USE o_PARAM -!!PS USE g_CONFIG -!!PS USE g_comm_auto -!!PS USE g_support -!!PS USE uke_update_interface -!!PS IMPLICIT NONE -!!PS -!!PS real(kind=8) :: u1, v1, le(2), len, crosslen, vi, uke1 -!!PS integer :: nz, ed, el(2) -!!PS !!PS real(kind=8), allocatable :: U_c(:,:), V_c(:,:) -!!PS real(kind=8) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) -!!PS real(kind=8) , allocatable :: uuu(:) -!!PS type(t_dyn) , intent(inout), target :: dynamics -!!PS type(t_partit), intent(inout), target :: partit -!!PS type(t_mesh) , intent(in) , target :: mesh -!!PS real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs -!!PS real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c -!!PS #include "associate_part_def.h" -!!PS #include "associate_mesh_def.h" -!!PS #include "associate_part_ass.h" -!!PS #include "associate_mesh_ass.h" -!!PS UV => dynamics%uv(:,:,:) -!!PS UV_rhs => dynamics%uv_rhs(:,:,:) -!!PS U_c => dynamics%work%u_c(:,:) -!!PS V_c => dynamics%work%v_c(:,:) -!!PS -!!PS ! An analog of harmonic viscosity operator. -!!PS ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area -!!PS ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. -!!PS ! The contribution from boundary edges is neglected (free slip). -!!PS ! Filter is applied twice. -!!PS -!!PS ed=myDim_elem2D+eDim_elem2D -!!PS allocate(U_c(nl-1,ed), V_c(nl-1, ed)) -!!PS allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) -!!PS allocate(uke_d(nl-1,ed)) -!!PS allocate(uuu(ed)) -!!PS -!!PS U_c=0.0_8 -!!PS V_c=0.0_8 -!!PS UV_back=0.0_8 -!!PS UV_dis=0.0_8 -!!PS uke_d=0.0_8 -!!PS -!!PS DO ed=1, myDim_edge2D+eDim_edge2D -!!PS if(myList_edge2D(ed)>edge2D_in) cycle -!!PS el=edge_tri(:,ed) -!!PS DO nz=1,minval(nlevels(el))-1 -!!PS u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) -!!PS v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) -!!PS -!!PS U_c(nz,el(1))=U_c(nz,el(1))-u1 -!!PS U_c(nz,el(2))=U_c(nz,el(2))+u1 -!!PS V_c(nz,el(1))=V_c(nz,el(1))-v1 -!!PS V_c(nz,el(2))=V_c(nz,el(2))+v1 -!!PS END DO -!!PS END DO -!!PS -!!PS -!!PS Do ed=1,myDim_elem2D -!!PS len=sqrt(elem_area(ed)) -!!PS len=dt*len/30.0_8 -!!PS Do nz=1,nlevels(ed)-1 -!!PS ! vi has the sense of harmonic viscosity coefficient because of -!!PS ! the division by area in the end -!!PS ! ==== -!!PS ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) -!!PS ! ==== -!!PS vi=max(0.2_8,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len -!!PS U_c(nz,ed)=-U_c(nz,ed)*vi -!!PS V_c(nz,ed)=-V_c(nz,ed)*vi -!!PS END DO -!!PS end do -!!PS -!!PS -!!PS call exchange_elem(U_c, partit) -!!PS call exchange_elem(V_c, partit) -!!PS -!!PS DO ed=1, myDim_edge2D+eDim_edge2D -!!PS if(myList_edge2D(ed)>edge2D_in) cycle -!!PS el=edge_tri(:,ed) -!!PS le=edge_dxdy(:,ed) -!!PS le(1)=le(1)*sum(elem_cos(el))*0.25_8 -!!PS len=sqrt(le(1)**2+le(2)**2)*r_earth -!!PS le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) -!!PS le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) -!!PS crosslen=sqrt(le(1)**2+le(2)**2) -!!PS DO nz=1,minval(nlevels(el))-1 -!!PS vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen -!!PS !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed -!!PS !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed -!!PS !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where -!!PS ! Visc is small and decoupling might happen -!!PS !Backscatter contribution -!!PS u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi -!!PS v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi -!!PS -!!PS !UKE diffusion -!!PS vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen -!!PS -!!PS uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi -!!PS -!!PS -!!PS UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) -!!PS UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) -!!PS UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) -!!PS UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) -!!PS -!!PS !Correct scaling for the diffusion? -!!PS uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) -!!PS uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) -!!PS -!!PS -!!PS -!!PS !Biharmonic contribution -!!PS u1=(U_c(nz,el(1))-U_c(nz,el(2))) -!!PS v1=(V_c(nz,el(1))-V_c(nz,el(2))) -!!PS -!!PS UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) -!!PS UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) -!!PS UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) -!!PS UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) -!!PS -!!PS END DO -!!PS END DO -!!PS -!!PS call exchange_elem(UV_back, partit) -!!PS -!!PS DO nz=1, nl-1 -!!PS uuu=0.0_8 -!!PS uuu=UV_back(1,nz,:) -!!PS call smooth_elem(uuu,smooth_back_tend, partit, mesh) -!!PS UV_back(1,nz,:)=uuu -!!PS uuu=0.0_8 -!!PS uuu=UV_back(2,nz,:) -!!PS call smooth_elem(uuu,smooth_back_tend, partit, mesh) -!!PS UV_back(2,nz,:)=uuu -!!PS END DO -!!PS -!!PS DO ed=1, myDim_elem2D -!!PS DO nz=1,nlevels(ed)-1 -!!PS UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) -!!PS UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) -!!PS END DO -!!PS END DO -!!PS -!!PS UV_dis_tend=UV_dis!+UV_back -!!PS UV_total_tend=UV_dis+UV_back -!!PS UV_back_tend=UV_back -!!PS uke_dif=uke_d -!!PS -!!PS call uke_update(dynamics, partit, mesh) -!!PS deallocate(V_c,U_c) -!!PS deallocate(UV_dis,UV_back) -!!PS deallocate(uke_d) -!!PS deallocate(uuu) -!!PS -!!PS end subroutine visc_filt_dbcksc -!!PS ! -!!PS ! -!!PS !_______________________________________________________________________________ -!!PS SUBROUTINE backscatter_coef(partit, mesh) -!!PS USE MOD_MESH -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS USE o_ARRAYS -!!PS USE o_PARAM -!!PS USE g_CONFIG -!!PS use g_comm_auto -!!PS IMPLICIT NONE -!!PS type(t_mesh), intent(in), target :: mesh -!!PS type(t_partit), intent(inout), target :: partit -!!PS integer :: elem, nz -!!PS #include "associate_part_def.h" -!!PS #include "associate_mesh_def.h" -!!PS #include "associate_part_ass.h" -!!PS #include "associate_mesh_ass.h" -!!PS -!!PS !Potentially add the Rossby number scaling to the script... -!!PS !check if sign is right! Different in the Jansen paper -!!PS !Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) -!!PS -!!PS v_back=0.0_8 -!!PS DO elem=1, myDim_elem2D -!!PS DO nz=1,nlevels(elem)-1 -!!PS !v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) -!!PS !v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct -!!PS v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct -!!PS !Scaling by sqrt(2*elem_area) or sqrt(elem_area)? -!!PS END DO -!!PS END DO -!!PS -!!PS call exchange_elem(v_back, partit) -!!PS -!!PS end subroutine backscatter_coef -!!PS ! -!!PS ! -!!PS !_______________________________________________________________________________ -!!PS SUBROUTINE uke_update(dynamics, partit, mesh) -!!PS USE MOD_MESH -!!PS USE MOD_PARTIT -!!PS USE MOD_PARSUP -!!PS use MOD_DYN -!!PS USE o_ARRAYS, only: uke_rhs, uke_dif, uke_back, uke_dis, uke, UV_dis_tend, uv_back_tend, uke_rhs_old, & -!!PS bvfreq, coriolis_node -!!PS USE o_PARAM -!!PS USE g_CONFIG -!!PS use g_comm_auto -!!PS USE g_support -!!PS USE g_rotate_grid -!!PS IMPLICIT NONE -!!PS -!!PS !I had to change uke(:) to uke(:,:) to make output and restart work!! -!!PS -!!PS !Why is it necessary to implement the length of the array? It doesn't work without! -!!PS !integer, intent(in) :: t_levels -!!PS type(t_dyn) , intent(inout), target :: dynamics -!!PS type(t_partit), intent(inout), target :: partit -!!PS type(t_mesh) , intent(in) , target :: mesh -!!PS -!!PS real(kind=8) :: hall, h1_eta, hnz, vol -!!PS integer :: elnodes(3), nz, ed, edi, node, j, elem, q -!!PS real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) -!!PS integer :: kk, nzmax, el -!!PS real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso -!!PS real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? -!!PS real(kind=WP), dimension(:,:,:), pointer :: UV -!!PS #include "associate_part_def.h" -!!PS #include "associate_mesh_def.h" -!!PS #include "associate_part_ass.h" -!!PS #include "associate_mesh_ass.h" -!!PS UV => dynamics%uv(:,:,:) -!!PS -!!PS !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered -!!PS !rossby_num=2 -!!PS -!!PS ed=myDim_elem2D+eDim_elem2D -!!PS allocate(uuu(ed)) -!!PS -!!PS uke_back=0.0_8 -!!PS uke_dis=0.0_8 -!!PS DO ed=1, myDim_elem2D -!!PS DO nz=1, nlevels(ed)-1 -!!PS uke_dis(nz,ed)=(UV(1,nz,ed)*UV_dis_tend(1,nz,ed)+UV(2,nz,ed)*UV_dis_tend(2,nz,ed)) -!!PS uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) -!!PS END DO -!!PS END DO -!!PS -!!PS DO nz=1,nl-1 -!!PS uuu=0.0_8 -!!PS uuu=uke_back(nz,:) -!!PS call smooth_elem(uuu,smooth_back, partit, mesh) !3) ? -!!PS uke_back(nz,:)=uuu -!!PS END DO -!!PS -!!PS -!!PS -!!PS !Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice -!!PS !Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic -!!PS !uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) -!!PS ed=myDim_elem2D+eDim_elem2D -!!PS allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) -!!PS allocate(work_uv(myDim_nod2D+eDim_nod2D)) -!!PS allocate(rosb_array(nl-1,ed)) -!!PS call exchange_elem(UV, partit) -!!PS rosb_array=0._8 -!!PS DO nz=1, nl-1 -!!PS work_uv=0._WP -!!PS DO node=1, myDim_nod2D -!!PS vol=0._WP -!!PS U_work(nz,node)=0._WP -!!PS V_work(nz,node)=0._WP -!!PS DO j=1, nod_in_elem2D_num(node) -!!PS elem=nod_in_elem2D(j, node) -!!PS U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) -!!PS V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) -!!PS vol=vol+elem_area(elem) -!!PS END DO -!!PS U_work(nz,node)=U_work(nz,node)/vol -!!PS V_work(nz,node)=U_work(nz,node)/vol -!!PS END DO -!!PS work_uv=U_work(nz,:) -!!PS call exchange_nod(work_uv, partit) -!!PS U_work(nz,:)=work_uv -!!PS work_uv=V_work(nz,:) -!!PS call exchange_nod(work_uv, partit) -!!PS V_work(nz,:)=work_uv -!!PS END DO -!!PS -!!PS DO el=1,myDim_elem2D -!!PS DO nz=1, nlevels(el)-1 -!!PS rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& -!!PS sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& -!!PS (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& -!!PS sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) -!!PS ! hall=hall+hnz -!!PS END DO -!!PS ! rosb_array(el)=rosb_array(el)/hall -!!PS END DO -!!PS DO ed=1, myDim_elem2D -!!PS scaling=1._WP -!!PS IF(uke_scaling) then -!!PS reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) -!!PS rosb=0._wp -!!PS elnodes=elem2D_nodes(:, ed) -!!PS DO kk=1,3 -!!PS c1=0._wp -!!PS nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) -!!PS !Vertical average; same scaling in the vertical -!!PS DO nz=1, nzmax-1 -!!PS c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. -!!PS END DO -!!PS c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min -!!PS !Cutoff K_GM depending on (Resolution/Rossby radius) ratio -!!PS rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) -!!PS END DO -!!PS rosb=rosb/3._8 -!!PS scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) -!!PS END IF -!!PS -!!PS DO nz=1, nlevels(ed)-1 -!!PS elnodes=elem2D_nodes(:,ed) -!!PS -!!PS !Taking out that one place where it is always weird (Pacific Southern Ocean) -!!PS !Should not really be used later on, once we fix the issue with the 1/4 degree grid -!!PS if(.not. (TRIM(which_toy)=="soufflet")) then -!!PS call elem_center(ed, ex, ey) -!!PS !a1=-104.*rad -!!PS !a2=-49.*rad -!!PS call g2r(-104.*rad, -49.*rad, a1, a2) -!!PS dist_reg(1)=ex-a1 -!!PS dist_reg(2)=ey-a2 -!!PS call trim_cyclic(dist_reg(1)) -!!PS dist_reg(1)=dist_reg(1)*elem_cos(ed) -!!PS dist_reg=dist_reg*r_earth -!!PS len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) -!!PS -!!PS -!!PS !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 -!!PS rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) -!!PS !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) -!!PS uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) -!!PS else -!!PS rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) -!!PS !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) -!!PS uke_dis(nz,ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) -!!PS end if -!!PS -!!PS END DO -!!PS END DO -!!PS deallocate(U_work, V_work) -!!PS deallocate(rosb_array) -!!PS deallocate(work_uv) -!!PS call exchange_elem(uke_dis, partit) -!!PS DO nz=1, nl-1 -!!PS uuu=uke_dis(nz,:) -!!PS call smooth_elem(uuu,smooth_dis, partit, mesh) -!!PS uke_dis(nz,:)=uuu -!!PS END DO -!!PS DO ed=1, myDim_elem2D -!!PS DO nz=1,nlevels(ed)-1 -!!PS uke_rhs_old(nz,ed)=uke_rhs(nz,ed) -!!PS uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) -!!PS uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) -!!PS END DO -!!PS END DO -!!PS call exchange_elem(uke, partit) -!!PS -!!PS deallocate(uuu) -!!PS end subroutine uke_update -! -! -!_______________________________________________________________________________ -subroutine relative_vorticity(dynamics, partit, mesh) - USE o_ARRAYS, only: vorticity - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - use g_comm_auto - IMPLICIT NONE - integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 - real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - real(kind=WP), dimension(:,:,:), pointer :: UV -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - - !!PS DO n=1,myDim_nod2D - !!PS nl1 = nlevels_nod2D(n)-1 - !!PS ul1 = ulevels_nod2D(n) - !!PS vorticity(ul1:nl1,n)=0.0_WP - !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 - !!PS !!PS vorticity(nz,n)=0.0_WP - !!PS !!PS END DO - !!PS END DO - vorticity(:,1:myDim_nod2D) = 0.0_WP - DO edge=1,myDim_edge2D - !! edge=myList_edge2D(m) - enodes=edges(:,edge) - el=edge_tri(:,edge) - nl1=nlevels(el(1))-1 - ul1=ulevels(el(1)) - deltaX1=edge_cross_dxdy(1,edge) - deltaY1=edge_cross_dxdy(2,edge) - nl2=0 - ul2=0 - if(el(2)>0) then - deltaX2=edge_cross_dxdy(3,edge) - deltaY2=edge_cross_dxdy(4,edge) - nl2=nlevels(el(2))-1 - ul2=ulevels(el(2)) - end if - nl12 = min(nl1,nl2) - ul12 = max(ul1,ul2) - - DO nz=ul1,ul12-1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - if (ul2>0) then - DO nz=ul2,ul12-1 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - endif - !!PS DO nz=1,min(nl1,nl2) - DO nz=ul12,nl12 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & - deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl1 - DO nz=nl12+1,nl1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl2 - DO nz=nl12+1,nl2 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - END DO - - ! vorticity = vorticity*area at this stage - ! It is correct only on myDim nodes - DO n=1,myDim_nod2D - !! n=myList_nod2D(m) - ul1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - !!PS DO nz=1,nlevels_nod2D(n)-1 - DO nz=ul1,nl1-1 - vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) - END DO - END DO - - call exchange_nod(vorticity, partit) - -! Now it the relative vorticity known on neighbors too -end subroutine relative_vorticity diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 013495860..1a0c078a0 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -23,14 +23,7 @@ MODULE o_PARAM real(kind=WP) :: C_d= 0.0025_WP ! Bottom drag coefficient real(kind=WP) :: kappa=0.4 !von Karman's constant real(kind=WP) :: mix_coeff_PP=0.01_WP ! mixing coef for PP scheme -real(kind=WP) :: gamma0=0.01! [m/s], gamma0*len*dt is the background viscosity -real(kind=WP) :: gamma1=0.1! [non dim.], or computation of the flow aware viscosity -real(kind=WP) :: gamma2=10.! [s/m], is only used in easy backscatter option -real(kind=WP) :: Div_c =1.0_WP !modified Leith viscosity weight -real(kind=WP) :: Leith_c=1.0_WP !Leith viscosity weight. It needs vorticity! -real(kind=WP) :: easy_bs_return=1.0 !backscatter option only (how much to return) real(kind=WP) :: A_ver=0.001_WP ! Vertical harm. visc. -integer :: visc_option=5 logical :: uke_scaling=.true. real(kind=WP) :: uke_scaling_factor=1._WP real(kind=WP) :: rosb_dis=1._WP @@ -81,9 +74,6 @@ MODULE o_PARAM ! elevation and divergence real(kind=WP) :: epsilon=0.1_WP ! AB2 offset ! Tracers -logical :: i_vert_visc= .true. -logical :: w_split =.false. -real(kind=WP) :: w_max_cfl=1.e-5_WP logical :: SPP=.false. @@ -99,9 +89,9 @@ MODULE o_PARAM ! Momentum -logical :: free_slip=.false. - ! false=no slip -integer :: mom_adv=2 +!!PS logical :: free_slip=.false. +!!PS ! false=no slip +!!PS integer :: mom_adv=2 ! 1 vector control volumes, p1 velocities ! 2 scalar control volumes ! 3 vector invariant @@ -161,11 +151,11 @@ MODULE o_PARAM character(20) :: which_pgf='shchepetkin' - NAMELIST /oce_dyn/ state_equation, C_d, A_ver, gamma0, gamma1, gamma2, Leith_c, Div_c, easy_bs_return, & - scale_area, mom_adv, free_slip, i_vert_visc, w_split, w_max_cfl, SPP,& + NAMELIST /oce_dyn/ state_equation, C_d, A_ver, & + scale_area, SPP,& Fer_GM, K_GM_max, K_GM_min, K_GM_bvref, K_GM_resscalorder, K_GM_rampmax, K_GM_rampmin, & scaling_Ferreira, scaling_Rossby, scaling_resolution, scaling_FESOM14, & - Redi, visc_sh_limit, mix_scheme, Ricr, concv, which_pgf, visc_option, alpha, theta, use_density_ref, & + Redi, visc_sh_limit, mix_scheme, Ricr, concv, which_pgf, alpha, theta, use_density_ref, & K_back, c_back, uke_scaling, uke_scaling_factor, smooth_back, smooth_dis, & smooth_back_tend, rosb_dis @@ -210,9 +200,6 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: tr_xy(:,:,:) real(kind=WP), allocatable :: tr_z(:,:) -! Auxiliary arrays for vector-invariant form of momentum advection -real(kind=WP), allocatable,dimension(:,:) :: vorticity - !Viscosity and diff coefs real(kind=WP), allocatable,dimension(:,:) :: Av,Kv real(kind=WP), allocatable,dimension(:,:,:) :: Kv_double diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 071627d84..8309a1ca6 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -243,15 +243,15 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! initialise arrays that are needed for backscatter_coef - if(dynamics%visc_opt==8) call init_backscatter(partit, mesh) + if(dynamics%opt_visc==8) call init_backscatter(partit, mesh) !___________________________________________________________________________ if(partit%mype==0) write(*,*) 'Initial state' - if (w_split .and. partit%mype==0) then + if (dynamics%use_wsplit .and. partit%mype==0) then write(*,*) '******************************************************************************' write(*,*) 'vertical velocity will be split onto explicit and implicit constitutes;' - write(*,*) 'maximum allowed CDF on explicit W is set to: ', w_max_cfl + write(*,*) 'maximum allowed CDF on explicit W is set to: ', dynamics%wsplit_maxcfl write(*,*) '******************************************************************************' end if end subroutine ocean_setup @@ -362,12 +362,12 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) USE o_param IMPLICIT NONE integer :: elem_size, node_size - integer, save :: nm_unit = 104 ! unit to open namelist file, skip 100-102 for cray + integer, save :: nm_unit = 105 ! unit to open namelist file, skip 100-102 for cray integer :: iost - integer :: visc_opt - real(kind=WP) :: gamma0_visc, gamma1_visc, gamma2_visc - real(kind=WP) :: div_c_visc, leith_c_visc, easybackscat_return + integer :: opt_visc + real(kind=WP) :: visc_gamma0, visc_gamma1, visc_gamma2 + real(kind=WP) :: visc_easybsreturn logical :: use_ivertvisc integer :: momadv_opt logical :: use_freeslip @@ -377,50 +377,42 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics + + ! define dynamics namelist parameter + namelist /dynamics_visc / opt_visc, visc_gamma0, visc_gamma1, visc_gamma2, & + use_ivertvisc, visc_easybsreturn + namelist /dynamics_general/ momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" -#include "associate_mesh_ass.h" - -!!PS ! define dynamics namelist parameter -!!PS namelist /dynamics_visc / visc_opt, gamma0_visc, gamma1_visc, gamma2_visc, & -!!PS div_c_visc, leith_c_visc, use_ivertvisc, easy_bs_return -!!PS namelist /dynamics_general / momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl -!!PS -!!PS ! open and read namelist for I/O -!!PS open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) -!!PS if (iost == 0) then -!!PS if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' -!!PS else -!!PS if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost -!!PS call par_ex(partit%MPI_COMM_FESOM, partit%mype) -!!PS stop -!!PS end if -!!PS read(nm_unit, nml=dynamics_visc , iostat=iost) -!!PS read(nm_unit, nml=dynamics_general, iostat=iost) -!!PS close(nm_unit) +#include "associate_mesh_ass.h" + + ! open and read namelist for I/O + open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) + if (iost == 0) then + if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' + else + if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + read(nm_unit, nml=dynamics_visc, iostat=iost) + read(nm_unit, nml=dynamics_general, iostat=iost) + close(nm_unit) !___________________________________________________________________________ ! set parameters in derived type -!!PS dynamics%visc_opt = visc_opt -!!PS dynamics%gamma0_visc = gamma0_visc -!!PS dynamics%gamma1_visc = gamma1_visc -!!PS dynamics%gamma2_visc = gamma2_visc -!!PS dynamics%use_ivertvisc = use_ivertvisc -!!PS dynamics%momadv_opt = momadv_opt -!!PS dynamics%use_freeslip = use_freeslip -!!PS dynamics%use_wsplit = use_wsplit -!!PS dynamics%wsplit_maxcfl = wsplit_maxcfl - - dynamics%visc_opt = visc_option - dynamics%gamma0_visc = gamma0 - dynamics%gamma1_visc = gamma1 - dynamics%gamma2_visc = gamma2 - dynamics%use_ivertvisc = i_vert_visc - dynamics%momadv_opt = mom_adv - dynamics%use_freeslip = free_slip - dynamics%use_wsplit = w_split - dynamics%wsplit_maxcfl = w_max_cfl + dynamics%opt_visc = opt_visc + dynamics%visc_gamma0 = visc_gamma0 + dynamics%visc_gamma1 = visc_gamma1 + dynamics%visc_gamma2 = visc_gamma2 + dynamics%visc_easybsreturn = visc_easybsreturn + dynamics%use_ivertvisc = use_ivertvisc + dynamics%momadv_opt = momadv_opt + dynamics%use_freeslip = use_freeslip + dynamics%use_wsplit = use_wsplit + dynamics%wsplit_maxcfl = wsplit_maxcfl !___________________________________________________________________________ ! define local vertice & elem array size @@ -476,7 +468,7 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%work%uvnode_rhs = 0.0_WP dynamics%work%u_c = 0.0_WP dynamics%work%v_c = 0.0_WP - if (dynamics%visc_opt==5) then + if (dynamics%opt_visc==5) then allocate(dynamics%work%u_b(nl-1, elem_size)) allocate(dynamics%work%v_b(nl-1, elem_size)) dynamics%work%u_b = 0.0_WP @@ -556,14 +548,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) allocate(Tsurf_t(node_size,2), Ssurf_t(node_size,2)) allocate(tau_x_t(node_size,2), tau_y_t(node_size,2)) -! ================= -! All auxiliary arrays -! ================= - -!if(mom_adv==3) then -allocate(vorticity(nl-1,node_size)) -vorticity=0.0_WP -!end if ! ================= ! Visc and Diff coefs @@ -579,35 +563,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) !!PS call oce_mixing_kpp_init ! Setup constants, allocate arrays and construct look up table end if -! ================= -! Backscatter arrays -! ================= - -!!PS if(visc_option==8) then -!!PS -!!PS allocate(uke(nl-1,elem_size)) ! Unresolved kinetic energy for backscatter coefficient -!!PS allocate(v_back(nl-1,elem_size)) ! Backscatter viscosity -!!PS allocate(uke_dis(nl-1,elem_size), uke_back(nl-1,elem_size)) -!!PS allocate(uke_dif(nl-1,elem_size)) -!!PS allocate(uke_rhs(nl-1,elem_size), uke_rhs_old(nl-1,elem_size)) -!!PS allocate(UV_dis_tend(2,nl-1,elem_size), UV_back_tend(2,nl-1,elem_size)) -!!PS allocate(UV_total_tend(2,nl-1,elem_size)) -!!PS -!!PS uke=0.0_8 -!!PS v_back=0.0_8 -!!PS uke_dis=0.0_8 -!!PS uke_dif=0.0_8 -!!PS uke_back=0.0_8 -!!PS uke_rhs=0.0_8 -!!PS uke_rhs_old=0.0_8 -!!PS UV_dis_tend=0.0_8 -!!PS UV_back_tend=0.0_8 -!!PS UV_total_tend=0.0_8 -!!PS end if - -!Velocities at nodes -!!PS allocate(Unode(2,nl-1,node_size)) - ! tracer gradients & RHS allocate(ttrhs(nl-1,node_size)) allocate(tr_xy(2,nl-1,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) From 1e7479815d1b446435bfbab6e4fb281ca56188e0 Mon Sep 17 00:00:00 2001 From: a270042 Date: Mon, 8 Nov 2021 16:26:38 +0100 Subject: [PATCH 501/909] update ../config/namelist.dyn --- config/namelist.dyn | 4 ---- 1 file changed, 4 deletions(-) diff --git a/config/namelist.dyn b/config/namelist.dyn index c729acea1..e35508f2f 100644 --- a/config/namelist.dyn +++ b/config/namelist.dyn @@ -21,7 +21,3 @@ wsplit_maxcfl= 1.0 ! maximum allowed CFL criteria in vertical (0.5 < w_max_c ! in older FESOM it used to be w_exp_max=1.e-3 / -!&dynamics_phys -!A_ver = 1.e-4 ! Vertical viscosity, m^2/s -!scale_area = 5.8e9 ! Visc. and diffus. are for an element with scale_area -!/ \ No newline at end of file From b60a24dc30ea49cf9627e4fc5f12b27983588ee3 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 3 Nov 2021 14:54:54 +0100 Subject: [PATCH 502/909] OpenMP implementation in the tracer advection part as it was suggested by Natalja --- src/oce_adv_tra_driver.F90 | 9 +- src/oce_adv_tra_fct.F90 | 233 +++++++++++++++++-------------------- src/oce_adv_tra_hor.F90 | 17 ++- src/oce_adv_tra_ver.F90 | 45 ++++--- 4 files changed, 159 insertions(+), 145 deletions(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 916133a3d..434c2186a 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -237,8 +237,10 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, !___________________________________________________________________________ ! c. Update the solution ! Vertical +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, k, elem, enodes, num, el, nu12, nl12, nu1, nu2, nl1, nl2, edge) if (present(use_lo)) then if (use_lo) then +!$OMP DO do n=1, myDim_nod2d nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -247,9 +249,10 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, dttf_v(nz,n)=dttf_v(nz,n)-ttf(nz,n)*hnode(nz,n)+LO(nz,n)*hnode_new(nz,n) end do end do +!$OMP END DO end if end if - +!$OMP DO do n=1, myDim_nod2d nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -257,8 +260,8 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, dttf_v(nz,n)=dttf_v(nz,n) + (flux_v(nz,n)-flux_v(nz+1,n))*dt/areasvol(nz,n) end do end do - - +!$OMP END DO +!$OMP END PARALLEL ! Horizontal do edge=1, myDim_edge2D enodes(1:2)=edges(:,edge) diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index d76c3aebc..78b45899b 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -23,7 +23,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, real(kind=WP), intent(in) :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, partit%myDim_edge2D) real(kind=WP), intent(inout) :: adf_v(mesh%nl, partit%myDim_nod2D) - real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, partit%myDim_nod2D) real(kind=WP), intent(inout) :: fct_minus(mesh%nl, partit%myDim_nod2D) real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array end subroutine @@ -40,7 +40,7 @@ subroutine oce_adv_tra_fct_init(twork, partit, mesh) implicit none integer :: my_size type(t_mesh), intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit + type(t_partit), intent(inout), target :: partit type(t_tracer_work), intent(inout), target :: twork #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -81,6 +81,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, USE MOD_PARTIT USE MOD_PARSUP use g_comm_auto + use omp_lib implicit none real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh @@ -99,12 +100,25 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, real(kind=WP) :: flux_eps=1e-16 real(kind=WP) :: bignumber=1e3 integer :: vlimit=1 - + integer(omp_lock_kind), allocatable, save :: plock(:) + integer(omp_lock_kind) :: mlock(partit%myDim_nod2D) + logical, save :: l_first=.true. #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, k, elem, enodes, num, el, nl1, nl2, nu1, nu2, nl12, nu12, edge, & +!$OMP flux, ae,tvert_max, tvert_min) +!$OMP MASTER + if (l_first) then + allocate(plock(partit%myDim_nod2D+partit%eDim_nod2D)) + do n=1, myDim_nod2D+partit%eDim_nod2D + call omp_init_lock_with_hint(plock(n),omp_sync_hint_speculative+omp_sync_hint_uncontended) + enddo + l_first = .false. + endif +!$OMP END MASTER ! -------------------------------------------------------------------------- ! ttf is the tracer field on step n ! del_ttf is the increment @@ -112,6 +126,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, ! -------------------------------------------------------------------------- !___________________________________________________________________________ ! a1. max, min between old solution and updated low-order solution per node +!$OMP DO do n=1,myDim_nod2D + edim_nod2d nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -119,12 +134,13 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_ttf_max(nz,n)=max(LO(nz,n), ttf(nz,n)) fct_ttf_min(nz,n)=min(LO(nz,n), ttf(nz,n)) end do - end do - + end do +!$OMP END DO !___________________________________________________________________________ ! a2. Admissible increments on elements ! (only layers below the first and above the last layer) ! look for max, min bounds for each element --> AUX here auxilary array +!$OMP DO do elem=1, myDim_elem2D enodes=elem2D_nodes(:,elem) nu1 = ulevels(elem) @@ -140,98 +156,51 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, end do endif end do ! --> do elem=1, myDim_elem2D - +!$OMP END DO !___________________________________________________________________________ ! a3. Bounds on clusters and admissible increments ! Vertical1: In this version we look at the bounds on the clusters ! above and below, which leaves wide bounds because typically ! vertical gradients are larger. - if(vlimit==1) then !Horizontal - do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - - !___________________________________________________________________ - do nz=nu1,nl1-1 - ! max,min horizontal bound in cluster around node n in every - ! vertical layer - ! nod_in_elem2D --> elem indices of which node n is surrounded - ! nod_in_elem2D_num --> max number of surrounded elem - tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - end do - - !___________________________________________________________________ - ! calc max,min increment of surface layer with respect to low order - ! solution - fct_ttf_max(nu1,n)=tvert_max(nu1)-LO(nu1,n) - fct_ttf_min(nu1,n)=tvert_min(nu1)-LO(nu1,n) - - ! calc max,min increment from nz-1:nz+1 with respect to low order - ! solution at layer nz - do nz=nu1+1,nl1-2 - fct_ttf_max(nz,n)=maxval(tvert_max(nz-1:nz+1))-LO(nz,n) - fct_ttf_min(nz,n)=minval(tvert_min(nz-1:nz+1))-LO(nz,n) - end do - ! calc max,min increment of bottom layer -1 with respect to low order - ! solution - nz=nl1-1 - fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) - fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) - end do - end if - - !___________________________________________________________________________ - ! Vertical2: Similar to the version above, but the vertical bounds are more - ! local - if(vlimit==2) then - do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - do nz=nu1,nl1-1 - tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - end do - do nz=nu1+1, nl1-2 - tvert_max(nz)=max(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) - tvert_min(nz)=min(tvert_min(nz),minval(fct_ttf_max(nz-1:nz+1,n))) - end do - do nz=nu1,nl1-1 - fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) - fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) - end do - end do - end if - - !___________________________________________________________________________ - ! Vertical3: Vertical bounds are taken into account only if they are narrower than the - ! horizontal ones - if(vlimit==3) then - do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - do nz=nu1, nl1-1 - tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - end do - do nz=nu1+1, nl1-2 - tvert_max(nz)=min(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) - tvert_min(nz)=max(tvert_min(nz),minval(fct_ttf_max(nz-1:nz+1,n))) - end do - do nz=nu1, nl1-1 - fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) - fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) - end do - end do - end if - +!$OMP DO + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !___________________________________________________________________ + do nz=nu1,nl1-1 + ! max,min horizontal bound in cluster around node n in every + ! vertical layer + ! nod_in_elem2D --> elem indices of which node n is surrounded + ! nod_in_elem2D_num --> max number of surrounded elem + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + end do + !___________________________________________________________________ + ! calc max,min increment of surface layer with respect to low order + ! solution + fct_ttf_max(nu1,n)=tvert_max(nu1)-LO(nu1,n) + fct_ttf_min(nu1,n)=tvert_min(nu1)-LO(nu1,n) + ! calc max,min increment from nz-1:nz+1 with respect to low order + ! solution at layer nz + do nz=nu1+1,nl1-2 + fct_ttf_max(nz,n)=maxval(tvert_max(nz-1:nz+1))-LO(nz,n) + fct_ttf_min(nz,n)=minval(tvert_min(nz-1:nz+1))-LO(nz,n) + end do + ! calc max,min increment of bottom layer -1 with respect to low order + ! solution + nz=nl1-1 + fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) + fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) + end do +!$OMP END DO !___________________________________________________________________________ ! b1. Split positive and negative antidiffusive contributions ! --> sum all positive (fct_plus), negative (fct_minus) antidiffusive ! horizontal element and vertical node contribution to node n and layer nz ! see. R. Löhner et al. "finite element flux corrected transport (FEM-FCT) ! for the euler and navier stoke equation +!$OMP DO do n=1, myDim_nod2D nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -240,50 +209,54 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_minus(nz,n)=0._WP end do end do - +!$OMP END DO !Vertical +!$OMP DO do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - do nz=nu1,nl1-1 -! fct_plus(nz,n)=fct_plus(nz,n)+ & -! (max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) & -! /hnode(nz,n) -! fct_minus(nz,n)=fct_minus(nz,n)+ & -! (min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) & -! /hnode(nz,n) - fct_plus(nz,n) =fct_plus(nz,n) +(max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) - fct_minus(nz,n)=fct_minus(nz,n)+(min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) - end do + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 + fct_plus(nz,n) =fct_plus(nz,n) +(max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) + fct_minus(nz,n)=fct_minus(nz,n)+(min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) + end do end do - +!$OMP END DO + +!$OMP DO !Horizontal do edge=1, myDim_edge2D - enodes(1:2)=edges(:,edge) - el=edge_tri(:,edge) - nl1=nlevels(el(1))-1 - nu1=ulevels(el(1)) - nl2=0 - nu2=0 - if(el(2)>0) then - nl2=nlevels(el(2))-1 - nu2=ulevels(el(2)) - end if + enodes(1:2)=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + nu1=ulevels(el(1)) + nl2=0 + nu2=0 + if (el(2)>0) then + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + end if - nl12 = max(nl1,nl2) - nu12 = nu1 - if (nu2>0) nu12 = min(nu1,nu2) - - do nz=nu12, nl12 - fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) - fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) - fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) - fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) - end do - end do - + nl12 = max(nl1,nl2) + nu12 = nu1 + if (nu2>0) nu12 = min(nu1,nu2) + call omp_set_lock(plock(enodes(1))) + do nz=nu12, nl12 + fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) + fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) + end do + call omp_unset_lock(plock(enodes(1))) + + call omp_set_lock(plock(enodes(2))) + do nz=nu12, nl12 + fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) + fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) + end do + call omp_unset_lock(plock(enodes(2))) + end do +!$OMP END DO !___________________________________________________________________________ ! b2. Limiting factors +!$OMP DO do n=1,myDim_nod2D nu1=ulevels_nod2D(n) nl1=nlevels_nod2D(n) @@ -294,13 +267,16 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_minus(nz,n)=min(1.0_WP,fct_ttf_min(nz,n)/flux) end do end do - +!$OMP END DO ! fct_minus and fct_plus must be known to neighbouring PE +!$OMP MASTER call exchange_nod(fct_plus, fct_minus, partit) - +!$OMP END MASTER +!!$OMP BARRIER !___________________________________________________________________________ ! b3. Limiting !Vertical +!$OMP DO do n=1, myDim_nod2D nu1=ulevels_nod2D(n) nl1=nlevels_nod2D(n) @@ -331,10 +307,13 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, end do ! the bottom flux is always zero end do - - call exchange_nod_end(partit) ! fct_plus, fct_minus - +!$OMP END DO +!!$OMP MASTER +! call exchange_nod_end(partit) ! fct_plus, fct_minus +!!$OMP END MASTER +!!$OMP BARRIER !Horizontal +!$OMP DO do edge=1, myDim_edge2D enodes(1:2)=edges(:,edge) el=edge_tri(:,edge) @@ -366,4 +345,6 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, adf_h(nz,edge)=ae*adf_h(nz,edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine oce_tra_adv_fct diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 441372ba8..9214a277d 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -92,6 +92,9 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) ! The result is the low-order solution horizontal fluxes ! They are put into flux !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, & +!$OMP a, vflux, el, enodes, nz, nu12, nl12, nl1, nl2, nu1, nu2) +!$OMP DO do edge=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,edge) @@ -214,6 +217,8 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_hor_upw1 ! ! @@ -255,6 +260,9 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g ! The result is the low-order solution horizontal fluxes ! They are put into flux !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, Tmean1, Tmean2, cHO, & +!$OMP c_lo, a, vflux, el, enodes, nz, nu12, nl12, nl1, nl2, nu1, nu2) +!$OMP DO do edge=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,edge) @@ -487,6 +495,8 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_hor_muscl ! ! @@ -498,7 +508,7 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr USE MOD_PARSUP use g_comm_auto implicit none - type(t_partit),intent(in), target :: partit + type(t_partit),intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) @@ -526,6 +536,9 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr ! The result is the low-order solution horizontal fluxes ! They are put into flux !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, Tmean1, Tmean2, cHO, & +!$OMP a, vflux, el, enodes, nz, nu12, nl12, nl1, nl2, nu1, nu2) +!$OMP DO do edge=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,edge) @@ -741,5 +754,7 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_hor_mfct diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 84ee55173..03a7cb4e8 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -24,10 +24,10 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) USE MOD_PARSUP type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero end subroutine !=============================================================================== ! QR (4th order centerd) @@ -103,7 +103,7 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) - integer :: nz, n, nzmax, nzmin, tr_num + integer :: nz, n, nzmax, nzmin real(kind=WP) :: m, zinv, dt_inv, dz real(kind=WP) :: c1, v_adv @@ -113,7 +113,8 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) #include "associate_mesh_ass.h" dt_inv=1.0_WP/dt - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(a, b, c, tr, cp, tp, n, nz, nzmax, nzmin, m, zinv, dz, c1, v_adv) +!$OMP DO !___________________________________________________________________________ ! loop over local nodes do n=1,myDim_nod2D @@ -233,6 +234,8 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) ttf(nz,n)=ttf(nz,n)+tr(nz) end do end do ! --> do n=1,myDim_nod2D +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_vert_impl ! ! @@ -263,7 +266,8 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) else flux=0.0_WP end if - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert, n, nz, nzmax, nzmin) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n) @@ -291,6 +295,8 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) ttf(nz-1,n)*(W(nz,n)-abs(W(nz,n))))*area(nz,n)-flux(nz,n) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_ver_upw1 ! ! @@ -324,7 +330,8 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) else flux=0.0_WP end if - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert,n, nz, nzmax, nzmin, Tmean, Tmean1, Tmean2, qc, qu,qd) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n) @@ -364,10 +371,11 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) Tmean1=ttf(nz ,n)+(2*qc+qu)*(zbar_3d_n(nz,n)-Z_3d_n(nz ,n))/3.0_WP Tmean2=ttf(nz-1,n)+(2*qc+qd)*(zbar_3d_n(nz,n)-Z_3d_n(nz-1,n))/3.0_WP Tmean =(W(nz,n)+abs(W(nz,n)))*Tmean1+(W(nz,n)-abs(W(nz,n)))*Tmean2 - ! flux(nz,n)=-0.5_WP*(num_ord*(Tmean1+Tmean2)*W(nz,n)+(1.0_WP-num_ord)*Tmean)*area(nz,n)-flux(nz,n) flux(nz,n)=(-0.5_WP*(1.0_WP-num_ord)*Tmean - num_ord*(0.5_WP*(Tmean1+Tmean2))*W(nz,n))*area(nz,n)-flux(nz,n) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_ver_qr4c ! ! @@ -389,7 +397,7 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) real(kind=WP) :: tvert(mesh%nl), tv(mesh%nl), aL, aR, aj, x real(kind=WP) :: dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1 integer :: n, nz, nzmax, nzmin - integer :: overshoot_counter, counter +! integer :: overshoot_counter, counter #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -410,8 +418,10 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) ! non-uniformity into account, but this is more cumbersome. This is the version for AB ! time stepping ! -------------------------------------------------------------------------- - overshoot_counter=0 - counter =0 +! overshoot_counter=0 +! counter =0 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert, tv, aL, aR, aj, x, dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1, n, nz, nzmax, nzmin) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ !Interpolate to zbar...depth levels --> all quantities (tracer ...) are @@ -510,12 +520,12 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) ! loop over layers (segments) do nz=nzmin, nzmax-1 if ((W(nz,n)<=0._WP) .AND. (W(nz+1,n)>=0._WP)) CYCLE - counter=counter+1 + !counter=counter+1 aL=tv(nz) aR=tv(nz+1) if ((aR-ttf(nz, n))*(ttf(nz, n)-aL)<=0._WP) then ! write(*,*) aL, ttf(nz, n), aR - overshoot_counter=overshoot_counter+1 + ! overshoot_counter=overshoot_counter+1 aL =ttf(nz, n) aR =ttf(nz, n) end if @@ -552,6 +562,8 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) end do ! --> do n=1, myDim_nod2D ! if (mype==0) write(*,*) 'PPM overshoot statistics:', real(overshoot_counter)/real(counter) +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_vert_ppm ! ! @@ -581,7 +593,8 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) else flux=0.0_WP end if - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, tv, tvert) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n)-1 @@ -605,4 +618,6 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) !_______________________________________________________________________ flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) end do ! --> do n=1, myDim_nod2D +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_ver_cdiff From 2dcaaba1d176dc522548104e4e754f2f0a713c79 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 3 Nov 2021 15:00:36 +0100 Subject: [PATCH 503/909] added -qopenmp into CMake (always compiled with OpenMP) --- src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index fd13d5d2e..01892778c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -67,7 +67,7 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) - target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) # target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) From 572e7bb88f1de742d747b99648e2bba0dd0d11f2 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Wed, 3 Nov 2021 15:04:56 +0100 Subject: [PATCH 504/909] More tests and local testing with containers (#194) * rename workflows * add more tests and run_tests scripts * change test path * add simplest test, plus documentation * Update fesom2.1.yml * Update fesom2_icepack.yml --- .github/workflows/fesom2.1.yml | 4 +- .github/workflows/fesom2_icepack.yml | 4 +- setups/paths.yml | 6 +-- setups/test_pi_floatice/setup.yml | 80 ++++++++++++++++++++++++++++ setups/test_pi_linfs/setup.yml | 80 ++++++++++++++++++++++++++++ setups/test_pi_partial/setup.yml | 80 ++++++++++++++++++++++++++++ setups/test_pi_visc7/setup.yml | 80 ++++++++++++++++++++++++++++ setups/test_pi_zstar/setup.yml | 80 ++++++++++++++++++++++++++++ test.sh | 32 +++++++++++ test/run_tests.sh | 21 ++++++++ 10 files changed, 458 insertions(+), 9 deletions(-) create mode 100644 setups/test_pi_floatice/setup.yml create mode 100644 setups/test_pi_linfs/setup.yml create mode 100644 setups/test_pi_partial/setup.yml create mode 100644 setups/test_pi_visc7/setup.yml create mode 100644 setups/test_pi_zstar/setup.yml create mode 100755 test.sh create mode 100755 test/run_tests.sh diff --git a/.github/workflows/fesom2.1.yml b/.github/workflows/fesom2.1.yml index 53cf575ca..733eaf55d 100644 --- a/.github/workflows/fesom2.1.yml +++ b/.github/workflows/fesom2.1.yml @@ -8,11 +8,11 @@ on: [push, pull_request] # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: - gfortran_ubuntu: + general_test: # Containers must run in Linux based operating systems runs-on: ubuntu-latest # Docker Hub image that `container-job` executes in - container: koldunovn/fesom2_test:f2.1_tracers + container: koldunovn/fesom2_test:refactoring # Service containers to run with `gfortran_ubuntu` steps: diff --git a/.github/workflows/fesom2_icepack.yml b/.github/workflows/fesom2_icepack.yml index df4fcd2bd..06d84ba64 100644 --- a/.github/workflows/fesom2_icepack.yml +++ b/.github/workflows/fesom2_icepack.yml @@ -8,11 +8,11 @@ on: [push, pull_request] # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: - gfortran_ubuntu: + icepack_test: # Containers must run in Linux based operating systems runs-on: ubuntu-latest # Docker Hub image that `container-job` executes in - container: koldunovn/fesom2_test:f2.1_tracers + container: koldunovn/fesom2_test:refactoring # Service containers to run with `gfortran_ubuntu` steps: diff --git a/setups/paths.yml b/setups/paths.yml index e74aa8cb6..963785b8c 100644 --- a/setups/paths.yml +++ b/setups/paths.yml @@ -54,18 +54,14 @@ docker: lnodename: - ' ' meshes: - pi: /fesom/pi/ test_souf: ./test/meshes/soufflet/ test_global: ./test/meshes/pi/ forcing: - CORE2: /fesom/dCORE2/ - JRA55: /fesom/dJRA55/ test_global: ./test/input/global/ clim: - phc: /fesom/phc3/ test_global: ./test/input/global/ opath: - opath: ../results/ + opath: ./test/ juwels: lnodename: diff --git a/setups/test_pi_floatice/setup.yml b/setups/test_pi_floatice/setup.yml new file mode 100644 index 000000000..6f99efd5a --- /dev/null +++ b/setups/test_pi_floatice/setup.yml @@ -0,0 +1,80 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + run_config: + use_floatice: True + +namelist.oce: + oce_dyn: + Div_c: 0.5 + Leith_c: 0.05 + w_split: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.26880359680085886 + salt: 23.943630158896298 + temp: 1.7010247885672327 + sst: 8.509590362118958 + u: -0.005721019451264724 + v: 0.00047682952470964814 + + + + + + + + + diff --git a/setups/test_pi_linfs/setup.yml b/setups/test_pi_linfs/setup.yml new file mode 100644 index 000000000..bc604dccc --- /dev/null +++ b/setups/test_pi_linfs/setup.yml @@ -0,0 +1,80 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + ale_def: + which_ALE: "linfs" + +namelist.oce: + oce_dyn: + Div_c: 0.5 + Leith_c: 0.05 + w_split: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.2685778327298968 + salt: 23.944511945072648 + temp: 1.7011044195264193 + sst: 8.51781304844356 + u: -0.0013090250570688075 + v: 0.00013164013131872999 + + + + + + + + + diff --git a/setups/test_pi_partial/setup.yml b/setups/test_pi_partial/setup.yml new file mode 100644 index 000000000..bae697a6a --- /dev/null +++ b/setups/test_pi_partial/setup.yml @@ -0,0 +1,80 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + ale_def: + use_partial_cell: False + +namelist.oce: + oce_dyn: + Div_c: 0.5 + Leith_c: 0.05 + w_split: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.2691270793874835 + salt: 23.944032641762846 + temp: 1.7014629411995628 + sst: 8.531605186060785 + u: -0.0014154276919262456 + v: 0.00013994193864008374 + + + + + + + + + diff --git a/setups/test_pi_visc7/setup.yml b/setups/test_pi_visc7/setup.yml new file mode 100644 index 000000000..c4d616619 --- /dev/null +++ b/setups/test_pi_visc7/setup.yml @@ -0,0 +1,80 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + +namelist.oce: + oce_dyn: + Div_c: 0.5 + Leith_c: 0.05 + w_split: True + visc_option: 7 + + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.2691276109603212 + salt: 23.944024690144552 + temp: 1.7017686482560304 + sst: 8.531529100200583 + u: -0.0014071010764418097 + v: 0.00014173175700137738 + + + + + + + + + diff --git a/setups/test_pi_zstar/setup.yml b/setups/test_pi_zstar/setup.yml new file mode 100644 index 000000000..e487659ae --- /dev/null +++ b/setups/test_pi_zstar/setup.yml @@ -0,0 +1,80 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + ale_def: + which_ALE: "zstar" + +namelist.oce: + oce_dyn: + Div_c: 0.5 + Leith_c: 0.05 + w_split: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.2691276443855294 + salt: 23.944024712806094 + temp: 1.701768707848739 + sst: 8.531522995932146 + u: -0.001407225233294229 + v: 0.00014182969591235959 + + + + + + + + + diff --git a/test.sh b/test.sh new file mode 100755 index 000000000..81fe8ec34 --- /dev/null +++ b/test.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Run simples FESOM2 test in a container. +# +# With singularity on ollie +# +# module load singularity/3.5.1 +# cd fesom2 +# singularity exec /home/ollie/nkolduno/SINGULARITY/fesom_refactoring.sif ./test.sh +# +# With docker on Linux/Mac +# docker run -it -v "$(pwd)"/fesom2:/fesom/fesom2 koldunovn/fesom2_test:refactoring /bin/bash +# cd fesom2 +# ./test.sh +# + +set -e + +machine="docker" +tests="test_pi" + +for test in $tests; do + + ./configure.sh ubuntu + echo $test + mkrun pi $test -m $machine + cd work_pi + chmod +x job_docker_new + ./job_docker_new + fcheck . + cd ../ + +done diff --git a/test/run_tests.sh b/test/run_tests.sh new file mode 100755 index 000000000..f19bdfe50 --- /dev/null +++ b/test/run_tests.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -e +cd ../ + +machine="docker" +tests="test_pi test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7 test_pi_zstar" + +for test in $tests; do + +./configure.sh ubuntu +echo $test + mkrun pi $test -m $machine + pwd + cd work_pi + chmod +x job_docker_new + ./job_docker_new + fcheck . + cd ../ + +done + From eeaee366ead6b78e91c53acbbac90d2ea19964b1 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 3 Nov 2021 15:16:58 +0100 Subject: [PATCH 505/909] old GNU compiler nows little about OMP locks :(. OpenMP has been commented out per default! --- src/CMakeLists.txt | 2 +- src/oce_adv_tra_fct.F90 | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 01892778c..fd13d5d2e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -67,7 +67,7 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) - target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) # target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 78b45899b..c68facd23 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -80,8 +80,10 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - use g_comm_auto - use omp_lib + USE g_comm_auto +#if defined(_OPENMP) + USE OMP_LIB +#endif implicit none real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh @@ -100,8 +102,10 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, real(kind=WP) :: flux_eps=1e-16 real(kind=WP) :: bignumber=1e3 integer :: vlimit=1 +#if defined(_OPENMP) integer(omp_lock_kind), allocatable, save :: plock(:) integer(omp_lock_kind) :: mlock(partit%myDim_nod2D) +#endif logical, save :: l_first=.true. #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -110,7 +114,8 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, k, elem, enodes, num, el, nl1, nl2, nu1, nu2, nl12, nu12, edge, & !$OMP flux, ae,tvert_max, tvert_min) -!$OMP MASTER +!$OMP MASTER +#if defined(_OPENMP) if (l_first) then allocate(plock(partit%myDim_nod2D+partit%eDim_nod2D)) do n=1, myDim_nod2D+partit%eDim_nod2D @@ -118,6 +123,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, enddo l_first = .false. endif +#endif !$OMP END MASTER ! -------------------------------------------------------------------------- ! ttf is the tracer field on step n @@ -239,19 +245,24 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, nl12 = max(nl1,nl2) nu12 = nu1 if (nu2>0) nu12 = min(nu1,nu2) +#if defined(_OPENMP) call omp_set_lock(plock(enodes(1))) +#endif do nz=nu12, nl12 fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) end do +#if defined(_OPENMP) call omp_unset_lock(plock(enodes(1))) - call omp_set_lock(plock(enodes(2))) +#endif do nz=nu12, nl12 fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) end do +#if defined(_OPENMP) call omp_unset_lock(plock(enodes(2))) +#endif end do !$OMP END DO !___________________________________________________________________________ From 87e952222af2b41229b87e8d0105b88f772206dd Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 3 Nov 2021 23:58:38 +0100 Subject: [PATCH 506/909] tracer advection part has been fullly OpenMP parallelized. It turns out that OpenMP does not slow down the modes as compared to MPI. On ollie: a setup with 288 MPI tasks is only 5% faster than a setup with 8MPI x 36 OpenMP a setup with 16MPI tasks x 18 OpenMP is faster than a setup with 288MPI tasks further increase in throuput has been observed up to 20 MPI x 18 OpenMP I didnt chek for more resources since the mesh is small (CORE2) --- src/MOD_PARTIT.F90 | 9 ++- src/gen_modules_partitioning.F90 | 8 +++ src/oce_adv_tra_driver.F90 | 83 ++++++++++++++++------- src/oce_adv_tra_fct.F90 | 28 ++------ src/oce_adv_tra_hor.F90 | 69 ++++++++++++------- src/oce_adv_tra_ver.F90 | 112 ++++++++++++++++++++----------- 6 files changed, 196 insertions(+), 113 deletions(-) diff --git a/src/MOD_PARTIT.F90 b/src/MOD_PARTIT.F90 index bd3b7dec2..5d6b917c3 100644 --- a/src/MOD_PARTIT.F90 +++ b/src/MOD_PARTIT.F90 @@ -5,6 +5,9 @@ module MOD_PARTIT USE, intrinsic :: ISO_FORTRAN_ENV USE MOD_WRITE_BINARY_ARRAYS USE MOD_READ_BINARY_ARRAYS +#if defined(_OPENMP) + USE OMP_LIB +#endif IMPLICIT NONE SAVE include 'mpif.h' @@ -69,11 +72,15 @@ module MOD_PARTIT integer, allocatable, dimension(:) :: myList_edge2D integer :: pe_status = 0 ! if /=0 then something is wrong - !!! remPtr_* are constructed during the runtime ans shall not be dumped!!! + !!! remPtr_* are constructed during the runtime and shall not be dumped!!! integer, allocatable :: remPtr_nod2D(:), remList_nod2D(:) integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) logical :: elem_full_flag +#if defined(_OPENMP) + !!! plock is constructed during the runtime and shall not be dumped!!! + integer(omp_lock_kind), allocatable :: plock(:) +#endif contains procedure WRITE_T_PARTIT procedure READ_T_PARTIT diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 1c74cc724..552349af7 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -506,6 +506,14 @@ subroutine init_gatherLists(partit) call MPI_SEND(myList_elem2D, myDim_elem2D, MPI_INTEGER, 0, 3, MPI_COMM_FESOM, MPIerr ) endif +!$OMP MASTER +#if defined(_OPENMP) + allocate(partit%plock(partit%myDim_nod2D+partit%eDim_nod2D)) + do n=1, myDim_nod2D+partit%eDim_nod2D + call omp_init_lock_with_hint(partit%plock(n),omp_sync_hint_speculative+omp_sync_hint_uncontended) + enddo +#endif +!$OMP END MASTER end subroutine init_gatherLists !=================================================================== subroutine status_check(partit) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 434c2186a..b405c93cf 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -108,11 +108,16 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, ! part of antidiffusive flux if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then ! compute the low order upwind horizontal flux - ! init_zero=.true. : zero the horizontal flux before computation - ! init_zero=.false. : input flux will be substracted - call adv_tra_hor_upw1(vel, ttf, partit, mesh, adv_flux_hor, init_zero=.true.) + ! o_init_zero=.true. : zero the horizontal flux before computation + ! o_init_zero=.false. : input flux will be substracted + call adv_tra_hor_upw1(vel, ttf, partit, mesh, adv_flux_hor, o_init_zero=.true.) ! update the LO solution for horizontal contribution - fct_LO=0.0_WP +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + fct_LO(:,n)=0.0_WP + end do +!$OMP END PARALLEL DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(e, enodes, el, nl1, nu1, nl2, nu2, nz) do e=1, myDim_edge2D enodes=edges(:,e) el=edge_tri(:,e) @@ -130,15 +135,29 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, if (nu2>0) nu12 = min(nu1,nu2) !!PS do nz=1, max(nl1, nl2) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) +#endif + do nz=nu12, nl12 + fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e) + end do +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) +#endif do nz=nu12, nl12 - fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e) - fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e) + fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e) end do - end do +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif + end do +!$OMP END PARALLEL DO ! compute the low order upwind vertical flux (explicit part only) ! zero the input/output flux before computation - call adv_tra_ver_upw1(we, ttf, partit, mesh, adv_flux_ver, init_zero=.true.) + call adv_tra_ver_upw1(we, ttf, partit, mesh, adv_flux_ver, o_init_zero=.true.) ! update the LO solution for vertical contribution +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nu1, nl1, nz) do n=1, myDim_nod2D nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -147,30 +166,35 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n) end do end do +<<<<<<< HEAD if (dynamics%use_wsplit) then !wvel/=wvel_e ! update for implicit contribution (use_wsplit option) +======= +!$OMP END PARALLEL DO + if (w_split) then !wvel/=wvel_e + ! update for implicit contribution (w_split option) +>>>>>>> tracer advection part has been fullly OpenMP parallelized. It turns out that OpenMP does not slow down the modes as compared to MPI. call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation ! --> compute here low order part of vertical anti diffusive fluxes, ! has to be done on the full vertical velocity w - call adv_tra_ver_upw1(w, ttf, partit, mesh, adv_flux_ver, init_zero=.true.) - end if + call adv_tra_ver_upw1(w, ttf, partit, mesh, adv_flux_ver, o_init_zero=.true.) + end if call exchange_nod(fct_LO, partit) end if - do_zero_flux=.true. if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') do_zero_flux=.false. !___________________________________________________________________________ ! do horizontal tracer advection, in case of FCT high order solution SELECT CASE(trim(tracers%data(tr_num)%tra_adv_hor)) CASE('MUSCL') - ! compute the untidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above) - call adv_tra_hor_muscl(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, init_zero=do_zero_flux) + ! compute the untidiffusive horizontal flux (o_init_zero=.false.: input is the LO horizontal flux computed above) + call adv_tra_hor_muscl(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, o_init_zero=do_zero_flux) CASE('MFCT') - call adv_tra_hor_mfct(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, init_zero=do_zero_flux) + call adv_tra_hor_mfct(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, o_init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_hor_upw1(vel, ttfAB, partit, mesh, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_upw1(vel, ttfAB, partit, mesh, adv_flux_hor, o_init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracers%data(tr_num)%tra_adv_hor), '! Check your namelists!' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) @@ -184,14 +208,14 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, ! do vertical tracer advection, in case of FCT high order solution SELECT CASE(trim(tracers%data(tr_num)%tra_adv_ver)) CASE('QR4C') - ! compute the untidiffusive vertical flux (init_zero=.false.:input is the LO vertical flux computed above) - call adv_tra_ver_qr4c ( pwvel, ttfAB, partit, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) + ! compute the untidiffusive vertical flux (o_init_zero=.false.:input is the LO vertical flux computed above) + call adv_tra_ver_qr4c ( pwvel, ttfAB, partit, mesh, optv, adv_flux_ver, o_init_zero=do_zero_flux) CASE('CDIFF') - call adv_tra_ver_cdiff( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_cdiff( pwvel, ttfAB, partit, mesh, adv_flux_ver, o_init_zero=do_zero_flux) CASE('PPM') - call adv_tra_vert_ppm(dt, pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_vert_ppm(dt, pwvel, ttfAB, partit, mesh, adv_flux_ver, o_init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_ver_upw1 ( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_upw1 ( pwvel, ttfAB, partit, mesh, adv_flux_ver, o_init_zero=do_zero_flux) CASE DEFAULT !unknown if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) @@ -260,9 +284,9 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, dttf_v(nz,n)=dttf_v(nz,n) + (flux_v(nz,n)-flux_v(nz+1,n))*dt/areasvol(nz,n) end do end do -!$OMP END DO -!$OMP END PARALLEL +!$OMP END DO ! Horizontal +!$OMP DO do edge=1, myDim_edge2D enodes(1:2)=edges(:,edge) el=edge_tri(:,edge) @@ -280,10 +304,23 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, nu12 = nu1 if (nu2>0) nu12 = min(nu1,nu2) - !!PS do nz=1, max(nl1, nl2) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) +#endif do nz=nu12, nl12 dttf_h(nz,enodes(1))=dttf_h(nz,enodes(1))+flux_h(nz,edge)*dt/areasvol(nz,enodes(1)) + end do +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) +#endif + do nz=nu12, nl12 dttf_h(nz,enodes(2))=dttf_h(nz,enodes(2))-flux_h(nz,edge)*dt/areasvol(nz,enodes(2)) end do +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif end do +!$OMP END DO +!$OMP END PARALLEL end subroutine oce_tra_adv_flux2dtracer diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index c68facd23..3b176d1ab 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -81,9 +81,6 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, USE MOD_PARTIT USE MOD_PARSUP USE g_comm_auto -#if defined(_OPENMP) - USE OMP_LIB -#endif implicit none real(kind=WP), intent(in), target :: dt type(t_mesh), intent(in), target :: mesh @@ -101,12 +98,6 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, real(kind=WP) :: flux, ae,tvert_max(mesh%nl-1),tvert_min(mesh%nl-1) real(kind=WP) :: flux_eps=1e-16 real(kind=WP) :: bignumber=1e3 - integer :: vlimit=1 -#if defined(_OPENMP) - integer(omp_lock_kind), allocatable, save :: plock(:) - integer(omp_lock_kind) :: mlock(partit%myDim_nod2D) -#endif - logical, save :: l_first=.true. #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -114,17 +105,6 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, k, elem, enodes, num, el, nl1, nl2, nu1, nu2, nl12, nu12, edge, & !$OMP flux, ae,tvert_max, tvert_min) -!$OMP MASTER -#if defined(_OPENMP) - if (l_first) then - allocate(plock(partit%myDim_nod2D+partit%eDim_nod2D)) - do n=1, myDim_nod2D+partit%eDim_nod2D - call omp_init_lock_with_hint(plock(n),omp_sync_hint_speculative+omp_sync_hint_uncontended) - enddo - l_first = .false. - endif -#endif -!$OMP END MASTER ! -------------------------------------------------------------------------- ! ttf is the tracer field on step n ! del_ttf is the increment @@ -246,22 +226,22 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, nu12 = nu1 if (nu2>0) nu12 = min(nu1,nu2) #if defined(_OPENMP) - call omp_set_lock(plock(enodes(1))) + call omp_set_lock(partit%plock(enodes(1))) #endif do nz=nu12, nl12 fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) end do #if defined(_OPENMP) - call omp_unset_lock(plock(enodes(1))) - call omp_set_lock(plock(enodes(2))) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) #endif do nz=nu12, nl12 fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) end do #if defined(_OPENMP) - call omp_unset_lock(plock(enodes(2))) + call omp_unset_lock(partit%plock(enodes(2))) #endif end do !$OMP END DO diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 9214a277d..01ae06a26 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -8,7 +8,7 @@ module oce_adv_tra_hor_interfaces ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) + subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -18,7 +18,7 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine !=============================================================================== ! MUSCL @@ -27,7 +27,7 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) + subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -39,11 +39,11 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine ! a not stable version of MUSCL (reconstruction in the vicinity of bottom topography is not upwind) ! it runs with FCT option only - subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -54,14 +54,14 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine end interface end module ! ! !=============================================================================== -subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) +subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -72,7 +72,8 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: a, vflux integer :: el(2), enodes(2), nz, edge @@ -83,10 +84,16 @@ subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do edge=1, myDim_edge2D + flux(:,edge)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! The result is the low-order solution horizontal fluxes @@ -223,7 +230,7 @@ end subroutine adv_tra_hor_upw1 ! ! !=============================================================================== -subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) +subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -238,7 +245,8 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: Tmean1, Tmean2, cHO real(kind=WP) :: c_lo(2) @@ -251,10 +259,16 @@ subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_g #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do edge=1, myDim_edge2D + flux(:,edge)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! The result is the low-order solution horizontal fluxes @@ -501,7 +515,7 @@ end subroutine adv_tra_hor_muscl ! ! !=============================================================================== - subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -515,7 +529,8 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: Tmean1, Tmean2, cHO real(kind=WP) :: a, vflux @@ -527,10 +542,16 @@ subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_gr #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do edge=1, myDim_edge2D + flux(:,edge)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! The result is the low-order solution horizontal fluxes diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index 03a7cb4e8..d8f3bea5e 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -15,10 +15,10 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) !=============================================================================== ! 1st order upwind (explicit) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) + subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -27,15 +27,15 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine !=============================================================================== ! QR (4th order centerd) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) + subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -45,34 +45,33 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine !=============================================================================== ! Vertical advection with PPM reconstruction (5th order) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) + subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP real(kind=WP), intent(in), target :: dt type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - integer :: n, nz, nl1 real(kind=WP) :: tvert(mesh%nl), tv real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine ! central difference reconstruction (2nd order, use only with FCT) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) + subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -83,7 +82,7 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero end subroutine end interface end module @@ -240,7 +239,7 @@ end subroutine adv_tra_vert_impl ! ! !=============================================================================== -subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) +subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -255,19 +254,27 @@ subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO end if !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert, n, nz, nzmax, nzmin) !$OMP DO + do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n) @@ -301,7 +308,7 @@ end subroutine adv_tra_ver_upw1 ! ! !=============================================================================== -subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) +subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, o_init_zero) use MOD_MESH use o_ARRAYS use o_PARAM @@ -314,7 +321,8 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: tvert(mesh%nl) integer :: n, nz, nzmax, nzmin real(kind=WP) :: Tmean, Tmean1, Tmean2 @@ -325,13 +333,20 @@ subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO end if !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert,n, nz, nzmax, nzmin, Tmean, Tmean1, Tmean2, qc, qu,qd) !$OMP DO + do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n) @@ -380,7 +395,7 @@ end subroutine adv_tra_ver_qr4c ! ! !=============================================================================== -subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) +subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -393,7 +408,8 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: tvert(mesh%nl), tv(mesh%nl), aL, aR, aj, x real(kind=WP) :: dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1 integer :: n, nz, nzmax, nzmin @@ -404,10 +420,16 @@ subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! -------------------------------------------------------------------------- @@ -568,7 +590,7 @@ end subroutine adv_tra_vert_ppm ! ! !=============================================================================== -subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) +subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH use MOD_TRACER USE MOD_PARTIT @@ -580,7 +602,8 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) - logical, optional :: init_zero + logical, optional :: o_init_zero + logical :: l_init_zero integer :: n, nz, nzmax, nzmin real(kind=WP) :: tvert(mesh%nl), tv #include "associate_part_def.h" @@ -588,11 +611,18 @@ subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO + end if + !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, tv, tvert) !$OMP DO do n=1, myDim_nod2D From 3ba99f0673d30f1421d11a405e421d008d3036eb Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 4 Nov 2021 11:55:36 +0100 Subject: [PATCH 507/909] omp_init_lock_with_hint is implemented only OPENMP v.5 and we recommend to use it. For older versions omp_init_lock will be used and is less efficient. --- src/gen_modules_partitioning.F90 | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 552349af7..4f69080c6 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -510,7 +510,13 @@ subroutine init_gatherLists(partit) #if defined(_OPENMP) allocate(partit%plock(partit%myDim_nod2D+partit%eDim_nod2D)) do n=1, myDim_nod2D+partit%eDim_nod2D +!experiments showd that OPENMP5 implementation of the lock (201811) is >10% more efficient +!make sure you use OPENMP v. 5.0 +#if _OPENMP >= 201811 call omp_init_lock_with_hint(partit%plock(n),omp_sync_hint_speculative+omp_sync_hint_uncontended) +#else + call omp_init_lock(partit%plock(n)) +#endif enddo #endif !$OMP END MASTER From 6fcb265b8d5b35f7ef09fca21aaa465b0c3f3aff Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 3 Nov 2021 17:07:43 +0000 Subject: [PATCH 508/909] use consistent target attribute, otherwise compilation fails with cray compiler --- src/gen_modules_partitioning.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 4f69080c6..6522a8b2d 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -25,7 +25,7 @@ subroutine init_mpi_types(partit, mesh) USE MOD_PARTIT USE MOD_PARSUP implicit none - type(t_partit), intent(in), target :: partit + type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh end subroutine From bee016c713087870a4838f7d4b2d509c72cf22c1 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 17:14:12 +0100 Subject: [PATCH 509/909] compile FESOM with oenmp support --- src/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index fd13d5d2e..9fbc8a8dc 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -4,6 +4,8 @@ project(fesom C Fortran) option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) +find_package(OpenMP REQUIRED) + # get our source files set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt if(${USE_ICEPACK}) @@ -84,6 +86,7 @@ target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARI target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) target_link_libraries(${PROJECT_NAME} async_threads_cpp) set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) +target_link_libraries(${PROJECT_NAME} OpenMP::OpenMP_Fortran) set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) From 4146c7c97cc922b913d73cbe16af2062cc0d5d70 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 17:20:13 +0100 Subject: [PATCH 510/909] be able to switch building with OpenMP on and off --- src/CMakeLists.txt | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9fbc8a8dc..9500ef1ea 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -4,7 +4,11 @@ project(fesom C Fortran) option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) -find_package(OpenMP REQUIRED) +option(ENABLE_OPENMP "build FESOM with OpenMP" OFF) +if(${ENABLE_OPENMP}) + find_package(OpenMP REQUIRED) +endif() + # get our source files set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt @@ -86,7 +90,9 @@ target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARI target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) target_link_libraries(${PROJECT_NAME} async_threads_cpp) set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) -target_link_libraries(${PROJECT_NAME} OpenMP::OpenMP_Fortran) +if(${ENABLE_OPENMP}) + target_link_libraries(${PROJECT_NAME} OpenMP::OpenMP_Fortran) +endif() set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) From 2b900270e48fd2a91585ab1b584226e4e1277020 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 1 Nov 2021 18:15:24 +0100 Subject: [PATCH 511/909] skeleton for FESOM main in three parts (as required for IFS coupling) --- src/fvom.F90 | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100755 src/fvom.F90 diff --git a/src/fvom.F90 b/src/fvom.F90 new file mode 100755 index 000000000..82affbbf4 --- /dev/null +++ b/src/fvom.F90 @@ -0,0 +1,26 @@ +module fvom_module + implicit none + public fesom_init, fesom_runloop, fesom_finalize + private + +contains + + subroutine fesom_init(nsteps) + integer, intent(out) :: nsteps + ! EO parameters + + end subroutine + + + subroutine fesom_runloop(nsteps) + integer, intent(in) :: nsteps + ! EO parameters + + end subroutine + + + subroutine fesom_finalize() + + end subroutine + +end module From 6e27468c4b0f121ca785adc3bdd7f6b3487e1a79 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 2 Nov 2021 10:54:03 +0100 Subject: [PATCH 512/909] add modue to save state (e.g. derived types) between calls to fesom_runloop --- src/fvom.F90 | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/fvom.F90 b/src/fvom.F90 index 82affbbf4..7d3faa7f0 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -1,3 +1,12 @@ +! synopsis: save any derived types we initialize +! so they can be reused after fesom_init +module fvom_types_storage_module + +end module + +! synopsis: main FESOM program split into 3 parts +! this way FESOM can e.g. be used as a library with an external time loop driver +! used with IFS-FESOM module fvom_module implicit none public fesom_init, fesom_runloop, fesom_finalize From a34506e08ee922160baa6472bd3bbc0ee10d54ca Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 2 Nov 2021 11:09:03 +0100 Subject: [PATCH 513/909] move FESOM initialization to separate subroutine --- src/fvom.F90 | 217 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 217 insertions(+) diff --git a/src/fvom.F90 b/src/fvom.F90 index 7d3faa7f0..34310a41f 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -15,8 +15,225 @@ module fvom_module contains subroutine fesom_init(nsteps) + USE MOD_MESH + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE o_ARRAYS + USE o_PARAM + USE i_PARAM + use i_ARRAYS + use g_clock + use g_config + use g_comm_auto + use g_forcing_arrays + use io_RESTART + use io_MEANDATA + use io_mesh_info + use diagnostics + use mo_tidal + use tracer_init_interface + use ocean_setup_interface + use ice_setup_interface + use ocean2ice_interface + use oce_fluxes_interface + use update_atm_forcing_interface + use before_oce_step_interface + use oce_timestep_ale_interface + use read_mesh_interface + use fesom_version_info_module + use command_line_options_module + ! Define icepack module +#if defined (__icepack) + use icedrv_main, only: set_icepack, init_icepack, alloc_icepack +#endif + +#if defined (__oasis) + use cpl_driver +#endif + + implicit none + integer, intent(out) :: nsteps ! EO parameters + + integer :: n, offset, row, i, provided + integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM + real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc + real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing + real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing + real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart + real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) + real(kind=real32) :: runtime_alltimesteps + + + type(t_mesh), target, save :: mesh + type(t_tracer), target, save :: tracers + type(t_partit), target, save :: partit + + + character(LEN=256) :: dump_dir, dump_filename + logical :: L_EXISTS + type(t_mesh), target, save :: mesh_copy + type(t_tracer), target, save :: tracers_copy + + character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt + integer mpi_version_len + + + if(command_argument_count() > 0) then + call command_line_options%parse() + stop + end if + +#ifndef __oifs + !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) + !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS + call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, i) +#endif + + +#if defined (__oasis) + call cpl_oasis3mct_init(partit%MPI_COMM_FESOM) +#endif + t1 = MPI_Wtime() + + call par_init(partit) + + mype =>partit%mype + MPIerr =>partit%MPIerr + MPI_COMM_FESOM=>partit%MPI_COMM_FESOM + npes =>partit%npes + if(mype==0) then + write(*,*) + print *,"FESOM2 git SHA: "//fesom_git_sha() + call MPI_Get_library_version(mpi_version_txt, mpi_version_len, MPIERR) + print *,"MPI library version: "//trim(mpi_version_txt) + print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;32m'//' --> FESOM BUILDS UP MODEL CONFIGURATION '//achar(27)//'[0m' + end if + !===================== + ! Read configuration data, + ! load the mesh and fill in + ! auxiliary mesh arrays + !===================== + call setup_model(partit) ! Read Namelists, always before clock_init + call clock_init(partit) ! read the clock file + call get_run_steps(nsteps, partit) + call mesh_setup(partit, mesh) + + if (mype==0) write(*,*) 'FESOM mesh_setup... complete' + + !===================== + ! Allocate field variables + ! and additional arrays needed for + ! fancy advection etc. + !===================== + call check_mesh_consistency(partit, mesh) + if (mype==0) t2=MPI_Wtime() + + call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") + call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) + call ocean_setup(tracers, partit, mesh) + + if (mype==0) then + write(*,*) 'FESOM ocean_setup... complete' + t3=MPI_Wtime() + endif + call forcing_setup(partit, mesh) + + if (mype==0) t4=MPI_Wtime() + if (use_ice) then + call ice_setup(tracers, partit, mesh) + ice_steps_since_upd = ice_ave_steps-1 + ice_update=.true. + if (mype==0) write(*,*) 'EVP scheme option=', whichEVP + endif + if (mype==0) t5=MPI_Wtime() + call compute_diagnostics(0, tracers, partit, mesh) ! allocate arrays for diagnostic +#if defined (__oasis) + call cpl_oasis3mct_define_unstr(partit, mesh) + if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv +#endif + +#if defined (__icepack) + !===================== + ! Setup icepack + !===================== + if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' + call set_icepack(partit) + call alloc_icepack + call init_icepack(tracers%data(1), mesh) + if (mype==0) write(*,*) 'Icepack: setup complete' +#endif + call clock_newyear ! check if it is a new year + if (mype==0) t6=MPI_Wtime() + !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ + ! The interface to the restart module is made via call restart ! + ! The inputs are: istep, l_write, l_create + ! if istep is not zero it will be decided whether restart shall be written + ! if l_write is TRUE the restart will be forced + ! if l_read the restart will be read + ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) + call restart(0, .false., r_restart, tracers, partit, mesh) ! istep, l_write, l_read + if (mype==0) t7=MPI_Wtime() + ! store grid information into netcdf file + if (.not. r_restart) call write_mesh_info(partit, mesh) + + !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ + !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ + if (r_restart) then + call restart_thickness_ale(partit, mesh) + end if + if (mype==0) then + t8=MPI_Wtime() + + rtime_setup_mesh = real( t2 - t1 ,real32) + rtime_setup_ocean = real( t3 - t2 ,real32) + rtime_setup_forcing = real( t4 - t3 ,real32) + rtime_setup_ice = real( t5 - t4 ,real32) + rtime_setup_restart = real( t7 - t6 ,real32) + rtime_setup_other = real((t8 - t7) + (t6 - t5) ,real32) + + write(*,*) '==========================================' + write(*,*) 'MODEL SETUP took on mype=0 [seconds] ' + write(*,*) 'runtime setup total ',real(t8-t1,real32) + write(*,*) ' > runtime setup mesh ',rtime_setup_mesh + write(*,*) ' > runtime setup ocean ',rtime_setup_ocean + write(*,*) ' > runtime setup forcing ',rtime_setup_forcing + write(*,*) ' > runtime setup ice ',rtime_setup_ice + write(*,*) ' > runtime setup restart ',rtime_setup_restart + write(*,*) ' > runtime setup other ',rtime_setup_other + write(*,*) '============================================' + endif + + DUMP_DIR='DUMP/' + INQUIRE(file=trim(dump_dir), EXIST=L_EXISTS) + if (.not. L_EXISTS) call system('mkdir '//trim(dump_dir)) + + write (dump_filename, "(A7,I7.7)") "t_mesh.", mype + open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") + write (mype+300) mesh + close (mype+300) + + ! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") + ! read (mype+300) mesh_copy + ! close (mype+300) + + write (dump_filename, "(A9,I7.7)") "t_tracer.", mype + open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") + write (mype+300) tracers + close (mype+300) + + ! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") + ! read (mype+300) tracers_copy + ! close (mype+300) + + !call par_ex(partit%MPI_COMM_FESOM, partit%mype) + !stop + ! + ! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value + end subroutine From 38fbd6327005694354be7cc817ff942ac93b74d5 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 2 Nov 2021 12:29:24 +0100 Subject: [PATCH 514/909] store all variables from the FESOM main program in a separate type which we could reuse across multiple calls to fesom_runloop --- src/fvom.F90 | 264 ++++++++++++++++++++++++++------------------------- 1 file changed, 135 insertions(+), 129 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 34310a41f..6d5ebfab6 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -1,63 +1,48 @@ ! synopsis: save any derived types we initialize ! so they can be reused after fesom_init -module fvom_types_storage_module - -end module - -! synopsis: main FESOM program split into 3 parts -! this way FESOM can e.g. be used as a library with an external time loop driver -! used with IFS-FESOM -module fvom_module - implicit none - public fesom_init, fesom_runloop, fesom_finalize - private - -contains - - subroutine fesom_init(nsteps) - USE MOD_MESH - USE MOD_TRACER - USE MOD_PARTIT - USE MOD_PARSUP - USE o_ARRAYS - USE o_PARAM - USE i_PARAM - use i_ARRAYS - use g_clock - use g_config - use g_comm_auto - use g_forcing_arrays - use io_RESTART - use io_MEANDATA - use io_mesh_info - use diagnostics - use mo_tidal - use tracer_init_interface - use ocean_setup_interface - use ice_setup_interface - use ocean2ice_interface - use oce_fluxes_interface - use update_atm_forcing_interface - use before_oce_step_interface - use oce_timestep_ale_interface - use read_mesh_interface - use fesom_version_info_module - use command_line_options_module - ! Define icepack module +module fesom_main_storage_module + USE MOD_MESH + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE o_ARRAYS + USE o_PARAM + USE i_PARAM + use i_ARRAYS + use g_clock + use g_config + use g_comm_auto + use g_forcing_arrays + use io_RESTART + use io_MEANDATA + use io_mesh_info + use diagnostics + use mo_tidal + use tracer_init_interface + use ocean_setup_interface + use ice_setup_interface + use ocean2ice_interface + use oce_fluxes_interface + use update_atm_forcing_interface + use before_oce_step_interface + use oce_timestep_ale_interface + use read_mesh_interface + use fesom_version_info_module + use command_line_options_module + ! Define icepack module #if defined (__icepack) - use icedrv_main, only: set_icepack, init_icepack, alloc_icepack + use icedrv_main, only: set_icepack, init_icepack, alloc_icepack #endif #if defined (__oasis) - use cpl_driver + use cpl_driver #endif - implicit none - - integer, intent(out) :: nsteps - ! EO parameters + implicit none + + type :: fesom_main_storage_type - integer :: n, offset, row, i, provided + integer :: n, nsteps, offset, row, i, provided integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing @@ -67,20 +52,40 @@ subroutine fesom_init(nsteps) real(kind=real32) :: runtime_alltimesteps - type(t_mesh), target, save :: mesh - type(t_tracer), target, save :: tracers - type(t_partit), target, save :: partit + type(t_mesh) mesh + type(t_tracer) tracers + type(t_partit) partit character(LEN=256) :: dump_dir, dump_filename logical :: L_EXISTS - type(t_mesh), target, save :: mesh_copy - type(t_tracer), target, save :: tracers_copy + type(t_mesh) mesh_copy + type(t_tracer) tracers_copy character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt integer mpi_version_len + + end type + type(fesom_main_storage_type), save, target :: f + +end module +! synopsis: main FESOM program split into 3 parts +! this way FESOM can e.g. be used as a library with an external time loop driver +! used with IFS-FESOM +module fvom_module + implicit none + public fesom_init, fesom_runloop, fesom_finalize + private + +contains + + subroutine fesom_init(fesom_used_nsteps) + use fesom_main_storage_module + integer, intent(out) :: fesom_used_nsteps + ! EO parameters + if(command_argument_count() > 0) then call command_line_options%parse() stop @@ -89,26 +94,26 @@ subroutine fesom_init(nsteps) #ifndef __oifs !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS - call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, i) + call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, f%provided, f%i) #endif #if defined (__oasis) call cpl_oasis3mct_init(partit%MPI_COMM_FESOM) #endif - t1 = MPI_Wtime() + f%t1 = MPI_Wtime() - call par_init(partit) + call par_init(f%partit) - mype =>partit%mype - MPIerr =>partit%MPIerr - MPI_COMM_FESOM=>partit%MPI_COMM_FESOM - npes =>partit%npes - if(mype==0) then + f%mype =>f%partit%mype + f%MPIerr =>f%partit%MPIerr + f%MPI_COMM_FESOM=>f%partit%MPI_COMM_FESOM + f%npes =>f%partit%npes + if(f%mype==0) then write(*,*) print *,"FESOM2 git SHA: "//fesom_git_sha() - call MPI_Get_library_version(mpi_version_txt, mpi_version_len, MPIERR) - print *,"MPI library version: "//trim(mpi_version_txt) + call MPI_Get_library_version(f%mpi_version_txt, f%mpi_version_len, f%MPIERR) + print *,"MPI library version: "//trim(f%mpi_version_txt) print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[7;32m'//' --> FESOM BUILDS UP MODEL CONFIGURATION '//achar(27)//'[0m' end if @@ -117,57 +122,57 @@ subroutine fesom_init(nsteps) ! load the mesh and fill in ! auxiliary mesh arrays !===================== - call setup_model(partit) ! Read Namelists, always before clock_init - call clock_init(partit) ! read the clock file - call get_run_steps(nsteps, partit) - call mesh_setup(partit, mesh) + call setup_model(f%partit) ! Read Namelists, always before clock_init + call clock_init(f%partit) ! read the clock file + call get_run_steps(f%nsteps, f%partit) + call mesh_setup(f%partit, f%mesh) - if (mype==0) write(*,*) 'FESOM mesh_setup... complete' + if (f%mype==0) write(*,*) 'FESOM mesh_setup... complete' !===================== ! Allocate field variables ! and additional arrays needed for ! fancy advection etc. !===================== - call check_mesh_consistency(partit, mesh) - if (mype==0) t2=MPI_Wtime() + call check_mesh_consistency(f%partit, f%mesh) + if (f%mype==0) f%t2=MPI_Wtime() - call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") - call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) - call ocean_setup(tracers, partit, mesh) + call tracer_init(f%tracers, f%partit, f%mesh) ! allocate array of ocean tracers (derived type "t_tracer") + call arrays_init(f%tracers%num_tracers, f%partit, f%mesh) ! allocate other arrays (to be refactured same as tracers in the future) + call ocean_setup(f%tracers, f%partit, f%mesh) - if (mype==0) then + if (f%mype==0) then write(*,*) 'FESOM ocean_setup... complete' - t3=MPI_Wtime() + f%t3=MPI_Wtime() endif - call forcing_setup(partit, mesh) + call forcing_setup(f%partit, f%mesh) - if (mype==0) t4=MPI_Wtime() + if (f%mype==0) f%t4=MPI_Wtime() if (use_ice) then - call ice_setup(tracers, partit, mesh) + call ice_setup(f%tracers, f%partit, f%mesh) ice_steps_since_upd = ice_ave_steps-1 ice_update=.true. - if (mype==0) write(*,*) 'EVP scheme option=', whichEVP + if (f%mype==0) write(*,*) 'EVP scheme option=', whichEVP endif - if (mype==0) t5=MPI_Wtime() - call compute_diagnostics(0, tracers, partit, mesh) ! allocate arrays for diagnostic + if (f%mype==0) f%t5=MPI_Wtime() + call compute_diagnostics(0, f%tracers, f%partit, f%mesh) ! allocate arrays for diagnostic #if defined (__oasis) - call cpl_oasis3mct_define_unstr(partit, mesh) - if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv + call cpl_oasis3mct_define_unstr(f%partit, f%mesh) + if(f%mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv #endif #if defined (__icepack) !===================== ! Setup icepack !===================== - if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' - call set_icepack(partit) + if (f%mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' + call set_icepack(f%partit) call alloc_icepack - call init_icepack(tracers%data(1), mesh) - if (mype==0) write(*,*) 'Icepack: setup complete' + call init_icepack(f%tracers%data(1), f%mesh) + if (f%mype==0) write(*,*) 'Icepack: setup complete' #endif call clock_newyear ! check if it is a new year - if (mype==0) t6=MPI_Wtime() + if (f%mype==0) f%t6=MPI_Wtime() !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ ! The interface to the restart module is made via call restart ! ! The inputs are: istep, l_write, l_create @@ -175,66 +180,67 @@ subroutine fesom_init(nsteps) ! if l_write is TRUE the restart will be forced ! if l_read the restart will be read ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, tracers, partit, mesh) ! istep, l_write, l_read - if (mype==0) t7=MPI_Wtime() + call restart(0, .false., r_restart, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read + if (f%mype==0) f%t7=MPI_Wtime() ! store grid information into netcdf file - if (.not. r_restart) call write_mesh_info(partit, mesh) + if (.not. r_restart) call write_mesh_info(f%partit, f%mesh) !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ if (r_restart) then - call restart_thickness_ale(partit, mesh) + call restart_thickness_ale(f%partit, f%mesh) end if - if (mype==0) then - t8=MPI_Wtime() + if (f%mype==0) then + f%t8=MPI_Wtime() - rtime_setup_mesh = real( t2 - t1 ,real32) - rtime_setup_ocean = real( t3 - t2 ,real32) - rtime_setup_forcing = real( t4 - t3 ,real32) - rtime_setup_ice = real( t5 - t4 ,real32) - rtime_setup_restart = real( t7 - t6 ,real32) - rtime_setup_other = real((t8 - t7) + (t6 - t5) ,real32) + f%rtime_setup_mesh = real( f%t2 - f%t1 ,real32) + f%rtime_setup_ocean = real( f%t3 - f%t2 ,real32) + f%rtime_setup_forcing = real( f%t4 - f%t3 ,real32) + f%rtime_setup_ice = real( f%t5 - f%t4 ,real32) + f%rtime_setup_restart = real( f%t7 - f%t6 ,real32) + f%rtime_setup_other = real((f%t8 - f%t7) + (f%t6 - f%t5) ,real32) write(*,*) '==========================================' write(*,*) 'MODEL SETUP took on mype=0 [seconds] ' - write(*,*) 'runtime setup total ',real(t8-t1,real32) - write(*,*) ' > runtime setup mesh ',rtime_setup_mesh - write(*,*) ' > runtime setup ocean ',rtime_setup_ocean - write(*,*) ' > runtime setup forcing ',rtime_setup_forcing - write(*,*) ' > runtime setup ice ',rtime_setup_ice - write(*,*) ' > runtime setup restart ',rtime_setup_restart - write(*,*) ' > runtime setup other ',rtime_setup_other + write(*,*) 'runtime setup total ',real(f%t8-f%t1,real32) + write(*,*) ' > runtime setup mesh ',f%rtime_setup_mesh + write(*,*) ' > runtime setup ocean ',f%rtime_setup_ocean + write(*,*) ' > runtime setup forcing ',f%rtime_setup_forcing + write(*,*) ' > runtime setup ice ',f%rtime_setup_ice + write(*,*) ' > runtime setup restart ',f%rtime_setup_restart + write(*,*) ' > runtime setup other ',f%rtime_setup_other write(*,*) '============================================' endif - DUMP_DIR='DUMP/' - INQUIRE(file=trim(dump_dir), EXIST=L_EXISTS) - if (.not. L_EXISTS) call system('mkdir '//trim(dump_dir)) + f%dump_dir='DUMP/' + INQUIRE(file=trim(f%dump_dir), EXIST=f%L_EXISTS) + if (.not. f%L_EXISTS) call system('mkdir '//trim(f%dump_dir)) - write (dump_filename, "(A7,I7.7)") "t_mesh.", mype - open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") - write (mype+300) mesh - close (mype+300) + write (f%dump_filename, "(A7,I7.7)") "t_mesh.", f%mype + open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + write (f%mype+300) f%mesh + close (f%mype+300) - ! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") - ! read (mype+300) mesh_copy - ! close (mype+300) + ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") + ! read (f%mype+300) f%mesh_copy + ! close (f%mype+300) - write (dump_filename, "(A9,I7.7)") "t_tracer.", mype - open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") - write (mype+300) tracers - close (mype+300) + write (f%dump_filename, "(A9,I7.7)") "t_tracer.", f%mype + open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + write (f%mype+300) f%tracers + close (f%mype+300) - ! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") - ! read (mype+300) tracers_copy - ! close (mype+300) + ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") + ! read (f%mype+300) f%tracers_copy + ! close (f%mype+300) - !call par_ex(partit%MPI_COMM_FESOM, partit%mype) + !call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) !stop ! - ! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value + ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value + fesom_used_nsteps = f%nsteps end subroutine From 79f0f634dfce00999c47474218b013bdbdc2ec4f Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 2 Nov 2021 12:41:27 +0100 Subject: [PATCH 515/909] - rename total fesom timsteps paramerter - remove timing variables which are lokal to the runloop --- src/fvom.F90 | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 6d5ebfab6..6230ea7b5 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -45,7 +45,6 @@ module fesom_main_storage_module integer :: n, nsteps, offset, row, i, provided integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc - real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) @@ -81,9 +80,9 @@ module fvom_module contains - subroutine fesom_init(fesom_used_nsteps) + subroutine fesom_init(fesom_total_nsteps) use fesom_main_storage_module - integer, intent(out) :: fesom_used_nsteps + integer, intent(out) :: fesom_total_nsteps ! EO parameters if(command_argument_count() > 0) then @@ -240,7 +239,7 @@ subroutine fesom_init(fesom_used_nsteps) ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value - fesom_used_nsteps = f%nsteps + fesom_total_nsteps = f%nsteps end subroutine From 2e0f121f6c5d76cce109a7e974548847f71637b6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 09:45:12 +0100 Subject: [PATCH 516/909] - move FESOM runloop to separate subroutine - run for a number of given steps - store number of done steps --- src/fvom.F90 | 119 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 111 insertions(+), 8 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 6230ea7b5..eb576089a 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -42,7 +42,7 @@ module fesom_main_storage_module type :: fesom_main_storage_type - integer :: n, nsteps, offset, row, i, provided + integer :: n, from_nstep, offset, row, i, provided integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing @@ -123,7 +123,7 @@ subroutine fesom_init(fesom_total_nsteps) !===================== call setup_model(f%partit) ! Read Namelists, always before clock_init call clock_init(f%partit) ! read the clock file - call get_run_steps(f%nsteps, f%partit) + call get_run_steps(fesom_total_nsteps, f%partit) call mesh_setup(f%partit, f%mesh) if (f%mype==0) write(*,*) 'FESOM mesh_setup... complete' @@ -236,17 +236,120 @@ subroutine fesom_init(fesom_total_nsteps) !call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) !stop ! - ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value - - - fesom_total_nsteps = f%nsteps + ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value + + f%from_nstep = 1 end subroutine - subroutine fesom_runloop(nsteps) - integer, intent(in) :: nsteps + subroutine fesom_runloop(current_nsteps) + use fesom_main_storage_module + integer, intent(in) :: current_nsteps ! EO parameters + integer n + real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing + + !===================== + ! Time stepping + !===================== + +! Initialize timers + rtime_fullice = 0._WP + rtime_write_restart = 0._WP + rtime_write_means = 0._WP + rtime_compute_diag = 0._WP + rtime_read_forcing = 0._WP + + if (f%mype==0) write(*,*) 'FESOM start iteration before the barrier...' + call MPI_Barrier(f%MPI_COMM_FESOM, f%MPIERR) + + if (f%mype==0) then + write(*,*) 'FESOM start iteration after the barrier...' + f%t0 = MPI_Wtime() + endif + if(f%mype==0) then + write(*,*) + print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;32m'//' --> FESOM STARTS TIME LOOP '//achar(27)//'[0m' + end if + !___MODEL TIME STEPPING LOOP________________________________________________ + if (use_global_tides) then + call foreph_ini(yearnew, month, f%partit) + end if + do n=f%from_nstep, f%from_nstep-1+current_nsteps + if (use_global_tides) then + call foreph(f%partit, f%mesh) + end if + mstep = n + if (mod(n,logfile_outfreq)==0 .and. f%mype==0) then + write(*,*) 'FESOM =======================================================' +! write(*,*) 'FESOM step:',n,' day:', n*dt/24./3600., + write(*,*) 'FESOM step:',n,' day:', daynew,' year:',yearnew + write(*,*) + end if +#if defined (__oifs) || defined (__oasis) + seconds_til_now=INT(dt)*(n-1) +#endif + call clock + !___compute horizontal velocity on nodes (originaly on elements)________ + call compute_vel_nodes(f%partit, f%mesh) + !___model sea-ice step__________________________________________________ + f%t1 = MPI_Wtime() + if(use_ice) then + !___compute fluxes from ocean to ice________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' + call ocean2ice(f%tracers, f%partit, f%mesh) + + !___compute update of atmospheric forcing____________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' + f%t0_frc = MPI_Wtime() + call update_atm_forcing(n, f%tracers, f%partit, f%mesh) + f%t1_frc = MPI_Wtime() + !___compute ice step________________________________________________ + if (ice_steps_since_upd>=ice_ave_steps-1) then + ice_update=.true. + ice_steps_since_upd = 0 + else + ice_update=.false. + ice_steps_since_upd=ice_steps_since_upd+1 + endif + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ice_timestep(n)'//achar(27)//'[0m' + if (ice_update) call ice_timestep(n, f%partit, f%mesh) + !___compute fluxes to the ocean: heat, freshwater, momentum_________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' + call oce_fluxes_mom(f%partit, f%mesh) ! momentum only + call oce_fluxes(f%tracers, f%partit, f%mesh) + end if + call before_oce_step(f%tracers, f%partit, f%mesh) ! prepare the things if required + f%t2 = MPI_Wtime() + !___model ocean step____________________________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' + + call oce_timestep_ale(n, f%tracers, f%partit, f%mesh) + + f%t3 = MPI_Wtime() + !___compute energy diagnostics..._______________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' + call compute_diagnostics(1, f%tracers, f%partit, f%mesh) + + f%t4 = MPI_Wtime() + !___prepare output______________________________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' + call output (n, f%tracers, f%partit, f%mesh) + + f%t5 = MPI_Wtime() + call restart(n, .false., .false., f%tracers, f%partit, f%mesh) + f%t6 = MPI_Wtime() + + rtime_fullice = rtime_fullice + f%t2 - f%t1 + rtime_compute_diag = rtime_compute_diag + f%t4 - f%t3 + rtime_write_means = rtime_write_means + f%t5 - f%t4 + rtime_write_restart = rtime_write_restart + f%t6 - f%t5 + rtime_read_forcing = rtime_read_forcing + f%t1_frc - f%t0_frc + end do + + f%from_nstep = f%from_nstep+current_nsteps end subroutine From 919608533bf9549c1c5b4b494be8374e59bf4035 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 10:22:09 +0100 Subject: [PATCH 517/909] move back some variables from the runloop which are needed in the global storage for the finalize subroutine --- src/fvom.F90 | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index eb576089a..02fd57700 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -45,6 +45,7 @@ module fesom_main_storage_module integer :: n, from_nstep, offset, row, i, provided integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc + real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) @@ -238,6 +239,13 @@ subroutine fesom_init(fesom_total_nsteps) ! ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value + ! Initialize timers + f%rtime_fullice = 0._WP + f%rtime_write_restart = 0._WP + f%rtime_write_means = 0._WP + f%rtime_compute_diag = 0._WP + f%rtime_read_forcing = 0._WP + f%from_nstep = 1 end subroutine @@ -248,19 +256,11 @@ subroutine fesom_runloop(current_nsteps) ! EO parameters integer n - real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing !===================== ! Time stepping !===================== -! Initialize timers - rtime_fullice = 0._WP - rtime_write_restart = 0._WP - rtime_write_means = 0._WP - rtime_compute_diag = 0._WP - rtime_read_forcing = 0._WP - if (f%mype==0) write(*,*) 'FESOM start iteration before the barrier...' call MPI_Barrier(f%MPI_COMM_FESOM, f%MPIERR) @@ -342,11 +342,11 @@ subroutine fesom_runloop(current_nsteps) call restart(n, .false., .false., f%tracers, f%partit, f%mesh) f%t6 = MPI_Wtime() - rtime_fullice = rtime_fullice + f%t2 - f%t1 - rtime_compute_diag = rtime_compute_diag + f%t4 - f%t3 - rtime_write_means = rtime_write_means + f%t5 - f%t4 - rtime_write_restart = rtime_write_restart + f%t6 - f%t5 - rtime_read_forcing = rtime_read_forcing + f%t1_frc - f%t0_frc + f%rtime_fullice = f%rtime_fullice + f%t2 - f%t1 + f%rtime_compute_diag = f%rtime_compute_diag + f%t4 - f%t3 + f%rtime_write_means = f%rtime_write_means + f%t5 - f%t4 + f%rtime_write_restart = f%rtime_write_restart + f%t6 - f%t5 + f%rtime_read_forcing = f%rtime_read_forcing + f%t1_frc - f%t0_frc end do f%from_nstep = f%from_nstep+current_nsteps From 6905b9ea09f2a879fd831e854e225e530d7eb8f9 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 14:35:21 +0100 Subject: [PATCH 518/909] move FESOM finalization code to separate subroutine --- src/fvom.F90 | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 02fd57700..4ab323df9 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -48,7 +48,6 @@ module fesom_main_storage_module real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart - real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) real(kind=real32) :: runtime_alltimesteps @@ -354,7 +353,70 @@ subroutine fesom_runloop(current_nsteps) subroutine fesom_finalize() + use fesom_main_storage_module + ! EO parameters + real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) + + call finalize_output() + + !___FINISH MODEL RUN________________________________________________________ + + call MPI_Barrier(f%MPI_COMM_FESOM, f%MPIERR) + if (f%mype==0) then + f%t1 = MPI_Wtime() + f%runtime_alltimesteps = real(f%t1-f%t0,real32) + write(*,*) 'FESOM Run is finished, updating clock' + endif + + mean_rtime(1) = rtime_oce + mean_rtime(2) = rtime_oce_mixpres + mean_rtime(3) = rtime_oce_dyn + mean_rtime(4) = rtime_oce_dynssh + mean_rtime(5) = rtime_oce_solvessh + mean_rtime(6) = rtime_oce_GMRedi + mean_rtime(7) = rtime_oce_solvetra + mean_rtime(8) = rtime_ice + mean_rtime(9) = rtime_tot + mean_rtime(10) = f%rtime_fullice - f%rtime_read_forcing + mean_rtime(11) = f%rtime_compute_diag + mean_rtime(12) = f%rtime_write_means + mean_rtime(13) = f%rtime_write_restart + mean_rtime(14) = f%rtime_read_forcing + + max_rtime(1:14) = mean_rtime(1:14) + min_rtime(1:14) = mean_rtime(1:14) + call MPI_AllREDUCE(MPI_IN_PLACE, mean_rtime, 14, MPI_REAL, MPI_SUM, f%MPI_COMM_FESOM, f%MPIerr) + mean_rtime(1:14) = mean_rtime(1:14) / real(f%npes,real32) + call MPI_AllREDUCE(MPI_IN_PLACE, max_rtime, 14, MPI_REAL, MPI_MAX, f%MPI_COMM_FESOM, f%MPIerr) + call MPI_AllREDUCE(MPI_IN_PLACE, min_rtime, 14, MPI_REAL, MPI_MIN, f%MPI_COMM_FESOM, f%MPIerr) + + if (f%mype==0) then + write(*,*) '___MODEL RUNTIME mean, min, max per task [seconds]________________________' + write(*,*) ' runtime ocean:',mean_rtime(1), min_rtime(1), max_rtime(1) + write(*,*) ' > runtime oce. mix,pres. :',mean_rtime(2), min_rtime(2), max_rtime(2) + write(*,*) ' > runtime oce. dyn. u,v,w:',mean_rtime(3), min_rtime(3), max_rtime(3) + write(*,*) ' > runtime oce. dyn. ssh :',mean_rtime(4), min_rtime(4), max_rtime(4) + write(*,*) ' > runtime oce. solve ssh:',mean_rtime(5), min_rtime(5), max_rtime(5) + write(*,*) ' > runtime oce. GM/Redi :',mean_rtime(6), min_rtime(6), max_rtime(6) + write(*,*) ' > runtime oce. tracer :',mean_rtime(7), min_rtime(7), max_rtime(7) + write(*,*) ' runtime ice :',mean_rtime(10), min_rtime(10), max_rtime(10) + write(*,*) ' > runtime ice step :',mean_rtime(8), min_rtime(8), max_rtime(8) + write(*,*) ' runtime diag: ', mean_rtime(11), min_rtime(11), max_rtime(11) + write(*,*) ' runtime output: ', mean_rtime(12), min_rtime(12), max_rtime(12) + write(*,*) ' runtime restart:', mean_rtime(13), min_rtime(13), max_rtime(13) + write(*,*) ' runtime forcing:', mean_rtime(14), min_rtime(14), max_rtime(14) + write(*,*) ' runtime total (ice+oce):',mean_rtime(9), min_rtime(9), max_rtime(9) + write(*,*) + write(*,*) '============================================' + write(*,*) '=========== BENCHMARK RUNTIME ==============' + write(*,*) ' Number of cores : ',f%npes + write(*,*) ' Runtime for all timesteps : ',f%runtime_alltimesteps,' sec' + write(*,*) '============================================' + write(*,*) + end if +! call clock_finish + call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) end subroutine end module From dd93bafad2761eb7d556a736a611f0c9e785b9f5 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 3 Nov 2021 16:17:17 +0100 Subject: [PATCH 519/909] use 3-part FESOM main program --- src/fvom_main.F90 | 391 +--------------------------------------------- 1 file changed, 6 insertions(+), 385 deletions(-) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 index 1dc4a6abb..a48953ed2 100755 --- a/src/fvom_main.F90 +++ b/src/fvom_main.F90 @@ -7,391 +7,12 @@ !=============================================================================! program main -USE MOD_MESH -USE MOD_TRACER -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_DYN -USE o_ARRAYS -USE o_PARAM -USE i_PARAM -use i_ARRAYS -use g_clock -use g_config -use g_comm_auto -use g_forcing_arrays -use io_RESTART -use io_MEANDATA -use io_mesh_info -use diagnostics -use mo_tidal -use dynamics_init_interface -use tracer_init_interface -use ocean_setup_interface -use ice_setup_interface -use ocean2ice_interface -use oce_fluxes_interface -use update_atm_forcing_interface -use before_oce_step_interface -use oce_timestep_ale_interface -use read_mesh_interface -use fesom_version_info_module -use command_line_options_module -! Define icepack module -#if defined (__icepack) -use icedrv_main, only: set_icepack, init_icepack, alloc_icepack -#endif + use fvom_module -#if defined (__oasis) -use cpl_driver -#endif + integer nsteps -IMPLICIT NONE + call fesom_init(nsteps) + call fesom_runloop(nsteps) + call fesom_finalize -integer :: n, nsteps, offset, row, i, provided -integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM -real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc -real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing -real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing -real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart -real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) -real(kind=real32) :: runtime_alltimesteps - - -type(t_mesh) , target, save :: mesh -type(t_tracer), target, save :: tracers -type(t_partit), target, save :: partit -type(t_dyn) , target, save :: dynamics - - -character(LEN=256) :: dump_dir, dump_filename -logical :: L_EXISTS -type(t_mesh), target, save :: mesh_copy -type(t_tracer), target, save :: tracers_copy - -character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt -integer mpi_version_len - - - if(command_argument_count() > 0) then - call command_line_options%parse() - stop - end if - -#ifndef __oifs - !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) - !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS - call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, i) -#endif - - -#if defined (__oasis) - call cpl_oasis3mct_init(partit%MPI_COMM_FESOM) -#endif - t1 = MPI_Wtime() - - call par_init(partit) - - mype =>partit%mype - MPIerr =>partit%MPIerr - MPI_COMM_FESOM=>partit%MPI_COMM_FESOM - npes =>partit%npes - if(mype==0) then - write(*,*) - print *,"FESOM2 git SHA: "//fesom_git_sha() - call MPI_Get_library_version(mpi_version_txt, mpi_version_len, MPIERR) - print *,"MPI library version: "//trim(mpi_version_txt) - print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;32m'//' --> FESOM BUILDS UP MODEL CONFIGURATION '//achar(27)//'[0m' - end if - !===================== - ! Read configuration data, - ! load the mesh and fill in - ! auxiliary mesh arrays - !===================== - call setup_model(partit) ! Read Namelists, always before clock_init - call clock_init(partit) ! read the clock file - call get_run_steps(nsteps, partit) - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call mesh_setup'//achar(27)//'[0m' - call mesh_setup(partit, mesh) - - if (mype==0) write(*,*) 'FESOM mesh_setup... complete' - - !===================== - ! Allocate field variables - ! and additional arrays needed for - ! fancy advection etc. - !===================== - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call check_mesh_consistency'//achar(27)//'[0m' - call check_mesh_consistency(partit, mesh) - if (mype==0) t2=MPI_Wtime() - - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call xxxx_init'//achar(27)//'[0m' - call dynamics_init(dynamics, partit, mesh) - call tracer_init(tracers, partit, mesh) ! allocate array of ocean tracers (derived type "t_tracer") - call arrays_init(tracers%num_tracers, partit, mesh) ! allocate other arrays (to be refactured same as tracers in the future) - - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean_setup'//achar(27)//'[0m' - call ocean_setup(dynamics, tracers, partit, mesh) - - if (mype==0) then - write(*,*) 'FESOM ocean_setup... complete' - t3=MPI_Wtime() - endif - call forcing_setup(partit, mesh) - - if (mype==0) t4=MPI_Wtime() - if (use_ice) then - call ice_setup(tracers, partit, mesh) - ice_steps_since_upd = ice_ave_steps-1 - ice_update=.true. - if (mype==0) write(*,*) 'EVP scheme option=', whichEVP - endif - if (mype==0) t5=MPI_Wtime() - call compute_diagnostics(0, dynamics, tracers, partit, mesh) ! allocate arrays for diagnostic -#if defined (__oasis) - call cpl_oasis3mct_define_unstr(partit, mesh) - if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv -#endif - -#if defined (__icepack) - !===================== - ! Setup icepack - !===================== - if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' - call set_icepack(partit) - call alloc_icepack - call init_icepack(tracers%data(1), mesh) - if (mype==0) write(*,*) 'Icepack: setup complete' -#endif - call clock_newyear ! check if it is a new year - if (mype==0) t6=MPI_Wtime() - !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ - ! The interface to the restart module is made via call restart ! - ! The inputs are: istep, l_write, l_create - ! if istep is not zero it will be decided whether restart shall be written - ! if l_write is TRUE the restart will be forced - ! if l_read the restart will be read - ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, dynamics, tracers, partit, mesh) ! istep, l_write, l_read - if (mype==0) t7=MPI_Wtime() - ! store grid information into netcdf file - if (.not. r_restart) call write_mesh_info(partit, mesh) - - !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ - !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ - if (r_restart) then - call restart_thickness_ale(partit, mesh) - end if - if (mype==0) then - t8=MPI_Wtime() - - rtime_setup_mesh = real( t2 - t1 ,real32) - rtime_setup_ocean = real( t3 - t2 ,real32) - rtime_setup_forcing = real( t4 - t3 ,real32) - rtime_setup_ice = real( t5 - t4 ,real32) - rtime_setup_restart = real( t7 - t6 ,real32) - rtime_setup_other = real((t8 - t7) + (t6 - t5) ,real32) - - write(*,*) '==========================================' - write(*,*) 'MODEL SETUP took on mype=0 [seconds] ' - write(*,*) 'runtime setup total ',real(t8-t1,real32) - write(*,*) ' > runtime setup mesh ',rtime_setup_mesh - write(*,*) ' > runtime setup ocean ',rtime_setup_ocean - write(*,*) ' > runtime setup forcing ',rtime_setup_forcing - write(*,*) ' > runtime setup ice ',rtime_setup_ice - write(*,*) ' > runtime setup restart ',rtime_setup_restart - write(*,*) ' > runtime setup other ',rtime_setup_other - write(*,*) '============================================' - endif - - DUMP_DIR='DUMP/' - INQUIRE(file=trim(dump_dir), EXIST=L_EXISTS) - if (.not. L_EXISTS) call system('mkdir '//trim(dump_dir)) - - write (dump_filename, "(A7,I7.7)") "t_mesh.", mype - open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") - write (mype+300) mesh - close (mype+300) - -! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") -! read (mype+300) mesh_copy -! close (mype+300) - - write (dump_filename, "(A9,I7.7)") "t_tracer.", mype - open (mype+300, file=TRIM(DUMP_DIR)//trim(dump_filename), status='replace', form="unformatted") - write (mype+300) tracers - close (mype+300) - -! open (mype+300, file=trim(dump_filename), status='old', form="unformatted") -! read (mype+300) tracers_copy -! close (mype+300) - -!call par_ex(partit%MPI_COMM_FESOM, partit%mype) -!stop -! -! if (mype==10) write(,) mesh1%ssh_stiff%values-mesh%ssh_stiff%value - - !===================== - ! Time stepping - !===================== - -! Initialize timers - rtime_fullice = 0._WP - rtime_write_restart = 0._WP - rtime_write_means = 0._WP - rtime_compute_diag = 0._WP - rtime_read_forcing = 0._WP - - if (mype==0) write(*,*) 'FESOM start iteration before the barrier...' - call MPI_Barrier(MPI_COMM_FESOM, MPIERR) - - if (mype==0) then - write(*,*) 'FESOM start iteration after the barrier...' - t0 = MPI_Wtime() - endif - if(mype==0) then - write(*,*) - print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;32m'//' --> FESOM STARTS TIME LOOP '//achar(27)//'[0m' - end if - !___MODEL TIME STEPPING LOOP________________________________________________ - if (use_global_tides) then - call foreph_ini(yearnew, month, partit) - end if - do n=1, nsteps - if (use_global_tides) then - call foreph(partit, mesh) - end if - mstep = n - if (mod(n,logfile_outfreq)==0 .and. mype==0) then - write(*,*) 'FESOM =======================================================' -! write(*,*) 'FESOM step:',n,' day:', n*dt/24./3600., - write(*,*) 'FESOM step:',n,' day:', daynew,' year:',yearnew - write(*,*) - end if -#if defined (__oifs) || defined (__oasis) - seconds_til_now=INT(dt)*(n-1) -#endif - call clock - !___compute horizontal velocity on nodes (originaly on elements)________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_vel_nodes'//achar(27)//'[0m' - call compute_vel_nodes(dynamics, partit, mesh) - - !___model sea-ice step__________________________________________________ - t1 = MPI_Wtime() - if(use_ice) then - !___compute fluxes from ocean to ice________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(dynamics, tracers, partit, mesh) - - !___compute update of atmospheric forcing____________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' - t0_frc = MPI_Wtime() - call update_atm_forcing(n, tracers, partit, mesh) - t1_frc = MPI_Wtime() - !___compute ice step________________________________________________ - if (ice_steps_since_upd>=ice_ave_steps-1) then - ice_update=.true. - ice_steps_since_upd = 0 - else - ice_update=.false. - ice_steps_since_upd=ice_steps_since_upd+1 - endif - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ice_timestep(n)'//achar(27)//'[0m' - if (ice_update) call ice_timestep(n, partit, mesh) - !___compute fluxes to the ocean: heat, freshwater, momentum_________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(dynamics, partit, mesh) ! momentum only - call oce_fluxes(tracers, partit, mesh) - end if - call before_oce_step(dynamics, tracers, partit, mesh) ! prepare the things if required - t2 = MPI_Wtime() - !___model ocean step____________________________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - - call oce_timestep_ale(n, dynamics, tracers, partit, mesh) - - t3 = MPI_Wtime() - !___compute energy diagnostics..._______________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, dynamics, tracers, partit, mesh) - - t4 = MPI_Wtime() - !___prepare output______________________________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, dynamics, tracers, partit, mesh) - - t5 = MPI_Wtime() - call restart(n, .false., .false., dynamics, tracers, partit, mesh) - t6 = MPI_Wtime() - - rtime_fullice = rtime_fullice + t2 - t1 - rtime_compute_diag = rtime_compute_diag + t4 - t3 - rtime_write_means = rtime_write_means + t5 - t4 - rtime_write_restart = rtime_write_restart + t6 - t5 - rtime_read_forcing = rtime_read_forcing + t1_frc - t0_frc - end do - - call finalize_output() - - !___FINISH MODEL RUN________________________________________________________ - - call MPI_Barrier(MPI_COMM_FESOM, MPIERR) - if (mype==0) then - t1 = MPI_Wtime() - runtime_alltimesteps = real(t1-t0,real32) - write(*,*) 'FESOM Run is finished, updating clock' - endif - - mean_rtime(1) = rtime_oce - mean_rtime(2) = rtime_oce_mixpres - mean_rtime(3) = rtime_oce_dyn - mean_rtime(4) = rtime_oce_dynssh - mean_rtime(5) = rtime_oce_solvessh - mean_rtime(6) = rtime_oce_GMRedi - mean_rtime(7) = rtime_oce_solvetra - mean_rtime(8) = rtime_ice - mean_rtime(9) = rtime_tot - mean_rtime(10) = rtime_fullice - rtime_read_forcing - mean_rtime(11) = rtime_compute_diag - mean_rtime(12) = rtime_write_means - mean_rtime(13) = rtime_write_restart - mean_rtime(14) = rtime_read_forcing - - max_rtime(1:14) = mean_rtime(1:14) - min_rtime(1:14) = mean_rtime(1:14) - - call MPI_AllREDUCE(MPI_IN_PLACE, mean_rtime, 14, MPI_REAL, MPI_SUM, MPI_COMM_FESOM, MPIerr) - mean_rtime(1:14) = mean_rtime(1:14) / real(npes,real32) - call MPI_AllREDUCE(MPI_IN_PLACE, max_rtime, 14, MPI_REAL, MPI_MAX, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(MPI_IN_PLACE, min_rtime, 14, MPI_REAL, MPI_MIN, MPI_COMM_FESOM, MPIerr) - - if (mype==0) then - write(*,*) '___MODEL RUNTIME mean, min, max per task [seconds]________________________' - write(*,*) ' runtime ocean:',mean_rtime(1), min_rtime(1), max_rtime(1) - write(*,*) ' > runtime oce. mix,pres. :',mean_rtime(2), min_rtime(2), max_rtime(2) - write(*,*) ' > runtime oce. dyn. u,v,w:',mean_rtime(3), min_rtime(3), max_rtime(3) - write(*,*) ' > runtime oce. dyn. ssh :',mean_rtime(4), min_rtime(4), max_rtime(4) - write(*,*) ' > runtime oce. solve ssh:',mean_rtime(5), min_rtime(5), max_rtime(5) - write(*,*) ' > runtime oce. GM/Redi :',mean_rtime(6), min_rtime(6), max_rtime(6) - write(*,*) ' > runtime oce. tracer :',mean_rtime(7), min_rtime(7), max_rtime(7) - write(*,*) ' runtime ice :',mean_rtime(10), min_rtime(10), max_rtime(10) - write(*,*) ' > runtime ice step :',mean_rtime(8), min_rtime(8), max_rtime(8) - write(*,*) ' runtime diag: ', mean_rtime(11), min_rtime(11), max_rtime(11) - write(*,*) ' runtime output: ', mean_rtime(12), min_rtime(12), max_rtime(12) - write(*,*) ' runtime restart:', mean_rtime(13), min_rtime(13), max_rtime(13) - write(*,*) ' runtime forcing:', mean_rtime(14), min_rtime(14), max_rtime(14) - write(*,*) ' runtime total (ice+oce):',mean_rtime(9), min_rtime(9), max_rtime(9) - write(*,*) - write(*,*) '============================================' - write(*,*) '=========== BENCHMARK RUNTIME ==============' - write(*,*) ' Number of cores : ',npes - write(*,*) ' Runtime for all timesteps : ',runtime_alltimesteps,' sec' - write(*,*) '============================================' - write(*,*) - end if -! call clock_finish - call par_ex(partit%MPI_COMM_FESOM, partit%mype) -end program main - +end program From 7676181761824eb50ee590d5b753551fe0fefc6e Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 8 Nov 2021 11:08:24 +0100 Subject: [PATCH 520/909] reshaped structure of oce_ale_tracers calls before making OpenMP directives --- src/oce_ale_tracer.F90 | 43 ++++++------- src/oce_tracer_mod.F90 | 139 +++++++++++++++++++---------------------- 2 files changed, 83 insertions(+), 99 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 6a941cf91..a029b1342 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -148,9 +148,8 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use g_comm_auto use o_tracers use Toy_Channel_Soufflet - use adv_tracers_ale_interface use diff_tracers_ale_interface - + use oce_adv_tra_driver_interfaces implicit none type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers @@ -180,9 +179,13 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 ! 2. bolus velocities are used only for advecting tracers and shall be subtracted back afterwards if (Fer_GM) then - UV =UV +fer_UV - Wvel_e=Wvel_e+fer_Wvel - Wvel =Wvel +fer_Wvel + do elem=1, myDim_elem2D+eDim_elem2D + UV(:, :, elem) =UV(:, :, elem) + fer_UV(:, :, elem) + end do + do node=1, myDim_nod2D+eDim_nod2D + Wvel_e(:, node)=Wvel_e(:, node)+fer_Wvel(:, node) + Wvel (:, node)=Wvel (:, node)+fer_Wvel(:, node) + end do end if !___________________________________________________________________________ ! loop over all tracers @@ -215,10 +218,14 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! subtract the the bolus velocities back from 3D velocities: if (Fer_GM) then - UV =UV -fer_UV - Wvel_e=Wvel_e-fer_Wvel - Wvel =Wvel -fer_Wvel - end if + do elem=1, myDim_elem2D+eDim_elem2D + UV(:, :, elem) =UV(:, :, elem) - fer_UV(:, :, elem) + end do + do node=1, myDim_nod2D+eDim_nod2D + Wvel_e(:, node)=Wvel_e(:, node)-fer_Wvel(:, node) + Wvel (:, node)=Wvel (:, node)-fer_Wvel(:, node) + end do + end if !___________________________________________________________________________ ! to avoid crash with high salinities when coupled to atmosphere ! --> if we do only where (tr_arr(:,:,2) < 3._WP ) we also fill up the bottom @@ -326,35 +333,23 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) del_ttf => tracers%work%del_ttf !___________________________________________________________________________ - ! convert tr_arr_old(:,:,tr_num)=ttr_n-0.5 --> prepare to calc ttr_n+0.5 - ! eliminate AB (adams bashfort) interpolates tracer, which is only needed for - ! tracer advection. For diffusion only need tracer from previouse time step - tracers%data(tr_num)%valuesAB(:,:)=tracers%data(tr_num)%values(:,:) !DS: check that this is the right place! - !___________________________________________________________________________ ! do horizontal diffusiion ! write there also horizontal diffusion rhs to del_ttf which is equal the R_T^n ! in danilovs srcipt ! includes Redi diffusivity if Redi=.true. call diff_part_hor_redi(tr_num, tracers, partit, mesh) ! seems to be ~9% faster than diff_part_hor !___________________________________________________________________________ - ! do vertical diffusion: explicite + ! do vertical diffusion: explicit if (.not. tracers%i_vert_diff) call diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) ! A projection of horizontal Redi diffussivity onto vertical. This par contains horizontal ! derivatives and has to be computed explicitly! - if (Redi) call diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) - + if (Redi) call diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) !___________________________________________________________________________ - ! Update tracers --> calculate T* see Danilov etal "FESOM2 from finite elements - ! to finite volume" + ! Update tracers --> calculate T* see Danilov et al. (2017) ! T* = (dt*R_T^n + h^(n-0.5)*T^(n-0.5))/h^(n+0.5) do n=1, myDim_nod2D nzmax=nlevels_nod2D(n)-1 nzmin=ulevels_nod2D(n) - !!PS del_ttf(1:nzmax,n)=del_ttf(1:nzmax,n)+tr_arr(1:nzmax,n,tr_num)* & - !!PS (hnode(1:nzmax,n)-hnode_new(1:nzmax,n)) - !!PS tr_arr(1:nzmax,n,tr_num)=tr_arr(1:nzmax,n,tr_num)+ & - !!PS del_ttf(1:nzmax,n)/hnode_new(1:nzmax,n) - del_ttf(nzmin:nzmax,n)=del_ttf(nzmin:nzmax,n)+tracers%data(tr_num)%values(nzmin:nzmax,n)* & (hnode(nzmin:nzmax,n)-hnode_new(nzmin:nzmax,n)) tracers%data(tr_num)%values(nzmin:nzmax,n)=tracers%data(tr_num)%values(nzmin:nzmax,n)+ & diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index 5b0528724..0334328b4 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -6,20 +6,50 @@ MODULE o_tracers USE MOD_PARSUP IMPLICIT NONE -interface - subroutine tracer_gradient_z(ttf, partit, mesh) +CONTAINS +! +! +!=============================================================================== +SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) USE MOD_MESH - USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP + USE MOD_TRACER + use g_config, only: flag_debug + use o_arrays + use g_comm_auto IMPLICIT NONE + integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - real(kind=WP) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) - end subroutine -end interface + type(t_tracer), intent(inout), target :: tracers + integer :: n,nz -CONTAINS + do n=1, partit%myDim_nod2D+partit%eDim_nod2D + ! del_ttf will contain all advection / diffusion contributions for this tracer. Set it to 0 at the beginning! + tracers%work%del_ttf(:, n) = 0.0_WP + ! AB interpolation + tracers%data(tr_num)%valuesAB(:, n)=-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:, n)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:, n) + end do + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' + call tracer_gradient_elements(tracers%data(tr_num)%valuesAB, partit, mesh) + call exchange_elem_begin(tr_xy, partit) + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' + call tracer_gradient_z(tracers%data(tr_num)%values, partit, mesh) !WHY NOT AB HERE? DSIDOREN! + call exchange_elem_end(partit) ! tr_xy used in fill_up_dn_grad + call exchange_nod_begin(tr_z, partit) ! not used in fill_up_dn_grad + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call fill_up_dn_grad'//achar(27)//'[0m' + call fill_up_dn_grad(tracers%work, partit, mesh) + call exchange_nod_end(partit) ! tr_z halos should have arrived by now. + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' + call tracer_gradient_elements(tracers%data(tr_num)%values, partit, mesh) !redefine tr_arr to the current timestep + call exchange_elem(tr_xy, partit) + +END SUBROUTINE init_tracers_AB ! ! !======================================================================= @@ -58,44 +88,42 @@ END SUBROUTINE tracer_gradient_elements ! ! !======================================================================================== -SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) +SUBROUTINE tracer_gradient_z(ttf, partit, mesh) + !computes vertical gradient of tracer USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP USE MOD_TRACER - use g_config, only: flag_debug - use o_arrays - use g_comm_auto + USE o_PARAM + USE o_ARRAYS + USE g_CONFIG IMPLICIT NONE - integer, intent(in) :: tr_num type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - integer :: n,nz - !filling work arrays - tracers%work%del_ttf=0.0_WP - - !AB interpolation - tracers%data(tr_num)%valuesAB(:,:)=-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:,:)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:,:) - - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tracers%data(tr_num)%valuesAB, partit, mesh) - call exchange_elem_begin(tr_xy, partit) - - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' - call tracer_gradient_z(tracers%data(tr_num)%values, partit, mesh) !WHY NOT AB HERE? DSIDOREN! - call exchange_elem_end(partit) ! tr_xy used in fill_up_dn_grad - call exchange_nod_begin(tr_z, partit) ! not used in fill_up_dn_grad - - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call fill_up_dn_grad'//achar(27)//'[0m' - call fill_up_dn_grad(tracers%work, partit, mesh) - call exchange_nod_end(partit) ! tr_z halos should have arrived by now. + real(kind=WP) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: dz + integer :: n, nz, nzmin, nzmax - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tracers%data(tr_num)%values, partit, mesh) !redefine tr_arr to the current timestep - call exchange_elem(tr_xy, partit) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" -END SUBROUTINE init_tracers_AB + DO n=1, myDim_nod2D+eDim_nod2D + !!PS nlev=nlevels_nod2D(n) + nzmax=nlevels_nod2D(n) + nzmin=ulevels_nod2D(n) + !!PS DO nz=2, nlev-1 + DO nz=nzmin+1, nzmax-1 + dz=0.5_WP*(hnode_new(nz-1,n)+hnode_new(nz,n)) + tr_z(nz, n)=(ttf(nz-1,n)-ttf(nz,n))/dz + END DO + !!PS tr_z(1, n)=0.0_WP + !!PS tr_z(nlev, n)=0.0_WP + tr_z(nzmin, n)=0.0_WP + tr_z(nzmax, n)=0.0_WP + END DO +END SUBROUTINE tracer_gradient_z ! ! !======================================================================================== @@ -141,42 +169,3 @@ SUBROUTINE relax_to_clim(tr_num, tracers, partit, mesh) END IF END SUBROUTINE relax_to_clim END MODULE o_tracers -! -! -!======================================================================================== -SUBROUTINE tracer_gradient_z(ttf, partit, mesh) - !computes vertical gradient of tracer - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_TRACER - USE o_PARAM - USE o_ARRAYS - USE g_CONFIG - IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - real(kind=WP) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP) :: dz - integer :: n, nz, nzmin, nzmax - -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - - DO n=1, myDim_nod2D+eDim_nod2D - !!PS nlev=nlevels_nod2D(n) - nzmax=nlevels_nod2D(n) - nzmin=ulevels_nod2D(n) - !!PS DO nz=2, nlev-1 - DO nz=nzmin+1, nzmax-1 - dz=0.5_WP*(hnode_new(nz-1,n)+hnode_new(nz,n)) - tr_z(nz, n)=(ttf(nz-1,n)-ttf(nz,n))/dz - END DO - !!PS tr_z(1, n)=0.0_WP - !!PS tr_z(nlev, n)=0.0_WP - tr_z(nzmin, n)=0.0_WP - tr_z(nzmax, n)=0.0_WP - END DO -END SUBROUTINE tracer_gradient_z From f6b288152383f33525b1dd94f73f6940c5826013 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 8 Nov 2021 11:22:21 +0100 Subject: [PATCH 521/909] forgot to set del_ttf_advhoriz and del_ttf_advvert to zero before doing tracer advection --- src/oce_tracer_mod.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index 0334328b4..da2d7baa7 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -27,7 +27,9 @@ SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) do n=1, partit%myDim_nod2D+partit%eDim_nod2D ! del_ttf will contain all advection / diffusion contributions for this tracer. Set it to 0 at the beginning! - tracers%work%del_ttf(:, n) = 0.0_WP + tracers%work%del_ttf (:, n) = 0.0_WP + tracers%work%del_ttf_advhoriz (:, n) = 0.0_WP + tracers%work%del_ttf_advvert (:, n) = 0.0_WP ! AB interpolation tracers%data(tr_num)%valuesAB(:, n)=-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:, n)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:, n) end do From abb14e3e8b39ab8f95121824db55d88cb8690326 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 8 Nov 2021 11:55:30 +0100 Subject: [PATCH 522/909] as usually some bug fixes due to the lack of git knowledge :) --- src/oce_adv_tra_driver.F90 | 2 +- src/oce_ale_tracer.F90 | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index b405c93cf..cf9725b24 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -114,7 +114,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, ! update the LO solution for horizontal contribution !$OMP PARALLEL DO do n=1, myDim_nod2D+eDim_nod2D - fct_LO(:,n)=0.0_WP + fct_LO(:,n) = 0.0_WP end do !$OMP END PARALLEL DO !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(e, enodes, el, nl1, nu1, nl2, nu2, nz) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index a029b1342..c335dae6f 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -196,7 +196,20 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) call init_tracers_AB(tr_num, tracers, partit, mesh) ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' - call adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) + ! it will update del_ttf with contributions from horizontal and vertical advection parts (del_ttf_advhoriz and del_ttf_advvert) + call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) + !___________________________________________________________________________ + ! update array for total tracer flux del_ttf with the fluxes from horizontal + ! and vertical advection +!$OMP DO + do node=1, myDim_nod2d + tracers%work%del_ttf(:, node)=tracers%work%del_ttf(:, node)+tracers%work%del_ttf_advhoriz(:, node)+tracers%work%del_ttf_advvert(:, node) + end do +!$OMP END DO + !___________________________________________________________________________ + ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. + ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. + tracers%data(tr_num)%valuesAB(:,:)=tracers%data(tr_num)%values(:,:) !DS: check that this is the right place! ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' call diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) From cfc6ae4b7b51c5bbbd5554782db82099b10159c5 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 8 Nov 2021 12:00:26 +0100 Subject: [PATCH 523/909] let us update del_ttf due to advection terms in solve_tracers_ale directly to make it visible what happens --- src/oce_ale_tracer.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index c335dae6f..48991795b 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -201,11 +201,11 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection -!$OMP DO +!$OMP PARALLEL DO do node=1, myDim_nod2d tracers%work%del_ttf(:, node)=tracers%work%del_ttf(:, node)+tracers%work%del_ttf_advhoriz(:, node)+tracers%work%del_ttf_advvert(:, node) end do -!$OMP END DO +!$OMP END PARALLEL DO !___________________________________________________________________________ ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. From fcfadb6ebd5993e8791cbc984c8043f71678b84c Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 8 Nov 2021 13:19:46 +0100 Subject: [PATCH 524/909] OpenMP in the main loop if solve_tracers_ale. the main job just starts :) --- src/oce_ale_tracer.F90 | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 48991795b..8865b2ebc 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -179,13 +179,17 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 ! 2. bolus velocities are used only for advecting tracers and shall be subtracted back afterwards if (Fer_GM) then +!$OMP PARALLEL DO do elem=1, myDim_elem2D+eDim_elem2D UV(:, :, elem) =UV(:, :, elem) + fer_UV(:, :, elem) end do +!$OMP END PARALLEL DO +!$OMP PARALLEL DO do node=1, myDim_nod2D+eDim_nod2D Wvel_e(:, node)=Wvel_e(:, node)+fer_Wvel(:, node) Wvel (:, node)=Wvel (:, node)+fer_Wvel(:, node) end do +!$OMP END PARALLEL DO end if !___________________________________________________________________________ ! loop over all tracers @@ -204,12 +208,12 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) !$OMP PARALLEL DO do node=1, myDim_nod2d tracers%work%del_ttf(:, node)=tracers%work%del_ttf(:, node)+tracers%work%del_ttf_advhoriz(:, node)+tracers%work%del_ttf_advvert(:, node) + !___________________________________________________________________________ + ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. + ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. + tracers%data(tr_num)%valuesAB(:, node)=tracers%data(tr_num)%values(:, node) !DS: check that this is the right place! end do !$OMP END PARALLEL DO - !___________________________________________________________________________ - ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. - ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. - tracers%data(tr_num)%valuesAB(:,:)=tracers%data(tr_num)%values(:,:) !DS: check that this is the right place! ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' call diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) @@ -224,26 +228,32 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) call exchange_nod(tracers%data(tr_num)%values(:,:), partit) end do !___________________________________________________________________________ + ! 3D restoring for "passive" tracers + !!!$OMPTODO: add OpenMP later, not needed right now! do tr_num=1, ptracers_restore_total - tracers%data(ptracers_restore(tr_num)%locid)%values(:,ptracers_restore(tr_num)%ind2)=1.0_WP + tracers%data(ptracers_restore(tr_num)%locid)%values(:, ptracers_restore(tr_num)%ind2)=1.0_WP end do - !___________________________________________________________________________ ! subtract the the bolus velocities back from 3D velocities: if (Fer_GM) then +!$OMP PARALLEL DO do elem=1, myDim_elem2D+eDim_elem2D UV(:, :, elem) =UV(:, :, elem) - fer_UV(:, :, elem) end do +!$OMP END PARALLEL DO +!$OMP PARALLEL DO do node=1, myDim_nod2D+eDim_nod2D Wvel_e(:, node)=Wvel_e(:, node)-fer_Wvel(:, node) Wvel (:, node)=Wvel (:, node)-fer_Wvel(:, node) end do +!$OMP END PARALLEL DO end if !___________________________________________________________________________ ! to avoid crash with high salinities when coupled to atmosphere ! --> if we do only where (tr_arr(:,:,2) < 3._WP ) we also fill up the bottom ! topogrpahy with values which are then writte into the output --> thats why ! do node=1,.... and tr_arr(node,1:nzmax,2) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nzmin, nzmax) do node=1,myDim_nod2D+eDim_nod2D nzmax=nlevels_nod2D(node)-1 nzmin=ulevels_nod2D(node) @@ -255,6 +265,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) tracers%data(2)%values(nzmin:nzmax,node) = 3._WP end where end do +!$OMP END PARALLEL DO end subroutine solve_tracers_ale ! ! From cb096aae4c8d7532569bac0a36749cb647c5f873 Mon Sep 17 00:00:00 2001 From: a270042 Date: Mon, 8 Nov 2021 17:42:36 +0100 Subject: [PATCH 525/909] solve conflicts from merging with refactoring branch --- src/fvom.F90 | 54 +++++++++++++++------ src/oce_adv_tra_driver.F90 | 11 +++-- src/oce_ale_tracer.F90 | 96 +++++++++++--------------------------- 3 files changed, 73 insertions(+), 88 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 4ab323df9..82752d507 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -5,6 +5,7 @@ module fesom_main_storage_module USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP + USE MOD_DYN USE o_ARRAYS USE o_PARAM USE i_PARAM @@ -51,15 +52,17 @@ module fesom_main_storage_module real(kind=real32) :: runtime_alltimesteps - type(t_mesh) mesh + type(t_mesh) mesh type(t_tracer) tracers + type(t_dyn) dynamics type(t_partit) partit character(LEN=256) :: dump_dir, dump_filename logical :: L_EXISTS - type(t_mesh) mesh_copy + type(t_mesh) mesh_copy type(t_tracer) tracers_copy + type(t_dyn) dynamics_copy character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt integer mpi_version_len @@ -124,6 +127,7 @@ subroutine fesom_init(fesom_total_nsteps) call setup_model(f%partit) ! Read Namelists, always before clock_init call clock_init(f%partit) ! read the clock file call get_run_steps(fesom_total_nsteps, f%partit) + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call mesh_setup'//achar(27)//'[0m' call mesh_setup(f%partit, f%mesh) if (f%mype==0) write(*,*) 'FESOM mesh_setup... complete' @@ -133,12 +137,21 @@ subroutine fesom_init(fesom_total_nsteps) ! and additional arrays needed for ! fancy advection etc. !===================== + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call check_mesh_consistency'//achar(27)//'[0m' call check_mesh_consistency(f%partit, f%mesh) if (f%mype==0) f%t2=MPI_Wtime() + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call dynamics_init'//achar(27)//'[0m' + call dynamics_init(f%dynamics, f%partit, f%mesh) + + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call tracer_init'//achar(27)//'[0m' call tracer_init(f%tracers, f%partit, f%mesh) ! allocate array of ocean tracers (derived type "t_tracer") + + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call arrays_init'//achar(27)//'[0m' call arrays_init(f%tracers%num_tracers, f%partit, f%mesh) ! allocate other arrays (to be refactured same as tracers in the future) - call ocean_setup(f%tracers, f%partit, f%mesh) + + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean_setup'//achar(27)//'[0m' + call ocean_setup(f%dynamics, f%tracers, f%partit, f%mesh) if (f%mype==0) then write(*,*) 'FESOM ocean_setup... complete' @@ -154,7 +167,7 @@ subroutine fesom_init(fesom_total_nsteps) if (f%mype==0) write(*,*) 'EVP scheme option=', whichEVP endif if (f%mype==0) f%t5=MPI_Wtime() - call compute_diagnostics(0, f%tracers, f%partit, f%mesh) ! allocate arrays for diagnostic + call compute_diagnostics(0, f%dynamics, f%tracers, f%partit, f%mesh) ! allocate arrays for diagnostic #if defined (__oasis) call cpl_oasis3mct_define_unstr(f%partit, f%mesh) if(f%mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv @@ -179,7 +192,7 @@ subroutine fesom_init(fesom_total_nsteps) ! if l_write is TRUE the restart will be forced ! if l_read the restart will be read ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read + call restart(0, .false., r_restart, f%dynamics, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read if (f%mype==0) f%t7=MPI_Wtime() ! store grid information into netcdf file if (.not. r_restart) call write_mesh_info(f%partit, f%mesh) @@ -230,9 +243,18 @@ subroutine fesom_init(fesom_total_nsteps) close (f%mype+300) ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") - ! read (f%mype+300) f%tracers_copy + ! read (f%mype+300) f%dynamics_copy ! close (f%mype+300) + write (f%dump_filename, "(A9,I7.7)") "t_dynamics.", f%mype + open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + write (f%mype+300) f%dynamics + close (f%mype+300) + + ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") + ! read (f%mype+300) f%tracers_copy + ! close (f%mype+300) + !call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) !stop ! @@ -292,13 +314,15 @@ subroutine fesom_runloop(current_nsteps) #endif call clock !___compute horizontal velocity on nodes (originaly on elements)________ - call compute_vel_nodes(f%partit, f%mesh) + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call compute_vel_nodes'//achar(27)//'[0m' + call compute_vel_nodes(f%dynamics, f%partit, f%mesh) + !___model sea-ice step__________________________________________________ f%t1 = MPI_Wtime() if(use_ice) then !___compute fluxes from ocean to ice________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(f%tracers, f%partit, f%mesh) + call ocean2ice(f%dynamics, f%tracers, f%partit, f%mesh) !___compute update of atmospheric forcing____________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' @@ -317,28 +341,28 @@ subroutine fesom_runloop(current_nsteps) if (ice_update) call ice_timestep(n, f%partit, f%mesh) !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(f%partit, f%mesh) ! momentum only + call oce_fluxes_mom(f%dynamics, f%partit, f%mesh) ! momentum only call oce_fluxes(f%tracers, f%partit, f%mesh) end if - call before_oce_step(f%tracers, f%partit, f%mesh) ! prepare the things if required + call before_oce_step(f%dynamics, f%tracers, f%partit, f%mesh) ! prepare the things if required f%t2 = MPI_Wtime() + !___model ocean step____________________________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - - call oce_timestep_ale(n, f%tracers, f%partit, f%mesh) + call oce_timestep_ale(n, f%dynamics, f%tracers, f%partit, f%mesh) f%t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, f%tracers, f%partit, f%mesh) + call compute_diagnostics(1, f%dynamics, f%tracers, f%partit, f%mesh) f%t4 = MPI_Wtime() !___prepare output______________________________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, f%tracers, f%partit, f%mesh) + call output (n, f%dynamics, f%tracers, f%partit, f%mesh) f%t5 = MPI_Wtime() - call restart(n, .false., .false., f%tracers, f%partit, f%mesh) + call restart(n, .false., .false., f%dynamics, f%tracers, f%partit, f%mesh) f%t6 = MPI_Wtime() f%rtime_fullice = f%rtime_fullice + f%t2 - f%t1 diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index cf9725b24..739b07740 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -166,14 +166,15 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n) end do end do -<<<<<<< HEAD - if (dynamics%use_wsplit) then !wvel/=wvel_e - ! update for implicit contribution (use_wsplit option) -======= !$OMP END PARALLEL DO - if (w_split) then !wvel/=wvel_e + + if (dynamics%use_wsplit) then !wvel/=wvel_e ! update for implicit contribution (w_split option) +<<<<<<< HEAD >>>>>>> tracer advection part has been fullly OpenMP parallelized. It turns out that OpenMP does not slow down the modes as compared to MPI. +======= + +>>>>>>> solve conflicts from merging with refactoring branch call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 8865b2ebc..23593c551 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -12,6 +12,7 @@ subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) end subroutine end interface end module +<<<<<<< HEAD module adv_tracers_ale_interface interface subroutine adv_tracers_ale(dt, tr_num, dynamics, tracer, partit, mesh) @@ -29,6 +30,9 @@ subroutine adv_tracers_ale(dt, tr_num, dynamics, tracer, partit, mesh) end subroutine end interface end module +======= + +>>>>>>> solve conflicts from merging with refactoring branch module diff_ver_part_expl_ale_interface interface subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) @@ -43,6 +47,7 @@ subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) end subroutine end interface end module + module diff_ver_part_redi_expl_interface interface subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) @@ -57,6 +62,7 @@ subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) end subroutine end interface end module + module diff_ver_part_impl_ale_interface interface subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracer, partit, mesh) @@ -73,6 +79,7 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracer, partit, mesh) end subroutine end interface end module + module diff_tracers_ale_interface interface subroutine diff_tracers_ale(tr_num, dynamics, tracer, partit, mesh) @@ -89,6 +96,7 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracer, partit, mesh) end subroutine end interface end module + module bc_surface_interface interface function bc_surface(n, id, sval, partit) @@ -102,6 +110,7 @@ function bc_surface(n, id, sval, partit) end function end interface end module + module diff_part_bh_interface interface subroutine diff_part_bh(tr_num, dynamics, tracer, partit, mesh) @@ -118,6 +127,7 @@ subroutine diff_part_bh(tr_num, dynamics, tracer, partit, mesh) end subroutine end interface end module + module solve_tracers_ale_interface interface subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) @@ -151,14 +161,15 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use diff_tracers_ale_interface use oce_adv_tra_driver_interfaces implicit none - type(t_dyn) , intent(inout), target :: dynamics - type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - integer :: tr_num, node, nzmax, nzmin + + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: tr_num, node, elem, nzmax, nzmin real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV - real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, fer_Wvel - + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, fer_Wvel + real(kind=WP), dimension(:,:) , pointer :: del_ttf #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -166,14 +177,17 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) UV => dynamics%uv(:,:,:) Wvel => dynamics%w(:,:) Wvel_e => dynamics%w_e(:,:) + Wvel_i => dynamics%w_i(:,:) if (Fer_GM) then fer_UV => dynamics%fer_uv(:,:,:) fer_Wvel => dynamics%fer_w(:,:) end if + del_ttf => tracers%work%del_ttf !___________________________________________________________________________ if (SPP) call cal_rejected_salt(partit, mesh) - if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) + if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) + !___________________________________________________________________________ ! update 3D velocities with the bolus velocities: ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 @@ -191,6 +205,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) end do !$OMP END PARALLEL DO end if + !___________________________________________________________________________ ! loop over all tracers do tr_num=1, tracers%num_tracers @@ -198,10 +213,12 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) ! needed if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call init_tracers_AB'//achar(27)//'[0m' call init_tracers_AB(tr_num, tracers, partit, mesh) + ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' ! it will update del_ttf with contributions from horizontal and vertical advection parts (del_ttf_advhoriz and del_ttf_advvert) - call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_num, tracers, partit, mesh) + call do_oce_adv_tra(dt, UV, Wvel, Wvel_i, Wvel_e, tr_num, dynamics, tracers, partit, mesh) + !___________________________________________________________________________ ! update array for total tracer flux del_ttf with the fluxes from horizontal ! and vertical advection @@ -217,6 +234,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' call diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) + ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' ! if ((toy_ocean) .AND. ((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then @@ -270,63 +288,6 @@ end subroutine solve_tracers_ale ! ! !=============================================================================== -subroutine adv_tracers_ale(dt, tr_num, dynamics, tracers, partit, mesh) - use g_config, only: flag_debug - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_TRACER - use MOD_DYN - use o_arrays - use diagnostics, only: ldiag_DVD, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & - compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd -! use adv_tracers_muscle_ale_interface -! use adv_tracers_vert_ppm_ale_interface - use oce_adv_tra_driver_interfaces - implicit none - real(kind=WP), intent(in), target :: dt - integer :: node, nz - integer, intent(in) :: tr_num - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(inout), target :: dynamics - ! del_ttf ... initialised and setted to zero in call init_tracers_AB(tr_num) - ! --> del_ttf ... equivalent to R_T^n in Danilov etal FESOM2: "from finite element - ! to finite volume". At the end R_T^n should contain all advection therms and - ! the terms due to diffusion. - ! del_ttf=0d0 - - !___________________________________________________________________________ - ! if ldiag_DVD=.true. --> compute tracer second moments for the calcualtion - ! of discret variance decay - if (ldiag_DVD .and. tr_num <= 2) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd_2ndmoment'//achar(27)//'[0m' - call compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers, partit, mesh) - end if - - !___________________________________________________________________________ - ! horizontal ale tracer advection - ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... - tracers%work%del_ttf_advhoriz = 0.0_WP - tracers%work%del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(dt, dynamics%uv, dynamics%w, dynamics%w_i, dynamics%w_e, tr_num, dynamics, tracers, partit, mesh) - !___________________________________________________________________________ - ! update array for total tracer flux del_ttf with the fluxes from horizontal - ! and vertical advection - tracers%work%del_ttf=tracers%work%del_ttf+tracers%work%del_ttf_advhoriz+tracers%work%del_ttf_advvert - - !___________________________________________________________________________ - ! compute discrete variance decay after Burchard and Rennau 2008 - if (ldiag_DVD .and. tr_num <= 2) then - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd'//achar(27)//'[0m' - call compute_diag_dvd(tr_num, tracers, partit, mesh) - end if - -end subroutine adv_tracers_ale -! -! -!=============================================================================== subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT @@ -391,8 +352,7 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) end if !We DO not set del_ttf to zero because it will not be used in this timestep anymore - !init_tracers will set it to zero for the next timestep - !init_tracers will set it to zero for the next timestep + !init_tracers_AB will set it to zero for the next timestep if (tracers%smooth_bh_tra) then call diff_part_bh(tr_num, dynamics, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) end if From 592d340614e73aa02fc3b2daa0bf3c942b2da214 Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 9 Nov 2021 11:04:16 +0100 Subject: [PATCH 526/909] add reduced namelist.oce --- config/namelist.oce | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/config/namelist.oce b/config/namelist.oce index a69770154..7af6867f7 100644 --- a/config/namelist.oce +++ b/config/namelist.oce @@ -2,27 +2,8 @@ &oce_dyn C_d=0.0025 ! Bottom drag, nondimensional -gamma0=0.003 ! [m/s], backgroung viscosity= gamma0*len, it should be as small as possible (keep it < 0.01 m/s). -gamma1=0.1 ! [nodim], for computation of the flow aware viscosity -gamma2=0.285 ! [s/m], is only used in easy backscatter option -Div_c=.5 ! the strength of the modified Leith viscosity, nondimensional, 0.3 -- 1.0 -Leith_c=.05 ! the strength of the Leith viscosity -visc_option=5 ! 1=Harmonic Leith parameterization; - ! 2=Laplacian+Leith+biharmonic background - ! 3=Biharmonic Leith parameterization - ! 4=Biharmonic flow aware - ! 5=Kinematic (easy) Backscatter - ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) - ! 7=Biharmonic flow aware (viscosity depends on velocity differences) - ! 8=Dynamic Backscatter -easy_bs_return= 1.5 ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 (easy backscatter) A_ver= 1.e-4 ! Vertical viscosity, m^2/s scale_area=5.8e9 ! Visc. and diffus. are for an element with scale_area -mom_adv=2 ! 1=vector CV, p1 vel, 2=sca. CV, 3=vector inv. -free_slip=.false. ! Switch on free slip -i_vert_visc=.true. -w_split=.false. -w_max_cfl=1.0 ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) ! in older FESOM it used to be w_exp_max=1.e-3 SPP=.false. ! Salt Plume Parameterization Fer_GM=.true. ! to swith on/off GM after Ferrari et al. 2010 K_GM_max = 2000.0 ! max. GM thickness diffusivity (m2/s) From f65bd3be3c7d67f28e83fac3f8aee7efee23cdc7 Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 9 Nov 2021 14:04:44 +0100 Subject: [PATCH 527/909] fix biug in oce_dyn.F90, subroutines visc_... that caused virtual memory error on ollie --> caused memory leakage --- src/oce_dyn.F90 | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 5b80ccf7a..7ad39dd0a 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -238,10 +238,6 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) ! Here the contribution from squared velocities is added to the viscosity. ! The contribution from boundary edges is neglected (free slip). - ed=myDim_elem2D+eDim_elem2D - allocate(U_b(nl-1,ed), V_b(nl-1, ed)) - ed=myDim_nod2D+eDim_nod2D - allocate(U_c(nl-1,ed), V_c(nl-1,ed)) U_b=0.0_WP V_b=0.0_WP U_c=0.0_WP @@ -342,8 +338,6 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) U_c => dynamics%work%u_c(:,:) V_c => dynamics%work%v_c(:,:) - ed=myDim_elem2D+eDim_elem2D - allocate(U_c(nl-1,ed), V_c(nl-1, ed)) U_c=0.0_WP V_c=0.0_WP DO ed=1, myDim_edge2D+eDim_edge2D @@ -396,8 +390,7 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) END DO - END DO - deallocate(V_c,U_c) + END DO end subroutine visc_filt_bilapl ! @@ -434,9 +427,7 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) UV_rhs => dynamics%uv_rhs(:,:,:) U_c => dynamics%work%u_c(:,:) V_c => dynamics%work%v_c(:,:) - ! - ed=myDim_elem2D+eDim_elem2D - allocate(U_c(nl-1,ed), V_c(nl-1, ed)) + U_c=0.0_WP V_c=0.0_WP DO ed=1, myDim_edge2D+eDim_edge2D @@ -490,6 +481,5 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) END DO END DO - deallocate(V_c, U_c) end subroutine visc_filt_bidiff From 04ddcbb80dee56739a6edef275e8c8bc2c57905e Mon Sep 17 00:00:00 2001 From: a270042 Date: Tue, 9 Nov 2021 15:14:57 +0100 Subject: [PATCH 528/909] add dimas bigfix 761f65a by hand --- src/oce_ale_tracer.F90 | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 23593c551..fc61acc1c 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -225,9 +225,14 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) !$OMP PARALLEL DO do node=1, myDim_nod2d tracers%work%del_ttf(:, node)=tracers%work%del_ttf(:, node)+tracers%work%del_ttf_advhoriz(:, node)+tracers%work%del_ttf_advvert(:, node) - !___________________________________________________________________________ - ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. - ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. + end do +!$OMP END PARALLEL DO + + !___________________________________________________________________________ + ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. + ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. +!$OMP PARALLEL DO + do node=1, myDim_nod2d+eDim_nod2D tracers%data(tr_num)%valuesAB(:, node)=tracers%data(tr_num)%values(:, node) !DS: check that this is the right place! end do !$OMP END PARALLEL DO From ccea0291eee44dc9a3ba31953bc5453ad364a683 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 9 Nov 2021 16:48:02 +0100 Subject: [PATCH 529/909] fix tests for dynamical derived types --- .github/workflows/fesom2.1.yml | 2 +- .github/workflows/fesom2_icepack.yml | 2 +- setups/test_pi/setup.yml | 8 +++----- setups/test_pi_floatice/setup.yml | 8 +++----- setups/test_pi_icepack/setup.yml | 8 +++----- setups/test_pi_linfs/setup.yml | 8 +++----- setups/test_pi_partial/setup.yml | 8 +++----- setups/test_pi_visc7/setup.yml | 11 +++++------ setups/test_pi_zstar/setup.yml | 8 +++----- setups/test_souf/setup.yml | 9 +++++---- test/run_tests.sh | 5 +++-- 11 files changed, 33 insertions(+), 44 deletions(-) diff --git a/.github/workflows/fesom2.1.yml b/.github/workflows/fesom2.1.yml index 733eaf55d..abcabbe97 100644 --- a/.github/workflows/fesom2.1.yml +++ b/.github/workflows/fesom2.1.yml @@ -12,7 +12,7 @@ jobs: # Containers must run in Linux based operating systems runs-on: ubuntu-latest # Docker Hub image that `container-job` executes in - container: koldunovn/fesom2_test:refactoring + container: koldunovn/fesom2_test:refactoring2 # Service containers to run with `gfortran_ubuntu` steps: diff --git a/.github/workflows/fesom2_icepack.yml b/.github/workflows/fesom2_icepack.yml index 06d84ba64..9057057b1 100644 --- a/.github/workflows/fesom2_icepack.yml +++ b/.github/workflows/fesom2_icepack.yml @@ -12,7 +12,7 @@ jobs: # Containers must run in Linux based operating systems runs-on: ubuntu-latest # Docker Hub image that `container-job` executes in - container: koldunovn/fesom2_test:refactoring + container: koldunovn/fesom2_test:refactoring2 # Service containers to run with `gfortran_ubuntu` steps: diff --git a/setups/test_pi/setup.yml b/setups/test_pi/setup.yml index b38c480f0..e303fbc0f 100644 --- a/setups/test_pi/setup.yml +++ b/setups/test_pi/setup.yml @@ -19,11 +19,9 @@ namelist.config: restart_length_unit: "d" logfile_outfreq: 10 -namelist.oce: - oce_dyn: - Div_c: 0.5 - Leith_c: 0.05 - w_split: True +namelist.dyn: + dynamics_general: + use_wsplit: True namelist.ice: ice_dyn: diff --git a/setups/test_pi_floatice/setup.yml b/setups/test_pi_floatice/setup.yml index 6f99efd5a..0a23d073f 100644 --- a/setups/test_pi_floatice/setup.yml +++ b/setups/test_pi_floatice/setup.yml @@ -21,11 +21,9 @@ namelist.config: run_config: use_floatice: True -namelist.oce: - oce_dyn: - Div_c: 0.5 - Leith_c: 0.05 - w_split: True +namelist.dyn: + dynamics_general: + use_wsplit: True namelist.ice: ice_dyn: diff --git a/setups/test_pi_icepack/setup.yml b/setups/test_pi_icepack/setup.yml index b7a18cf82..35da53540 100644 --- a/setups/test_pi_icepack/setup.yml +++ b/setups/test_pi_icepack/setup.yml @@ -19,11 +19,9 @@ namelist.config: restart_length_unit: "d" logfile_outfreq: 10 -namelist.oce: - oce_dyn: - Div_c: 0.5 - Leith_c: 0.05 - w_split: True +namelist.dyn: + dynamics_general: + use_wsplit: True namelist.ice: ice_dyn: diff --git a/setups/test_pi_linfs/setup.yml b/setups/test_pi_linfs/setup.yml index bc604dccc..88650a51f 100644 --- a/setups/test_pi_linfs/setup.yml +++ b/setups/test_pi_linfs/setup.yml @@ -21,11 +21,9 @@ namelist.config: ale_def: which_ALE: "linfs" -namelist.oce: - oce_dyn: - Div_c: 0.5 - Leith_c: 0.05 - w_split: True +namelist.dyn: + dynamics_general: + use_wsplit: True namelist.ice: ice_dyn: diff --git a/setups/test_pi_partial/setup.yml b/setups/test_pi_partial/setup.yml index bae697a6a..b3e74290d 100644 --- a/setups/test_pi_partial/setup.yml +++ b/setups/test_pi_partial/setup.yml @@ -21,11 +21,9 @@ namelist.config: ale_def: use_partial_cell: False -namelist.oce: - oce_dyn: - Div_c: 0.5 - Leith_c: 0.05 - w_split: True +namelist.dyn: + dynamics_general: + use_wsplit: True namelist.ice: ice_dyn: diff --git a/setups/test_pi_visc7/setup.yml b/setups/test_pi_visc7/setup.yml index c4d616619..afcee51e4 100644 --- a/setups/test_pi_visc7/setup.yml +++ b/setups/test_pi_visc7/setup.yml @@ -19,12 +19,11 @@ namelist.config: restart_length_unit: "d" logfile_outfreq: 10 -namelist.oce: - oce_dyn: - Div_c: 0.5 - Leith_c: 0.05 - w_split: True - visc_option: 7 +namelist.dyn: + dynamics_visc: + opt_visc: 7 + dynamics_general: + use_wsplit: True namelist.ice: diff --git a/setups/test_pi_zstar/setup.yml b/setups/test_pi_zstar/setup.yml index e487659ae..b69816202 100644 --- a/setups/test_pi_zstar/setup.yml +++ b/setups/test_pi_zstar/setup.yml @@ -21,11 +21,9 @@ namelist.config: ale_def: which_ALE: "zstar" -namelist.oce: - oce_dyn: - Div_c: 0.5 - Leith_c: 0.05 - w_split: True +namelist.dyn: + dynamics_general: + use_wsplit: True namelist.ice: ice_dyn: diff --git a/setups/test_souf/setup.yml b/setups/test_souf/setup.yml index 4c4bd67cd..2ddb4a5e2 100644 --- a/setups/test_souf/setup.yml +++ b/setups/test_souf/setup.yml @@ -36,13 +36,14 @@ namelist.config: namelist.oce: oce_dyn: state_equation: 0 - Div_c: 0.5 - Leith_c: 0.05 - w_split: False Fer_GM: False Redi: False mix_scheme: "PP" - + +namelist.dyn: + dynamics_general: + use_wsplit: False + namelist.tra: tracer_phys: use_momix: False diff --git a/test/run_tests.sh b/test/run_tests.sh index f19bdfe50..78f72fbaa 100755 --- a/test/run_tests.sh +++ b/test/run_tests.sh @@ -3,11 +3,12 @@ set -e cd ../ machine="docker" -tests="test_pi test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7 test_pi_zstar" +tests="test_pi test_souf test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7 test_pi_zstar" + +./configure.sh ubuntu for test in $tests; do -./configure.sh ubuntu echo $test mkrun pi $test -m $machine pwd From 2338ae1098ead4694286212e9a65338b7e1a8ce4 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 9 Nov 2021 23:05:28 +0100 Subject: [PATCH 530/909] small fixes --- src/oce_adv_tra_driver.F90 | 5 ----- src/oce_ale_tracer.F90 | 21 --------------------- 2 files changed, 26 deletions(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 739b07740..f6af723cb 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -170,11 +170,6 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, if (dynamics%use_wsplit) then !wvel/=wvel_e ! update for implicit contribution (w_split option) -<<<<<<< HEAD ->>>>>>> tracer advection part has been fullly OpenMP parallelized. It turns out that OpenMP does not slow down the modes as compared to MPI. -======= - ->>>>>>> solve conflicts from merging with refactoring branch call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index fc61acc1c..f3e133f61 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -12,27 +12,6 @@ subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) end subroutine end interface end module -<<<<<<< HEAD -module adv_tracers_ale_interface - interface - subroutine adv_tracers_ale(dt, tr_num, dynamics, tracer, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use MOD_DYN - real(kind=WP), intent(in), target :: dt - integer, intent(in), target :: tr_num - type(t_dyn) , intent(inout), target :: dynamics - type(t_tracer), intent(inout), target :: tracer - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface -end module -======= - ->>>>>>> solve conflicts from merging with refactoring branch module diff_ver_part_expl_ale_interface interface subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) From 05f5dfd112d22859a9efe3a3789347b1a0f7e269 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 9 Nov 2021 23:25:07 +0100 Subject: [PATCH 531/909] fix wrong conflict resolution --- src/oce_ale.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 20a33da31..a1b0a0a9b 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2742,7 +2742,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) type(t_dyn), intent(inout), target :: dynamics real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo - integer :: n, node + integer :: node real(kind=WP), dimension(:), pointer :: eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" From 1ba3c0a831aa40d7ae86d5b90068b88de23a7487 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 10 Nov 2021 09:06:44 +0000 Subject: [PATCH 532/909] add ifs interface for cycle 46r1 that was used in the Cycle 1 nextgems runs --- src/ifs_interface.F90 | 1506 +++++++++++++++++++++++++++++++++ src/ifs_modules.F90 | 1859 +++++++++++++++++++++++++++++++++++++++++ src/ifs_notused.F90 | 371 ++++++++ 3 files changed, 3736 insertions(+) create mode 100644 src/ifs_interface.F90 create mode 100644 src/ifs_modules.F90 create mode 100644 src/ifs_notused.F90 diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 new file mode 100644 index 000000000..4467dfa9a --- /dev/null +++ b/src/ifs_interface.F90 @@ -0,0 +1,1506 @@ +#if defined (__ifsinterface) +!===================================================== +! IFS interface for calling FESOM2 as a subroutine. +! +! -Original code for NEMO by Kristian Mogensen, ECMWF. +! -Adapted to FESOM2 by Thomas Rackow, AWI, 2018. +!----------------------------------------------------- + +MODULE nemogcmcoup_steps + INTEGER :: substeps !per IFS timestep +END MODULE nemogcmcoup_steps + +SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & + & lwaveonly, iatmunit, lwrite ) + + ! Initialize the FESOM model for single executable coupling + + USE par_kind !in ifs_modules.F90 + USE g_PARSUP, only: MPI_COMM_FESOM, mype + USE g_config, only: dt + USE g_clock, only: timenew, daynew, yearnew, month, day_in_month + USE nemogcmcoup_steps, ONLY : substeps + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: icomm + ! Initial date (e.g. 20170906), time, initial timestep and final time step + INTEGER, INTENT(OUT) :: inidate, initime, itini, itend + ! Length of the time step + REAL(wpIFS), INTENT(OUT) :: zstp + + ! inherited from interface to NEMO, not used here: + ! Coupling to waves only + LOGICAL, INTENT(IN) :: lwaveonly + ! Logfile unit (used if >=0) + INTEGER :: iatmunit + ! Write to this unit + LOGICAL :: lwrite + ! FESOM might perform substeps + INTEGER :: itend_fesom + INTEGER :: i + NAMELIST/namfesomstep/substeps + + ! TODO hard-coded here, put in namelist + substeps=2 + OPEN(9,file='namfesomstep.in') + READ(9,namfesomstep) + CLOSE(9) + + MPI_COMM_FESOM=icomm + itini = 1 + CALL main_initialize(itend_fesom) !also sets mype and npes + itend=itend_fesom/substeps + if(mype==0) then + WRITE(0,*)'!======================================' + WRITE(0,*)'! FESOM is initialized from within IFS.' + WRITE(0,*)'! get MPI_COMM_FESOM. =================' + WRITE(0,*)'! main_initialize done. ===============' + endif + + ! Set more information for the caller + + ! initial date and time (time is not used) + inidate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 + initime = 0 + if(mype==0) then + WRITE(0,*)'! FESOM initial date is ', inidate ,' ======' + WRITE(0,*)'! FESOM substeps are ', substeps ,' ======' + endif + + ! fesom timestep (as seen by IFS) + zstp = REAL(substeps,wpIFS)*dt + if(mype==0) then + WRITE(0,*)'! FESOM timestep as seen by IFS is ', real(zstp,4), 'sec (',substeps,'xdt)' + WRITE(0,*)'!======================================' + endif + +END SUBROUTINE nemogcmcoup_init + + +SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & + & npoints, nlocmsk, ngloind ) + + ! FESOM modules + USE g_PARSUP, only: mype, npes, myDim_nod2D, eDim_nod2D, myDim_elem2D, eDim_elem2D, eXDim_elem2D, & + myDim_edge2D, eDim_edge2D, myList_nod2D, myList_elem2D + USE MOD_MESH + !USE o_MESH, only: nod2D, elem2D + USE g_init2timestepping, only: meshinmod + + ! Initialize single executable coupling + USE parinter + USE scripremap + USE interinfo + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mypeIN,npesIN,icomm + ! Gaussian grid information + ! Number of points + INTEGER, INTENT(IN) :: npoints + ! Integer mask and global indices + INTEGER, DIMENSION(npoints), INTENT(IN) :: nlocmsk, ngloind + INTEGER :: iunit = 0 + + ! Local variables + type(t_mesh), target :: mesh + integer , pointer :: nod2D + integer , pointer :: elem2D + + ! Namelist containing the file names of the weights + CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & + & cdfile_T_to_gauss, cdfile_UV_to_gauss + CHARACTER(len=256) :: cdpathdist + LOGICAL :: lwritedist, lreaddist + LOGICAL :: lcommout + CHARACTER(len=128) :: commoutprefix + NAMELIST/namfesomcoup/cdfile_gauss_to_T,& + & cdfile_gauss_to_UV,& + & cdfile_T_to_gauss,& + & cdfile_UV_to_gauss,& + & cdpathdist, & + & lreaddist, & + & lwritedist, & + & lcommout, & + & commoutprefix,& + & lparbcast + + ! Global number of gaussian gridpoints + INTEGER :: nglopoints + ! Ocean grids accessed with NEMO modules + INTEGER :: noglopoints,nopoints + INTEGER, ALLOCATABLE, DIMENSION(:) :: omask,ogloind + ! SCRIP remapping data structures. + TYPE(scripremaptype) :: remap_gauss_to_T, remap_T_to_gauss, & + & remap_gauss_to_UV, remap_UV_to_gauss + ! Misc variables + INTEGER :: i,j,k,ierr + LOGICAL :: lexists + + ! associate the mesh, only what is needed here + ! #include "associate_mesh.h" + mesh = meshinmod + nod2D => mesh%nod2D + elem2D => mesh%elem2D + + + ! here FESOM knows about the (total number of) MPI tasks + + if(mype==0) then + write(*,*) 'MPI has been initialized in the atmospheric model' + write(*, *) 'Running on ', npes, ' PEs' + end if + + ! Read namelists + + cdfile_gauss_to_T = 'gausstoT.nc' + cdfile_gauss_to_UV = 'gausstoUV.nc' + cdfile_T_to_gauss = 'Ttogauss.nc' + cdfile_UV_to_gauss = 'UVtogauss.nc' + lcommout = .FALSE. + commoutprefix = 'parinter_comm' + cdpathdist = './' + lreaddist = .FALSE. + lwritedist = .FALSE. + + OPEN(9,file='namfesomcoup.in') + READ(9,namfesomcoup) + CLOSE(9) + + ! Global number of Gaussian gridpoints + + CALL mpi_allreduce( npoints, nglopoints, 1, & + & mpi_integer, mpi_sum, icomm, ierr) + + + if(mype==0) then + WRITE(0,*)'!======================================' + WRITE(0,*)'! SCALARS =============================' + + WRITE(0,*)'Update FESOM global scalar points' + endif + + noglopoints=nod2D + nopoints=myDim_nod2d + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + omask(:)= 1 ! all points are ocean points + ogloind(1:myDim_nod2d)= myList_nod2D(1:myDim_nod2d) ! global index for local point number + + ! Could be helpful later: + ! Replace global numbering with a local one + ! tmp(1:nod2d)=0 + ! DO n=1, myDim_nod2D+eDim_nod2D + ! tmp(myList_nod2D(n))=n + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean T-grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_T,remap_gauss_to_T,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_T,remap_gauss_to_T) + ENDIF + CALL parinter_init( mype, npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_T, gausstoT, lcommout, TRIM(commoutprefix)//'_gtoT', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_T) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT') + ENDIF + ENDIF + + ! From ocean T-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_T_to_gauss,remap_T_to_gauss,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_T_to_gauss,remap_T_to_gauss) + ENDIF + + CALL parinter_init( mype, npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_T_to_gauss, Ttogauss, lcommout, TRIM(commoutprefix)//'_Ttog', & + & iunit ) + CALL scripremap_dealloc(remap_T_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + + + if(mype==0) then + WRITE(0,*)'!======================================' + WRITE(0,*)'! VECTORS =============================' + + WRITE(0,*)'Update FESOM global vector points' + endif + noglopoints=elem2D + nopoints=myDim_elem2D + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + + omask(:)= 1 ! all elements are in the ocean + ogloind(1:myDim_elem2D) = myList_elem2D(1:myDim_elem2D) ! global index for local element number + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean UV-grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_UV,remap_gauss_to_UV,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_UV,remap_gauss_to_UV) + ENDIF + CALL parinter_init( mype, npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_UV, gausstoUV, lcommout, TRIM(commoutprefix)//'_gtoUV', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_UV) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV') + ENDIF + ENDIF + + ! From ocean UV-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_UV_to_gauss,remap_UV_to_gauss,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_UV_to_gauss,remap_UV_to_gauss) + ENDIF + + CALL parinter_init( mype, npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_UV_to_gauss, UVtogauss, lcommout, TRIM(commoutprefix)//'_UVtog', & + & iunit ) + CALL scripremap_dealloc(remap_UV_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + +END SUBROUTINE nemogcmcoup_coupinit + + +SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & + & nopoints, pgsst, pgist, pgalb, & + & pgifr, pghic, pghsn, pgucur, pgvcur, & + & pgistl, licelvls ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the FESOM grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind ! in ifs_modules.F90 + USE o_ARRAYS, ONLY : tr_arr, UV + USE i_arrays, ONLY : m_ice, a_ice, m_snow + USE i_therm_param, ONLY : tmelt + !USE o_PARAM, ONLY : WP + USE g_PARSUP, only: myDim_nod2D,eDim_nod2D, myDim_elem2D,eDim_elem2D,eXDim_elem2D + !USE o_MESH, only: elem2D_nodes, coord_nod2D + USE MOD_MESH + USE g_init2timestepping, only: meshinmod + + USE g_rotate_grid, only: vector_r2g + USE parinter + USE scripremap + USE interinfo + + IMPLICIT NONE + + ! Arguments + REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur + REAL(wpIFS), DIMENSION(nopoints,3) :: pgistl + LOGICAL :: licelvls + + type(t_mesh), target :: mesh + real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D + integer, dimension(:,:) , pointer :: elem2D_nodes + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + REAL(wpIFS), DIMENSION(myDim_nod2D) :: zsend + REAL(wpIFS), DIMENSION(myDim_elem2D) :: zsendU, zsendV + INTEGER :: elnodes(3) + REAL(wpIFS) :: rlon, rlat + + ! Loop variables + INTEGER :: n, elem, ierr + + !#include "associate_mesh.h" + ! associate what is needed only + mesh = meshinmod + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D + elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes + + + ! =================================================================== ! + ! Pack SST data and convert to K. 'pgsst' is on Gauss grid. + do n=1,myDim_nod2D + zsend(n)=tr_arr(1, n, 1)+tmelt ! sea surface temperature [K], + ! (1=surface, n=node, 1/2=T/S) + enddo + + ! Interpolate SST + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgsst ) + + + ! =================================================================== ! + ! Pack ice fraction data [0..1] and interpolate: 'pgifr' on Gauss. + ! zsend(:)=a_ice(:) + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, a_ice, & + & nopoints, pgifr ) + + + ! =================================================================== ! + ! Pack ice temperature data (already in K) + zsend(:)=273.15 + + ! Interpolate ice surface temperature: 'pgist' on Gaussian grid. + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgist ) + + + ! =================================================================== ! + ! Pack ice albedo data and interpolate: 'pgalb' on Gaussian grid. + zsend(:)=0.7 + + ! Interpolate ice albedo + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgalb ) + + + ! =================================================================== ! + ! Pack ice thickness data and interpolate: 'pghic' on Gaussian grid. + zsend(:)=m_ice(:)/max(a_ice(:),0.01) ! ice thickness (mean over ice) + + ! Interpolation of average ice thickness + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pghic ) + + + ! =================================================================== ! + ! Pack snow thickness data and interpolate: 'pghsn' on Gaussian grid. + zsend(:)=m_snow(:)/max(a_ice(:),0.01) ! snow thickness (mean over ice) + + ! Interpolation of snow thickness + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pghsn ) + + + ! =================================================================== ! + ! Surface currents need to be rotated to geographical grid + + ! Pack u(v) surface currents + zsendU(:)=UV(1,1,1:myDim_elem2D) + zsendV(:)=UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here + + do elem=1, myDim_elem2D + + ! compute element midpoints + elnodes=elem2D_nodes(:,elem) + rlon=sum(coord_nod2D(1,elnodes))/3.0_wpIFS + rlat=sum(coord_nod2D(2,elnodes))/3.0_wpIFS + + ! Rotate vectors to geographical coordinates (r2g) + call vector_r2g(zsendU(elem), zsendV(elem), rlon, rlat, 0) ! 0-flag for rot. coord + + end do + +#ifdef FESOM_TODO + + ! We need to sort out the non-unique global index before we + ! can couple currents + + ! Interpolate: 'pgucur' and 'pgvcur' on Gaussian grid. + CALL parinter_fld( mype, npes, icomm, UVtogauss, & + & myDim_elem2D, zsendU, & + & nopoints, pgucur ) + + CALL parinter_fld( mype, npes, icomm, UVtogauss, & + & myDim_elem2D, zsendV, & + & nopoints, pgvcur ) + +#else + + pgucur(:) = 0.0 + pgvcur(:) = 0.0 + +#endif + +#ifndef FESOM_TODO + + if(mype==0) then + WRITE(0,*)'Everything implemented except ice level temperatures (licelvls).' + endif + +#else + + ! Ice level temperatures + + IF (licelvls) THEN + +#if defined key_lim2 + + DO jl = 1, 3 + + ! Pack ice temperatures data at level jl(already in K) + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tbif (ji,jj,jl) + ENDDO + ENDDO + + ! Interpolate ice temperature at level jl + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgistl(:,jl) ) + + ENDDO + +#else + WRITE(0,*)'licelvls needs to be sorted for LIM3' + CALL abort +#endif + + ENDIF + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) + +#endif + +END SUBROUTINE nemogcmcoup_lim2_get + + +SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & + & npoints, & + & taux_oce, tauy_oce, taux_ice, tauy_ice, & + & qs___oce, qs___ice, qns__oce, qns__ice, dqdt_ice, & + & evap_tot, evap_ice, prcp_liq, prcp_sol, & + & runoffIN, ocerunoff, tcc, lcc, tice_atm, & + & kt, ldebug, loceicemix, lqnsicefilt ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind !in ifs_modules.F90 + USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex, eDim_nod2D, eDim_elem2D, eXDim_elem2D, myDim_edge2D, eDim_edge2D + !USE o_MESH, only: coord_nod2D !elem2D_nodes + USE MOD_MESH + USE g_init2timestepping, only: meshinmod + !USE o_PARAM, ONLY : WP, use wpIFS from par_kind (IFS) + USE g_rotate_grid, only: vector_r2g, vector_g2r + USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & + & evap_no_ifrac, sublimation !'longwave' only stand-alone, 'evaporation' filled later + USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, oce_heat_flux, ice_heat_flux + USE o_ARRAYS, only: stress_atmoce_x, stress_atmoce_y + USE g_comm_auto ! exchange_nod does the halo exchange + + ! all needed? + USE parinter + USE scripremap + USE interinfo + + IMPLICIT NONE + + ! =================================================================== ! + ! Arguments ========================================================= ! + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & + & taux_oce, tauy_oce, taux_ice, tauy_ice, & + & qs___oce, qs___ice, qns__oce, qns__ice, & + & dqdt_ice, evap_tot, evap_ice, prcp_liq, prcp_sol, & + & runoffIN, ocerunoff, tcc, lcc, tice_atm + + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + ! QS/QNS mixed switch + LOGICAL, INTENT(IN) :: loceicemix + ! QNS ice filter switch (requires tice_atm to be sent) + LOGICAL, INTENT(IN) :: lqnsicefilt + + type(t_mesh), target :: mesh + + ! Local variables + INTEGER :: n + REAL(wpIFS), parameter :: rhofwt = 1000. ! density of freshwater + + + ! Packed receive buffer + REAL(wpIFS), DIMENSION(myDim_nod2D) :: zrecv + REAL(wpIFS), DIMENSION(myDim_elem2D):: zrecvU, zrecvV + + + !#include "associate_mesh.h" + ! associate only the necessary things + real(kind=WP), dimension(:,:), pointer :: coord_nod2D + mesh = meshinmod + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D + + ! =================================================================== ! + ! Sort out incoming arrays from the IFS and put them on the ocean grid + + ! TODO + shortwave(:)=0. ! Done, updated below. What to do with shortwave over ice?? + !longwave(:)=0. ! Done. Only used in stand-alone mode. + prec_rain(:)=0. ! Done, updated below. + prec_snow(:)=0. ! Done, updated below. + evap_no_ifrac=0. ! Done, updated below. This is evap over ocean, does this correspond to evap_tot? + sublimation=0. ! Done, updated below. + ! + ice_heat_flux=0. ! Done. This is qns__ice currently. Is this the non-solar heat flux? ! non solar heat fluxes below ! (qns) + oce_heat_flux=0. ! Done. This is qns__oce currently. Is this the non-solar heat flux? + ! + runoff(:)=0. ! not used apparently. What is runoffIN, ocerunoff? + !evaporation(:)=0. + !ice_thermo_cpl.F90: !---- total evaporation (needed in oce_salt_balance.F90) + !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice + stress_atmice_x=0. ! Done, taux_ice + stress_atmice_y=0. ! Done, tauy_ice + stress_atmoce_x=0. ! Done, taux_oce + stress_atmoce_y=0. ! Done, tauy_oce + + + ! =================================================================== ! + !1. Interpolate ocean solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & + & myDim_nod2D, zrecv ) + + ! Unpack ocean solar radiation, without halo + shortwave(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(shortwave) + + + ! =================================================================== ! + !2. Interpolate ice solar radiation to T grid + ! DO NOTHING + + + ! =================================================================== ! + !3. Interpolate ocean non-solar radiation to T grid (is this non-solar heat flux?) + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & + & myDim_nod2D, zrecv ) + + ! Unpack ocean non-solar, without halo + oce_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(oce_heat_flux) + + + ! =================================================================== ! + !4. Interpolate non-solar radiation over ice to T grid (is this non-solar heat flux?) + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & + & myDim_nod2D, zrecv ) + + ! Unpack ice non-solar + ice_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(ice_heat_flux) + + + ! =================================================================== ! + !5. D(q)/dT to T grid + ! DO NOTHING + + + ! =================================================================== ! + !6. Interpolate total evaporation to T grid + ! =================================================================== ! + !ice_thermo_cpl.F90: total evaporation (needed in oce_salt_balance.F90) + !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice + ! =================================================================== ! + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & + & myDim_nod2D, zrecv ) + + ! Unpack total evaporation, without halo + evap_no_ifrac(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign + + ! Do the halo exchange + call exchange_nod(evap_no_ifrac) + + !7. Interpolate sublimation (evaporation over ice) to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack sublimation (evaporation over ice), without halo + sublimation(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign + + ! Do the halo exchange + call exchange_nod(sublimation) + ! =================================================================== ! + ! =================================================================== ! + + + ! =================================================================== ! + !8. Interpolate liquid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & + & myDim_nod2D, zrecv ) + + ! Unpack liquid precipitation, without halo + prec_rain(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + + ! Do the halo exchange + call exchange_nod(prec_rain) + + + ! =================================================================== ! + !9. Interpolate solid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & + & myDim_nod2D, zrecv ) + + ! Unpack solid precipitation, without halo + prec_snow(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + + ! Do the halo exchange + call exchange_nod(prec_snow) + + + ! =================================================================== ! + !10. Interpolate runoff to T grid + ! + !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & + ! & myDim_nod2D, zrecv ) + ! + ! Unpack runoff, without halo + !runoff(1:myDim_nod2D)=zrecv(1:myDim_nod2D) !conversion?? + ! + ! Do the halo exchange + !call exchange_nod(runoff) + ! + !11. Interpolate ocean runoff to T grid + ! + !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & + ! & myDim_nod2D, zrecv ) + ! + ! Unpack ocean runoff + ! ?? + + !12. Interpolate total cloud fractions to T grid (tcc) + ! + !13. Interpolate low cloud fractions to T grid (lcc) + + + ! =================================================================== ! + ! STRESSES + + ! OVER OCEAN: + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_oce, & + & myDim_nod2D, zrecv ) + + ! Unpack x stress atm->oce, without halo; then do halo exchange + stress_atmoce_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmoce_x) + + ! + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_oce, & + & myDim_nod2D, zrecv ) + + ! Unpack y stress atm->oce, without halo; then do halo exchange + stress_atmoce_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmoce_y) + + ! =================================================================== ! + ! OVER ICE: + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack x stress atm->ice, without halo; then do halo exchange + stress_atmice_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmice_x) + + ! + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack y stress atm->ice, without halo; then do halo exchange + stress_atmice_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmice_y) + + + ! =================================================================== ! + ! ROTATE VECTORS FROM GEOGRAPHIC TO FESOMS ROTATED GRID + + !if ((do_rotate_oce_wind .AND. do_rotate_ice_wind) .AND. rotated_grid) then + do n=1, myDim_nod2D+eDim_nod2D + call vector_g2r(stress_atmoce_x(n), stress_atmoce_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) !0-flag for rot. coord. + call vector_g2r(stress_atmice_x(n), stress_atmice_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) + end do + !do_rotate_oce_wind=.false. + !do_rotate_ice_wind=.false. + !end if + + +#ifdef FESOM_TODO + + ! Packed receive buffer + REAL(wpIFS), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zrecv + ! Unpacked fields on ORCA grids + REAL(wpIFS), DIMENSION(jpi,jpj) :: zqs___oce, zqs___ice, zqns__oce, zqns__ice + REAL(wpIFS), DIMENSION(jpi,jpj) :: zdqdt_ice, zevap_tot, zevap_ice, zprcp_liq, zprcp_sol + REAL(wpIFS), DIMENSION(jpi,jpj) :: zrunoff, zocerunoff + REAL(wpIFS), DIMENSION(jpi,jpj) :: ztmp, zicefr + ! Arrays for rotation + REAL(wpIFS), DIMENSION(jpi,jpj) :: zuu,zvu,zuv,zvv,zutau,zvtau + ! Lead fraction for both LIM2/LIM3 + REAL(wpIFS), DIMENSION(jpi,jpj) :: zfrld + ! Mask for masking for I grid + REAL(wpIFS) :: zmsksum + ! For summing up LIM3 contributions to ice temperature + REAL(wpIFS) :: zval,zweig + + ! Loop variables + INTEGER :: ji,jj,jk,jl + ! netCDF debugging output variables + CHARACTER(len=128) :: cdoutfile + INTEGER :: inum + REAL(wpIFS) :: zhook_handle ! Dr Hook handle + + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',0,zhook_handle) + IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_update') + + ! Allocate the storage data + + IF (.NOT.lallociceflx) THEN + ALLOCATE( & + & zsqns_tot(jpi,jpj), & + & zsqns_ice(jpi,jpj), & + & zsqsr_tot(jpi,jpj), & + & zsqsr_ice(jpi,jpj), & + & zsemp_tot(jpi,jpj), & + & zsemp_ice(jpi,jpj), & + & zsevap_ice(jpi,jpj), & + & zsdqdns_ice(jpi,jpj), & + & zssprecip(jpi,jpj), & + & zstprecip(jpi,jpj), & + & zstcc(jpi,jpj), & + & zslcc(jpi,jpj), & + & zsatmist(jpi,jpj), & + & zsqns_ice_add(jpi,jpj)& + & ) + lallociceflx = .TRUE. + ENDIF + IF (.NOT.lallocstress) THEN + ALLOCATE( & + & zsutau(jpi,jpj), & + & zsvtau(jpi,jpj), & + & zsutau_ice(jpi,jpj), & + & zsvtau_ice(jpi,jpj) & + & ) + lallocstress = .TRUE. + ENDIF + + ! Sort out incoming arrays from the IFS and put them on the ocean grid + + !1. Interpolate ocean solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean solar radiation + + zqs___oce(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqs___oce(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !2. Interpolate ice solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice solar radiation + + zqs___ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqs___ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !3. Interpolate ocean non-solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean non-solar radiation + + zqns__oce(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqns__oce(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !4. Interpolate ice non-solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice non-solar radiation + + zqns__ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqns__ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !5. Interpolate D(q)/dT to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, dqdt_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack D(q)/D(T) + + zdqdt_ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zdqdt_ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !6. Interpolate total evaporation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack total evaporation + + zevap_tot(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zevap_tot(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !7. Interpolate evaporation over ice to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack evaporation over ice + + zevap_ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zevap_ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !8. Interpolate liquid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack liquid precipitation + + zprcp_liq(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zprcp_liq(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !9. Interpolate solid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack precipitation over ice + + zprcp_sol(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zprcp_sol(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !10. Interpolate runoff to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack runoff + + zrunoff(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zrunoff(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !11. Interpolate ocean runoff to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zocerunoff(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zocerunoff(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !12. Interpolate total cloud fractions to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tcc, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zstcc(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zstcc(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !13. Interpolate low cloud fractions to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, lcc, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zslcc(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zslcc(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! get sea ice fraction and lead fraction + +#if defined key_lim2 + zfrld(:,:) = frld(:,:) + zicefr(:,:) = 1 - zfrld(:,:) +#else + zicefr(:,:) = 0.0_wpIFS + DO jl = 1, jpl + zicefr(:,:) = zicefr(:,:) + a_i(:,:,jl) + ENDDO + zfrld(:,:) = 1 - zicefr(:,:) +#endif + + zsemp_tot(:,:) = zevap_tot(:,:) - zprcp_liq(:,:) - zprcp_sol(:,:) + zstprecip(:,:) = zprcp_liq(:,:) + zprcp_sol(:,:) + ! More consistent with NEMO, but does changes the results, so + ! we don't do it for now. + ! zsemp_tot(:,:) = zevap_tot(:,:) - zstprecip(:,:) + zsemp_ice(:,:) = zevap_ice(:,:) - zprcp_sol(:,:) + zssprecip(:,:) = - zsemp_ice(:,:) + zsemp_tot(:,:) = zsemp_tot(:,:) - zrunoff(:,:) + zsemp_tot(:,:) = zsemp_tot(:,:) - zocerunoff(:,:) + zsevap_ice(:,:) = zevap_ice(:,:) + + ! non solar heat fluxes ! (qns) + IF (loceicemix) THEN + zsqns_tot(:,:) = zqns__oce(:,:) + ELSE + zsqns_tot(:,:) = zfrld(:,:) * zqns__oce(:,:) + zicefr(:,:) * zqns__ice(:,:) + ENDIF + zsqns_ice(:,:) = zqns__ice(:,:) + ztmp(:,:) = zfrld(:,:) * zprcp_sol(:,:) * lfus ! add the latent heat of solid precip. melting + + zsqns_tot(:,:) = zsqns_tot(:,:) - ztmp(:,:) ! over free ocean + ! solar heat fluxes ! (qsr) + + IF (loceicemix) THEN + zsqsr_tot(:,:) = zqs___oce(:,:) + ELSE + zsqsr_tot(:,:) = zfrld(:,:) * zqs___oce(:,:) + zicefr(:,:) * zqs___ice(:,:) + ENDIF + zsqsr_ice(:,:) = zqs___ice(:,:) + + IF( ln_dm2dc ) THEN ! modify qsr to include the diurnal cycle + zsqsr_tot(:,:) = sbc_dcy( zsqsr_tot(:,:) ) + zsqsr_ice(:,:) = sbc_dcy( zsqsr_ice(:,:) ) + ENDIF + + zsdqdns_ice(:,:) = zdqdt_ice(:,:) + + ! Apply lateral boundary condition + + CALL lbc_lnk(zsqns_tot, 'T', 1.0) + CALL lbc_lnk(zsqns_ice, 'T', 1.0) + CALL lbc_lnk(zsqsr_tot, 'T', 1.0) + CALL lbc_lnk(zsqsr_ice, 'T', 1.0) + CALL lbc_lnk(zsemp_tot, 'T', 1.0) + CALL lbc_lnk(zsemp_ice, 'T', 1.0) + CALL lbc_lnk(zsdqdns_ice, 'T', 1.0) + CALL lbc_lnk(zssprecip, 'T', 1.0) + CALL lbc_lnk(zstprecip, 'T', 1.0) + CALL lbc_lnk(zstcc, 'T', 1.0) + CALL lbc_lnk(zslcc, 'T', 1.0) + + ! Interpolate atmospheric ice temperature to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tice_atm, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack atmospheric ice temperature + + zsatmist(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zsatmist(ji,jj) = zrecv(jk) + ENDDO + ENDDO + CALL lbc_lnk(zsatmist, 'T', 1.0) + + zsqns_ice_add(:,:) = 0.0_wpIFS + + ! Use the dqns_ice filter + + IF (lqnsicefilt) THEN + + ! Add filtr to qns_ice + +#if defined key_lim2 + ztmp(:,:) = tn_ice(:,:,1) +#else + DO jj = nldj, nlej + DO ji = nldi, nlei + zval=0.0 + zweig=0.0 + DO jl = 1, jpl + zval = zval + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) + zweig = zweig + a_i(ji,jj,jl) + ENDDO + IF ( zweig > 0.0 ) THEN + ztmp(ji,jj) = zval /zweig + ELSE + ztmp(ji,jj) = rt0 + ENDIF + ENDDO + ENDDO + CALL lbc_lnk(ztmp, 'T', 1.0) +#endif + + WHERE ( zicefr(:,:) > .001_wpIFS ) + zsqns_ice_add(:,:) = zsdqdns_ice(:,:) * ( ztmp(:,:) - zsatmist(:,:) ) + END WHERE + + zsqns_ice(:,:) = zsqns_ice(:,:) + zsqns_ice_add(:,:) + + ENDIF + + ! Interpolate u-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack u stress on U grid + + zuu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate v-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack v stress on U grid + + zvu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate u-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack u stress on V grid + + zuv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate v-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack v stress on V grid + + zvv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Rotate stresses from en to ij and put u,v stresses on U,V grids + + CALL repcmo( zuu, zvu, zuv, zvv, zsutau, zsvtau ) + + ! Apply lateral boundary condition on u,v stresses on the U,V grids + + CALL lbc_lnk( zsutau, 'U', -1.0 ) + CALL lbc_lnk( zsvtau, 'V', -1.0 ) + + ! Interpolate ice u-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice u stress on U grid + + zuu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice v-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice v stress on U grid + + zvu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice u-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice u stress on V grid + + zuv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice v-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice v stress on V grid + + zvv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Rotate stresses from en to ij and put u,v stresses on U,V grids + + CALL repcmo( zuu, zvu, zuv, zvv, zutau, zvtau ) + + ! Apply lateral boundary condition on u,v stresses on the U,V grids + + CALL lbc_lnk( zutau, 'U', -1.0 ) + CALL lbc_lnk( zvtau, 'V', -1.0 ) + +#if defined key_lim2_vp + + ! Convert to I grid for LIM2 for key_lim_vp + DO jj = 2, jpjm1 ! (U,V) ==> I + DO ji = 2, jpim1 ! NO vector opt. + zmsksum = umask(ji-1,jj,1) + umask(ji-1,jj-1,1) + zsutau_ice(ji,jj) = ( umask(ji-1,jj,1) * zutau(ji-1,jj) + & + & umask(ji-1,jj-1,1) * zutau(ji-1,jj-1) ) + IF ( zmsksum > 0.0 ) THEN + zsutau_ice(ji,jj) = zsutau_ice(ji,jj) / zmsksum + ENDIF + zmsksum = vmask(ji,jj-1,1) + vmask(ji-1,jj-1,1) + zsvtau_ice(ji,jj) = ( vmask(ji,jj-1,1) * zvtau(ji,jj-1) + & + & vmask(ji-1,jj-1,1) * zvtau(ji-1,jj-1) ) + IF ( zmsksum > 0.0 ) THEN + zsvtau_ice(ji,jj) = zsvtau_ice(ji,jj) / zmsksum + ENDIF + END DO + END DO + +#else + + zsutau_ice(:,:) = zutau(:,:) + zsvtau_ice(:,:) = zvtau(:,:) + +#endif + + CALL lbc_lnk( zsutau_ice, 'I', -1.0 ) + CALL lbc_lnk( zsvtau_ice, 'I', -1.0 ) + + ! Optionally write files write the data on the ORCA grid via IOM. + + IF (ldebug) THEN + WRITE(cdoutfile,'(A,I8.8)') 'zsutau_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsutau' , zsutau ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsvtau' , zsvtau ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsutau_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsutau_ice' , zsutau_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsvtau_ice' , zsvtau_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_tot' , zsqns_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_ice' , zsqns_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqsr_tot' , zsqsr_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqsr_ice' , zsqsr_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsemp_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsemp_tot' , zsemp_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsemp_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsemp_ice' , zsemp_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsdqdns_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsdqdns_ice' , zsdqdns_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zssprecip_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zssprecip' , zssprecip ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zstprecip_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zstprecip' , zstprecip ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsevap_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsevap_ice' , zsevap_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zstcc_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zstcc' , zstcc ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zslcc_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zslcc' , zslcc ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsatmist_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsatmist' , zsatmist ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_add_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_ice_add' , zsqns_ice_add ) + CALL iom_close( inum ) + ENDIF + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_update') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',1,zhook_handle) + +#else + + !FESOM part + !WRITE(0,*)'nemogcmcoup_lim2_update partially implemented. Proceeding...' + !CALL par_ex + +#endif + +END SUBROUTINE nemogcmcoup_lim2_update + + +SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) + + USE g_clock, only: yearnew, month, day_in_month + USE g_PARSUP, only: mype + USE nemogcmcoup_steps, ONLY : substeps + IMPLICIT NONE + + ! Arguments + + ! Time step + INTEGER, INTENT(IN) :: istp + + ! Data and time from NEMO + INTEGER, INTENT(OUT) :: icdate, ictime + + if(mype==0) then + WRITE(0,*)'! IFS at timestep ', istp, '. Do ', substeps , 'FESOM timesteps...' + endif + CALL main_timestepping(substeps) + + ! Compute date and time at the end of the time step + + icdate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 + ictime = 0 ! (time is not used) + + if(mype==0) then + WRITE(0,*)'! FESOM date at end of timestep is ', icdate ,' ======' + endif + +#ifdef FESOM_TODO + iye = ndastp / 10000 + imo = ndastp / 100 - iye * 100 + ida = MOD( ndastp, 100 ) + CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) + zjul = zjul + ( nsec_day + 0.5_wpIFS * rdttra(1) ) / 86400.0_wpIFS + CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) + icdate = iye * 10000 + imo * 100 + ida + ictime = ihh * 10000 + imm * 100 + iss +#endif + +END SUBROUTINE nemogcmcoup_step + + +SUBROUTINE nemogcmcoup_final + + USE g_PARSUP, only: mype + + ! Finalize the FESOM model + + IMPLICIT NONE + + if(mype==0) then + WRITE(*,*)'Finalization of FESOM from IFS.' + endif + CALL main_finalize + +END SUBROUTINE nemogcmcoup_final +#endif diff --git a/src/ifs_modules.F90 b/src/ifs_modules.F90 new file mode 100644 index 000000000..8f52ee153 --- /dev/null +++ b/src/ifs_modules.F90 @@ -0,0 +1,1859 @@ +#if defined (__ifsinterface) +#define __MYFILE__ 'ifs_modules.F90' +#define key_mpp_mpi +! Set of modules needed by the interface to IFS. +! +! -Original code by Kristian Mogensen, ECMWF. + +MODULE par_kind + IMPLICIT NONE + INTEGER, PUBLIC, PARAMETER :: & !: Floating point section + sp = SELECTED_REAL_KIND( 6, 37), & !: single precision (real 4) + dp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + wpIFS = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + ik = SELECTED_INT_KIND(6) !: integer precision +END MODULE par_kind + +MODULE nctools + + ! Utility subroutines for netCDF access + ! Modified : MAB (nf90, handle_error, LINE&FILE) + ! Modifled : KSM (new shorter name) + + USE netcdf + + PUBLIC ldebug_netcdf, nchdlerr + LOGICAL :: ldebug_netcdf = .FALSE. ! Debug switch for netcdf + +CONTAINS + + SUBROUTINE nchdlerr(status,lineno,filename) + + ! Error handler for netCDF access + IMPLICIT NONE + + + INTEGER :: status ! netCDF return status + INTEGER :: lineno ! Line number (usually obtained from + ! preprocessing __LINE__,__MYFILE__) + CHARACTER(len=*),OPTIONAL :: filename + + IF (status/=nf90_noerr) THEN + WRITE(*,*)'Netcdf error, code ',status + IF (PRESENT(filename)) THEN + WRITE(*,*)'In file ',filename,' in line ',lineno + ELSE + WRITE(*,*)'In line ',lineno + END IF + WRITE(*,'(2A)')' Error message : ',nf90_strerror(status) + CALL abort + ENDIF + + END SUBROUTINE nchdlerr + +!---------------------------------------------------------------------- +END MODULE nctools + +MODULE scrippar + INTEGER, PARAMETER :: scripdp = SELECTED_REAL_KIND(12,307) + INTEGER, PARAMETER :: scriplen = 80 +END MODULE scrippar + +MODULE scripgrid + + USE nctools + USE scrippar + + IMPLICIT NONE + + TYPE scripgridtype + INTEGER :: grid_size + INTEGER :: grid_corners + INTEGER :: grid_rank + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_dims + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lon + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_imask + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lon + CHARACTER(len=scriplen) :: grid_center_lat_units + CHARACTER(len=scriplen) :: grid_center_lon_units + CHARACTER(len=scriplen) :: grid_imask_units + CHARACTER(len=scriplen) :: grid_corner_lat_units + CHARACTER(len=scriplen) :: grid_corner_lon_units + CHARACTER(len=scriplen) :: title + END TYPE scripgridtype + +CONTAINS + + SUBROUTINE scripgrid_read( cdfilename, grid ) + + CHARACTER(len=*) :: cdfilename + TYPE(scripgridtype) :: grid + + INTEGER :: ncid, dimid, varid + + CALL scripgrid_init(grid) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_corners),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_rank),& + & __LINE__,__MYFILE__) + + CALL scripgrid_alloc(grid) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',grid%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_read + + SUBROUTINE scripgrid_write( cdgridfile, grid ) + + CHARACTER(len=*) :: cdgridfile + TYPE(scripgridtype) :: grid + + INTEGER :: ncid + INTEGER :: ioldfill + INTEGER :: idimsize,idimxsize,idimysize,idimcorners,idimrank + INTEGER :: idims1rank(1),idims1size(1),idims2(2) + INTEGER :: iddims,idcentlat,idcentlon,idimask,idcornlat,idcornlon + INTEGER :: igriddims(2) + + ! Setup netcdf file + + CALL nchdlerr(nf90_create(TRIM(cdgridfile),nf90_clobber,ncid),& + & __LINE__,__MYFILE__) + + ! Define dimensions + + CALL nchdlerr(nf90_def_dim(ncid,'grid_size',& + & grid%grid_size,idimsize),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_corners',& + & grid%grid_corners,idimcorners),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_rank',& + & grid%grid_rank,idimrank),& + & __LINE__,__MYFILE__) + + idims1rank(1) = idimrank + + idims1size(1) = idimsize + + idims2(1) = idimcorners + idims2(2) = idimsize + + ! Define variables + + CALL nchdlerr(nf90_def_var(ncid,'grid_dims',& + & nf90_int,idims1rank,iddims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lat',& + & nf90_double,idims1size,idcentlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlat,'units',& + & grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lon',& + & nf90_double,idims1size,idcentlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlon,'units',& + & grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_imask',& + & nf90_int,idims1size,idimask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idimask,'units',& + & grid%grid_imask_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lat',& + & nf90_double,idims2,idcornlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlat,'units',& + & grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lon',& + & nf90_double,idims2,idcornlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlon,'units',& + & grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & TRIM(grid%title)),& + & __LINE__,__MYFILE__) + + ! End of netCDF definition phase + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + ! Write variables + + + CALL nchdlerr(nf90_put_var(ncid,iddims,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlat,& + & grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlon,& + & grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idimask,& + & grid%grid_imask), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlat,& + & grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlon,& + & grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ! Close file + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_write + + SUBROUTINE scripgrid_init( grid ) + + TYPE(scripgridtype) :: grid + + grid%grid_size=0 + grid%grid_corners=0 + grid%grid_rank=0 + grid%grid_center_lat_units='' + grid%grid_center_lon_units='' + grid%grid_imask_units='' + grid%grid_corner_lat_units='' + grid%grid_corner_lon_units='' + grid%title='' + + END SUBROUTINE scripgrid_init + + SUBROUTINE scripgrid_alloc( grid ) + + TYPE(scripgridtype) :: grid + + IF ( (grid%grid_size == 0) .OR. & + & (grid%grid_corners == 0) .OR. & + & (grid%grid_rank == 0) ) THEN + WRITE(*,*)'scripgridtype not initialized' + CALL abort + ENDIF + + ALLOCATE( & + & grid%grid_dims(grid%grid_rank), & + & grid%grid_center_lat(grid%grid_size), & + & grid%grid_center_lon(grid%grid_size), & + & grid%grid_corner_lat(grid%grid_corners, grid%grid_size), & + & grid%grid_corner_lon(grid%grid_corners, grid%grid_size), & + & grid%grid_imask(grid%grid_size) & + & ) + + END SUBROUTINE scripgrid_alloc + + SUBROUTINE scripgrid_dealloc( grid ) + + TYPE(scripgridtype) :: grid + + DEALLOCATE( & + & grid%grid_dims, & + & grid%grid_center_lat, & + & grid%grid_center_lon, & + & grid%grid_corner_lat, & + & grid%grid_corner_lon, & + & grid%grid_imask & + & ) + + END SUBROUTINE scripgrid_dealloc + +END MODULE scripgrid + +MODULE scripremap + +#if defined key_mpp_mpi + USE mpi +#endif + USE nctools + USE scrippar + USE scripgrid + + IMPLICIT NONE + + TYPE scripremaptype + INTEGER :: num_links + INTEGER :: num_wgts + TYPE(scripgridtype) :: src + TYPE(scripgridtype) :: dst + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_frac + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_frac + INTEGER, ALLOCATABLE, DIMENSION(:) :: src_address + INTEGER, ALLOCATABLE, DIMENSION(:) :: dst_address + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: remap_matrix + CHARACTER(len=scriplen) :: src_grid_area_units + CHARACTER(len=scriplen) :: dst_grid_area_units + CHARACTER(len=scriplen) :: src_grid_frac_units + CHARACTER(len=scriplen) :: dst_grid_frac_units + CHARACTER(len=scriplen) :: title + CHARACTER(len=scriplen) :: normalization + CHARACTER(len=scriplen) :: map_method + CHARACTER(len=scriplen) :: history + CHARACTER(len=scriplen) :: conventions + END TYPE scripremaptype + +CONTAINS + + SUBROUTINE scripremap_read_work(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid, dimid, varid + LOGICAL :: lcorners + + lcorners=.TRUE. + + CALL scripremap_init(remap) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_size),& + & __LINE__,__MYFILE__) + + + IF (nf90_inq_dimid(ncid,'src_grid_corners',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + lcorners=.FALSE. + remap%src%grid_corners=1 + ENDIF + + IF (lcorners) THEN + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + remap%dst%grid_corners=1 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_links',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_links),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_wgts),& + & __LINE__,__MYFILE__) + + CALL scripremap_alloc(remap) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + IF (lcorners) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ELSE + + remap%src%grid_corner_lat(:,:) = 0.0 + remap%src%grid_corner_lon(:,:) = 0.0 + remap%dst%grid_corner_lat(:,:) = 0.0 + remap%dst%grid_corner_lon(:,:) = 0.0 + remap%src%grid_corner_lat_units = '' + remap%src%grid_corner_lon_units = '' + remap%dst%grid_corner_lat_units = '' + remap%dst%grid_corner_lon_units = '' + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'normalization',remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'map_method',remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'history',remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'conventions',remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'dest_grid',remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'source_grid',remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripremap_read_work + + SUBROUTINE scripremap_read(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + CALL scripremap_read_work(cdfilename,remap) + + END SUBROUTINE scripremap_read + + + SUBROUTINE scripremap_read_sgl(cdfilename,remap,& + & mype,nproc,mycomm,linteronly) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + INTEGER :: mype,nproc,mycomm + LOGICAL :: linteronly + + INTEGER, DIMENSION(8) :: isizes + INTEGER :: ierr, ip + + IF (mype==0) THEN + CALL scripremap_read_work(cdfilename,remap) +#if defined key_mpp_mpi + isizes(1)=remap%src%grid_size + isizes(2)=remap%dst%grid_size + isizes(3)=remap%src%grid_corners + isizes(4)=remap%dst%grid_corners + isizes(5)=remap%src%grid_rank + isizes(6)=remap%dst%grid_rank + isizes(7)=remap%num_links + isizes(8)=remap%num_wgts + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + ELSE + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + CALL scripremap_init(remap) + remap%src%grid_size=isizes(1) + remap%dst%grid_size=isizes(2) + remap%src%grid_corners=isizes(3) + remap%dst%grid_corners=isizes(4) + remap%src%grid_rank=isizes(5) + remap%dst%grid_rank=isizes(6) + remap%num_links=isizes(7) + remap%num_wgts=isizes(8) + CALL scripremap_alloc(remap) +#endif + ENDIF + +#if defined key_mpp_mpi + + IF (.NOT.linteronly) THEN + + CALL mpi_bcast( remap%src%grid_dims, remap%src%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lat, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%dst%grid_dims, remap%dst%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src_grid_area, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%title, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%normalization, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%map_method, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%history, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%conventions, scriplen, & + & mpi_character, 0, mycomm, ierr ) + ENDIF + + CALL mpi_bcast( remap%src_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%remap_matrix, remap%num_wgts*remap%num_links, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask, remap%src%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask, remap%dst%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + +#endif + END SUBROUTINE scripremap_read_sgl + + SUBROUTINE scripremap_write(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid + INTEGER :: dimsgs,dimdgs,dimsgc,dimdgc,dimsgr,dimdgr,dimnl,dimnw + INTEGER :: dims1(1),dims2(2) + INTEGER :: idsgd,iddgd,idsgea,iddgea,idsgeo,iddgeo + INTEGER :: idsgoa,idsgoo,iddgoa,iddgoo,idsgim,iddgim,idsgar,iddgar + INTEGER :: idsgf,iddgf,idsga,iddga,idsa,idda,idrm + + CALL nchdlerr(nf90_create(TRIM(cdfilename),nf90_clobber,ncid), & + & __LINE__, __MYFILE__ ) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_size',& + & remap%src%grid_size,dimsgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_size',& + & remap%dst%grid_size,dimdgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_corners',& + & remap%src%grid_corners,dimsgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_corners',& + & remap%dst%grid_corners,dimdgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_rank',& + & remap%src%grid_rank,dimsgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_rank',& + & remap%dst%grid_rank,dimdgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & remap%num_links,dimnl),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & remap%num_wgts,dimnw),& + & __LINE__,__MYFILE__) + + dims1(1)=dimsgr + CALL nchdlerr(nf90_def_var(ncid,'src_grid_dims',& + & nf90_int,dims1,idsgd),& + & __LINE__,__MYFILE__) + + dims1(1)=dimdgr + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_dims',& + & nf90_int,dims1,iddgd), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lat',& + & nf90_double,dims1,idsgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lat',& + & nf90_double,dims1,iddgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lon',& + & nf90_double,dims1,idsgeo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lon',& + & nf90_double,dims1,iddgeo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lat',& + & nf90_double,dims2,idsgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lon',& + & nf90_double,dims2,idsgoo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lat',& + & nf90_double,dims2,iddgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lon',& + & nf90_double,dims2,iddgoo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_imask',& + & nf90_int,dims1,idsgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_imask',& + & nf90_int,dims1,iddgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_area',& + & nf90_double,dims1,idsga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_area',& + & nf90_double,dims1,iddga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_frac',& + & nf90_double,dims1,idsgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_frac',& + & nf90_double,dims1,iddgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda), & + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,idsgea,'units',& + & remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgea,'units',& + & remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgeo,'units',& + & remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgeo,'units',& + & remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoa,'units',& + & remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoo,'units',& + & remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoa,'units',& + & remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoo,'units',& + & remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgim,'units',& + & remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgim,'units',& + & remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsga,'units',& + & remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddga,'units',& + & remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgf,'units',& + & remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgf,'units',& + & remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'normalization',& + & remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'map_method',& + & remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'history',& + & remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'conventions',& + & remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'dest_grid',& + & remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'source_grid',& + & remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgd,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgd,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgea,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgea,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgeo,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgeo,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgoa,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgoo,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoa,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoo,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgim,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgim,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsga,remap%src_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddga,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgf,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgf,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsa,remap%src_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idda,remap%dst_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idrm,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE scripremap_write + + SUBROUTINE scripremap_init(remap) + + TYPE(scripremaptype) :: remap + + CALL scripgrid_init(remap%src) + CALL scripgrid_init(remap%dst) + remap%num_links = 0 + remap%num_wgts = 0 + remap%title='' + remap%normalization='' + remap%map_method='' + remap%history='' + remap%conventions='' + remap%src_grid_area_units='' + remap%dst_grid_area_units='' + remap%src_grid_frac_units='' + remap%dst_grid_frac_units='' + + END SUBROUTINE scripremap_init + + SUBROUTINE scripremap_alloc(remap) + + TYPE(scripremaptype) :: remap + + IF ( (remap%num_links == 0) .OR. & + & (remap%num_wgts == 0) ) THEN + WRITE(*,*)'scripremaptype not initialized' + CALL abort + ENDIF + + CALL scripgrid_alloc(remap%src) + CALL scripgrid_alloc(remap%dst) + + ALLOCATE( & + & remap%src_grid_area(remap%src%grid_size), & + & remap%dst_grid_area(remap%dst%grid_size), & + & remap%src_grid_frac(remap%src%grid_size), & + & remap%dst_grid_frac(remap%dst%grid_size), & + & remap%src_address(remap%num_links), & + & remap%dst_address(remap%num_links), & + & remap%remap_matrix(remap%num_wgts, remap%num_links) & + & ) + + END SUBROUTINE scripremap_alloc + + SUBROUTINE scripremap_dealloc(remap) + + TYPE(scripremaptype) :: remap + + DEALLOCATE( & + & remap%src_grid_area, & + & remap%dst_grid_area, & + & remap%src_grid_frac, & + & remap%dst_grid_frac, & + & remap%src_address, & + & remap%dst_address, & + & remap%remap_matrix & + & ) + + CALL scripgrid_dealloc(remap%src) + CALL scripgrid_dealloc(remap%dst) + + CALL scripremap_init(remap) + + END SUBROUTINE scripremap_dealloc + +END MODULE scripremap + +MODULE parinter + +#if defined key_mpp_mpi + USE mpi +#endif + USE scripremap + USE scrippar + USE nctools + + IMPLICIT NONE + + ! Type to contains interpolation information + ! (like what is in scripremaptype) and message + ! passing information + + TYPE parinterinfo + ! Number of local links + INTEGER :: num_links + ! Destination side + INTEGER, POINTER, DIMENSION(:) :: dst_address + ! Source addresses and work array + INTEGER, POINTER, DIMENSION(:) :: src_address + ! Local remap matrix + REAL(scripdp), POINTER, DIMENSION(:,:) :: remap_matrix + ! Message passing information + ! Array of local addresses for send buffer + ! packing + INTEGER, POINTER, DIMENSION(:) :: send_address + ! Sending bookkeeping + INTEGER :: nsendtot + INTEGER, POINTER, DIMENSION(:) :: nsend,nsdisp + ! Receiving bookkeeping + INTEGER :: nrecvtot + INTEGER, POINTER, DIMENSION(:) :: nrecv,nrdisp + END TYPE parinterinfo + +CONTAINS + + SUBROUTINE parinter_init( mype, nproc, mpi_comm, & + & nsrclocpoints, nsrcglopoints, srcmask, srcgloind, & + & ndstlocpoints, ndstglopoints, dstmask, dstgloind, & + & remap, pinfo, lcommout, commoutprefix, iunit ) + + ! Setup interpolation based on SCRIP format weights in + ! remap and the source/destination grids information. + + ! Procedure: + + ! 1) A global SCRIP remapping file is read on all processors. + ! 2) Find local destination points in the global grid. + ! 3) Find which processor needs source data and setup buffer + ! information for sending data. + ! 4) Construct new src remapping for buffer received + + ! All information is stored in the TYPE(parinterinfo) output + ! data type + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc, mpi_comm + ! Source grid local and global number of grid points + INTEGER, INTENT(IN) :: nsrclocpoints, nsrcglopoints + ! Source integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcmask + ! Source global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcgloind + ! Destination grid local and global number of grid points + INTEGER, INTENT(IN) :: ndstlocpoints, ndstglopoints + ! Destination integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstmask + ! Destination global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstgloind + ! SCRIP remapping data + TYPE(scripremaptype) :: remap + ! Switch for output communication patterns + LOGICAL :: lcommout + CHARACTER(len=*) :: commoutprefix + ! Unit to use for output + INTEGER :: iunit + + ! Output arguments + + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + + ! Local variable + + ! Variable for glocal <-> local address/pe information + INTEGER, DIMENSION(nsrcglopoints) :: ilsrcmppmap, ilsrclocind + INTEGER, DIMENSION(nsrcglopoints) :: igsrcmppmap, igsrclocind + INTEGER, DIMENSION(ndstglopoints) :: ildstmppmap, ildstlocind + INTEGER, DIMENSION(ndstglopoints) :: igdstmppmap, igdstlocind + INTEGER, DIMENSION(nsrcglopoints) :: isrcpe,isrcpetmp + INTEGER, DIMENSION(nsrcglopoints) :: isrcaddtmp + INTEGER, DIMENSION(0:nproc-1) :: isrcoffset + INTEGER, DIMENSION(nproc) :: isrcno, isrcoff, isrccur + INTEGER, DIMENSION(nproc) :: ircvoff, ircvcur + INTEGER, DIMENSION(:), ALLOCATABLE :: isrctot, ircvtot + + ! Misc variable + INTEGER :: i,n,pe + INTEGER :: istatus + CHARACTER(len=256) :: cdfile + + ! Check that masks are consistent. + + ! Remark: More consistency tests between remapping information + ! and input argument could be code, but for now we settle + ! for checking the masks. + + ! Source grid + + DO i=1,nsrclocpoints + IF (srcmask(i)/=remap%src%grid_imask(srcgloind(i))) THEN + WRITE(iunit,*)'Source imask is inconsistent at ' + WRITE(iunit,*)'global index = ',srcgloind(i) + WRITE(iunit,*)'Source mask = ',srcmask(i) + WRITE(iunit,*)'Remap mask = ',remap%src%grid_imask(srcgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%src%grid_center_lat(srcgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%src%grid_center_lon(srcgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Destination grid + + DO i=1,ndstlocpoints + IF (dstmask(i)/=remap%dst%grid_imask(dstgloind(i))) THEN + WRITE(iunit,*)'Destination imask is inconsistent at ' + WRITE(iunit,*)'global index = ',dstgloind(i) + WRITE(iunit,*)'Destin mask = ',dstmask(i) + WRITE(iunit,*)'Remap mask = ',remap%dst%grid_imask(dstgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%dst%grid_center_lat(dstgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%dst%grid_center_lon(dstgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Setup global to local and vice versa mappings. + + ilsrcmppmap(:)=-1 + ilsrclocind(:)=0 + ildstmppmap(:)=-1 + ildstlocind(:)=0 + + DO i=1,nsrclocpoints + ilsrcmppmap(srcgloind(i))=mype + ilsrclocind(srcgloind(i))=i + ENDDO + + DO i=1,ndstlocpoints + ildstmppmap(dstgloind(i))=mype + ildstlocind(dstgloind(i))=i + ENDDO + +#if defined key_mpp_mpi + CALL mpi_allreduce(ilsrcmppmap,igsrcmppmap,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ilsrclocind,igsrclocind,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstmppmap,igdstmppmap,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstlocind,igdstlocind,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) +#else + igsrcmppmap(:)=ilsrcmppmap(:) + igsrclocind(:)=ilsrclocind(:) + igdstmppmap(:)=ildstmppmap(:) + igdstlocind(:)=ildstlocind(:) +#endif + + ! Optionally construct an ascii file listing what src and + ! dest points belongs to which task + + ! Since igsrcmppmap and igdstmppmap are global data only do + ! this for mype==0. + + IF (lcommout.AND.(mype==0)) THEN + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_srcmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,nsrcglopoints + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & igsrcmppmap(i)+1,remap%src%grid_imask(i) + ENDDO + CLOSE(9) + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dstmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,ndstglopoints + WRITE(9,*)remap%dst%grid_center_lat(i),& + & remap%dst%grid_center_lon(i), & + & igdstmppmap(i)+1,remap%dst%grid_imask(i) + ENDDO + CLOSE(9) + ENDIF + + ! + ! Standard interpolation in serial case is + ! + ! DO n=1,remap%num_links + ! zdst(remap%dst_address(n)) = zdst(remap%dst_address(n)) + & + ! & remap%remap_matrix(1,n)*zsrc(remap%src_address(n)) + ! END DO + ! + + ! In parallel we need to first find local number of links + + pinfo%num_links=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) & + & pinfo%num_links=pinfo%num_links+1 + ENDDO + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(1,pinfo%num_links)) + + ! Get local destination addresses + + n=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + n=n+1 + pinfo%dst_address(n)=& + & igdstlocind(remap%dst_address(i)) + pinfo%remap_matrix(:,n)=& + & remap%remap_matrix(:,i) + ENDIF + ENDDO + + ! Get sending processors maps. + + ! The same data point might need to be sent to many processors + ! so first construct a map for processors needing the data + + isrcpe(:)=-1 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + isrcpe(remap%src_address(i))=& + & igsrcmppmap(remap%src_address(i)) + ENDIF + ENDDO + + ! Optionally write a set if ascii file listing which tasks + ! mype needs to send to communicate with + + IF (lcommout) THEN + ! Destination processors + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dsts_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO pe=0,nproc-1 + IF (pe==mype) THEN + isrcpetmp(:)=isrcpe(:) + ENDIF +#if defined key_mpp_mpi + CALL mpi_bcast(isrcpetmp,nsrcglopoints,mpi_integer,pe,mpi_comm,istatus) +#endif + DO i=1,nsrcglopoints + IF (isrcpetmp(i)==mype) THEN + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & pe+1,mype+1 + ENDIF + ENDDO + ENDDO + CLOSE(9) + ENDIF + + ! Get number of points to send to each processor + + ALLOCATE(pinfo%nsend(0:nproc-1)) + isrcno(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrcno(isrcpe(i)+1)=isrcno(isrcpe(i)+1)+1 + ENDIF + ENDDO +#if defined key_mpp_mpi + CALL mpi_alltoall(isrcno,1,mpi_integer, & + & pinfo%nsend(0:nproc-1),1,mpi_integer, & + & mpi_comm,istatus) +#else + pinfo%nsend(0:nproc-1) = isrcno(1:nproc) +#endif + pinfo%nsendtot=SUM(pinfo%nsend(0:nproc-1)) + + ! Construct sending buffer mapping. Data is mapping in + ! processor order. + + ALLOCATE(pinfo%send_address(pinfo%nsendtot)) + + ! Temporary arrays for mpi all to all. + + ALLOCATE(isrctot(SUM(isrcno(1:nproc)))) + ALLOCATE(ircvtot(SUM(pinfo%nsend(0:nproc-1)))) + + ! Offset for message parsing + + isrcoff(1)=0 + ircvoff(1)=0 + DO i=1,nproc-1 + isrcoff(i+1) = isrcoff(i) + isrcno(i) + ircvoff(i+1) = pinfo%nsend(i-1) + ircvoff(i) + ENDDO + + ! Pack indices i into a buffer + + isrccur(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrccur(isrcpe(i)+1)=isrccur(isrcpe(i)+1)+1 + isrctot(isrccur(isrcpe(i)+1)+isrcoff(isrcpe(i)+1)) = i + ENDIF + ENDDO + + ! Send the data + +#if defined key_mpp_mpi + CALL mpi_alltoallv(& + & isrctot,isrccur,isrcoff,mpi_integer, & + & ircvtot,pinfo%nsend(0:nproc-1),ircvoff,mpi_integer, & + & mpi_comm,istatus) +#else + ircvtot(:)=isrctot(:) +#endif + + ! Get the send address. ircvtot will at this point contain the + ! addresses in the global index needed for message passing + + DO i=1,pinfo%nsendtot + pinfo%send_address(i)=igsrclocind(ircvtot(i)) + ENDDO + + ! Deallocate the mpi all to all arrays + + DEALLOCATE(ircvtot,isrctot) + + ! Get number of points to receive to each processor + + ALLOCATE(pinfo%nrecv(0:nproc-1)) + pinfo%nrecv(0:nproc-1)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0 .AND. isrcpe(i)=0 .AND. isrcpe(i)0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & pinfo%num_links,dimnl),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & 1,dimnw),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nsendtot',& + & pinfo%nsendtot,dimnst),& + & __LINE__,__MYFILE__) + ENDIF + + IF (pinfo%nrecvtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nrecvtot',& + & pinfo%nrecvtot,dimnrt),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'nproc',& + & nproc,dimnpr),& + & __LINE__,__MYFILE__) + + IF (pinfo%num_links>0) THEN + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa),& + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsend',& + & nf90_int,dims1,idns),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + dims1(1)=dimnst + CALL nchdlerr(nf90_def_var(ncid,'send_address',& + & nf90_int,dims1,idsaa),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrecv',& + & nf90_int,dims1,idnr),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsdisp',& + & nf90_int,dims1,idnsp),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrdisp',& + & nf90_int,dims1,idnrp),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + + IF (pinfo%num_links>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idda,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsa,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idrm,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idns,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idsaa,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idnr,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnsp,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnrp,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE parinter_write + + SUBROUTINE parinter_read( mype, nproc, & + & nsrcglopoints, ndstglopoints, & + & pinfo, cdpath, cdprefix, lexists ) + + ! Write pinfo information in a netCDF file in order to + ! be able to read it rather than calling parinter_init + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc + ! Source grid local global number of grid points + INTEGER, INTENT(IN) :: nsrcglopoints + ! Destination grid global number of grid points + INTEGER, INTENT(IN) :: ndstglopoints + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + ! Does the information exists + LOGICAL :: lexists + ! Path and file prefix + CHARACTER(len=*) :: cdpath, cdprefix + + ! Local variable + + ! Misc variable + CHARACTER(len=1024) :: cdfile + INTEGER :: ncid, dimid, varid, num_wgts + + WRITE(cdfile,'(A,2(I8.8,A),2(I4.4,A),A)') & + & TRIM(cdpath)//'/'//TRIM(cdprefix)//'_', & + & nsrcglopoints,'_',ndstglopoints,'_',mype,'_',nproc,'.nc' + + + lexists=nf90_open(TRIM(cdfile),nf90_nowrite,ncid)==nf90_noerr + + IF (lexists) THEN + + ! If num_links is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'num_links',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%num_links),& + & __LINE__,__MYFILE__) + ELSE + pinfo%num_links=0 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=num_wgts),& + & __LINE__,__MYFILE__) + IF (num_wgts/=1) THEN + WRITE(0,*)'parinter_read: num_wgts has to be 1 for now' + CALL abort + ENDIF + + ! If nsendtot is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'nsendtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nsendtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nsendtot=0 + ENDIF + + IF(nf90_inq_dimid(ncid,'nrecvtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nrecvtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nrecvtot=0 + ENDIF + + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(num_wgts,pinfo%num_links),& + & pinfo%nsend(0:nproc-1),& + & pinfo%send_address(pinfo%nsendtot),& + & pinfo%nrecv(0:nproc-1),& + & pinfo%nsdisp(0:nproc-1),& + & pinfo%nrdisp(0:nproc-1)) + + IF (pinfo%num_links>0) THEN + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nsend',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'send_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nrecv',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nsdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nrdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + ENDIF + + END SUBROUTINE parinter_read + +END MODULE parinter + +MODULE interinfo + + ! Parallel regridding information + + USE parinter + + IMPLICIT NONE + + SAVE + + ! IFS to NEMO + + TYPE(parinterinfo) :: gausstoT,gausstoUV + + ! NEMO to IFS + + TYPE(parinterinfo) :: Ttogauss, UVtogauss + + ! Read parinterinfo on task 0 only and broadcast. + + LOGICAL :: lparbcast = .FALSE. + +END MODULE interinfo +#endif diff --git a/src/ifs_notused.F90 b/src/ifs_notused.F90 new file mode 100644 index 000000000..d596169c4 --- /dev/null +++ b/src/ifs_notused.F90 @@ -0,0 +1,371 @@ +#if defined (__ifsinterface) +! Routines usually provided by the library that are currently +! not implemented for FESOM2. +! +! -Original code by Kristian Mogensen, ECMWF. + +SUBROUTINE nemogcmcoup_end_ioserver + +! End the NEMO mppio server + + WRITE(*,*)'No mpp_ioserver used' +! CALL abort + +END SUBROUTINE nemogcmcoup_end_ioserver + +SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + LOGICAL :: lnemoioserver + + WRITE(*,*)'No mpp_ioserver' + !CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver + + +SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver_2 + + +SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & + & nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wpIFS), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints,nlev + + ! Local variables + + WRITE(0,*)'nemogcmcoup_mlflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlflds_get + + +SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & + & nopoints, pgsst, pgice, pgucur, pgvcur ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice and currents + REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get should not be called with FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_get + + +SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & + & nopoints, pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wpIFS), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + + WRITE(0,*)'nemogcmcoup_exflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_exflds_get + + +SUBROUTINE nemogcmcoup_get_1way( mype, npes, icomm ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get_1way should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_get_1way + + +SUBROUTINE nemogcmcoup_mlinit( mype, npes, icomm, & + & nlev, nopoints, pdep, pmask ) + + ! Get information about the vertical discretization of the ocean model + + ! nlevs are maximum levels on input and actual number levels on output + + USE par_kind + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Grid information + INTEGER, INTENT(INOUT) :: nlev, nopoints + REAL(wpIFS), INTENT(OUT), DIMENSION(nlev) :: pdep + REAL(wpIFS), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask + + ! Local variables + + ! dummy argument with explicit INTENT(OUT) declaration needs an explicit value + pdep=0. + pmask=0. + + WRITE(0,*)'nemogcmcoup_mlinit should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlinit + + +SUBROUTINE nemogcmcoup_update( mype, npes, icomm, & + & npoints, pgutau, pgvtau, & + & pgqsr, pgqns, pgemp, kt, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & + & pgutau, pgvtau, pgqsr, pgqns, pgemp + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update should be called with with.' + CALL abort + +END SUBROUTINE nemogcmcoup_update + +SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & + & npoints, pgsst, pgtsk, kt, ldebug ) + + ! Update addetiona in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Input on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & + & pgsst, pgtsk + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom. Commented ABORT. Proceeding...' + !CALL abort + + +END SUBROUTINE nemogcmcoup_update_add + + +SUBROUTINE nemogcmcoup_wam_coupinit( mype, npes, icomm, & + & nlocpoints, nglopoints, & + & nlocmsk, ngloind, iunit ) + + ! Initialize single executable coupling between WAM and NEMO + ! This is called from WAM. + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! WAM grid information + ! Number of local and global points + INTEGER, INTENT(IN) :: nlocpoints, nglopoints + ! Integer mask and global indices + INTEGER, DIMENSION(nlocpoints), INTENT(IN) :: nlocmsk, ngloind + ! Unit for output in parinter_init + INTEGER :: iunit + + WRITE(0,*)'Wam coupling not implemented for FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_coupinit + + +SUBROUTINE nemogcmcoup_wam_get( mype, npes, icomm, & + & nopoints, pwsst, pwicecov, pwicethk, & + & pwucur, pwvcur, licethk ) + + ! Interpolate from the ORCA grid + ! to the WAM grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number WAM grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice cover, ice thickness and currents + REAL(wpIFS), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur + LOGICAL :: licethk + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_get + + +SUBROUTINE nemogcmcoup_wam_update( mype, npes, icomm, & + & npoints, pwswh, pwmwp, & + & pwphioc, pwtauoc, pwstrn, & + & pwustokes, pwvstokes, & + & cdtpro, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & + & pwswh, pwmwp, pwphioc, pwtauoc, pwstrn, pwustokes, pwvstokes + ! Current time + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update + + +SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & + & pwutau, pwvtau, pwuv10n, pwphif,& + & cdtpro, ldebug ) + + ! Update stresses in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & + & pwutau, pwvtau, pwuv10n, pwphif + ! Current time step + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update_stress should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update_stress +#endif From 970cfa7d03d9fe173c4be00a59468dbd15a7f33d Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 10 Nov 2021 09:29:04 +0000 Subject: [PATCH 533/909] moved interface to its own separate folder, similar to how this is done for icepack --- src/{ => ifs_interface}/ifs_interface.F90 | 0 src/{ => ifs_interface}/ifs_modules.F90 | 0 src/{ => ifs_interface}/ifs_notused.F90 | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename src/{ => ifs_interface}/ifs_interface.F90 (100%) rename src/{ => ifs_interface}/ifs_modules.F90 (100%) rename src/{ => ifs_interface}/ifs_notused.F90 (100%) diff --git a/src/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 similarity index 100% rename from src/ifs_interface.F90 rename to src/ifs_interface/ifs_interface.F90 diff --git a/src/ifs_modules.F90 b/src/ifs_interface/ifs_modules.F90 similarity index 100% rename from src/ifs_modules.F90 rename to src/ifs_interface/ifs_modules.F90 diff --git a/src/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 similarity index 100% rename from src/ifs_notused.F90 rename to src/ifs_interface/ifs_notused.F90 From 59664e7af2b22326a003c8f584763b228ddeceb2 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 10 Nov 2021 09:41:33 +0000 Subject: [PATCH 534/909] add ifs interface case to CMakeLists.txt --- src/CMakeLists.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a455f227c..d38e57edd 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -20,6 +20,9 @@ if(${USE_ICEPACK}) file(GLOB sources_Fortran ${src_home}/*.F90 ${src_home}/icepack_drivers/*.F90 ${src_home}/icepack_drivers/Icepack/columnphysics/*.F90) +elseif(${BUILD_FESOM_AS_LIBRARY}) + file(GLOB sources_Fortran ${src_home}/*.F90 + ${src_home}/ifs_interface/*.F90) # ICEPACK + LIBRARY NOT SUPPORTED (YET) else() file(GLOB sources_Fortran ${src_home}/*.F90) endif() @@ -80,6 +83,9 @@ endif() if(${USE_ICEPACK}) target_compile_definitions(${PROJECT_NAME} PRIVATE __icepack) endif() +if(${BUILD_FESOM_AS_LIBRARY}) + target_compile_definitions(${PROJECT_NAME} PRIVATE __ifsinterface) +endif() if(${VERBOSE}) target_compile_definitions(${PROJECT_NAME} PRIVATE VERBOSE) endif() From e73e0ca89fddbe9ecdfb3b527f14802cd8529506 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 10 Nov 2021 15:03:02 +0100 Subject: [PATCH 535/909] tracer part has been nearly fully covered with OPENMP statements. the delay was due to several bugs which appeared on the way. the most difficult one was the treatment of z_bar_n and z_n these were in defined in t_mesh which is passed with (IN) attribute. however, these arrays were modified through the use of the pointer. this shall not happened and OPENMP crashed while MPI not. Hence, from now on we define z_bar_n and z_n locally in every subroutine where they are required. --- src/MOD_MESH.F90 | 10 +-- src/MOD_TRACER.F90 | 2 +- src/associate_mesh_ass.h | 4 +- src/associate_mesh_def.h | 4 +- src/gen_modules_config.F90 | 2 +- src/gen_modules_diag.F90 | 4 +- src/oce_adv_tra_ver.F90 | 1 + src/oce_ale.F90 | 16 ++-- src/oce_ale_pressure_bv.F90 | 10 ++- src/oce_ale_tracer.F90 | 142 ++++++++++++++++-------------------- src/oce_fer_gm.F90 | 2 + src/oce_modules.F90 | 1 - src/oce_setup_step.F90 | 1 - src/oce_tracer_mod.F90 | 21 ++++-- src/write_step_info.F90 | 3 +- 15 files changed, 113 insertions(+), 110 deletions(-) diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index 4eb0c23e1..8dc1c6414 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -117,7 +117,7 @@ MODULE MOD_MESH ! --> auxiliary array to store depth of layers and depth of mid level due to changing ! layer thinkness at every node -real(kind=WP), allocatable,dimension(:) :: zbar_n, Z_n +!real(kind=WP), allocatable,dimension(:) :: zbar_n, Z_n ! new bottom depth at node and element due to partial cells real(kind=WP), allocatable,dimension(:) :: zbar_n_bot @@ -222,8 +222,8 @@ subroutine write_t_mesh(mesh, unit, iostat, iomsg) call write_bin_array(mesh%dhe, unit, iostat, iomsg) call write_bin_array(mesh%hbar, unit, iostat, iomsg) call write_bin_array(mesh%hbar_old, unit, iostat, iomsg) - call write_bin_array(mesh%zbar_n, unit, iostat, iomsg) - call write_bin_array(mesh%Z_n, unit, iostat, iomsg) +! call write_bin_array(mesh%zbar_n, unit, iostat, iomsg) +! call write_bin_array(mesh%Z_n, unit, iostat, iomsg) call write_bin_array(mesh%zbar_n_bot, unit, iostat, iomsg) call write_bin_array(mesh%zbar_e_bot, unit, iostat, iomsg) call write_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) @@ -316,8 +316,8 @@ subroutine read_t_mesh(mesh, unit, iostat, iomsg) call read_bin_array(mesh%dhe, unit, iostat, iomsg) call read_bin_array(mesh%hbar, unit, iostat, iomsg) call read_bin_array(mesh%hbar_old, unit, iostat, iomsg) - call read_bin_array(mesh%zbar_n, unit, iostat, iomsg) - call read_bin_array(mesh%Z_n, unit, iostat, iomsg) +! call read_bin_array(mesh%zbar_n, unit, iostat, iomsg) +! call read_bin_array(mesh%Z_n, unit, iostat, iomsg) call read_bin_array(mesh%zbar_n_bot, unit, iostat, iomsg) call read_bin_array(mesh%zbar_e_bot, unit, iostat, iomsg) call read_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index 8e8247830..242ee483d 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -28,7 +28,7 @@ MODULE MOD_TRACER TYPE T_TRACER_WORK !auxuary arrays to work with tracers: real(kind=WP), allocatable :: del_ttf(:,:) -real(kind=WP), allocatable :: del_ttf_advhoriz(:,:),del_ttf_advvert(:,:) +real(kind=WP), allocatable :: del_ttf_advhoriz(:,:), del_ttf_advvert(:,:) !_______________________________________________________________________________ ! in case ldiag_DVD=.true. --> calculate discrete variance decay (DVD) real(kind=WP), allocatable :: tr_dvd_horiz(:,:,:), tr_dvd_vert(:,:,:) diff --git a/src/associate_mesh_ass.h b/src/associate_mesh_ass.h index 018f3e347..db5b26d39 100644 --- a/src/associate_mesh_ass.h +++ b/src/associate_mesh_ass.h @@ -60,8 +60,8 @@ bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_t dhe(1:myDim_elem2D) => mesh%dhe hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old -zbar_n(1:mesh%nl) => mesh%zbar_n -Z_n(1:mesh%nl-1) => mesh%Z_n +!zbar_n(1:mesh%nl) => mesh%zbar_n +!Z_n(1:mesh%nl-1) => mesh%Z_n zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf diff --git a/src/associate_mesh_def.h b/src/associate_mesh_def.h index cf146d70b..1410938ad 100644 --- a/src/associate_mesh_def.h +++ b/src/associate_mesh_def.h @@ -44,8 +44,8 @@ real(kind=WP), dimension(:) , pointer :: bottom_node_thickness real(kind=WP), dimension(:) , pointer :: dhe real(kind=WP), dimension(:) , pointer :: hbar real(kind=WP), dimension(:) , pointer :: hbar_old -real(kind=WP), dimension(:) , pointer :: zbar_n -real(kind=WP), dimension(:) , pointer :: Z_n +!real(kind=WP), dimension(:) , pointer :: zbar_n +!real(kind=WP), dimension(:) , pointer :: Z_n real(kind=WP), dimension(:) , pointer :: zbar_n_bot real(kind=WP), dimension(:) , pointer :: zbar_e_bot real(kind=WP), dimension(:) , pointer :: zbar_n_srf diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index f265ea898..b9d3d1807 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -107,7 +107,7 @@ module g_config real(kind=WP) :: cavity_partial_cell_thresh=0.0_WP ! same as partial_cell_tresh but for surface logical :: toy_ocean=.false. ! Ersatz forcing has to be supplied character(100) :: which_toy="soufflet" - logical :: flag_debug=.false. ! prints name of actual subroutine he is in + logical :: flag_debug=.true. ! prints name of actual subroutine he is in logical :: flag_warn_cflz=.true. ! switches off cflz warning namelist /run_config/ use_ice,use_floatice, use_sw_pene, use_cavity, & use_cavity_partial_cell, cavity_partial_cell_thresh, toy_ocean, which_toy, flag_debug, flag_warn_cflz diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 036afca1d..139d271f3 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -254,7 +254,9 @@ subroutine diag_energy(mode, dynamics, partit, mesh) real(kind=WP) :: ux, vx, uy, vy, tvol, rval(2) real(kind=WP) :: geo_grad_x(3), geo_grad_y(3), geo_u(3), geo_v(3) real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode - real(kind=WP), dimension(:,:), pointer :: Wvel + real(kind=WP), dimension(:,:), pointer :: Wvel + real(kind=WP) :: zbar_n(mesh%nl), Z_n(mesh%nl-1) + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index d8f3bea5e..1041a1607 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -102,6 +102,7 @@ subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) integer :: nz, n, nzmax, nzmin real(kind=WP) :: m, zinv, dt_inv, dz real(kind=WP) :: c1, v_adv diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index a1b0a0a9b..489047b42 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -182,6 +182,7 @@ subroutine init_ale(dynamics, partit, mesh) type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -206,12 +207,9 @@ subroutine init_ale(dynamics, partit, mesh) ! of the ssh operator. allocate(mesh%dhe(myDim_elem2D)) - ! zbar_n: depth of layers due to ale thinkness variactions at ervery node n - allocate(mesh%zbar_n(nl)) allocate(mesh%zbar_3d_n(nl,myDim_nod2D+eDim_nod2D)) ! Z_n: mid depth of layers due to ale thinkness variactions at ervery node n - allocate(mesh%Z_n(nl-1)) allocate(mesh%Z_3d_n(nl-1,myDim_nod2D+eDim_nod2D)) ! bottom_elem_tickness: changed bottom layer thinkness due to partial cells @@ -237,8 +235,6 @@ subroutine init_ale(dynamics, partit, mesh) dhe(1:myDim_elem2D) => mesh%dhe hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old - zbar_n(1:mesh%nl) => mesh%zbar_n - Z_n(1:mesh%nl-1) => mesh%Z_n zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf @@ -447,6 +443,7 @@ subroutine init_bottom_node_thickness(partit, mesh) real(kind=WP) :: hnbot, tvol type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -2535,10 +2532,11 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics -real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) -real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) -integer :: nz, elem, nzmax, nzmin, elnodes(3) -real(kind=WP) :: zinv, m, friction, wu, wd +real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) +real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) +integer :: nz, elem, nzmax, nzmin, elnodes(3) +real(kind=WP) :: zinv, m, friction, wu, wd +real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs real(kind=WP), dimension(:,:) , pointer :: Wvel_i #include "associate_part_def.h" diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 5f3284cdf..b0ee29d27 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -611,6 +611,7 @@ subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) dZn, dZn_i, dh, dval, mean_e_rho,dZn_rho_grad(2) real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -780,6 +781,7 @@ subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) integer :: elem, elnodes(3), nle, ule, nlz, idx(3),ni real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1044,6 +1046,7 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -1401,7 +1404,8 @@ subroutine pressure_force_4_linfs_cubicspline(partit, mesh) real(kind=WP) :: interp_n_dens(3) integer :: s_ind(4) real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, s_dup, s_dlo - real(kind=WP) :: a, b, c, d, dz + real(kind=WP) :: a, b, c, d, dz + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1601,6 +1605,7 @@ subroutine pressure_force_4_linfs_cavity(partit, mesh) integer :: elem, elnodes(3), nle, ule, nlz, idx(3), ni real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -1858,6 +1863,7 @@ subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) integer :: s_ind(4) real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, aux(2), s_dup, s_dlo real(kind=WP) :: a, b, c, d, dz, rho_n(3), rhograd_e(2), p_grad(2) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -2043,6 +2049,7 @@ subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) real(kind=WP) :: int_dp_dx(2), drho_dx, drho_dy, drho_dz(3), dz_dx, dz_dy, aux_sum real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3) real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -2292,6 +2299,7 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_part_def.h" #include "associate_mesh_def.h" diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 28c84ae16..37fa87f35 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1,11 +1,10 @@ module diff_part_hor_redi_interface interface - subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) + subroutine diff_part_hor_redi(tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -29,12 +28,11 @@ subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) module diff_ver_part_redi_expl_interface interface - subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) + subroutine diff_ver_part_redi_expl(tracer, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer - integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracer type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -305,16 +303,17 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) ! write there also horizontal diffusion rhs to del_ttf which is equal the R_T^n ! in danilovs srcipt ! includes Redi diffusivity if Redi=.true. - call diff_part_hor_redi(tr_num, tracers, partit, mesh) ! seems to be ~9% faster than diff_part_hor + call diff_part_hor_redi(tracers, partit, mesh) ! seems to be ~9% faster than diff_part_hor !___________________________________________________________________________ ! do vertical diffusion: explicit if (.not. tracers%i_vert_diff) call diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) ! A projection of horizontal Redi diffussivity onto vertical. This par contains horizontal ! derivatives and has to be computed explicitly! - if (Redi) call diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) + if (Redi) call diff_ver_part_redi_expl(tracers, partit, mesh) !___________________________________________________________________________ ! Update tracers --> calculate T* see Danilov et al. (2017) ! T* = (dt*R_T^n + h^(n-0.5)*T^(n-0.5))/h^(n+0.5) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nzmin, nzmax) do n=1, myDim_nod2D nzmax=nlevels_nod2D(n)-1 nzmin=ulevels_nod2D(n) @@ -324,10 +323,10 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) del_ttf(nzmin:nzmax,n)/hnode_new(nzmin:nzmax,n) ! WHY NOT ??? --> whats advantage of above --> tested it --> the upper ! equation has a 30% smaller nummerical drift - !tr_arr(1:nzmax,n,tr_num)=(hnode(1:nzmax,n)*tr_arr(1:nzmax,n,tr_num)+ & - ! del_ttf(1:nzmax,n))/hnode_new(1:nzmax,n) + ! tr_arr(1:nzmax,n,tr_num)=(hnode(1:nzmax,n)*tr_arr(1:nzmax,n,tr_num)+ & + ! del_ttf(1:nzmax,n))/hnode_new(1:nzmax,n) end do - +!$OMP END PARALLEL DO !___________________________________________________________________________ if (tracers%i_vert_diff) then ! do vertical diffusion: implicite @@ -358,11 +357,10 @@ subroutine diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + integer :: n, nz, nl1, ul1 real(kind=WP) :: vd_flux(mesh%nl-1) - real(kind=WP) :: rdata,flux,rlx - integer :: nz,nl1,ul1,n - real(kind=WP) :: zinv1,Ty - + real(kind=WP) :: rdata, flux, rlx + real(kind=WP) :: zinv1 real(kind=WP), pointer :: del_ttf(:,:) #include "associate_part_def.h" @@ -371,9 +369,7 @@ subroutine diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) #include "associate_mesh_ass.h" del_ttf => tracers%work%del_ttf - - Ty = 0.0_WP - +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nl1, ul1, vd_flux, rdata, flux, rlx, zinv1) !___________________________________________________________________________ do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 @@ -390,35 +386,23 @@ subroutine diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) flux = 0._WP rdata = 0._WP rlx=0._WP - endif - + endif !_______________________________________________________________________ !Surface forcing - !!PS vd_flux(1)= flux - vd_flux(ul1)= flux - - !_______________________________________________________________________ - !!PS do nz=2,nl1 + vd_flux(ul1)= flux do nz=ul1+1,nl1 !___________________________________________________________________ - zinv1=1.0_WP/(Z_3d_n(nz-1,n)-Z_3d_n(nz,n)) - - !___________________________________________________________________ -! Ty= Kd(4,nz-1,n)*(Z_3d_n(nz-1,n)-zbar_3d_n(nz,n))*zinv1 *neutral_slope(3,nz-1,n)**2 + & -! Kd(4,nz,n)*(zbar_3d_n(nz,n)-Z_3d_n(nz,n))*zinv1 *neutral_slope(3,nz,n)**2 - - vd_flux(nz) = (Kv(nz,n)+Ty)*(tracers%data(tr_num)%values(nz-1,n)-tracers%data(tr_num)%values(nz,n))*zinv1*area(nz,n) - + zinv1=1.0_WP/(Z_3d_n(nz-1,n)-Z_3d_n(nz,n)) + vd_flux(nz) = Kv(nz,n)*(tracers%data(tr_num)%values(nz-1,n)-tracers%data(tr_num)%values(nz,n))*zinv1*area(nz,n) end do - !_______________________________________________________________________ - !!PS do nz=1,nl1-1 do nz=ul1,nl1-1 del_ttf(nz,n) = del_ttf(nz,n) + (vd_flux(nz) - vd_flux(nz+1))/(zbar_3d_n(nz,n)-zbar_3d_n(nz+1,n))*dt/areasvol(nz,n) end do del_ttf(nl1,n) = del_ttf(nl1,n) + (vd_flux(nl1)/(zbar_3d_n(nl1,n)-zbar_3d_n(nl1+1,n)))*dt/areasvol(nl1,n) end do ! --> do n=1, myDim_nod2D +!$OMP END PARALLEL DO end subroutine diff_ver_part_expl_ale ! ! @@ -455,7 +439,7 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) real(kind=WP), external :: TFrez ! Sea water freeze temperature. real(kind=WP) :: isredi=0._WP logical :: do_wimpl=.true. - + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) real(kind=WP), dimension(:,:), pointer :: trarr real(kind=WP), dimension(:,:), pointer :: Wvel_i @@ -894,7 +878,7 @@ end subroutine diff_ver_part_impl_ale ! ! !=============================================================================== -subroutine diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) +subroutine diff_ver_part_redi_expl(tracers, partit, mesh) use o_ARRAYS use MOD_MESH USE MOD_PARTIT @@ -904,14 +888,14 @@ subroutine diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) use g_config use g_comm_auto IMPLICIT NONE - integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - integer :: elem,k - integer :: n2,nl1,ul1,nl2,nz,n - real(kind=WP) :: Tx, Ty - real(kind=WP) :: tr_xynodes(2,mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D), vd_flux(mesh%nl) + integer :: n, k, elem, nz + integer :: n2, nl1, ul1, nl2 + real(kind=WP) :: Tx, Ty, vd_flux(mesh%nl) + real(kind=WP) :: tr_xynodes(2,mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) real(kind=WP), pointer :: del_ttf(:,:) #include "associate_part_def.h" @@ -921,6 +905,8 @@ subroutine diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) del_ttf => tracers%work%del_ttf +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, k, elem, nz, n2, nl1, ul1, nl2, Tx, Ty, vd_flux, zbar_n, z_n) +!$OMP DO do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 ul1=ulevels_nod2D(n) @@ -941,45 +927,43 @@ subroutine diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) end do end do - ! call exchange_nod_begin(tr_xynodes) !NR the halo is not needed - +!$OMP END DO + ! no halo exchange of tr_xynodes is needed ! +!$OMP DO do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 ul1=ulevels_nod2D(n) vd_flux=0._WP !_______________________________________________________________________ - zbar_n=0.0_WP - Z_n =0.0_WP -! zbar_n(nl1+1)=zbar(nl1+1) + zbar_n(1:mesh%nl )=0.0_WP + z_n (1:mesh%nl-1)=0.0_WP zbar_n(nl1+1)=zbar_n_bot(n) - Z_n(nl1)=zbar_n(nl1+1) + hnode_new(nl1,n)/2.0_WP - !!PS do nz=nl1, 2, -1 + z_n(nl1)=zbar_n(nl1+1) + hnode_new(nl1,n)/2.0_WP do nz=nl1, ul1+1, -1 zbar_n(nz) = zbar_n(nz+1) + hnode_new(nz,n) - Z_n(nz-1) = zbar_n(nz) + hnode_new(nz-1,n)/2.0_WP + z_n(nz-1) = zbar_n(nz) + hnode_new(nz-1,n)/2.0_WP end do - !!PS zbar_n(1) = zbar_n(2) + hnode_new(1,n) - zbar_n(ul1) = zbar_n(ul1+1) + hnode_new(ul1,n) + zbar_n(ul1) = zbar_n(ul1+1) + hnode_new(ul1,n) !_______________________________________________________________________ - !!PS do nz=2,nl1 do nz=ul1+1,nl1 - vd_flux(nz)=(Z_n(nz-1)-zbar_n(nz))*(slope_tapered(1,nz-1,n)*tr_xynodes(1,nz-1,n)+slope_tapered(2,nz-1,n)*tr_xynodes(2,nz-1,n))*Ki(nz-1,n) + vd_flux(nz)=(z_n(nz-1)-zbar_n(nz))*(slope_tapered(1,nz-1,n)*tr_xynodes(1,nz-1,n)+slope_tapered(2,nz-1,n)*tr_xynodes(2,nz-1,n))*Ki(nz-1,n) vd_flux(nz)=vd_flux(nz)+& - (zbar_n(nz)-Z_n(nz)) *(slope_tapered(1,nz,n) *tr_xynodes(1,nz,n) +slope_tapered(2,nz,n) *tr_xynodes(2,nz,n)) *Ki(nz,n) - vd_flux(nz)=vd_flux(nz)/(Z_n(nz-1)-Z_n(nz))*area(nz,n) + (zbar_n(nz)-z_n(nz)) *(slope_tapered(1,nz,n) *tr_xynodes(1,nz,n) +slope_tapered(2,nz,n) *tr_xynodes(2,nz,n)) *Ki(nz,n) + vd_flux(nz)=vd_flux(nz)/(z_n(nz-1)-z_n(nz))*area(nz,n) enddo - !!PS do nz=1,nl1 do nz=ul1,nl1 del_ttf(nz,n) = del_ttf(nz,n)+(vd_flux(nz) - vd_flux(nz+1))*dt/areasvol(nz,n) enddo end do +!$OMP END DO +!$OMP END PARALLEL end subroutine diff_ver_part_redi_expl ! ! !=============================================================================== -subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) +subroutine diff_part_hor_redi(tracers, partit, mesh) use o_ARRAYS use MOD_MESH USE MOD_PARTIT @@ -988,14 +972,13 @@ subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) use o_param use g_config IMPLICIT NONE - integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracers type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - real(kind=WP) :: deltaX1,deltaY1,deltaX2,deltaY2 integer :: edge - integer :: n2,nl1,ul1,nl2,ul2,nl12,ul12,nz,el(2),elnodes(3),n,enodes(2) - real(kind=WP) :: c, Fx, Fy,Tx, Ty, Tx_z, Ty_z, SxTz, SyTz, Tz(2) + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 + integer :: nl1, ul1, nl2, ul2, nl12, ul12, nz, el(2), elnodes(3), enodes(2) + real(kind=WP) :: c, Fx, Fy, Tx, Ty, Tx_z, Ty_z, SxTz, SyTz, Tz(2) real(kind=WP) :: rhs1(mesh%nl-1), rhs2(mesh%nl-1), Kh, dz real(kind=WP) :: isredi=0._WP real(kind=WP), pointer :: del_ttf(:,:) @@ -1006,8 +989,12 @@ subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) #include "associate_mesh_ass.h" del_ttf => tracers%work%del_ttf - if (Redi) isredi=1._WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, & +!$OMP nl1, ul1, nl2, ul2, nl12, ul12, nz, el, elnodes, enodes, & +!$OMP c, Fx, Fy, Tx, Ty, Tx_z, Ty_z, SxTz, SyTz, Tz, & +!$OMP rhs1, rhs2, Kh, dz) +!$OMP DO do edge=1, myDim_edge2D rhs1=0.0_WP rhs2=0.0_WP @@ -1019,25 +1006,21 @@ subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) nl1=nlevels(el(1))-1 ul1=ulevels(el(1)) elnodes=elem2d_nodes(:,el(1)) - !Kh=elem_area(el(1)) !_______________________________________________________________________ nl2=0 ul2=0 if (el(2)>0) then - !Kh=0.5_WP*(Kh+elem_area(el(2))) nl2=nlevels(el(2))-1 ul2=ulevels(el(2)) deltaX2=edge_cross_dxdy(3,edge) deltaY2=edge_cross_dxdy(4,edge) endif - !Kh=K_hor*Kh/scale_area !_______________________________________________________________________ nl12=min(nl1,nl2) ul12=max(ul1,ul2) - !_______________________________________________________________________ ! (A) - do nz=ul1,ul12-1 + do nz=ul1, ul12-1 Kh=sum(Ki(nz, enodes))/2.0_WP dz=helem(nz, el(1)) Tz=0.5_WP*(tr_z(nz,enodes)+tr_z(nz+1,enodes)) @@ -1051,7 +1034,6 @@ subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) rhs1(nz) = rhs1(nz) + c rhs2(nz) = rhs2(nz) - c end do - !_______________________________________________________________________ ! (B) if (ul2>0) then @@ -1070,11 +1052,9 @@ subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) rhs2(nz) = rhs2(nz) - c end do end if - !_______________________________________________________________________ ! (C) - !!PS do nz=1,nl12 - do nz=ul12,nl12 + do nz=ul12, nl12 Kh=sum(Ki(nz, enodes))/2.0_WP dz=sum(helem(nz, el))/2.0_WP Tz=0.5_WP*(tr_z(nz,enodes)+tr_z(nz+1,enodes)) @@ -1088,10 +1068,9 @@ subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) rhs1(nz) = rhs1(nz) + c rhs2(nz) = rhs2(nz) - c enddo - !_______________________________________________________________________ ! (D) - do nz=nl12+1,nl1 + do nz=nl12+1, nl1 Kh=sum(Ki(nz, enodes))/2.0_WP dz=helem(nz, el(1)) Tz=0.5_WP*(tr_z(nz,enodes)+tr_z(nz+1,enodes)) @@ -1105,10 +1084,9 @@ subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) rhs1(nz) = rhs1(nz) + c rhs2(nz) = rhs2(nz) - c end do - !_______________________________________________________________________ ! (E) - do nz=nl12+1,nl2 + do nz=nl12+1, nl2 Kh=sum(Ki(nz, enodes))/2.0_WP dz=helem(nz, el(2)) Tz=0.5_WP*(tr_z(nz,enodes)+tr_z(nz+1,enodes)) @@ -1122,17 +1100,25 @@ subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) rhs1(nz) = rhs1(nz) + c rhs2(nz) = rhs2(nz) - c end do - !_______________________________________________________________________ nl12=max(nl1,nl2) ul12 = ul1 if (ul2>0) ul12=min(ul1,ul2) - !!PS del_ttf(1:nl12,enodes(1))=del_ttf(1:nl12,enodes(1))+rhs1(1:nl12)*dt/area(1:nl12,enodes(1)) - !!PS del_ttf(1:nl12,enodes(2))=del_ttf(1:nl12,enodes(2))+rhs2(1:nl12)*dt/area(1:nl12,enodes(2)) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) +#endif del_ttf(ul12:nl12,enodes(1))=del_ttf(ul12:nl12,enodes(1))+rhs1(ul12:nl12)*dt/areasvol(ul12:nl12,enodes(1)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) +#endif del_ttf(ul12:nl12,enodes(2))=del_ttf(ul12:nl12,enodes(2))+rhs2(ul12:nl12)*dt/areasvol(ul12:nl12,enodes(2)) - +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif end do +!$OMP END DO +!$OMP END PARALLEL end subroutine diff_part_hor_redi ! ! diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index 7db79c91e..db898cc26 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -52,6 +52,8 @@ subroutine fer_solve_Gamma(partit, mesh) real(kind=WP) :: zinv1,zinv2, zinv, m, r real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(2,mesh%nl) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) + real(kind=WP), dimension(:,:), pointer :: tr #include "associate_part_def.h" diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 1a0c078a0..22790d9c9 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -196,7 +196,6 @@ MODULE o_ARRAYS integer, allocatable :: MLD1_ind(:), MLD2_ind(:) real(kind=WP), allocatable :: ssh_gp(:) !Tracer gradients&RHS -real(kind=WP), allocatable :: ttrhs(:,:) real(kind=WP), allocatable :: tr_xy(:,:,:) real(kind=WP), allocatable :: tr_z(:,:) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 8309a1ca6..dcc307727 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -564,7 +564,6 @@ SUBROUTINE arrays_init(num_tracers, partit, mesh) end if ! tracer gradients & RHS -allocate(ttrhs(nl-1,node_size)) allocate(tr_xy(2,nl-1,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) allocate(tr_z(nl,myDim_nod2D+eDim_nod2D)) diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index da2d7baa7..f374ca5d2 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -24,15 +24,16 @@ SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers integer :: n,nz - +!$OMP PARALLEL DO do n=1, partit%myDim_nod2D+partit%eDim_nod2D ! del_ttf will contain all advection / diffusion contributions for this tracer. Set it to 0 at the beginning! tracers%work%del_ttf (:, n) = 0.0_WP tracers%work%del_ttf_advhoriz (:, n) = 0.0_WP tracers%work%del_ttf_advvert (:, n) = 0.0_WP ! AB interpolation - tracers%data(tr_num)%valuesAB(:, n)=-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:, n)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:, n) + tracers%data(tr_num)%valuesAB(:, n) =-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:, n)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:, n) end do +!$OMP END PARALLEL DO if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' call tracer_gradient_elements(tracers%data(tr_num)%valuesAB, partit, mesh) @@ -41,6 +42,8 @@ SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' call tracer_gradient_z(tracers%data(tr_num)%values, partit, mesh) !WHY NOT AB HERE? DSIDOREN! call exchange_elem_end(partit) ! tr_xy used in fill_up_dn_grad +!$OMP BARRIER + call exchange_nod_begin(tr_z, partit) ! not used in fill_up_dn_grad if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call fill_up_dn_grad'//achar(27)//'[0m' @@ -69,13 +72,13 @@ SUBROUTINE tracer_gradient_elements(ttf, partit, mesh) type(t_partit), intent(inout), target :: partit real(kind=WP) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) integer :: elem, elnodes(3) - integer :: n, nz, nzmin, nzmax + integer :: nz, nzmin, nzmax #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nz, nzmin, nzmax) DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) nzmin = ulevels(elem) @@ -86,6 +89,7 @@ SUBROUTINE tracer_gradient_elements(ttf, partit, mesh) tr_xy(2,nz, elem)=sum(gradient_sca(4:6,elem)*ttf(nz,elnodes)) END DO END DO +!$OMP END PARALLEL DO END SUBROUTINE tracer_gradient_elements ! ! @@ -110,7 +114,7 @@ SUBROUTINE tracer_gradient_z(ttf, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, dz) DO n=1, myDim_nod2D+eDim_nod2D !!PS nlev=nlevels_nod2D(n) nzmax=nlevels_nod2D(n) @@ -125,6 +129,7 @@ SUBROUTINE tracer_gradient_z(ttf, partit, mesh) tr_z(nzmin, n)=0.0_WP tr_z(nzmax, n)=0.0_WP END DO +!$OMP END PARALLEL DO END SUBROUTINE tracer_gradient_z ! ! @@ -142,7 +147,7 @@ SUBROUTINE relax_to_clim(tr_num, tracers, partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers - integer :: n,nz, nzmin, nzmax + integer :: n, nzmin, nzmax real(kind=WP), dimension(:,:), pointer :: trarr #include "associate_part_def.h" @@ -152,6 +157,7 @@ SUBROUTINE relax_to_clim(tr_num, tracers, partit, mesh) trarr=>tracers%data(tr_num)%values(:,:) if ((clim_relax>1.0e-8_WP).and.(tracers%data(tr_num)%ID==1)) then +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nzmin, nzmax) DO n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) @@ -160,14 +166,17 @@ SUBROUTINE relax_to_clim(tr_num, tracers, partit, mesh) trarr(nzmin:nzmax-1,n)=trarr(nzmin:nzmax-1,n)+& relax2clim(n)*dt*(Tclim(nzmin:nzmax-1,n)-trarr(nzmin:nzmax-1,n)) END DO +!$OMP END PARALLEL DO END if if ((clim_relax>1.0e-8_WP).and.(tracers%data(tr_num)%ID==2)) then +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nzmin, nzmax) DO n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) trarr(nzmin:nzmax-1,n)=trarr(nzmin:nzmax-1,n)+& relax2clim(n)*dt*(Sclim(nzmin:nzmax-1,n)-trarr(nzmin:nzmax-1,n)) END DO +!$OMP END PARALLEL DO END IF END SUBROUTINE relax_to_clim END MODULE o_tracers diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 1b84011ab..d1c2b4bfd 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -437,7 +437,6 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) write(*,*) 'nz = ',nz write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad - write(*,*) 'z=', Z_n(nz) write(*,*) 'temp(nz, n) = ',tracers%data(1)%values(nz, n) write(*,*) 'temp(: , n) = ',tracers%data(1)%values(:, n) write(*,*) 'temp_old(nz,n)= ',tracers%data(1)%valuesAB(nz, n) @@ -491,7 +490,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) write(*,*) 'nz = ',nz write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad - write(*,*) 'z=', Z_n(nz) +! write(*,*) 'z=', Z_n(nz) write(*,*) 'salt(nz, n) = ',tracers%data(2)%values(nz, n) write(*,*) 'salt(: , n) = ',tracers%data(2)%values(:, n) write(*,*) From 340ab3e5212f097f54d39b109f96ee90f0ea6c7b Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 10 Nov 2021 15:27:00 +0100 Subject: [PATCH 536/909] removed pointers to mesh%zbar_n and mesh%z_n from icepack. zbar_n & z_n shall be defined locally! --- src/icepack_drivers/associate_mesh.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/icepack_drivers/associate_mesh.h b/src/icepack_drivers/associate_mesh.h index 3d1b2edb8..b2746d32f 100644 --- a/src/icepack_drivers/associate_mesh.h +++ b/src/icepack_drivers/associate_mesh.h @@ -48,8 +48,6 @@ real(kind=WP), dimension(:) , pointer :: bottom_node_thickness real(kind=WP), dimension(:) , pointer :: dhe real(kind=WP), dimension(:) , pointer :: hbar real(kind=WP), dimension(:) , pointer :: hbar_old -real(kind=WP), dimension(:) , pointer :: zbar_n -real(kind=WP), dimension(:) , pointer :: Z_n real(kind=WP), dimension(:) , pointer :: zbar_n_bot real(kind=WP), dimension(:) , pointer :: zbar_e_bot real(kind=WP), dimension(:) , pointer :: zbar_n_srf From 559a2122f3d5417d62d0fdbae2f2a4202a70ce88 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 10 Nov 2021 15:36:37 +0100 Subject: [PATCH 537/909] fixing icepack compilation --- src/icepack_drivers/associate_mesh.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/icepack_drivers/associate_mesh.h b/src/icepack_drivers/associate_mesh.h index b2746d32f..c9c789c7a 100644 --- a/src/icepack_drivers/associate_mesh.h +++ b/src/icepack_drivers/associate_mesh.h @@ -126,8 +126,6 @@ bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_t dhe(1:myDim_elem2D) => mesh%dhe hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old -zbar_n(1:mesh%nl) => mesh%zbar_n -Z_n(1:mesh%nl-1) => mesh%Z_n zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf From 908a955235c56ad60121b35ed105098f35897049 Mon Sep 17 00:00:00 2001 From: patrickscholz Date: Wed, 10 Nov 2021 18:25:35 +0100 Subject: [PATCH 538/909] Refactoring part4 checkdyn (#219) * just clean up oce_dyn.F90 a bit * just do some cleanup * just do some cleanup * just do some cleanup * just do some cleanup --- src/oce_ale.F90 | 821 +++++++++++++++++----------------- src/oce_ale_tracer.F90 | 287 ++++++------ src/oce_ale_vel_rhs.F90 | 47 +- src/oce_dyn.F90 | 103 +++-- src/oce_setup_step.F90 | 945 ++++++++++++++++++++-------------------- 5 files changed, 1133 insertions(+), 1070 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index a1b0a0a9b..1e0ed2ffc 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1,148 +1,148 @@ module oce_ale_interfaces - interface - subroutine init_bottom_elem_thickness(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine + interface + subroutine init_bottom_elem_thickness(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine - subroutine init_bottom_node_thickness(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - - subroutine init_surface_elem_depth(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine + subroutine init_bottom_node_thickness(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine init_surface_elem_depth(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine - subroutine init_surface_node_depth(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine + subroutine init_surface_node_depth(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine - subroutine impl_vert_visc_ale(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics - end subroutine + subroutine impl_vert_visc_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine - subroutine update_stiff_mat_ale(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine + subroutine update_stiff_mat_ale(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine - subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics - end subroutine + subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine - subroutine solve_ssh_ale(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics - end subroutine + subroutine solve_ssh_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine - subroutine compute_hbar_ale(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics - end subroutine + subroutine compute_hbar_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine - subroutine vert_vel_ale(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics - end subroutine + subroutine vert_vel_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine - subroutine update_thickness_ale(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface + subroutine update_thickness_ale(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + end interface end module module init_ale_interface - interface - subroutine init_ale(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - end subroutine - end interface + interface + subroutine init_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + end interface end module module init_thickness_ale_interface - interface - subroutine init_thickness_ale(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - end subroutine - end interface + interface + subroutine init_thickness_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + end interface end module module oce_timestep_ale_interface - interface - subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use MOD_DYN - integer, intent(in) :: n - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(inout), target :: dynamics - end subroutine - end interface + interface + subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + integer , intent(in) :: n + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + end interface end module ! CONTENT: ! ------------ @@ -177,15 +177,18 @@ subroutine init_ale(dynamics, partit, mesh) USE g_forcing_param, only: use_virt_salt use oce_ale_interfaces Implicit NONE - - integer :: n, nzmax, nzmin, elnodes(3), elem - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: n, nzmax, nzmin, elnodes(3), elem + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + !___allocate________________________________________________________________ ! hnode and hnode_new: layer thicknesses at nodes. allocate(mesh%hnode(1:nl-1, myDim_nod2D+eDim_nod2D)) @@ -318,11 +321,13 @@ subroutine init_bottom_elem_thickness(partit, mesh) use g_comm_auto use g_support implicit none - + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: elem, elnodes(3), nle real(kind=WP) :: dd - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -441,12 +446,14 @@ subroutine init_bottom_node_thickness(partit, mesh) use g_comm_auto use g_support implicit none - + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: node, nln, elem, elemi, nelem real(kind=WP) :: dd real(kind=WP) :: hnbot, tvol - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -552,12 +559,13 @@ subroutine init_surface_elem_depth(partit, mesh) use g_comm_auto use g_support implicit none - + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: elem, elnodes(3), ule real(kind=WP) :: dd - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -630,17 +638,19 @@ subroutine init_surface_node_depth(partit, mesh) use g_comm_auto use g_support implicit none - + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: node, uln, nelem, elemi real(kind=WP) :: dd - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + !___________________________________________________________________________ if (use_cavity) then !___________________________________________________________________________ ! If we use partial cells and cavity, the thickness of surface cell is adjusted. @@ -693,11 +703,14 @@ subroutine init_thickness_ale(dynamics, partit, mesh) USE MOD_PARSUP USE MOD_DYN implicit none - integer :: n, nz, elem, elnodes(3), nzmin, nzmax - real(kind=WP) :: dd - type(t_mesh), intent(inout), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: n, nz, elem, elnodes(3), nzmin, nzmax + real(kind=WP) :: dd + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: ssh_rhs_old, eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -706,6 +719,8 @@ subroutine init_thickness_ale(dynamics, partit, mesh) ssh_rhs_old=>dynamics%ssh_rhs_old(:) eta_n =>dynamics%eta_n(:) + !___________________________________________________________________________ + if(mype==0) then write(*,*) '____________________________________________________________' write(*,*) ' --> initialise ALE layerthicknesses, depth levels and middepth levels' @@ -946,11 +961,13 @@ subroutine update_thickness_ale(partit, mesh) use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode implicit none + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: n, nz, elem, elnodes(3),nzmax, nzmin integer , dimension(:), allocatable :: idx - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1148,16 +1165,19 @@ subroutine restart_thickness_ale(partit, mesh) use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode implicit none + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: n, nz, elem, elnodes(3), nzmax, nzmin, lcl_lzstar_lev integer , dimension(:), allocatable :: idx - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + !___________________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' write(*,*) ' --> restart ALE layerthicknesses, depth levels and middepth levels' @@ -1253,7 +1273,9 @@ subroutine init_stiff_mat_ale(partit, mesh) USE MOD_PARSUP use g_CONFIG implicit none - + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + !___________________________________________________________________________ integer :: n, n1, n2, i, j, row, ed, fileID integer :: elnodes(3), el(2) integer :: npos(3), offset, nini, nend @@ -1264,13 +1286,14 @@ subroutine init_stiff_mat_ale(partit, mesh) character(MAX_PATH) :: dist_mesh_dir, file_name real(kind=WP) :: t0, t1 integer :: ierror ! MPI, return error code - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + !___________________________________________________________________________ t0=MPI_Wtime() if (mype==0) then write(*,*) '____________________________________________________________' @@ -1541,17 +1564,18 @@ subroutine update_stiff_mat_ale(partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use o_ARRAYS - ! implicit none + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: n, i, j, row, ed,n2 integer :: enodes(2), elnodes(3), el(2) integer :: elem, npos(3), offset, nini, nend real(kind=WP) :: factor real(kind=WP) :: fx(3), fy(3) integer, allocatable :: n_num(:) - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1645,6 +1669,9 @@ end subroutine update_stiff_mat_ale !"FESOM2: from finite elements to finite volumes" ! ! ssh_rhs = alpha * grad[ int_hbot^hbar(n+0.5)( u^n+deltau)dz + W(n+0.5) ] +! In the semiimplicit method: +! ssh_rhs=-alpha*\nabla\int(U_n+U_rhs)dz-(1-alpha)*... +! see "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (11) rhs subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use g_config,only: which_ALE,dt use MOD_MESH @@ -1655,30 +1682,29 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) USE MOD_DYN use g_comm_auto implicit none - - ! In the semiimplicit method: - ! ssh_rhs=-alpha*\nabla\int(U_n+U_rhs)dz-(1-alpha)*... - ! see "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (11) rhs + type(t_mesh) , intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + !___________________________________________________________________________ integer :: ed, el(2), enodes(2), nz, n, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 real(kind=WP) :: dumc1_1, dumc1_2, dumc2_1, dumc2_2 !!PS - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn), intent(inout), target :: dynamics + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs - real(kind=WP), dimension(:), pointer :: ssh_rhs, ssh_rhs_old + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) - UV_rhs=>dynamics%uv_rhs(:,:,:) - ssh_rhs=>dynamics%ssh_rhs(:) - ssh_rhs_old=>dynamics%ssh_rhs_old(:) + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + ssh_rhs => dynamics%ssh_rhs(:) + ssh_rhs_old=> dynamics%ssh_rhs_old(:) - ssh_rhs=0.0_WP !___________________________________________________________________________ ! loop over local edges + ssh_rhs=0.0_WP do ed=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,ed) @@ -1764,6 +1790,11 @@ end subroutine compute_ssh_rhs_ale ! hbar(n+0.5) = hbar(n-0.5) - tau*ssh_rhs_old ! ! in S. Danilov et al.: "FESOM2: from finite elements to finite volumes" +! +! see "FESOM2: from finite elements to finte volumes, S. Danilov..." +! hbar(n+1)-hbar(n)=tau*ssh_rhs_old +! ssh_rhs_old=-\nabla\int(U_n)dz-water_flux*area (if free surface) +! Find new elevation hbar subroutine compute_hbar_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, use_cavity use MOD_MESH @@ -1773,29 +1804,24 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) USE MOD_PARSUP USE MOD_DYN use g_comm_auto - implicit none - - ! see "FESOM2: from finite elements to finte volumes, S. Danilov..." - ! hbar(n+1)-hbar(n)=tau*ssh_rhs_old - ! ssh_rhs_old=-\nabla\int(U_n)dz-water_flux*area (if free surface) - ! Find new elevation hbar - - integer :: ed, el(2), enodes(2), nz,n, elnodes(3), elem, nzmin, nzmax - real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: ed, el(2), enodes(2), nz,n, elnodes(3), elem, nzmin, nzmax + real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV - real(kind=WP), dimension(:), pointer :: ssh_rhs, ssh_rhs_old - + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) - ssh_rhs=>dynamics%ssh_rhs(:) - ssh_rhs_old=>dynamics%ssh_rhs_old(:) + UV => dynamics%uv(:,:,:) + ssh_rhs => dynamics%ssh_rhs(:) + ssh_rhs_old=> dynamics%ssh_rhs_old(:) !___________________________________________________________________________ ! compute the rhs @@ -1896,39 +1922,40 @@ subroutine vert_vel_ale(dynamics, partit, mesh) use i_arrays !!PS use g_forcing_arrays !!PS implicit none - + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + !___________________________________________________________________________ integer :: el(2), enodes(2), n, nz, ed, nzmin, nzmax, uln1, uln2, nln1, nln2 real(kind=WP) :: c1, c2, deltaX1, deltaY1, deltaX2, deltaY2, dd, dd1, dddt, cflmax - - !_______________________________ ! --> zlevel with local zstar real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int !PS real(kind=WP), dimension(:), allocatable :: max_dhbar2distr,cumsum_maxdhbar,distrib_dhbar integer , dimension(:), allocatable :: idx - type(t_dyn) , intent(inout), target :: dynamics - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z, fer_Wvel - real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old - real(kind=WP), dimension(:) , pointer :: eta_n, d_eta + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV =>dynamics%uv(:,:,:) - Wvel =>dynamics%w(:,:) - Wvel_e=>dynamics%w_e(:,:) - Wvel_i=>dynamics%w_i(:,:) - CFL_z =>dynamics%cfl_z(:,:) - ssh_rhs =>dynamics%ssh_rhs(:) - ssh_rhs_old =>dynamics%ssh_rhs_old(:) - eta_n =>dynamics%eta_n(:) - d_eta =>dynamics%d_eta(:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) + Wvel_e => dynamics%w_e(:,:) + Wvel_i => dynamics%w_i(:,:) + CFL_z => dynamics%cfl_z(:,:) + ssh_rhs => dynamics%ssh_rhs(:) + ssh_rhs_old => dynamics%ssh_rhs_old(:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) if (Fer_GM) then - fer_UV =>dynamics%fer_uv(:,:,:) - fer_Wvel =>dynamics%fer_w(:,:) + fer_UV => dynamics%fer_uv(:,:,:) + fer_Wvel=> dynamics%fer_w(:,:) end if + !___________________________________________________________________________ ! Contributions from levels in divergence Wvel=0.0_WP @@ -2434,15 +2461,15 @@ subroutine solve_ssh_ale(dynamics, partit, mesh) use iso_c_binding, only: C_INT, C_DOUBLE implicit none #include "fparms.h" + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ logical, save :: lfirst=.true. integer(kind=C_INT) :: n3, reuse, new_values integer :: n - type(t_mesh) , intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - real(kind=C_DOUBLE), pointer :: droptol, soltol - integer(kind=C_INT), pointer :: maxiter, restart, lutype, fillin, ident - + !___________________________________________________________________________ + ! interface for solver interface subroutine psolver_init(ident, SOL, PCGLOB, PCLOC, lutype, & fillin, droptol, maxiter, restart, soltol, & @@ -2461,7 +2488,10 @@ subroutine psolve(ident, ssh_rhs, values, d_eta, newvalues) bind(C) real(kind=C_DOUBLE) :: values(*), ssh_rhs(*), d_eta(*) end subroutine psolve end interface - + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=C_DOUBLE), pointer :: droptol, soltol + integer(kind=C_INT), pointer :: maxiter, restart, lutype, fillin, ident #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -2474,6 +2504,7 @@ end subroutine psolve droptol => dynamics%solverinfo%droptol soltol => dynamics%solverinfo%soltol + !___________________________________________________________________________ if (trim(which_ale)=='linfs') then reuse=0 new_values=0 @@ -2522,185 +2553,187 @@ end subroutine solve_ssh_ale ! !=============================================================================== subroutine impl_vert_visc_ale(dynamics, partit, mesh) -USE MOD_MESH -USE o_PARAM -USE o_ARRAYS, only: Av, stress_surf -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_DYN -USE g_CONFIG,only: dt -IMPLICIT NONE - -type(t_mesh), intent(inout), target :: mesh -type(t_partit), intent(inout), target :: partit -type(t_dyn) , intent(inout), target :: dynamics - -real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) -real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) -integer :: nz, elem, nzmax, nzmin, elnodes(3) -real(kind=WP) :: zinv, m, friction, wu, wd -real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs -real(kind=WP), dimension(:,:) , pointer :: Wvel_i + USE MOD_MESH + USE o_PARAM + USE o_ARRAYS, only: Av, stress_surf + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + USE g_CONFIG,only: dt + IMPLICIT NONE + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ + real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) + real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) + integer :: nz, elem, nzmax, nzmin, elnodes(3) + real(kind=WP) :: zinv, m, friction, wu, wd + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: Wvel_i #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -UV =>dynamics%uv(:,:,:) -UV_rhs =>dynamics%uv_rhs(:,:,:) -Wvel_i =>dynamics%w_i(:,:) + UV =>dynamics%uv(:,:,:) + UV_rhs =>dynamics%uv_rhs(:,:,:) + Wvel_i =>dynamics%w_i(:,:) -DO elem=1,myDim_elem2D - elnodes=elem2D_nodes(:,elem) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - - !___________________________________________________________________________ - ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because - ! they run over elements here - zbar_n=0.0_WP - Z_n =0.0_WP - ! in case of partial cells zbar_n(nzmax) is not any more at zbar(nzmax), - ! zbar_n(nzmax) is now zbar_e_bot(elem), - zbar_n(nzmax)=zbar_e_bot(elem) - Z_n(nzmax-1)=zbar_n(nzmax) + helem(nzmax-1,elem)/2.0_WP - !!PS do nz=nzmax-1,2,-1 - do nz=nzmax-1,nzmin+1,-1 - zbar_n(nz) = zbar_n(nz+1) + helem(nz,elem) - Z_n(nz-1) = zbar_n(nz) + helem(nz-1,elem)/2.0_WP - end do - !!PS zbar_n(1) = zbar_n(2) + helem(1,elem) - zbar_n(nzmin) = zbar_n(nzmin+1) + helem(nzmin,elem) - - !___________________________________________________________________________ - ! Operator - ! Regular part of coefficients: - !!PS do nz=2, nzmax-2 - do nz=nzmin+1, nzmax-2 - zinv=1.0_WP*dt/(zbar_n(nz)-zbar_n(nz+1)) - a(nz)=-Av(nz,elem)/(Z_n(nz-1)-Z_n(nz))*zinv - c(nz)=-Av(nz+1,elem)/(Z_n(nz)-Z_n(nz+1))*zinv - b(nz)=-a(nz)-c(nz)+1.0_WP + !___________________________________________________________________________ + DO elem=1,myDim_elem2D + elnodes=elem2D_nodes(:,elem) + nzmin = ulevels(elem) + nzmax = nlevels(elem) + + !___________________________________________________________________________ + ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because + ! they run over elements here + zbar_n=0.0_WP + Z_n =0.0_WP + ! in case of partial cells zbar_n(nzmax) is not any more at zbar(nzmax), + ! zbar_n(nzmax) is now zbar_e_bot(elem), + zbar_n(nzmax)=zbar_e_bot(elem) + Z_n(nzmax-1)=zbar_n(nzmax) + helem(nzmax-1,elem)/2.0_WP + !!PS do nz=nzmax-1,2,-1 + do nz=nzmax-1,nzmin+1,-1 + zbar_n(nz) = zbar_n(nz+1) + helem(nz,elem) + Z_n(nz-1) = zbar_n(nz) + helem(nz-1,elem)/2.0_WP + end do + !!PS zbar_n(1) = zbar_n(2) + helem(1,elem) + zbar_n(nzmin) = zbar_n(nzmin+1) + helem(nzmin,elem) + + !___________________________________________________________________________ + ! Operator + ! Regular part of coefficients: + !!PS do nz=2, nzmax-2 + do nz=nzmin+1, nzmax-2 + zinv=1.0_WP*dt/(zbar_n(nz)-zbar_n(nz+1)) + a(nz)=-Av(nz,elem)/(Z_n(nz-1)-Z_n(nz))*zinv + c(nz)=-Av(nz+1,elem)/(Z_n(nz)-Z_n(nz+1))*zinv + b(nz)=-a(nz)-c(nz)+1.0_WP + ! Update from the vertical advection + wu=sum(Wvel_i(nz, elnodes))/3._WP + wd=sum(Wvel_i(nz+1, elnodes))/3._WP + a(nz)=a(nz)+min(0._WP, wu)*zinv + b(nz)=b(nz)+max(0._WP, wu)*zinv + + b(nz)=b(nz)-min(0._WP, wd)*zinv + c(nz)=c(nz)-max(0._WP, wd)*zinv + + end do + ! The last row + zinv=1.0_WP*dt/(zbar_n(nzmax-1)-zbar_n(nzmax)) + a(nzmax-1)=-Av(nzmax-1,elem)/(Z_n(nzmax-2)-Z_n(nzmax-1))*zinv + b(nzmax-1)=-a(nzmax-1)+1.0_WP + c(nzmax-1)=0.0_WP + ! Update from the vertical advection - wu=sum(Wvel_i(nz, elnodes))/3._WP - wd=sum(Wvel_i(nz+1, elnodes))/3._WP - a(nz)=a(nz)+min(0._WP, wu)*zinv - b(nz)=b(nz)+max(0._WP, wu)*zinv + wu=sum(Wvel_i(nzmax-1, elnodes))/3._WP + a(nzmax-1)=a(nzmax-1)+min(0._WP, wu)*zinv + b(nzmax-1)=b(nzmax-1)+max(0._WP, wu)*zinv - b(nz)=b(nz)-min(0._WP, wd)*zinv - c(nz)=c(nz)-max(0._WP, wd)*zinv + ! The first row + !!PS zinv=1.0_WP*dt/(zbar_n(1)-zbar_n(2)) + !!PS c(1)=-Av(2,elem)/(Z_n(1)-Z_n(2))*zinv + !!PS a(1)=0.0_WP + !!PS b(1)=-c(1)+1.0_WP + zinv=1.0_WP*dt/(zbar_n(nzmin)-zbar_n(nzmin+1)) + c(nzmin)=-Av(nzmin+1,elem)/(Z_n(nzmin)-Z_n(nzmin+1))*zinv + a(nzmin)=0.0_WP + b(nzmin)=-c(nzmin)+1.0_WP - end do - ! The last row - zinv=1.0_WP*dt/(zbar_n(nzmax-1)-zbar_n(nzmax)) - a(nzmax-1)=-Av(nzmax-1,elem)/(Z_n(nzmax-2)-Z_n(nzmax-1))*zinv - b(nzmax-1)=-a(nzmax-1)+1.0_WP - c(nzmax-1)=0.0_WP - - ! Update from the vertical advection - wu=sum(Wvel_i(nzmax-1, elnodes))/3._WP - a(nzmax-1)=a(nzmax-1)+min(0._WP, wu)*zinv - b(nzmax-1)=b(nzmax-1)+max(0._WP, wu)*zinv - - ! The first row - !!PS zinv=1.0_WP*dt/(zbar_n(1)-zbar_n(2)) - !!PS c(1)=-Av(2,elem)/(Z_n(1)-Z_n(2))*zinv - !!PS a(1)=0.0_WP - !!PS b(1)=-c(1)+1.0_WP - zinv=1.0_WP*dt/(zbar_n(nzmin)-zbar_n(nzmin+1)) - c(nzmin)=-Av(nzmin+1,elem)/(Z_n(nzmin)-Z_n(nzmin+1))*zinv - a(nzmin)=0.0_WP - b(nzmin)=-c(nzmin)+1.0_WP - - ! Update from the vertical advection - !!PS wu=sum(Wvel_i(1, elnodes))/3._WP - !!PS wd=sum(Wvel_i(2, elnodes))/3._WP - wu=sum(Wvel_i(nzmin, elnodes))/3._WP - wd=sum(Wvel_i(nzmin+1, elnodes))/3._WP - - !!PS b(1)=b(1)+wu*zinv - !!PS b(1)=b(1)-min(0._WP, wd)*zinv - !!PS c(1)=c(1)-max(0._WP, wd)*zinv - b(nzmin)=b(nzmin)+wu*zinv - b(nzmin)=b(nzmin)-min(0._WP, wd)*zinv - c(nzmin)=c(nzmin)-max(0._WP, wd)*zinv - - ! =========================== - ! The rhs: - ! =========================== - !!PS ur(1:nzmax-1)=UV_rhs(1,1:nzmax-1,elem) - !!PS vr(1:nzmax-1)=UV_rhs(2,1:nzmax-1,elem) - ur(nzmin:nzmax-1)=UV_rhs(1,nzmin:nzmax-1,elem) - vr(nzmin:nzmax-1)=UV_rhs(2,nzmin:nzmax-1,elem) - - ! The first row contains surface forcing - !!PS ur(1)= ur(1)+zinv*stress_surf(1,elem)/density_0 - !!PS vr(1)= vr(1)+zinv*stress_surf(2,elem)/density_0 - ur(nzmin)= ur(nzmin)+zinv*stress_surf(1,elem)/density_0 - vr(nzmin)= vr(nzmin)+zinv*stress_surf(2,elem)/density_0 - - ! The last row contains bottom friction - zinv=1.0_WP*dt/(zbar_n(nzmax-1)-zbar_n(nzmax)) - !!PS friction=-C_d*sqrt(UV(1,nlevels(elem)-1,elem)**2+ & - !!PS UV(2,nlevels(elem)-1,elem)**2) - friction=-C_d*sqrt(UV(1,nzmax-1,elem)**2+ & - UV(2,nzmax-1,elem)**2) - ur(nzmax-1)=ur(nzmax-1)+zinv*friction*UV(1,nzmax-1,elem) - vr(nzmax-1)=vr(nzmax-1)+zinv*friction*UV(2,nzmax-1,elem) - - ! Model solves for the difference to the timestep N and therefore we need to - ! update the RHS for advective and diffusive contributions at the previous time step - !!PS do nz=2, nzmax-2 - do nz=nzmin+1, nzmax-2 - ur(nz)=ur(nz)-a(nz)*UV(1,nz-1,elem)-(b(nz)-1.0_WP)*UV(1,nz,elem)-c(nz)*UV(1,nz+1,elem) - vr(nz)=vr(nz)-a(nz)*UV(2,nz-1,elem)-(b(nz)-1.0_WP)*UV(2,nz,elem)-c(nz)*UV(2,nz+1,elem) - end do - !!PS ur(1)=ur(1)-(b(1)-1.0_WP)*UV(1,1,elem)-c(1)*UV(1,2,elem) - !!PS vr(1)=vr(1)-(b(1)-1.0_WP)*UV(2,1,elem)-c(1)*UV(2,2,elem) - ur(nzmin)=ur(nzmin)-(b(nzmin)-1.0_WP)*UV(1,nzmin,elem)-c(nzmin)*UV(1,nzmin+1,elem) - vr(nzmin)=vr(nzmin)-(b(nzmin)-1.0_WP)*UV(2,nzmin,elem)-c(nzmin)*UV(2,nzmin+1,elem) - - ur(nzmax-1)=ur(nzmax-1)-a(nzmax-1)*UV(1,nzmax-2,elem)-(b(nzmax-1)-1.0_WP)*UV(1,nzmax-1,elem) - vr(nzmax-1)=vr(nzmax-1)-a(nzmax-1)*UV(2,nzmax-2,elem)-(b(nzmax-1)-1.0_WP)*UV(2,nzmax-1,elem) - - ! =========================== - ! The sweep algorithm - ! =========================== - ! initialize c-prime and s,t-prime - !!PS cp(1) = c(1)/b(1) - !!PS up(1) = ur(1)/b(1) - !!PS vp(1) = vr(1)/b(1) - cp(nzmin) = c(nzmin)/b(nzmin) - up(nzmin) = ur(nzmin)/b(nzmin) - vp(nzmin) = vr(nzmin)/b(nzmin) - - ! solve for vectors c-prime and t, s-prime - !!PS do nz = 2,nzmax-1 - do nz = nzmin+1,nzmax-1 - m = b(nz)-cp(nz-1)*a(nz) - cp(nz) = c(nz)/m - up(nz) = (ur(nz)-up(nz-1)*a(nz))/m - vp(nz) = (vr(nz)-vp(nz-1)*a(nz))/m - enddo - ! initialize x - ur(nzmax-1) = up(nzmax-1) - vr(nzmax-1) = vp(nzmax-1) - - ! solve for x from the vectors c-prime and d-prime - !!PS do nz = nzmax-2, 1, -1 - do nz = nzmax-2, nzmin, -1 - ur(nz) = up(nz)-cp(nz)*ur(nz+1) - vr(nz) = vp(nz)-cp(nz)*vr(nz+1) - end do - - ! =========================== - ! RHS update - ! =========================== - !!PS do nz=1,nzmax-1 - do nz=nzmin,nzmax-1 - UV_rhs(1,nz,elem)=ur(nz) - UV_rhs(2,nz,elem)=vr(nz) - end do -end do !!! cycle over elements + ! Update from the vertical advection + !!PS wu=sum(Wvel_i(1, elnodes))/3._WP + !!PS wd=sum(Wvel_i(2, elnodes))/3._WP + wu=sum(Wvel_i(nzmin, elnodes))/3._WP + wd=sum(Wvel_i(nzmin+1, elnodes))/3._WP + + !!PS b(1)=b(1)+wu*zinv + !!PS b(1)=b(1)-min(0._WP, wd)*zinv + !!PS c(1)=c(1)-max(0._WP, wd)*zinv + b(nzmin)=b(nzmin)+wu*zinv + b(nzmin)=b(nzmin)-min(0._WP, wd)*zinv + c(nzmin)=c(nzmin)-max(0._WP, wd)*zinv + + ! =========================== + ! The rhs: + ! =========================== + !!PS ur(1:nzmax-1)=UV_rhs(1,1:nzmax-1,elem) + !!PS vr(1:nzmax-1)=UV_rhs(2,1:nzmax-1,elem) + ur(nzmin:nzmax-1)=UV_rhs(1,nzmin:nzmax-1,elem) + vr(nzmin:nzmax-1)=UV_rhs(2,nzmin:nzmax-1,elem) + + ! The first row contains surface forcing + !!PS ur(1)= ur(1)+zinv*stress_surf(1,elem)/density_0 + !!PS vr(1)= vr(1)+zinv*stress_surf(2,elem)/density_0 + ur(nzmin)= ur(nzmin)+zinv*stress_surf(1,elem)/density_0 + vr(nzmin)= vr(nzmin)+zinv*stress_surf(2,elem)/density_0 + + ! The last row contains bottom friction + zinv=1.0_WP*dt/(zbar_n(nzmax-1)-zbar_n(nzmax)) + !!PS friction=-C_d*sqrt(UV(1,nlevels(elem)-1,elem)**2+ & + !!PS UV(2,nlevels(elem)-1,elem)**2) + friction=-C_d*sqrt(UV(1,nzmax-1,elem)**2+ & + UV(2,nzmax-1,elem)**2) + ur(nzmax-1)=ur(nzmax-1)+zinv*friction*UV(1,nzmax-1,elem) + vr(nzmax-1)=vr(nzmax-1)+zinv*friction*UV(2,nzmax-1,elem) + + ! Model solves for the difference to the timestep N and therefore we need to + ! update the RHS for advective and diffusive contributions at the previous time step + !!PS do nz=2, nzmax-2 + do nz=nzmin+1, nzmax-2 + ur(nz)=ur(nz)-a(nz)*UV(1,nz-1,elem)-(b(nz)-1.0_WP)*UV(1,nz,elem)-c(nz)*UV(1,nz+1,elem) + vr(nz)=vr(nz)-a(nz)*UV(2,nz-1,elem)-(b(nz)-1.0_WP)*UV(2,nz,elem)-c(nz)*UV(2,nz+1,elem) + end do + !!PS ur(1)=ur(1)-(b(1)-1.0_WP)*UV(1,1,elem)-c(1)*UV(1,2,elem) + !!PS vr(1)=vr(1)-(b(1)-1.0_WP)*UV(2,1,elem)-c(1)*UV(2,2,elem) + ur(nzmin)=ur(nzmin)-(b(nzmin)-1.0_WP)*UV(1,nzmin,elem)-c(nzmin)*UV(1,nzmin+1,elem) + vr(nzmin)=vr(nzmin)-(b(nzmin)-1.0_WP)*UV(2,nzmin,elem)-c(nzmin)*UV(2,nzmin+1,elem) + + ur(nzmax-1)=ur(nzmax-1)-a(nzmax-1)*UV(1,nzmax-2,elem)-(b(nzmax-1)-1.0_WP)*UV(1,nzmax-1,elem) + vr(nzmax-1)=vr(nzmax-1)-a(nzmax-1)*UV(2,nzmax-2,elem)-(b(nzmax-1)-1.0_WP)*UV(2,nzmax-1,elem) + + ! =========================== + ! The sweep algorithm + ! =========================== + ! initialize c-prime and s,t-prime + !!PS cp(1) = c(1)/b(1) + !!PS up(1) = ur(1)/b(1) + !!PS vp(1) = vr(1)/b(1) + cp(nzmin) = c(nzmin)/b(nzmin) + up(nzmin) = ur(nzmin)/b(nzmin) + vp(nzmin) = vr(nzmin)/b(nzmin) + + ! solve for vectors c-prime and t, s-prime + !!PS do nz = 2,nzmax-1 + do nz = nzmin+1,nzmax-1 + m = b(nz)-cp(nz-1)*a(nz) + cp(nz) = c(nz)/m + up(nz) = (ur(nz)-up(nz-1)*a(nz))/m + vp(nz) = (vr(nz)-vp(nz-1)*a(nz))/m + enddo + ! initialize x + ur(nzmax-1) = up(nzmax-1) + vr(nzmax-1) = vp(nzmax-1) + + ! solve for x from the vectors c-prime and d-prime + !!PS do nz = nzmax-2, 1, -1 + do nz = nzmax-2, nzmin, -1 + ur(nz) = up(nz)-cp(nz)*ur(nz+1) + vr(nz) = vp(nz)-cp(nz)*vr(nz+1) + end do + + ! =========================== + ! RHS update + ! =========================== + !!PS do nz=1,nzmax-1 + do nz=nzmin,nzmax-1 + UV_rhs(1,nz,elem)=ur(nz) + UV_rhs(2,nz,elem)=vr(nz) + end do + end do !!! cycle over elements end subroutine impl_vert_visc_ale ! @@ -2735,14 +2768,16 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) use check_blowup_interface use fer_solve_interface IMPLICIT NONE - integer, intent(in) :: n - type(t_mesh), intent(inout), target :: mesh - type(t_partit), intent(inout), target :: partit + integer , intent(in) :: n + type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(inout), target :: dynamics - + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: node + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: eta_n #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -2750,8 +2785,8 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) #include "associate_mesh_ass.h" eta_n => dynamics%eta_n(:) + !___________________________________________________________________________ t0=MPI_Wtime() - ! water_flux = 0.0_WP ! heat_flux = 0.0_WP ! stress_surf= 0.0_WP diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 28c84ae16..8d6d83992 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1,126 +1,126 @@ module diff_part_hor_redi_interface - interface - subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - integer, intent(in), target :: tr_num - type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface + interface + subroutine diff_part_hor_redi(tr_num, tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + integer , intent(in) , target :: tr_num + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module module diff_ver_part_expl_ale_interface - interface - subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - integer, intent(in), target :: tr_num - type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface + interface + subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + integer , intent(in) , target :: tr_num + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module module diff_ver_part_redi_expl_interface - interface - subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - integer, intent(in), target :: tr_num - type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface + interface + subroutine diff_ver_part_redi_expl(tr_num, tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + integer , intent(in) , target :: tr_num + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module module diff_ver_part_impl_ale_interface - interface - subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracer, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use MOD_DYN - integer, intent(in), target :: tr_num - type(t_dyn), intent(inout), target :: dynamics - type(t_tracer), intent(inout), target :: tracer - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface + interface + subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + integer , intent(in) , target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module module diff_tracers_ale_interface - interface - subroutine diff_tracers_ale(tr_num, dynamics, tracer, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use MOD_DYN - integer, intent(in), target :: tr_num - type(t_dyn) , intent(inout), target :: dynamics - type(t_tracer), intent(inout), target :: tracer - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface + interface + subroutine diff_tracers_ale(tr_num, dynamics, tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + integer , intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module module bc_surface_interface - interface - function bc_surface(n, id, sval, partit) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - integer , intent(in) :: n, id - type(t_partit), intent(inout), target :: partit - real(kind=WP) :: bc_surface - real(kind=WP), intent(in) :: sval - end function - end interface + interface + function bc_surface(n, id, sval, partit) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + integer , intent(in) :: n, id + type(t_partit), intent(inout), target :: partit + real(kind=WP) :: bc_surface + real(kind=WP), intent(in) :: sval + end function + end interface end module module diff_part_bh_interface - interface - subroutine diff_part_bh(tr_num, dynamics, tracer, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use MOD_DYN - integer, intent(in), target :: tr_num - type(t_dyn) , intent(inout), target :: dynamics - type(t_tracer), intent(inout), target :: tracer - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface + interface + subroutine diff_part_bh(tr_num, dynamics, tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + integer , intent(in) , target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module module solve_tracers_ale_interface - interface - subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use MOD_DYN - type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - end subroutine - end interface + interface + subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module ! ! @@ -140,12 +140,14 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use diff_tracers_ale_interface use oce_adv_tra_driver_interfaces implicit none - type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ integer :: tr_num, node, elem, nzmax, nzmin + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, fer_Wvel real(kind=WP), dimension(:,:) , pointer :: del_ttf @@ -228,12 +230,14 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) end if call exchange_nod(tracers%data(tr_num)%values(:,:), partit) end do + !___________________________________________________________________________ ! 3D restoring for "passive" tracers !!!$OMPTODO: add OpenMP later, not needed right now! do tr_num=1, ptracers_restore_total tracers%data(ptracers_restore(tr_num)%locid)%values(:, ptracers_restore(tr_num)%ind2)=1.0_WP end do + !___________________________________________________________________________ ! subtract the the bolus velocities back from 3D velocities: if (Fer_GM) then @@ -249,6 +253,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) end do !$OMP END PARALLEL DO end if + !___________________________________________________________________________ ! to avoid crash with high salinities when coupled to atmosphere ! --> if we do only where (tr_arr(:,:,2) < 3._WP ) we also fill up the bottom @@ -285,33 +290,36 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) use diff_ver_part_impl_ale_interface use diff_part_bh_interface implicit none - - integer :: n, nzmax, nzmin - integer, intent(in), target :: tr_num + integer , intent(in) , target :: tr_num type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, nzmax, nzmin + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), pointer :: del_ttf(:,:) - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - del_ttf => tracers%work%del_ttf + !___________________________________________________________________________ ! do horizontal diffusiion ! write there also horizontal diffusion rhs to del_ttf which is equal the R_T^n ! in danilovs srcipt ! includes Redi diffusivity if Redi=.true. call diff_part_hor_redi(tr_num, tracers, partit, mesh) ! seems to be ~9% faster than diff_part_hor + !___________________________________________________________________________ ! do vertical diffusion: explicit if (.not. tracers%i_vert_diff) call diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) ! A projection of horizontal Redi diffussivity onto vertical. This par contains horizontal ! derivatives and has to be computed explicitly! if (Redi) call diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) + !___________________________________________________________________________ ! Update tracers --> calculate T* see Danilov et al. (2017) ! T* = (dt*R_T^n + h^(n-0.5)*T^(n-0.5))/h^(n+0.5) @@ -352,29 +360,27 @@ subroutine diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) USE MOD_PARSUP use MOD_TRACER use g_config,only: dt - implicit none - integer, intent(in), target :: tr_num + integer , intent(in) , target :: tr_num type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ real(kind=WP) :: vd_flux(mesh%nl-1) real(kind=WP) :: rdata,flux,rlx integer :: nz,nl1,ul1,n real(kind=WP) :: zinv1,Ty - + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), pointer :: del_ttf(:,:) - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - del_ttf => tracers%work%del_ttf - Ty = 0.0_WP - !___________________________________________________________________________ + Ty = 0.0_WP do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 ul1=ulevels_nod2D(n) @@ -440,13 +446,13 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) use o_mixing_KPP_mod !for ghats _GO_ use g_cvmix_kpp, only: kpp_nonlcltranspT, kpp_nonlcltranspS, kpp_oblmixc use bc_surface_interface - implicit none - integer, intent(in), target :: tr_num + integer , intent(in) , target :: tr_num type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) integer :: nz, n, nzmax,nzmin @@ -455,16 +461,17 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) real(kind=WP), external :: TFrez ! Sea water freeze temperature. real(kind=WP) :: isredi=0._WP logical :: do_wimpl=.true. - + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:), pointer :: trarr real(kind=WP), dimension(:,:), pointer :: Wvel_i - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" trarr => tracers%data(tr_num)%values(:,:) Wvel_i => dynamics%w_i(:,:) + !___________________________________________________________________________ if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. dynamics%use_wsplit)) do_wimpl=.false. @@ -906,21 +913,23 @@ subroutine diff_ver_part_redi_expl(tr_num, tracers, partit, mesh) IMPLICIT NONE integer, intent(in), target :: tr_num type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ integer :: elem,k integer :: n2,nl1,ul1,nl2,nz,n real(kind=WP) :: Tx, Ty real(kind=WP) :: tr_xynodes(2,mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D), vd_flux(mesh%nl) + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), pointer :: del_ttf(:,:) - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - del_ttf => tracers%work%del_ttf + !___________________________________________________________________________ do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 ul1=ulevels_nod2D(n) @@ -988,25 +997,27 @@ subroutine diff_part_hor_redi(tr_num, tracers, partit, mesh) use o_param use g_config IMPLICIT NONE - integer, intent(in), target :: tr_num + integer , intent(in) , target :: tr_num type(t_tracer), intent(inout), target :: tracers - type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ real(kind=WP) :: deltaX1,deltaY1,deltaX2,deltaY2 integer :: edge integer :: n2,nl1,ul1,nl2,ul2,nl12,ul12,nz,el(2),elnodes(3),n,enodes(2) real(kind=WP) :: c, Fx, Fy,Tx, Ty, Tx_z, Ty_z, SxTz, SyTz, Tz(2) real(kind=WP) :: rhs1(mesh%nl-1), rhs2(mesh%nl-1), Kh, dz real(kind=WP) :: isredi=0._WP + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), pointer :: del_ttf(:,:) - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - del_ttf => tracers%work%del_ttf + !___________________________________________________________________________ if (Redi) isredi=1._WP do edge=1, myDim_edge2D rhs1=0.0_WP @@ -1147,30 +1158,32 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) use o_param use g_config use g_comm_auto - IMPLICIT NONE - integer, intent(in), target :: tr_num + integer , intent(in) , target :: tr_num type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ real(kind=WP) :: u1, v1, len, vi, tt, ww integer :: nz, ed, el(2), en(2), k, elem, nl1, ul1 real(kind=WP), allocatable :: temporary_ttf(:,:) - real(kind=WP), pointer :: ttf(:,:) + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV - + real(kind=WP), dimension(:,:) , pointer :: ttf #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UV => dynamics%uv(:,:,:) ttf => tracers%data(tr_num)%values - + + !___________________________________________________________________________ ed=myDim_nod2D+eDim_nod2D allocate(temporary_ttf(nl-1, ed)) - temporary_ttf=0.0_8 + temporary_ttf=0.0_WP DO ed=1, myDim_edge2D+eDim_edge2D if (myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index ea77166ed..5bed6b618 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -29,7 +29,6 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) end interface end module - ! ! !_______________________________________________________________________________ @@ -48,11 +47,11 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) use g_comm_auto use g_sbf, only: l_mslp use momentum_adv_scalar_interface - implicit none type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ integer :: elem, elnodes(3), nz, nzmax, nzmin real(kind=WP) :: ff, mm real(kind=WP) :: Fx, Fy, pre(3) @@ -60,9 +59,10 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UV_rhs real(kind=WP), dimension(:) , pointer :: eta_n - #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -72,6 +72,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) eta_n =>dynamics%eta_n(:) + !___________________________________________________________________________ t1=MPI_Wtime() use_pice=0 if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 @@ -182,31 +183,31 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) ! write(*,*) 'vert. part ', t4-t3 ! end if END SUBROUTINE compute_vel_rhs -! =================================================================== + ! ! Momentum advection on scalar control volumes with ALE adaption--> exchange zinv(nz) ! against hnode(nz,node) !_______________________________________________________________________________ subroutine momentum_adv_scalar(dynamics, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -use MOD_DYN -USE o_PARAM -use g_comm_auto -IMPLICIT NONE - -type(t_dyn) , intent(inout), target :: dynamics -type(t_partit), intent(inout), target :: partit -type(t_mesh) , intent(in) , target :: mesh - -integer :: n, nz, el1, el2 -integer :: nl1, nl2, ul1, ul2, nod(2), el, ed, k, nle, ule -real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) -real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) -real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UVnode_rhs -real(kind=WP), dimension(:,:), pointer :: Wvel_e - + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + USE o_PARAM + use g_comm_auto + IMPLICIT NONE + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, nz, el1, el2 + integer :: nl1, nl2, ul1, ul2, nod(2), el, ed, k, nle, ule + real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) + real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UVnode_rhs + real(kind=WP), dimension(:,:) , pointer :: Wvel_e #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 7ad39dd0a..5ff34e70e 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -9,8 +9,6 @@ ! We however, try to keep dynamics%visc_gamma1<0.1 ! 3. dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. It is only used in: ! (5) visc_filt_bcksct, (6) visc_filt_bilapl, (7) visc_filt_bidiff -! 4. Div_c =1. should be default -! 5. Leith_c=? (need to be adjusted) module visc_filt_bcksct_interface interface subroutine visc_filt_bcksct(dynamics, partit, mesh) @@ -69,24 +67,27 @@ SUBROUTINE update_vel(dynamics, partit, mesh) USE g_CONFIG use g_comm_auto IMPLICIT NONE + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ integer :: elem, elnodes(3), nz, m, nzmax, nzmin real(kind=WP) :: eta(3) real(kind=WP) :: Fx, Fy - type(t_dyn) , intent(inout), target :: dynamics - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs - real(kind=WP), dimension(:), pointer :: eta_n, d_eta - + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV=>dynamics%uv(:,:,:) - UV_rhs=>dynamics%uv_rhs(:,:,:) - eta_n=>dynamics%eta_n(:) - d_eta=>dynamics%d_eta(:) - + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) + + !___________________________________________________________________________ DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) eta=-g*theta*dt*d_eta(elnodes) @@ -94,7 +95,6 @@ SUBROUTINE update_vel(dynamics, partit, mesh) Fy=sum(gradient_sca(4:6,elem)*eta) nzmin = ulevels(elem) nzmax = nlevels(elem) - !!PS DO nz=1, nlevels(elem)-1 DO nz=nzmin, nzmax-1 UV(1,nz,elem)= UV(1,nz,elem) + UV_rhs(1,nz,elem) + Fx UV(2,nz,elem)= UV(2,nz,elem) + UV_rhs(2,nz,elem) + Fy @@ -114,12 +114,14 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) USE o_PARAM use g_comm_auto IMPLICIT NONE - integer :: n, nz, k, elem, nln, uln, nle, ule - real(kind=WP) :: tx, ty, tvol - type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, nz, k, elem, nln, uln, nle, ule + real(kind=WP) :: tx, ty, tvol + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -127,11 +129,11 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) UVnode=>dynamics%uvnode(:,:,:) - + + !___________________________________________________________________________ DO n=1, myDim_nod2D uln = ulevels_nod2D(n) nln = nlevels_nod2D(n) - !!PS DO nz=1, nlevels_nod2D(n)-1 DO nz=uln, nln-1 tvol=0.0_WP tx =0.0_WP @@ -164,15 +166,13 @@ subroutine viscosity_filter(option, dynamics, partit, mesh) use visc_filt_bcksct_interface use visc_filt_bilapl_interface use visc_filt_bidiff_interface -!!PS use visc_filt_dbcksc_interface -!!PS use backscatter_coef_interface use g_backscatter IMPLICIT NONE integer :: option type(t_dyn) , intent(inout), target :: dynamics - type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + ! Driving routine ! Background viscosity is selected in terms of Vl, where V is ! background velocity scale and l is the resolution. V is 0.005 @@ -214,12 +214,14 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) USE g_CONFIG USE g_comm_auto IMPLICIT NONE - - real(kind=8) :: u1, v1, len, vi - integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + real(kind=8) :: u1, v1, len, vi + integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs real(kind=WP), dimension(:,:) , pointer :: U_c, V_c, U_b, V_b #include "associate_part_def.h" @@ -233,22 +235,21 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) U_b => dynamics%work%u_b(:,:) V_b => dynamics%work%v_b(:,:) + !___________________________________________________________________________ ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. ! Here the contribution from squared velocities is added to the viscosity. ! The contribution from boundary edges is neglected (free slip). - - U_b=0.0_WP - V_b=0.0_WP - U_c=0.0_WP - V_c=0.0_WP + U_b = 0.0_WP + V_b = 0.0_WP + U_c = 0.0_WP + V_c = 0.0_WP DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) len=sqrt(sum(elem_area(el))) nzmax = minval(nlevels(el)) nzmin = maxval(ulevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 DO nz=nzmin,nzmax-1 u1=UV(1,nz,el(1))-UV(1,nz,el(2)) v1=UV(2,nz,el(1))-UV(2,nz,el(2)) @@ -274,7 +275,6 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) DO ed=1, myDim_nod2D nzmin = ulevels_nod2D(ed) nzmax = nlevels_nod2D(ed) - !!PS DO nz=1, nlevels_nod2D(ed)-1 DO nz=nzmin, nzmax-1 vi=0.0_WP u1=0.0_WP @@ -295,7 +295,6 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) nelem=elem2D_nodes(:,ed) nzmin = ulevels(ed) nzmax = nlevels(ed) - !!PS Do nz=1, nlevels(ed)-1 Do nz=nzmin, nzmax-1 UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+U_b(nz,ed) -dynamics%visc_easybsreturn*sum(U_c(nz,nelem))/3.0_WP UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -dynamics%visc_easybsreturn*sum(V_c(nz,nelem))/3.0_WP @@ -320,32 +319,33 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) USE g_CONFIG USE g_comm_auto IMPLICIT NONE - real(kind=8) :: u1, v1, vi, len - integer :: ed, el(2), nz, nzmin, nzmax - type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - + !___________________________________________________________________________ + real(kind=8) :: u1, v1, vi, len + integer :: ed, el(2), nz, nzmin, nzmax + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs real(kind=WP), dimension(:,:) , pointer :: U_c, V_c #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) U_c => dynamics%work%u_c(:,:) V_c => dynamics%work%v_c(:,:) - - U_c=0.0_WP - V_c=0.0_WP + + !___________________________________________________________________________ + U_c = 0.0_WP + V_c = 0.0_WP DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 DO nz=nzmin,nzmax-1 u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) @@ -360,7 +360,6 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) len=sqrt(elem_area(ed)) nzmin = ulevels(ed) nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 Do nz=nzmin,nzmax-1 ! vi has the sense of harmonic viscosity coef. because of ! division by area in the end @@ -381,7 +380,6 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) el=edge_tri(:,ed) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 DO nz=nzmin,nzmax-1 u1=(U_c(nz,el(1))-U_c(nz,el(2))) v1=(V_c(nz,el(1))-V_c(nz,el(2))) @@ -411,32 +409,34 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) USE g_CONFIG USE g_comm_auto IMPLICIT NONE - real(kind=8) :: u1, v1, vi, len - integer :: ed, el(2), nz, nzmin, nzmax type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - + !___________________________________________________________________________ + real(kind=8) :: u1, v1, vi, len + integer :: ed, el(2), nz, nzmin, nzmax + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs real(kind=WP), dimension(:,:) , pointer :: U_c, V_c #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) + UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) U_c => dynamics%work%u_c(:,:) V_c => dynamics%work%v_c(:,:) - U_c=0.0_WP - V_c=0.0_WP + !___________________________________________________________________________ + U_c = 0.0_WP + V_c = 0.0_WP DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) len=sqrt(sum(elem_area(el))) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 DO nz=nzmin,nzmax-1 u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) @@ -463,7 +463,6 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) len=sqrt(sum(elem_area(el))) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 DO nz=nzmin,nzmax-1 u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 8309a1ca6..7b24e2824 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -1,103 +1,108 @@ module oce_initial_state_interface - interface - subroutine oce_initial_state(tracers, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - type(t_mesh), intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - end subroutine - end interface + interface + subroutine oce_initial_state(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in) , target :: mesh + end subroutine + end interface end module module tracer_init_interface - interface - subroutine tracer_init(tracers, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - end subroutine - end interface + interface + subroutine tracer_init(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in) , target :: mesh + end subroutine + end interface end module module dynamics_init_interface - interface - subroutine dynamics_init(dynamics, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - end subroutine - end interface + interface + subroutine dynamics_init(dynamics, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module module ocean_setup_interface - interface - subroutine ocean_setup(dynamics, tracers, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use MOD_DYN - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(inout), target :: dynamics - end subroutine - end interface + interface + subroutine ocean_setup(dynamics, tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module + module before_oce_step_interface - interface - subroutine before_oce_step(dynamics, tracers, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use MOD_DYN - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(inout), target :: dynamics - end subroutine - end interface + interface + subroutine before_oce_step(dynamics, tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module +! +! !_______________________________________________________________________________ subroutine ocean_setup(dynamics, tracers, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_TRACER -USE MOD_DYN -USE o_PARAM -USE o_ARRAYS -USE g_config -USE g_forcing_param, only: use_virt_salt -use g_cvmix_tke -use g_cvmix_idemix -use g_cvmix_pp -use g_cvmix_kpp -use g_cvmix_tidal -use g_backscatter -use Toy_Channel_Soufflet -use oce_initial_state_interface -use oce_adv_tra_fct_interfaces -use init_ale_interface -use init_thickness_ale_interface -IMPLICIT NONE -type(t_mesh), intent(inout), target :: mesh -type(t_partit), intent(inout), target :: partit -type(t_tracer), intent(inout), target :: tracers -type(t_dyn), intent(inout), target :: dynamics -integer :: n + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE MOD_DYN + USE o_PARAM + USE o_ARRAYS + USE g_config + USE g_forcing_param, only: use_virt_salt + use g_cvmix_tke + use g_cvmix_idemix + use g_cvmix_pp + use g_cvmix_kpp + use g_cvmix_tidal + use g_backscatter + use Toy_Channel_Soufflet + use oce_initial_state_interface + use oce_adv_tra_fct_interfaces + use init_ale_interface + use init_thickness_ale_interface + IMPLICIT NONE + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: n + !___setup virt_salt_flux____________________________________________________ ! if the ale thinkness remain unchanged (like in 'linfs' case) the vitrual ! salinity flux need to be used @@ -255,101 +260,106 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) write(*,*) '******************************************************************************' end if end subroutine ocean_setup +! +! !_______________________________________________________________________________ SUBROUTINE tracer_init(tracers, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_TRACER -USE DIAGNOSTICS, only: ldiag_DVD -USE g_ic3d -IMPLICIT NONE -integer :: elem_size, node_size -integer, save :: nm_unit = 104 ! unit to open namelist file, skip 100-102 for cray -integer :: iost -integer :: n - -integer :: num_tracers -logical :: i_vert_diff, smooth_bh_tra -real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra - -type(t_mesh), intent(in) , target :: mesh -type(t_partit), intent(inout), target :: partit -type(t_tracer), intent(inout), target :: tracers -type(nml_tracer_list_type), target, allocatable :: nml_tracer_list(:) - -namelist /tracer_listsize/ num_tracers -namelist /tracer_list / nml_tracer_list -namelist /tracer_general / smooth_bh_tra, gamma0_tra, gamma1_tra, gamma2_tra, i_vert_diff - + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE DIAGNOSTICS, only: ldiag_DVD + USE g_ic3d + IMPLICIT NONE + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in) , target :: mesh + type(nml_tracer_list_type), target, allocatable :: nml_tracer_list(:) + !___________________________________________________________________________ + integer :: elem_size, node_size + integer, save :: nm_unit = 104 ! unit to open namelist file, skip 100-102 for cray + integer :: iost + integer :: n + !___________________________________________________________________________ + ! define tracer namelist parameter + integer :: num_tracers + logical :: i_vert_diff, smooth_bh_tra + real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra + namelist /tracer_listsize/ num_tracers + namelist /tracer_list / nml_tracer_list + namelist /tracer_general / smooth_bh_tra, gamma0_tra, gamma1_tra, gamma2_tra, i_vert_diff + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + + !___________________________________________________________________________ + ! OPEN and read namelist for I/O + open( unit=nm_unit, file='namelist.tra', form='formatted', access='sequential', status='old', iostat=iost ) + if (iost == 0) then + if (mype==0) WRITE(*,*) ' file : ', 'namelist.tra',' open ok' + else + if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.tra',' ; iostat=',iost + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + + READ(nm_unit, nml=tracer_listsize, iostat=iost) + allocate(nml_tracer_list(num_tracers)) + READ(nm_unit, nml=tracer_list, iostat=iost) + read(nm_unit, nml=tracer_init3d, iostat=iost) + READ(nm_unit, nml=tracer_general, iostat=iost) + close(nm_unit) + + do n=1, num_tracers + if (nml_tracer_list(n)%id==-1) then + if (mype==0) write(*,*) 'number of tracers will be changed from ', num_tracers, ' to ', n-1, '!' + num_tracers=n-1 + EXIT + end if + end do + + if (mype==0) write(*,*) 'total number of tracers is: ', num_tracers -! OPEN and read namelist for I/O -open( unit=nm_unit, file='namelist.tra', form='formatted', access='sequential', status='old', iostat=iost ) -if (iost == 0) then - if (mype==0) WRITE(*,*) ' file : ', 'namelist.tra',' open ok' -else - if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.tra',' ; iostat=',iost - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - stop -end if - -READ(nm_unit, nml=tracer_listsize, iostat=iost) -allocate(nml_tracer_list(num_tracers)) -READ(nm_unit, nml=tracer_list, iostat=iost) -read (nm_unit, nml=tracer_init3d, iostat=iost) -READ(nm_unit, nml=tracer_general, iostat=iost) -close(nm_unit) - -do n=1, num_tracers - if (nml_tracer_list(n)%id==-1) then - if (mype==0) write(*,*) 'number of tracers will be changed from ', num_tracers, ' to ', n-1, '!' - num_tracers=n-1 - EXIT - end if -end do - -if (mype==0) write(*,*) 'total number of tracers is: ', num_tracers - -elem_size=myDim_elem2D+eDim_elem2D -node_size=myDim_nod2D+eDim_nod2D - -tracers%num_tracers=num_tracers - -! ================ -! Temperature (index=1), Salinity (index=2), etc. -! ================ -allocate(tracers%data(num_tracers)) -do n=1, tracers%num_tracers - allocate(tracers%data(n)%values (nl-1,node_size)) - allocate(tracers%data(n)%valuesAB(nl-1,node_size)) - tracers%data(n)%ID = nml_tracer_list(n)%id - tracers%data(n)%tra_adv_hor = TRIM(nml_tracer_list(n)%adv_hor) - tracers%data(n)%tra_adv_ver = TRIM(nml_tracer_list(n)%adv_ver) - tracers%data(n)%tra_adv_lim = TRIM(nml_tracer_list(n)%adv_lim) - tracers%data(n)%tra_adv_ph = nml_tracer_list(n)%adv_ph - tracers%data(n)%tra_adv_pv = nml_tracer_list(n)%adv_pv - tracers%data(n)%smooth_bh_tra = smooth_bh_tra - tracers%data(n)%gamma0_tra = gamma0_tra - tracers%data(n)%gamma1_tra = gamma1_tra - tracers%data(n)%gamma2_tra = gamma2_tra - tracers%data(n)%values = 0. - tracers%data(n)%valuesAB = 0. - tracers%data(n)%i_vert_diff = i_vert_diff -end do -allocate(tracers%work%del_ttf(nl-1,node_size)) -allocate(tracers%work%del_ttf_advhoriz(nl-1,node_size),tracers%work%del_ttf_advvert(nl-1,node_size)) -tracers%work%del_ttf = 0.0_WP -tracers%work%del_ttf_advhoriz = 0.0_WP -tracers%work%del_ttf_advvert = 0.0_WP -if (ldiag_DVD) then - allocate(tracers%work%tr_dvd_horiz(nl-1,node_size,2),tracers%work%tr_dvd_vert(nl-1,node_size,2)) - tracers%work%tr_dvd_horiz = 0.0_WP - tracers%work%tr_dvd_vert = 0.0_WP -end if + !___________________________________________________________________________ + ! define local vertice & elem array size + number of tracers + elem_size=myDim_elem2D+eDim_elem2D + node_size=myDim_nod2D+eDim_nod2D + tracers%num_tracers=num_tracers + + !___________________________________________________________________________ + ! allocate/initialise horizontal velocity arrays in derived type + ! Temperature (index=1), Salinity (index=2), etc. + allocate(tracers%data(num_tracers)) + do n=1, tracers%num_tracers + allocate(tracers%data(n)%values (nl-1,node_size)) + allocate(tracers%data(n)%valuesAB(nl-1,node_size)) + tracers%data(n)%ID = nml_tracer_list(n)%id + tracers%data(n)%tra_adv_hor = TRIM(nml_tracer_list(n)%adv_hor) + tracers%data(n)%tra_adv_ver = TRIM(nml_tracer_list(n)%adv_ver) + tracers%data(n)%tra_adv_lim = TRIM(nml_tracer_list(n)%adv_lim) + tracers%data(n)%tra_adv_ph = nml_tracer_list(n)%adv_ph + tracers%data(n)%tra_adv_pv = nml_tracer_list(n)%adv_pv + tracers%data(n)%smooth_bh_tra = smooth_bh_tra + tracers%data(n)%gamma0_tra = gamma0_tra + tracers%data(n)%gamma1_tra = gamma1_tra + tracers%data(n)%gamma2_tra = gamma2_tra + tracers%data(n)%values = 0. + tracers%data(n)%valuesAB = 0. + tracers%data(n)%i_vert_diff = i_vert_diff + end do + allocate(tracers%work%del_ttf(nl-1,node_size)) + allocate(tracers%work%del_ttf_advhoriz(nl-1,node_size),tracers%work%del_ttf_advvert(nl-1,node_size)) + tracers%work%del_ttf = 0.0_WP + tracers%work%del_ttf_advhoriz = 0.0_WP + tracers%work%del_ttf_advvert = 0.0_WP + if (ldiag_DVD) then + allocate(tracers%work%tr_dvd_horiz(nl-1,node_size,2),tracers%work%tr_dvd_vert(nl-1,node_size,2)) + tracers%work%tr_dvd_horiz = 0.0_WP + tracers%work%tr_dvd_vert = 0.0_WP + end if END SUBROUTINE tracer_init ! ! @@ -361,10 +371,15 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) USE MOD_DYN USE o_param IMPLICIT NONE + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + !___________________________________________________________________________ integer :: elem_size, node_size integer, save :: nm_unit = 105 ! unit to open namelist file, skip 100-102 for cray integer :: iost - + !___________________________________________________________________________ + ! define dynamics namelist parameter integer :: opt_visc real(kind=WP) :: visc_gamma0, visc_gamma1, visc_gamma2 real(kind=WP) :: visc_easybsreturn @@ -373,21 +388,17 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) logical :: use_freeslip logical :: use_wsplit real(kind=WP) :: wsplit_maxcfl - - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_dyn) , intent(inout), target :: dynamics - - ! define dynamics namelist parameter namelist /dynamics_visc / opt_visc, visc_gamma0, visc_gamma1, visc_gamma2, & use_ivertvisc, visc_easybsreturn namelist /dynamics_general/ momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl - + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + !___________________________________________________________________________ ! open and read namelist for I/O open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) if (iost == 0) then @@ -474,146 +485,146 @@ SUBROUTINE dynamics_init(dynamics, partit, mesh) dynamics%work%u_b = 0.0_WP dynamics%work%v_b = 0.0_WP end if - - END SUBROUTINE dynamics_init ! ! !_______________________________________________________________________________ SUBROUTINE arrays_init(num_tracers, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -USE o_ARRAYS -USE o_PARAM -use g_comm_auto -use g_config -use g_forcing_arrays -use o_mixing_kpp_mod ! KPP -USE g_forcing_param, only: use_virt_salt -use diagnostics, only: ldiag_dMOC, ldiag_DVD -IMPLICIT NONE -integer :: elem_size, node_size -integer :: n -integer, intent(in) :: num_tracers -type(t_mesh), intent(in), target :: mesh -type(t_partit), intent(inout), target :: partit + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE o_ARRAYS + USE o_PARAM + use g_comm_auto + use g_config + use g_forcing_arrays + use o_mixing_kpp_mod ! KPP + USE g_forcing_param, only: use_virt_salt + use diagnostics, only: ldiag_dMOC, ldiag_DVD + IMPLICIT NONE + integer, intent(in) :: num_tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: elem_size, node_size + integer :: n + !___________________________________________________________________________ + ! define dynamics namelist parameter #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + !___________________________________________________________________________ + elem_size=myDim_elem2D+eDim_elem2D + node_size=myDim_nod2D+eDim_nod2D -elem_size=myDim_elem2D+eDim_elem2D -node_size=myDim_nod2D+eDim_nod2D - - -! ================ -! Velocities -! ================ -!allocate(stress_diag(2, elem_size))!delete me -!!PS allocate(Visc(nl-1, elem_size)) -! ================ -! elevation and its rhs -! ================ - -! ================ -! Monin-Obukhov -! ================ -if (use_ice .and. use_momix) allocate(mo(nl,node_size),mixlength(node_size)) -if (use_ice .and. use_momix) mixlength=0. -! ================ -! Vertical velocity and pressure -! ================ -allocate( hpressure(nl,node_size)) -allocate(bvfreq(nl,node_size),mixlay_dep(node_size),bv_ref(node_size)) -! ================ -! Ocean forcing arrays -! ================ -allocate(Tclim(nl-1,node_size), Sclim(nl-1, node_size)) -allocate(stress_surf(2,myDim_elem2D)) !!! Attention, it is shorter !!! -allocate(stress_node_surf(2,node_size)) -allocate(stress_atmoce_x(node_size), stress_atmoce_y(node_size)) -allocate(relax2clim(node_size)) -allocate(heat_flux(node_size), Tsurf(node_size)) -allocate(water_flux(node_size), Ssurf(node_size)) -allocate(relax_salt(node_size)) -allocate(virtual_salt(node_size)) - -allocate(heat_flux_in(node_size)) -allocate(real_salt_flux(node_size)) !PS -! ================= -! Arrays used to organize surface forcing -! ================= -allocate(Tsurf_t(node_size,2), Ssurf_t(node_size,2)) -allocate(tau_x_t(node_size,2), tau_y_t(node_size,2)) - - -! ================= -! Visc and Diff coefs -! ================= - -allocate(Av(nl,elem_size), Kv(nl,node_size)) - -Av=0.0_WP -Kv=0.0_WP -if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then - allocate(Kv_double(nl,node_size, num_tracers)) - Kv_double=0.0_WP - !!PS call oce_mixing_kpp_init ! Setup constants, allocate arrays and construct look up table -end if - -! tracer gradients & RHS -allocate(ttrhs(nl-1,node_size)) -allocate(tr_xy(2,nl-1,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) -allocate(tr_z(nl,myDim_nod2D+eDim_nod2D)) - -! neutral slope etc. to be used in Redi formulation -allocate(neutral_slope(3, nl-1, node_size)) -allocate(slope_tapered(3, nl-1, node_size)) -allocate(Ki(nl-1, node_size)) - -do n=1, node_size -! Ki(n)=K_hor*area(1,n)/scale_area - Ki(:,n)=K_hor*(mesh_resolution(n)/100000.0_WP)**2 -end do -call exchange_nod(Ki, partit) - -neutral_slope=0.0_WP -slope_tapered=0.0_WP - -allocate(MLD1(node_size), MLD2(node_size), MLD1_ind(node_size), MLD2_ind(node_size)) -if (use_global_tides) then - allocate(ssh_gp(node_size)) - ssh_gp=0. -end if -! xy gradient of a neutral surface -allocate(sigma_xy(2, nl-1, node_size)) -sigma_xy=0.0_WP - -! alpha and beta in the EoS -allocate(sw_beta(nl-1, node_size), sw_alpha(nl-1, node_size)) -allocate(dens_flux(node_size)) -sw_beta =0.0_WP -sw_alpha =0.0_WP -dens_flux=0.0_WP - -if (Fer_GM) then - allocate(fer_c(node_size),fer_scal(node_size), fer_gamma(2, nl, node_size), fer_K(nl, node_size)) - fer_gamma=0.0_WP - fer_K=500._WP - fer_c=1._WP - fer_scal = 0.0_WP -end if - -if (SPP) then - allocate(ice_rejected_salt(node_size)) - ice_rejected_salt=0._WP -end if - -! ================= -! Initialize with zeros -! ================= + ! ================ + ! Velocities + ! ================ + !allocate(stress_diag(2, elem_size))!delete me + !!PS allocate(Visc(nl-1, elem_size)) + ! ================ + ! elevation and its rhs + ! ================ + + ! ================ + ! Monin-Obukhov + ! ================ + if (use_ice .and. use_momix) allocate(mo(nl,node_size),mixlength(node_size)) + if (use_ice .and. use_momix) mixlength=0. + ! ================ + ! Vertical velocity and pressure + ! ================ + allocate( hpressure(nl,node_size)) + allocate(bvfreq(nl,node_size),mixlay_dep(node_size),bv_ref(node_size)) + ! ================ + ! Ocean forcing arrays + ! ================ + allocate(Tclim(nl-1,node_size), Sclim(nl-1, node_size)) + allocate(stress_surf(2,myDim_elem2D)) !!! Attention, it is shorter !!! + allocate(stress_node_surf(2,node_size)) + allocate(stress_atmoce_x(node_size), stress_atmoce_y(node_size)) + allocate(relax2clim(node_size)) + allocate(heat_flux(node_size), Tsurf(node_size)) + allocate(water_flux(node_size), Ssurf(node_size)) + allocate(relax_salt(node_size)) + allocate(virtual_salt(node_size)) + + allocate(heat_flux_in(node_size)) + allocate(real_salt_flux(node_size)) !PS + ! ================= + ! Arrays used to organize surface forcing + ! ================= + allocate(Tsurf_t(node_size,2), Ssurf_t(node_size,2)) + allocate(tau_x_t(node_size,2), tau_y_t(node_size,2)) + + + ! ================= + ! Visc and Diff coefs + ! ================= + + allocate(Av(nl,elem_size), Kv(nl,node_size)) + + Av=0.0_WP + Kv=0.0_WP + if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then + allocate(Kv_double(nl,node_size, num_tracers)) + Kv_double=0.0_WP + !!PS call oce_mixing_kpp_init ! Setup constants, allocate arrays and construct look up table + end if + + ! tracer gradients & RHS + allocate(ttrhs(nl-1,node_size)) + allocate(tr_xy(2,nl-1,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) + allocate(tr_z(nl,myDim_nod2D+eDim_nod2D)) + + ! neutral slope etc. to be used in Redi formulation + allocate(neutral_slope(3, nl-1, node_size)) + allocate(slope_tapered(3, nl-1, node_size)) + allocate(Ki(nl-1, node_size)) + + do n=1, node_size + ! Ki(n)=K_hor*area(1,n)/scale_area + Ki(:,n)=K_hor*(mesh_resolution(n)/100000.0_WP)**2 + end do + call exchange_nod(Ki, partit) + + neutral_slope=0.0_WP + slope_tapered=0.0_WP + + allocate(MLD1(node_size), MLD2(node_size), MLD1_ind(node_size), MLD2_ind(node_size)) + if (use_global_tides) then + allocate(ssh_gp(node_size)) + ssh_gp=0. + end if + ! xy gradient of a neutral surface + allocate(sigma_xy(2, nl-1, node_size)) + sigma_xy=0.0_WP + + ! alpha and beta in the EoS + allocate(sw_beta(nl-1, node_size), sw_alpha(nl-1, node_size)) + allocate(dens_flux(node_size)) + sw_beta =0.0_WP + sw_alpha =0.0_WP + dens_flux=0.0_WP + + if (Fer_GM) then + allocate(fer_c(node_size),fer_scal(node_size), fer_gamma(2, nl, node_size), fer_K(nl, node_size)) + fer_gamma=0.0_WP + fer_K=500._WP + fer_c=1._WP + fer_scal = 0.0_WP + end if + + if (SPP) then + allocate(ice_rejected_salt(node_size)) + ice_rejected_salt=0._WP + end if + + ! ================= + ! Initialize with zeros + ! ================= hpressure=0.0_WP ! @@ -680,166 +691,167 @@ END SUBROUTINE arrays_init !_______________________________________________________________________________ ! Here the 3D tracers will be initialized. Initialization strategy depends on a tracer ID. ! ID = 0 and 1 are reserved for temperature and salinity +! --> reads the initial state or the restart file for the ocean SUBROUTINE oce_initial_state(tracers, partit, mesh) -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_TRACER -USE o_ARRAYS -USE g_config -USE g_ic3d - ! - ! reads the initial state or the restart file for the ocean - ! - implicit none - integer :: i, k, counter, rcounter3, id - character(len=10) :: i_string, id_string - type(t_mesh), intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - real(kind=WP) :: loc, max_temp, min_temp, max_salt, min_salt - + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE o_ARRAYS + USE g_config + USE g_ic3d + implicit none + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: i, k, counter, rcounter3, id + character(len=10) :: i_string, id_string + real(kind=WP) :: loc, max_temp, min_temp, max_salt, min_salt + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - if (mype==0) write(*,*) tracers%num_tracers, ' tracers will be used in FESOM' - if (mype==0) write(*,*) 'tracer IDs are: ', tracers%data(1:tracers%num_tracers)%ID - ! - ! read ocean state - ! this must be always done! First two tracers with IDs 0 and 1 are the temperature and salinity. - if(mype==0) write(*,*) 'read Temperatur climatology from:', trim(filelist(1)) - if(mype==0) write(*,*) 'read Salt climatology from:', trim(filelist(2)) - call do_ic3d(tracers, partit, mesh) - - Tclim=tracers%data(1)%values - Sclim=tracers%data(2)%values - Tsurf=Tclim(1,:) - Ssurf=Sclim(1,:) - relax2clim=0.0_WP - - ! count the passive tracers which require 3D source (ptracers_restore_total) - ptracers_restore_total=0 - DO i=3, tracers%num_tracers - id=tracers%data(i)%ID - SELECT CASE (id) - CASE (301) - ptracers_restore_total=ptracers_restore_total+1 - CASE (302) - ptracers_restore_total=ptracers_restore_total+1 - CASE (303) - ptracers_restore_total=ptracers_restore_total+1 - - END SELECT - END DO - allocate(ptracers_restore(ptracers_restore_total)) - - rcounter3=0 ! counter for tracers with 3D source - DO i=3, tracers%num_tracers - id=tracers%data(i)%ID - SELECT CASE (id) - CASE (101) ! initialize tracer ID=101 - tracers%data(i)%values(:,:)=0.0_WP - if (mype==0) then - write (i_string, "(I3)") i - write (id_string, "(I3)") id - write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) - end if - CASE (301) !Fram Strait 3d restored passive tracer - tracers%data(i)%values(:,:)=0.0_WP - rcounter3 =rcounter3+1 - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>77.5*rad) .and. (geo_coord_nod2D(2,k)<78.*rad))& - .and.((geo_coord_nod2D(1,k)>0. *rad) .and. (geo_coord_nod2D(1,k)<10.*rad))) then - counter=counter+1 - end if - end do - allocate(ptracers_restore(rcounter3)%ind2(counter)) - ptracers_restore(rcounter3)%id =301 - ptracers_restore(rcounter3)%locid=i - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>77.5*rad) .and. (geo_coord_nod2D(2,k)<78.*rad))& - .and.((geo_coord_nod2D(1,k)>0. *rad) .and. (geo_coord_nod2D(1,k)<10.*rad))) then - counter=counter+1 - ptracers_restore(rcounter3)%ind2(counter)=k + !___________________________________________________________________________ + if (mype==0) write(*,*) tracers%num_tracers, ' tracers will be used in FESOM' + if (mype==0) write(*,*) 'tracer IDs are: ', tracers%data(1:tracers%num_tracers)%ID + ! + ! read ocean state + ! this must be always done! First two tracers with IDs 0 and 1 are the temperature and salinity. + if(mype==0) write(*,*) 'read Temperatur climatology from:', trim(filelist(1)) + if(mype==0) write(*,*) 'read Salt climatology from:', trim(filelist(2)) + call do_ic3d(tracers, partit, mesh) + + Tclim=tracers%data(1)%values + Sclim=tracers%data(2)%values + Tsurf=Tclim(1,:) + Ssurf=Sclim(1,:) + relax2clim=0.0_WP + + ! count the passive tracers which require 3D source (ptracers_restore_total) + ptracers_restore_total=0 + DO i=3, tracers%num_tracers + id=tracers%data(i)%ID + SELECT CASE (id) + CASE (301) + ptracers_restore_total=ptracers_restore_total+1 + CASE (302) + ptracers_restore_total=ptracers_restore_total+1 + CASE (303) + ptracers_restore_total=ptracers_restore_total+1 + + END SELECT + END DO + allocate(ptracers_restore(ptracers_restore_total)) + + rcounter3=0 ! counter for tracers with 3D source + DO i=3, tracers%num_tracers + id=tracers%data(i)%ID + SELECT CASE (id) + CASE (101) ! initialize tracer ID=101 + tracers%data(i)%values(:,:)=0.0_WP + if (mype==0) then + write (i_string, "(I3)") i + write (id_string, "(I3)") id + write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) end if - end do - tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=1. - if (mype==0) then - write (i_string, "(I3)") i - write (id_string, "(I3)") id - write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) - end if - - CASE (302) !Bering Strait 3d restored passive tracer - tracers%data(i)%values(:,:)=0.0_WP - rcounter3 =rcounter3+1 - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>65.6*rad) .and. (geo_coord_nod2D(2,k)<66.*rad))& - .and.((geo_coord_nod2D(1,k)>-172. *rad) .and. (geo_coord_nod2D(1,k)<-166.*rad))) then - counter=counter+1 + CASE (301) !Fram Strait 3d restored passive tracer + tracers%data(i)%values(:,:)=0.0_WP + rcounter3 =rcounter3+1 + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>77.5*rad) .and. (geo_coord_nod2D(2,k)<78.*rad))& + .and.((geo_coord_nod2D(1,k)>0. *rad) .and. (geo_coord_nod2D(1,k)<10.*rad))) then + counter=counter+1 + end if + end do + allocate(ptracers_restore(rcounter3)%ind2(counter)) + ptracers_restore(rcounter3)%id =301 + ptracers_restore(rcounter3)%locid=i + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>77.5*rad) .and. (geo_coord_nod2D(2,k)<78.*rad))& + .and.((geo_coord_nod2D(1,k)>0. *rad) .and. (geo_coord_nod2D(1,k)<10.*rad))) then + counter=counter+1 + ptracers_restore(rcounter3)%ind2(counter)=k + end if + end do + tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=1. + if (mype==0) then + write (i_string, "(I3)") i + write (id_string, "(I3)") id + write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) end if - end do - allocate(ptracers_restore(rcounter3)%ind2(counter)) - ptracers_restore(rcounter3)%id =302 - ptracers_restore(rcounter3)%locid=i - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>65.6*rad) .and. (geo_coord_nod2D(2,k)<66.*rad))& - .and.((geo_coord_nod2D(1,k)>-172. *rad) .and. (geo_coord_nod2D(1,k)<-166.*rad))) then - counter=counter+1 - ptracers_restore(rcounter3)%ind2(counter)=k + + CASE (302) !Bering Strait 3d restored passive tracer + tracers%data(i)%values(:,:)=0.0_WP + rcounter3 =rcounter3+1 + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>65.6*rad) .and. (geo_coord_nod2D(2,k)<66.*rad))& + .and.((geo_coord_nod2D(1,k)>-172. *rad) .and. (geo_coord_nod2D(1,k)<-166.*rad))) then + counter=counter+1 + end if + end do + allocate(ptracers_restore(rcounter3)%ind2(counter)) + ptracers_restore(rcounter3)%id =302 + ptracers_restore(rcounter3)%locid=i + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>65.6*rad) .and. (geo_coord_nod2D(2,k)<66.*rad))& + .and.((geo_coord_nod2D(1,k)>-172. *rad) .and. (geo_coord_nod2D(1,k)<-166.*rad))) then + counter=counter+1 + ptracers_restore(rcounter3)%ind2(counter)=k + end if + end do + tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP + if (mype==0) then + write (i_string, "(I3)") i + write (id_string, "(I3)") id + write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) end if - end do - tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP - if (mype==0) then - write (i_string, "(I3)") i - write (id_string, "(I3)") id - write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) - end if - - CASE (303) !BSO 3d restored passive tracer - tracers%data(i)%values(:,:)=0.0_WP - rcounter3 =rcounter3+1 - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>69.5*rad) .and. (geo_coord_nod2D(2,k)<74.5*rad))& - .and.((geo_coord_nod2D(1,k)>19. *rad) .and. (geo_coord_nod2D(1,k)<20.*rad))) then - counter=counter+1 + + CASE (303) !BSO 3d restored passive tracer + tracers%data(i)%values(:,:)=0.0_WP + rcounter3 =rcounter3+1 + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>69.5*rad) .and. (geo_coord_nod2D(2,k)<74.5*rad))& + .and.((geo_coord_nod2D(1,k)>19. *rad) .and. (geo_coord_nod2D(1,k)<20.*rad))) then + counter=counter+1 + end if + end do + allocate(ptracers_restore(rcounter3)%ind2(counter)) + ptracers_restore(rcounter3)%id =303 + ptracers_restore(rcounter3)%locid=i + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>69.5*rad) .and. (geo_coord_nod2D(2,k)<74.5*rad))& + .and.((geo_coord_nod2D(1,k)>19. *rad) .and. (geo_coord_nod2D(1,k)<20.*rad))) then + counter=counter+1 + ptracers_restore(rcounter3)%ind2(counter)=k + end if + end do + tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP + if (mype==0) then + write (i_string, "(I3)") i + write (id_string, "(I3)") id + write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) end if - end do - allocate(ptracers_restore(rcounter3)%ind2(counter)) - ptracers_restore(rcounter3)%id =303 - ptracers_restore(rcounter3)%locid=i - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>69.5*rad) .and. (geo_coord_nod2D(2,k)<74.5*rad))& - .and.((geo_coord_nod2D(1,k)>19. *rad) .and. (geo_coord_nod2D(1,k)<20.*rad))) then - counter=counter+1 - ptracers_restore(rcounter3)%ind2(counter)=k + CASE DEFAULT + if (mype==0) then + write (i_string, "(I3)") i + write (id_string, "(I3)") id + if (mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified for '//trim(i_string)//' th tracer!!!' + if (mype==0) write(*,*) 'the model will stop!' end if - end do - tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP - if (mype==0) then - write (i_string, "(I3)") i - write (id_string, "(I3)") id - write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) - end if - CASE DEFAULT - if (mype==0) then - write (i_string, "(I3)") i - write (id_string, "(I3)") id - if (mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified for '//trim(i_string)//' th tracer!!!' - if (mype==0) write(*,*) 'the model will stop!' - end if - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - stop - END SELECT - END DO + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + END SELECT + END DO end subroutine oce_initial_state ! ! @@ -855,18 +867,21 @@ SUBROUTINE before_oce_step(dynamics, tracers, partit, mesh) USE g_config USE Toy_Channel_Soufflet implicit none + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(inout), target :: dynamics - + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + + !___________________________________________________________________________ if (toy_ocean) then SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase From 321536c0fccd0b30f134683b4416948b7860b1de Mon Sep 17 00:00:00 2001 From: patrickscholz Date: Wed, 10 Nov 2021 19:02:56 +0100 Subject: [PATCH 539/909] Update oce_ale_tracer.F90 correct small issue from solving of conflicts --- src/oce_ale_tracer.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 24afd330b..a801db59c 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -381,7 +381,6 @@ subroutine diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nl1, ul1, vd_flux, rdata, flux, rlx, zinv1) !___________________________________________________________________________ - Ty = 0.0_WP do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 ul1=ulevels_nod2D(n) From 86f16d8db5046ce16da6f790b687f3df99ecd304 Mon Sep 17 00:00:00 2001 From: patrickscholz Date: Wed, 10 Nov 2021 19:17:42 +0100 Subject: [PATCH 540/909] Update oce_ale.F90 fix another issue from solving of conflicts --- src/oce_ale.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index c1c39638d..c71022e3c 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2564,6 +2564,7 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) integer :: nz, elem, nzmax, nzmin, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd + real(kind=WP) :: zbar_n(mesh%nl), Z_n(mesh%nl-1), !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs From 41ea369c099237112b784808cc2c18ea9c2cf5a8 Mon Sep 17 00:00:00 2001 From: patrickscholz Date: Wed, 10 Nov 2021 19:21:50 +0100 Subject: [PATCH 541/909] Update oce_ale.F90 --- src/oce_ale.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index c71022e3c..24767c543 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2564,7 +2564,7 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) integer :: nz, elem, nzmax, nzmin, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd - real(kind=WP) :: zbar_n(mesh%nl), Z_n(mesh%nl-1), + real(kind=WP) :: zbar_n(mesh%nl), Z_n(mesh%nl-1) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs From b22f8f7643f1ef9d6ad61181827cf6dd5408fdba Mon Sep 17 00:00:00 2001 From: patrickscholz Date: Wed, 10 Nov 2021 19:28:55 +0100 Subject: [PATCH 542/909] Update gen_modules_config.F90 switch off debug flag as default: flag_debug=.false. --- src/gen_modules_config.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index b9d3d1807..f265ea898 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -107,7 +107,7 @@ module g_config real(kind=WP) :: cavity_partial_cell_thresh=0.0_WP ! same as partial_cell_tresh but for surface logical :: toy_ocean=.false. ! Ersatz forcing has to be supplied character(100) :: which_toy="soufflet" - logical :: flag_debug=.true. ! prints name of actual subroutine he is in + logical :: flag_debug=.false. ! prints name of actual subroutine he is in logical :: flag_warn_cflz=.true. ! switches off cflz warning namelist /run_config/ use_ice,use_floatice, use_sw_pene, use_cavity, & use_cavity_partial_cell, cavity_partial_cell_thresh, toy_ocean, which_toy, flag_debug, flag_warn_cflz From d37a51edc7131a83076285ef445ede5d054f90eb Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 10 Nov 2021 21:36:51 +0100 Subject: [PATCH 543/909] OPENMP for the tracer part completed. time measurements will follow! --- src/oce_ale_tracer.F90 | 72 +++++++++++++++++++++++------------------- 1 file changed, 39 insertions(+), 33 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 37fa87f35..c9b42cd59 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -223,7 +223,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) call relax_zonal_temp(tracers%data(1), partit, mesh) else call relax_to_clim(tr_num, tracers, partit, mesh) - end if + end if call exchange_nod(tracers%data(tr_num)%values(:,:), partit) end do !___________________________________________________________________________ @@ -433,10 +433,9 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) - integer :: nz, n, nzmax,nzmin - real(kind=WP) :: m, zinv, dt_inv, dz - real(kind=WP) :: rsss, Ty,Ty1, c1,zinv1,zinv2,v_adv - real(kind=WP), external :: TFrez ! Sea water freeze temperature. + integer :: nz, n, nzmax, nzmin + real(kind=WP) :: m, zinv, dz + real(kind=WP) :: rsss, Ty, Ty1, c1, zinv1, zinv2, v_adv real(kind=WP) :: isredi=0._WP logical :: do_wimpl=.true. real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) @@ -450,10 +449,8 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) trarr => tracers%data(tr_num)%values(:,:) Wvel_i => dynamics%w_i(:,:) !___________________________________________________________________________ - if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. dynamics%use_wsplit)) do_wimpl=.false. - + if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. dynamics%use_wsplit)) do_wimpl=.false. if (Redi) isredi=1._WP - dt_inv=1.0_WP/dt Ty =0.0_WP Ty1 =0.0_WP @@ -535,8 +532,11 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! loop over local nodes - do n=1,myDim_nod2D - + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, a, b, c, tr, cp, tp, m, zinv, dz, & +!$OMP rsss, Ty, Ty1, c1, zinv1, zinv2, v_adv, zbar_n, z_n) +!$OMP DO + do n=1,myDim_nod2D ! initialise a = 0.0_WP b = 0.0_WP @@ -544,11 +544,9 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) tr = 0.0_WP tp = 0.0_WP cp = 0.0_WP - ! max. number of levels at node n nzmax=nlevels_nod2D(n) nzmin=ulevels_nod2D(n) - !___________________________________________________________________________ ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because ! they be calculate from the actualized mesh with hnode_new @@ -564,7 +562,6 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) Z_n(nz-1) = zbar_n(nz) + hnode_new(nz-1,n)/2.0_WP end do zbar_n(nzmin) = zbar_n(nzmin+1) + hnode_new(nzmin,n) - !_______________________________________________________________________ ! Regular part of coefficients: --> surface layer nz=nzmin @@ -583,7 +580,6 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) !!PS c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * (area(nz+1,n)/areasvol(nz,n)) c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * area(nz+1,n)/areasvol(nz,n) b(nz)=-c(nz)+hnode_new(nz,n) ! ale - ! update from the vertical advection --> comes from splitting of vert ! velocity into explicite and implicite contribution if (do_wimpl) then @@ -601,7 +597,6 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) end if ! backup zinv2 for next depth level zinv1=zinv2 - !_______________________________________________________________________ ! Regular part of coefficients: --> 2nd...nl-2 layer do nz=nzmin+1, nzmax-2 @@ -628,7 +623,6 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) ! backup zinv2 for next depth level zinv1=zinv2 - ! update from the vertical advection if (do_wimpl) then !_______________________________________________________________ @@ -873,7 +867,10 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) trarr(nz,n)=trarr(nz,n)+tr(nz) end do - end do ! --> do n=1,myDim_nod2D + end do ! --> do n=1,myDim_nod2D +!$OMP END DO +!$OMP END PARALLEL +write(*,*) 'END IMPL VERTICAL DIFFUSION' end subroutine diff_ver_part_impl_ale ! ! @@ -1135,28 +1132,33 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) use g_comm_auto IMPLICIT NONE - integer, intent(in), target :: tr_num - type(t_dyn) , intent(inout), target :: dynamics - type(t_tracer), intent(inout), target :: tracers - type(t_mesh) , intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - real(kind=WP) :: u1, v1, len, vi, tt, ww - integer :: nz, ed, el(2), en(2), k, elem, nl1, ul1 - real(kind=WP), allocatable :: temporary_ttf(:,:) - real(kind=WP), pointer :: ttf(:,:) + integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: n, nz, ed, el(2), en(2), k, elem, nl1, ul1 + real(kind=WP) :: u1, v1, len, vi, tt, ww + real(kind=WP), pointer :: temporary_ttf(:,:) + real(kind=WP), pointer :: ttf(:,:) real(kind=WP), dimension(:,:,:), pointer :: UV #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - ttf => tracers%data(tr_num)%values + UV => dynamics%uv(:,:,:) + ttf => tracers%data(tr_num)%values + temporary_ttf => tracers%work%del_ttf !use already allocated working array. could be fct_LO instead etc. - ed=myDim_nod2D+eDim_nod2D - allocate(temporary_ttf(nl-1, ed)) +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + temporary_ttf(:, n)=0.0_8 + end do +!$OMP END PARALLEL DO - temporary_ttf=0.0_8 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, ed, el, en, k, elem, nl1, ul1, u1, v1, len, vi, tt, ww) +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if (myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) @@ -1180,10 +1182,13 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) temporary_ttf(nz,en(2))=temporary_ttf(nz,en(2))+tt END DO END DO +!$OMP END DO call exchange_nod(temporary_ttf, partit) +!$OMP BARRIER ! =========== ! Second round: ! =========== +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if (myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) @@ -1206,8 +1211,9 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) ttf(nz,en(1))=ttf(nz,en(1))-tt/area(nz,en(1)) ttf(nz,en(2))=ttf(nz,en(2))+tt/area(nz,en(2)) END DO - END DO - deallocate(temporary_ttf) + END DO +!$OMP END DO +!$OMP END PARALLEL end subroutine diff_part_bh ! ! From bca417a6eca26dba4f62432975c9c3029d6e9538 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 11 Nov 2021 10:48:59 +0100 Subject: [PATCH 544/909] move the merged ifs_* files to the ifs_interface directory --- src/ifs_interface.F90 | 1506 ----------------------- src/ifs_interface/ifs_notused.F90 | 11 +- src/ifs_modules.F90 | 1859 ----------------------------- src/ifs_notused.F90 | 362 ------ 4 files changed, 1 insertion(+), 3737 deletions(-) delete mode 100644 src/ifs_interface.F90 delete mode 100644 src/ifs_modules.F90 delete mode 100644 src/ifs_notused.F90 diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 deleted file mode 100644 index 4467dfa9a..000000000 --- a/src/ifs_interface.F90 +++ /dev/null @@ -1,1506 +0,0 @@ -#if defined (__ifsinterface) -!===================================================== -! IFS interface for calling FESOM2 as a subroutine. -! -! -Original code for NEMO by Kristian Mogensen, ECMWF. -! -Adapted to FESOM2 by Thomas Rackow, AWI, 2018. -!----------------------------------------------------- - -MODULE nemogcmcoup_steps - INTEGER :: substeps !per IFS timestep -END MODULE nemogcmcoup_steps - -SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & - & lwaveonly, iatmunit, lwrite ) - - ! Initialize the FESOM model for single executable coupling - - USE par_kind !in ifs_modules.F90 - USE g_PARSUP, only: MPI_COMM_FESOM, mype - USE g_config, only: dt - USE g_clock, only: timenew, daynew, yearnew, month, day_in_month - USE nemogcmcoup_steps, ONLY : substeps - - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: icomm - ! Initial date (e.g. 20170906), time, initial timestep and final time step - INTEGER, INTENT(OUT) :: inidate, initime, itini, itend - ! Length of the time step - REAL(wpIFS), INTENT(OUT) :: zstp - - ! inherited from interface to NEMO, not used here: - ! Coupling to waves only - LOGICAL, INTENT(IN) :: lwaveonly - ! Logfile unit (used if >=0) - INTEGER :: iatmunit - ! Write to this unit - LOGICAL :: lwrite - ! FESOM might perform substeps - INTEGER :: itend_fesom - INTEGER :: i - NAMELIST/namfesomstep/substeps - - ! TODO hard-coded here, put in namelist - substeps=2 - OPEN(9,file='namfesomstep.in') - READ(9,namfesomstep) - CLOSE(9) - - MPI_COMM_FESOM=icomm - itini = 1 - CALL main_initialize(itend_fesom) !also sets mype and npes - itend=itend_fesom/substeps - if(mype==0) then - WRITE(0,*)'!======================================' - WRITE(0,*)'! FESOM is initialized from within IFS.' - WRITE(0,*)'! get MPI_COMM_FESOM. =================' - WRITE(0,*)'! main_initialize done. ===============' - endif - - ! Set more information for the caller - - ! initial date and time (time is not used) - inidate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 - initime = 0 - if(mype==0) then - WRITE(0,*)'! FESOM initial date is ', inidate ,' ======' - WRITE(0,*)'! FESOM substeps are ', substeps ,' ======' - endif - - ! fesom timestep (as seen by IFS) - zstp = REAL(substeps,wpIFS)*dt - if(mype==0) then - WRITE(0,*)'! FESOM timestep as seen by IFS is ', real(zstp,4), 'sec (',substeps,'xdt)' - WRITE(0,*)'!======================================' - endif - -END SUBROUTINE nemogcmcoup_init - - -SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & - & npoints, nlocmsk, ngloind ) - - ! FESOM modules - USE g_PARSUP, only: mype, npes, myDim_nod2D, eDim_nod2D, myDim_elem2D, eDim_elem2D, eXDim_elem2D, & - myDim_edge2D, eDim_edge2D, myList_nod2D, myList_elem2D - USE MOD_MESH - !USE o_MESH, only: nod2D, elem2D - USE g_init2timestepping, only: meshinmod - - ! Initialize single executable coupling - USE parinter - USE scripremap - USE interinfo - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mypeIN,npesIN,icomm - ! Gaussian grid information - ! Number of points - INTEGER, INTENT(IN) :: npoints - ! Integer mask and global indices - INTEGER, DIMENSION(npoints), INTENT(IN) :: nlocmsk, ngloind - INTEGER :: iunit = 0 - - ! Local variables - type(t_mesh), target :: mesh - integer , pointer :: nod2D - integer , pointer :: elem2D - - ! Namelist containing the file names of the weights - CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & - & cdfile_T_to_gauss, cdfile_UV_to_gauss - CHARACTER(len=256) :: cdpathdist - LOGICAL :: lwritedist, lreaddist - LOGICAL :: lcommout - CHARACTER(len=128) :: commoutprefix - NAMELIST/namfesomcoup/cdfile_gauss_to_T,& - & cdfile_gauss_to_UV,& - & cdfile_T_to_gauss,& - & cdfile_UV_to_gauss,& - & cdpathdist, & - & lreaddist, & - & lwritedist, & - & lcommout, & - & commoutprefix,& - & lparbcast - - ! Global number of gaussian gridpoints - INTEGER :: nglopoints - ! Ocean grids accessed with NEMO modules - INTEGER :: noglopoints,nopoints - INTEGER, ALLOCATABLE, DIMENSION(:) :: omask,ogloind - ! SCRIP remapping data structures. - TYPE(scripremaptype) :: remap_gauss_to_T, remap_T_to_gauss, & - & remap_gauss_to_UV, remap_UV_to_gauss - ! Misc variables - INTEGER :: i,j,k,ierr - LOGICAL :: lexists - - ! associate the mesh, only what is needed here - ! #include "associate_mesh.h" - mesh = meshinmod - nod2D => mesh%nod2D - elem2D => mesh%elem2D - - - ! here FESOM knows about the (total number of) MPI tasks - - if(mype==0) then - write(*,*) 'MPI has been initialized in the atmospheric model' - write(*, *) 'Running on ', npes, ' PEs' - end if - - ! Read namelists - - cdfile_gauss_to_T = 'gausstoT.nc' - cdfile_gauss_to_UV = 'gausstoUV.nc' - cdfile_T_to_gauss = 'Ttogauss.nc' - cdfile_UV_to_gauss = 'UVtogauss.nc' - lcommout = .FALSE. - commoutprefix = 'parinter_comm' - cdpathdist = './' - lreaddist = .FALSE. - lwritedist = .FALSE. - - OPEN(9,file='namfesomcoup.in') - READ(9,namfesomcoup) - CLOSE(9) - - ! Global number of Gaussian gridpoints - - CALL mpi_allreduce( npoints, nglopoints, 1, & - & mpi_integer, mpi_sum, icomm, ierr) - - - if(mype==0) then - WRITE(0,*)'!======================================' - WRITE(0,*)'! SCALARS =============================' - - WRITE(0,*)'Update FESOM global scalar points' - endif - - noglopoints=nod2D - nopoints=myDim_nod2d - - ! Ocean mask and global indicies - - ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) - omask(:)= 1 ! all points are ocean points - ogloind(1:myDim_nod2d)= myList_nod2D(1:myDim_nod2d) ! global index for local point number - - ! Could be helpful later: - ! Replace global numbering with a local one - ! tmp(1:nod2d)=0 - ! DO n=1, myDim_nod2D+eDim_nod2D - ! tmp(myList_nod2D(n))=n - - ! Read the interpolation weights and setup the parallel interpolation - ! from atmosphere Gaussian grid to ocean T-grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoT, & - & cdpathdist,'ifs_to_fesom_gridT',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_gauss_to_T,remap_gauss_to_T,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_gauss_to_T,remap_gauss_to_T) - ENDIF - CALL parinter_init( mype, npes, icomm, & - & npoints, nglopoints, nlocmsk, ngloind, & - & nopoints, noglopoints, omask, ogloind, & - & remap_gauss_to_T, gausstoT, lcommout, TRIM(commoutprefix)//'_gtoT', & - & iunit ) - CALL scripremap_dealloc(remap_gauss_to_T) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoT, & - & cdpathdist,'ifs_to_fesom_gridT') - ENDIF - ENDIF - - ! From ocean T-grid to atmosphere Gaussian grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, noglopoints, nglopoints, Ttogauss, & - & cdpathdist,'fesom_gridT_to_ifs',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_T_to_gauss,remap_T_to_gauss,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_T_to_gauss,remap_T_to_gauss) - ENDIF - - CALL parinter_init( mype, npes, icomm, & - & nopoints, noglopoints, omask, ogloind, & - & npoints, nglopoints, nlocmsk, ngloind, & - & remap_T_to_gauss, Ttogauss, lcommout, TRIM(commoutprefix)//'_Ttog', & - & iunit ) - CALL scripremap_dealloc(remap_T_to_gauss) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, noglopoints, nglopoints, Ttogauss, & - & cdpathdist,'fesom_gridT_to_ifs') - ENDIF - ENDIF - - DEALLOCATE(omask,ogloind) - - - if(mype==0) then - WRITE(0,*)'!======================================' - WRITE(0,*)'! VECTORS =============================' - - WRITE(0,*)'Update FESOM global vector points' - endif - noglopoints=elem2D - nopoints=myDim_elem2D - - ! Ocean mask and global indicies - - ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) - - omask(:)= 1 ! all elements are in the ocean - ogloind(1:myDim_elem2D) = myList_elem2D(1:myDim_elem2D) ! global index for local element number - - ! Read the interpolation weights and setup the parallel interpolation - ! from atmosphere Gaussian grid to ocean UV-grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoUV, & - & cdpathdist,'ifs_to_fesom_gridUV',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_gauss_to_UV,remap_gauss_to_UV,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_gauss_to_UV,remap_gauss_to_UV) - ENDIF - CALL parinter_init( mype, npes, icomm, & - & npoints, nglopoints, nlocmsk, ngloind, & - & nopoints, noglopoints, omask, ogloind, & - & remap_gauss_to_UV, gausstoUV, lcommout, TRIM(commoutprefix)//'_gtoUV', & - & iunit ) - CALL scripremap_dealloc(remap_gauss_to_UV) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoUV, & - & cdpathdist,'ifs_to_fesom_gridUV') - ENDIF - ENDIF - - ! From ocean UV-grid to atmosphere Gaussian grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, noglopoints, nglopoints, UVtogauss, & - & cdpathdist,'fesom_gridUV_to_ifs',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_UV_to_gauss,remap_UV_to_gauss,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_UV_to_gauss,remap_UV_to_gauss) - ENDIF - - CALL parinter_init( mype, npes, icomm, & - & nopoints, noglopoints, omask, ogloind, & - & npoints, nglopoints, nlocmsk, ngloind, & - & remap_UV_to_gauss, UVtogauss, lcommout, TRIM(commoutprefix)//'_UVtog', & - & iunit ) - CALL scripremap_dealloc(remap_UV_to_gauss) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, noglopoints, nglopoints, UVtogauss, & - & cdpathdist,'fesom_gridUV_to_ifs') - ENDIF - ENDIF - - DEALLOCATE(omask,ogloind) - -END SUBROUTINE nemogcmcoup_coupinit - - -SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & - & nopoints, pgsst, pgist, pgalb, & - & pgifr, pghic, pghsn, pgucur, pgvcur, & - & pgistl, licelvls ) - - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the FESOM grid to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind ! in ifs_modules.F90 - USE o_ARRAYS, ONLY : tr_arr, UV - USE i_arrays, ONLY : m_ice, a_ice, m_snow - USE i_therm_param, ONLY : tmelt - !USE o_PARAM, ONLY : WP - USE g_PARSUP, only: myDim_nod2D,eDim_nod2D, myDim_elem2D,eDim_elem2D,eXDim_elem2D - !USE o_MESH, only: elem2D_nodes, coord_nod2D - USE MOD_MESH - USE g_init2timestepping, only: meshinmod - - USE g_rotate_grid, only: vector_r2g - USE parinter - USE scripremap - USE interinfo - - IMPLICIT NONE - - ! Arguments - REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur - REAL(wpIFS), DIMENSION(nopoints,3) :: pgistl - LOGICAL :: licelvls - - type(t_mesh), target :: mesh - real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D - integer, dimension(:,:) , pointer :: elem2D_nodes - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints - - ! Local variables - REAL(wpIFS), DIMENSION(myDim_nod2D) :: zsend - REAL(wpIFS), DIMENSION(myDim_elem2D) :: zsendU, zsendV - INTEGER :: elnodes(3) - REAL(wpIFS) :: rlon, rlat - - ! Loop variables - INTEGER :: n, elem, ierr - - !#include "associate_mesh.h" - ! associate what is needed only - mesh = meshinmod - coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D - elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes - - - ! =================================================================== ! - ! Pack SST data and convert to K. 'pgsst' is on Gauss grid. - do n=1,myDim_nod2D - zsend(n)=tr_arr(1, n, 1)+tmelt ! sea surface temperature [K], - ! (1=surface, n=node, 1/2=T/S) - enddo - - ! Interpolate SST - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pgsst ) - - - ! =================================================================== ! - ! Pack ice fraction data [0..1] and interpolate: 'pgifr' on Gauss. - ! zsend(:)=a_ice(:) - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, a_ice, & - & nopoints, pgifr ) - - - ! =================================================================== ! - ! Pack ice temperature data (already in K) - zsend(:)=273.15 - - ! Interpolate ice surface temperature: 'pgist' on Gaussian grid. - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pgist ) - - - ! =================================================================== ! - ! Pack ice albedo data and interpolate: 'pgalb' on Gaussian grid. - zsend(:)=0.7 - - ! Interpolate ice albedo - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pgalb ) - - - ! =================================================================== ! - ! Pack ice thickness data and interpolate: 'pghic' on Gaussian grid. - zsend(:)=m_ice(:)/max(a_ice(:),0.01) ! ice thickness (mean over ice) - - ! Interpolation of average ice thickness - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pghic ) - - - ! =================================================================== ! - ! Pack snow thickness data and interpolate: 'pghsn' on Gaussian grid. - zsend(:)=m_snow(:)/max(a_ice(:),0.01) ! snow thickness (mean over ice) - - ! Interpolation of snow thickness - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pghsn ) - - - ! =================================================================== ! - ! Surface currents need to be rotated to geographical grid - - ! Pack u(v) surface currents - zsendU(:)=UV(1,1,1:myDim_elem2D) - zsendV(:)=UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here - - do elem=1, myDim_elem2D - - ! compute element midpoints - elnodes=elem2D_nodes(:,elem) - rlon=sum(coord_nod2D(1,elnodes))/3.0_wpIFS - rlat=sum(coord_nod2D(2,elnodes))/3.0_wpIFS - - ! Rotate vectors to geographical coordinates (r2g) - call vector_r2g(zsendU(elem), zsendV(elem), rlon, rlat, 0) ! 0-flag for rot. coord - - end do - -#ifdef FESOM_TODO - - ! We need to sort out the non-unique global index before we - ! can couple currents - - ! Interpolate: 'pgucur' and 'pgvcur' on Gaussian grid. - CALL parinter_fld( mype, npes, icomm, UVtogauss, & - & myDim_elem2D, zsendU, & - & nopoints, pgucur ) - - CALL parinter_fld( mype, npes, icomm, UVtogauss, & - & myDim_elem2D, zsendV, & - & nopoints, pgvcur ) - -#else - - pgucur(:) = 0.0 - pgvcur(:) = 0.0 - -#endif - -#ifndef FESOM_TODO - - if(mype==0) then - WRITE(0,*)'Everything implemented except ice level temperatures (licelvls).' - endif - -#else - - ! Ice level temperatures - - IF (licelvls) THEN - -#if defined key_lim2 - - DO jl = 1, 3 - - ! Pack ice temperatures data at level jl(already in K) - - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = tbif (ji,jj,jl) - ENDDO - ENDDO - - ! Interpolate ice temperature at level jl - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgistl(:,jl) ) - - ENDDO - -#else - WRITE(0,*)'licelvls needs to be sorted for LIM3' - CALL abort -#endif - - ENDIF - - IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) - -#endif - -END SUBROUTINE nemogcmcoup_lim2_get - - -SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & - & npoints, & - & taux_oce, tauy_oce, taux_ice, tauy_ice, & - & qs___oce, qs___ice, qns__oce, qns__ice, dqdt_ice, & - & evap_tot, evap_ice, prcp_liq, prcp_sol, & - & runoffIN, ocerunoff, tcc, lcc, tice_atm, & - & kt, ldebug, loceicemix, lqnsicefilt ) - - ! Update fluxes in nemogcmcoup_data by parallel - ! interpolation of the input gaussian grid data - - USE par_kind !in ifs_modules.F90 - USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex, eDim_nod2D, eDim_elem2D, eXDim_elem2D, myDim_edge2D, eDim_edge2D - !USE o_MESH, only: coord_nod2D !elem2D_nodes - USE MOD_MESH - USE g_init2timestepping, only: meshinmod - !USE o_PARAM, ONLY : WP, use wpIFS from par_kind (IFS) - USE g_rotate_grid, only: vector_r2g, vector_g2r - USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & - & evap_no_ifrac, sublimation !'longwave' only stand-alone, 'evaporation' filled later - USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, oce_heat_flux, ice_heat_flux - USE o_ARRAYS, only: stress_atmoce_x, stress_atmoce_y - USE g_comm_auto ! exchange_nod does the halo exchange - - ! all needed? - USE parinter - USE scripremap - USE interinfo - - IMPLICIT NONE - - ! =================================================================== ! - ! Arguments ========================================================= ! - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Fluxes on the Gaussian grid. - INTEGER, INTENT(IN) :: npoints - REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & - & taux_oce, tauy_oce, taux_ice, tauy_ice, & - & qs___oce, qs___ice, qns__oce, qns__ice, & - & dqdt_ice, evap_tot, evap_ice, prcp_liq, prcp_sol, & - & runoffIN, ocerunoff, tcc, lcc, tice_atm - - ! Current time step - INTEGER, INTENT(in) :: kt - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - ! QS/QNS mixed switch - LOGICAL, INTENT(IN) :: loceicemix - ! QNS ice filter switch (requires tice_atm to be sent) - LOGICAL, INTENT(IN) :: lqnsicefilt - - type(t_mesh), target :: mesh - - ! Local variables - INTEGER :: n - REAL(wpIFS), parameter :: rhofwt = 1000. ! density of freshwater - - - ! Packed receive buffer - REAL(wpIFS), DIMENSION(myDim_nod2D) :: zrecv - REAL(wpIFS), DIMENSION(myDim_elem2D):: zrecvU, zrecvV - - - !#include "associate_mesh.h" - ! associate only the necessary things - real(kind=WP), dimension(:,:), pointer :: coord_nod2D - mesh = meshinmod - coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D - - ! =================================================================== ! - ! Sort out incoming arrays from the IFS and put them on the ocean grid - - ! TODO - shortwave(:)=0. ! Done, updated below. What to do with shortwave over ice?? - !longwave(:)=0. ! Done. Only used in stand-alone mode. - prec_rain(:)=0. ! Done, updated below. - prec_snow(:)=0. ! Done, updated below. - evap_no_ifrac=0. ! Done, updated below. This is evap over ocean, does this correspond to evap_tot? - sublimation=0. ! Done, updated below. - ! - ice_heat_flux=0. ! Done. This is qns__ice currently. Is this the non-solar heat flux? ! non solar heat fluxes below ! (qns) - oce_heat_flux=0. ! Done. This is qns__oce currently. Is this the non-solar heat flux? - ! - runoff(:)=0. ! not used apparently. What is runoffIN, ocerunoff? - !evaporation(:)=0. - !ice_thermo_cpl.F90: !---- total evaporation (needed in oce_salt_balance.F90) - !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice - stress_atmice_x=0. ! Done, taux_ice - stress_atmice_y=0. ! Done, tauy_ice - stress_atmoce_x=0. ! Done, taux_oce - stress_atmoce_y=0. ! Done, tauy_oce - - - ! =================================================================== ! - !1. Interpolate ocean solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & - & myDim_nod2D, zrecv ) - - ! Unpack ocean solar radiation, without halo - shortwave(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - - ! Do the halo exchange - call exchange_nod(shortwave) - - - ! =================================================================== ! - !2. Interpolate ice solar radiation to T grid - ! DO NOTHING - - - ! =================================================================== ! - !3. Interpolate ocean non-solar radiation to T grid (is this non-solar heat flux?) - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & - & myDim_nod2D, zrecv ) - - ! Unpack ocean non-solar, without halo - oce_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - - ! Do the halo exchange - call exchange_nod(oce_heat_flux) - - - ! =================================================================== ! - !4. Interpolate non-solar radiation over ice to T grid (is this non-solar heat flux?) - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & - & myDim_nod2D, zrecv ) - - ! Unpack ice non-solar - ice_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - - ! Do the halo exchange - call exchange_nod(ice_heat_flux) - - - ! =================================================================== ! - !5. D(q)/dT to T grid - ! DO NOTHING - - - ! =================================================================== ! - !6. Interpolate total evaporation to T grid - ! =================================================================== ! - !ice_thermo_cpl.F90: total evaporation (needed in oce_salt_balance.F90) - !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice - ! =================================================================== ! - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & - & myDim_nod2D, zrecv ) - - ! Unpack total evaporation, without halo - evap_no_ifrac(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign - - ! Do the halo exchange - call exchange_nod(evap_no_ifrac) - - !7. Interpolate sublimation (evaporation over ice) to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & - & myDim_nod2D, zrecv ) - - ! Unpack sublimation (evaporation over ice), without halo - sublimation(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign - - ! Do the halo exchange - call exchange_nod(sublimation) - ! =================================================================== ! - ! =================================================================== ! - - - ! =================================================================== ! - !8. Interpolate liquid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & - & myDim_nod2D, zrecv ) - - ! Unpack liquid precipitation, without halo - prec_rain(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s - - ! Do the halo exchange - call exchange_nod(prec_rain) - - - ! =================================================================== ! - !9. Interpolate solid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & - & myDim_nod2D, zrecv ) - - ! Unpack solid precipitation, without halo - prec_snow(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s - - ! Do the halo exchange - call exchange_nod(prec_snow) - - - ! =================================================================== ! - !10. Interpolate runoff to T grid - ! - !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & - ! & myDim_nod2D, zrecv ) - ! - ! Unpack runoff, without halo - !runoff(1:myDim_nod2D)=zrecv(1:myDim_nod2D) !conversion?? - ! - ! Do the halo exchange - !call exchange_nod(runoff) - ! - !11. Interpolate ocean runoff to T grid - ! - !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & - ! & myDim_nod2D, zrecv ) - ! - ! Unpack ocean runoff - ! ?? - - !12. Interpolate total cloud fractions to T grid (tcc) - ! - !13. Interpolate low cloud fractions to T grid (lcc) - - - ! =================================================================== ! - ! STRESSES - - ! OVER OCEAN: - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_oce, & - & myDim_nod2D, zrecv ) - - ! Unpack x stress atm->oce, without halo; then do halo exchange - stress_atmoce_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmoce_x) - - ! - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_oce, & - & myDim_nod2D, zrecv ) - - ! Unpack y stress atm->oce, without halo; then do halo exchange - stress_atmoce_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmoce_y) - - ! =================================================================== ! - ! OVER ICE: - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_ice, & - & myDim_nod2D, zrecv ) - - ! Unpack x stress atm->ice, without halo; then do halo exchange - stress_atmice_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmice_x) - - ! - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_ice, & - & myDim_nod2D, zrecv ) - - ! Unpack y stress atm->ice, without halo; then do halo exchange - stress_atmice_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmice_y) - - - ! =================================================================== ! - ! ROTATE VECTORS FROM GEOGRAPHIC TO FESOMS ROTATED GRID - - !if ((do_rotate_oce_wind .AND. do_rotate_ice_wind) .AND. rotated_grid) then - do n=1, myDim_nod2D+eDim_nod2D - call vector_g2r(stress_atmoce_x(n), stress_atmoce_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) !0-flag for rot. coord. - call vector_g2r(stress_atmice_x(n), stress_atmice_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) - end do - !do_rotate_oce_wind=.false. - !do_rotate_ice_wind=.false. - !end if - - -#ifdef FESOM_TODO - - ! Packed receive buffer - REAL(wpIFS), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zrecv - ! Unpacked fields on ORCA grids - REAL(wpIFS), DIMENSION(jpi,jpj) :: zqs___oce, zqs___ice, zqns__oce, zqns__ice - REAL(wpIFS), DIMENSION(jpi,jpj) :: zdqdt_ice, zevap_tot, zevap_ice, zprcp_liq, zprcp_sol - REAL(wpIFS), DIMENSION(jpi,jpj) :: zrunoff, zocerunoff - REAL(wpIFS), DIMENSION(jpi,jpj) :: ztmp, zicefr - ! Arrays for rotation - REAL(wpIFS), DIMENSION(jpi,jpj) :: zuu,zvu,zuv,zvv,zutau,zvtau - ! Lead fraction for both LIM2/LIM3 - REAL(wpIFS), DIMENSION(jpi,jpj) :: zfrld - ! Mask for masking for I grid - REAL(wpIFS) :: zmsksum - ! For summing up LIM3 contributions to ice temperature - REAL(wpIFS) :: zval,zweig - - ! Loop variables - INTEGER :: ji,jj,jk,jl - ! netCDF debugging output variables - CHARACTER(len=128) :: cdoutfile - INTEGER :: inum - REAL(wpIFS) :: zhook_handle ! Dr Hook handle - - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',0,zhook_handle) - IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_update') - - ! Allocate the storage data - - IF (.NOT.lallociceflx) THEN - ALLOCATE( & - & zsqns_tot(jpi,jpj), & - & zsqns_ice(jpi,jpj), & - & zsqsr_tot(jpi,jpj), & - & zsqsr_ice(jpi,jpj), & - & zsemp_tot(jpi,jpj), & - & zsemp_ice(jpi,jpj), & - & zsevap_ice(jpi,jpj), & - & zsdqdns_ice(jpi,jpj), & - & zssprecip(jpi,jpj), & - & zstprecip(jpi,jpj), & - & zstcc(jpi,jpj), & - & zslcc(jpi,jpj), & - & zsatmist(jpi,jpj), & - & zsqns_ice_add(jpi,jpj)& - & ) - lallociceflx = .TRUE. - ENDIF - IF (.NOT.lallocstress) THEN - ALLOCATE( & - & zsutau(jpi,jpj), & - & zsvtau(jpi,jpj), & - & zsutau_ice(jpi,jpj), & - & zsvtau_ice(jpi,jpj) & - & ) - lallocstress = .TRUE. - ENDIF - - ! Sort out incoming arrays from the IFS and put them on the ocean grid - - !1. Interpolate ocean solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean solar radiation - - zqs___oce(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqs___oce(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !2. Interpolate ice solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice solar radiation - - zqs___ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqs___ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !3. Interpolate ocean non-solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean non-solar radiation - - zqns__oce(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqns__oce(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !4. Interpolate ice non-solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice non-solar radiation - - zqns__ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqns__ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !5. Interpolate D(q)/dT to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, dqdt_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack D(q)/D(T) - - zdqdt_ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zdqdt_ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !6. Interpolate total evaporation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack total evaporation - - zevap_tot(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zevap_tot(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !7. Interpolate evaporation over ice to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack evaporation over ice - - zevap_ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zevap_ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !8. Interpolate liquid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack liquid precipitation - - zprcp_liq(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zprcp_liq(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !9. Interpolate solid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack precipitation over ice - - zprcp_sol(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zprcp_sol(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !10. Interpolate runoff to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack runoff - - zrunoff(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zrunoff(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !11. Interpolate ocean runoff to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean runoff - - zocerunoff(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zocerunoff(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !12. Interpolate total cloud fractions to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tcc, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean runoff - - zstcc(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zstcc(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !13. Interpolate low cloud fractions to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, lcc, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean runoff - - zslcc(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zslcc(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! get sea ice fraction and lead fraction - -#if defined key_lim2 - zfrld(:,:) = frld(:,:) - zicefr(:,:) = 1 - zfrld(:,:) -#else - zicefr(:,:) = 0.0_wpIFS - DO jl = 1, jpl - zicefr(:,:) = zicefr(:,:) + a_i(:,:,jl) - ENDDO - zfrld(:,:) = 1 - zicefr(:,:) -#endif - - zsemp_tot(:,:) = zevap_tot(:,:) - zprcp_liq(:,:) - zprcp_sol(:,:) - zstprecip(:,:) = zprcp_liq(:,:) + zprcp_sol(:,:) - ! More consistent with NEMO, but does changes the results, so - ! we don't do it for now. - ! zsemp_tot(:,:) = zevap_tot(:,:) - zstprecip(:,:) - zsemp_ice(:,:) = zevap_ice(:,:) - zprcp_sol(:,:) - zssprecip(:,:) = - zsemp_ice(:,:) - zsemp_tot(:,:) = zsemp_tot(:,:) - zrunoff(:,:) - zsemp_tot(:,:) = zsemp_tot(:,:) - zocerunoff(:,:) - zsevap_ice(:,:) = zevap_ice(:,:) - - ! non solar heat fluxes ! (qns) - IF (loceicemix) THEN - zsqns_tot(:,:) = zqns__oce(:,:) - ELSE - zsqns_tot(:,:) = zfrld(:,:) * zqns__oce(:,:) + zicefr(:,:) * zqns__ice(:,:) - ENDIF - zsqns_ice(:,:) = zqns__ice(:,:) - ztmp(:,:) = zfrld(:,:) * zprcp_sol(:,:) * lfus ! add the latent heat of solid precip. melting - - zsqns_tot(:,:) = zsqns_tot(:,:) - ztmp(:,:) ! over free ocean - ! solar heat fluxes ! (qsr) - - IF (loceicemix) THEN - zsqsr_tot(:,:) = zqs___oce(:,:) - ELSE - zsqsr_tot(:,:) = zfrld(:,:) * zqs___oce(:,:) + zicefr(:,:) * zqs___ice(:,:) - ENDIF - zsqsr_ice(:,:) = zqs___ice(:,:) - - IF( ln_dm2dc ) THEN ! modify qsr to include the diurnal cycle - zsqsr_tot(:,:) = sbc_dcy( zsqsr_tot(:,:) ) - zsqsr_ice(:,:) = sbc_dcy( zsqsr_ice(:,:) ) - ENDIF - - zsdqdns_ice(:,:) = zdqdt_ice(:,:) - - ! Apply lateral boundary condition - - CALL lbc_lnk(zsqns_tot, 'T', 1.0) - CALL lbc_lnk(zsqns_ice, 'T', 1.0) - CALL lbc_lnk(zsqsr_tot, 'T', 1.0) - CALL lbc_lnk(zsqsr_ice, 'T', 1.0) - CALL lbc_lnk(zsemp_tot, 'T', 1.0) - CALL lbc_lnk(zsemp_ice, 'T', 1.0) - CALL lbc_lnk(zsdqdns_ice, 'T', 1.0) - CALL lbc_lnk(zssprecip, 'T', 1.0) - CALL lbc_lnk(zstprecip, 'T', 1.0) - CALL lbc_lnk(zstcc, 'T', 1.0) - CALL lbc_lnk(zslcc, 'T', 1.0) - - ! Interpolate atmospheric ice temperature to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tice_atm, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack atmospheric ice temperature - - zsatmist(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zsatmist(ji,jj) = zrecv(jk) - ENDDO - ENDDO - CALL lbc_lnk(zsatmist, 'T', 1.0) - - zsqns_ice_add(:,:) = 0.0_wpIFS - - ! Use the dqns_ice filter - - IF (lqnsicefilt) THEN - - ! Add filtr to qns_ice - -#if defined key_lim2 - ztmp(:,:) = tn_ice(:,:,1) -#else - DO jj = nldj, nlej - DO ji = nldi, nlei - zval=0.0 - zweig=0.0 - DO jl = 1, jpl - zval = zval + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) - zweig = zweig + a_i(ji,jj,jl) - ENDDO - IF ( zweig > 0.0 ) THEN - ztmp(ji,jj) = zval /zweig - ELSE - ztmp(ji,jj) = rt0 - ENDIF - ENDDO - ENDDO - CALL lbc_lnk(ztmp, 'T', 1.0) -#endif - - WHERE ( zicefr(:,:) > .001_wpIFS ) - zsqns_ice_add(:,:) = zsdqdns_ice(:,:) * ( ztmp(:,:) - zsatmist(:,:) ) - END WHERE - - zsqns_ice(:,:) = zsqns_ice(:,:) + zsqns_ice_add(:,:) - - ENDIF - - ! Interpolate u-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack u stress on U grid - - zuu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate v-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack v stress on U grid - - zvu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate u-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack u stress on V grid - - zuv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate v-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack v stress on V grid - - zvv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Rotate stresses from en to ij and put u,v stresses on U,V grids - - CALL repcmo( zuu, zvu, zuv, zvv, zsutau, zsvtau ) - - ! Apply lateral boundary condition on u,v stresses on the U,V grids - - CALL lbc_lnk( zsutau, 'U', -1.0 ) - CALL lbc_lnk( zsvtau, 'V', -1.0 ) - - ! Interpolate ice u-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice u stress on U grid - - zuu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate ice v-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice v stress on U grid - - zvu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate ice u-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice u stress on V grid - - zuv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate ice v-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice v stress on V grid - - zvv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Rotate stresses from en to ij and put u,v stresses on U,V grids - - CALL repcmo( zuu, zvu, zuv, zvv, zutau, zvtau ) - - ! Apply lateral boundary condition on u,v stresses on the U,V grids - - CALL lbc_lnk( zutau, 'U', -1.0 ) - CALL lbc_lnk( zvtau, 'V', -1.0 ) - -#if defined key_lim2_vp - - ! Convert to I grid for LIM2 for key_lim_vp - DO jj = 2, jpjm1 ! (U,V) ==> I - DO ji = 2, jpim1 ! NO vector opt. - zmsksum = umask(ji-1,jj,1) + umask(ji-1,jj-1,1) - zsutau_ice(ji,jj) = ( umask(ji-1,jj,1) * zutau(ji-1,jj) + & - & umask(ji-1,jj-1,1) * zutau(ji-1,jj-1) ) - IF ( zmsksum > 0.0 ) THEN - zsutau_ice(ji,jj) = zsutau_ice(ji,jj) / zmsksum - ENDIF - zmsksum = vmask(ji,jj-1,1) + vmask(ji-1,jj-1,1) - zsvtau_ice(ji,jj) = ( vmask(ji,jj-1,1) * zvtau(ji,jj-1) + & - & vmask(ji-1,jj-1,1) * zvtau(ji-1,jj-1) ) - IF ( zmsksum > 0.0 ) THEN - zsvtau_ice(ji,jj) = zsvtau_ice(ji,jj) / zmsksum - ENDIF - END DO - END DO - -#else - - zsutau_ice(:,:) = zutau(:,:) - zsvtau_ice(:,:) = zvtau(:,:) - -#endif - - CALL lbc_lnk( zsutau_ice, 'I', -1.0 ) - CALL lbc_lnk( zsvtau_ice, 'I', -1.0 ) - - ! Optionally write files write the data on the ORCA grid via IOM. - - IF (ldebug) THEN - WRITE(cdoutfile,'(A,I8.8)') 'zsutau_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsutau' , zsutau ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsvtau' , zsvtau ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsutau_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsutau_ice' , zsutau_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsvtau_ice' , zsvtau_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqns_tot_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqns_tot' , zsqns_tot ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqns_ice' , zsqns_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_tot_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqsr_tot' , zsqsr_tot ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqsr_ice' , zsqsr_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsemp_tot_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsemp_tot' , zsemp_tot ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsemp_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsemp_ice' , zsemp_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsdqdns_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsdqdns_ice' , zsdqdns_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zssprecip_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zssprecip' , zssprecip ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zstprecip_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zstprecip' , zstprecip ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsevap_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsevap_ice' , zsevap_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zstcc_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zstcc' , zstcc ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zslcc_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zslcc' , zslcc ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsatmist_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsatmist' , zsatmist ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_add_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqns_ice_add' , zsqns_ice_add ) - CALL iom_close( inum ) - ENDIF - - IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_update') - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',1,zhook_handle) - -#else - - !FESOM part - !WRITE(0,*)'nemogcmcoup_lim2_update partially implemented. Proceeding...' - !CALL par_ex - -#endif - -END SUBROUTINE nemogcmcoup_lim2_update - - -SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) - - USE g_clock, only: yearnew, month, day_in_month - USE g_PARSUP, only: mype - USE nemogcmcoup_steps, ONLY : substeps - IMPLICIT NONE - - ! Arguments - - ! Time step - INTEGER, INTENT(IN) :: istp - - ! Data and time from NEMO - INTEGER, INTENT(OUT) :: icdate, ictime - - if(mype==0) then - WRITE(0,*)'! IFS at timestep ', istp, '. Do ', substeps , 'FESOM timesteps...' - endif - CALL main_timestepping(substeps) - - ! Compute date and time at the end of the time step - - icdate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 - ictime = 0 ! (time is not used) - - if(mype==0) then - WRITE(0,*)'! FESOM date at end of timestep is ', icdate ,' ======' - endif - -#ifdef FESOM_TODO - iye = ndastp / 10000 - imo = ndastp / 100 - iye * 100 - ida = MOD( ndastp, 100 ) - CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) - zjul = zjul + ( nsec_day + 0.5_wpIFS * rdttra(1) ) / 86400.0_wpIFS - CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) - icdate = iye * 10000 + imo * 100 + ida - ictime = ihh * 10000 + imm * 100 + iss -#endif - -END SUBROUTINE nemogcmcoup_step - - -SUBROUTINE nemogcmcoup_final - - USE g_PARSUP, only: mype - - ! Finalize the FESOM model - - IMPLICIT NONE - - if(mype==0) then - WRITE(*,*)'Finalization of FESOM from IFS.' - endif - CALL main_finalize - -END SUBROUTINE nemogcmcoup_final -#endif diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 index d596169c4..b483bf962 100644 --- a/src/ifs_interface/ifs_notused.F90 +++ b/src/ifs_interface/ifs_notused.F90 @@ -4,15 +4,6 @@ ! ! -Original code by Kristian Mogensen, ECMWF. -SUBROUTINE nemogcmcoup_end_ioserver - -! End the NEMO mppio server - - WRITE(*,*)'No mpp_ioserver used' -! CALL abort - -END SUBROUTINE nemogcmcoup_end_ioserver - SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) ! Initialize the NEMO mppio server @@ -22,7 +13,7 @@ SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) LOGICAL :: lnemoioserver WRITE(*,*)'No mpp_ioserver' - !CALL abort + CALL abort END SUBROUTINE nemogcmcoup_init_ioserver diff --git a/src/ifs_modules.F90 b/src/ifs_modules.F90 deleted file mode 100644 index 8f52ee153..000000000 --- a/src/ifs_modules.F90 +++ /dev/null @@ -1,1859 +0,0 @@ -#if defined (__ifsinterface) -#define __MYFILE__ 'ifs_modules.F90' -#define key_mpp_mpi -! Set of modules needed by the interface to IFS. -! -! -Original code by Kristian Mogensen, ECMWF. - -MODULE par_kind - IMPLICIT NONE - INTEGER, PUBLIC, PARAMETER :: & !: Floating point section - sp = SELECTED_REAL_KIND( 6, 37), & !: single precision (real 4) - dp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) - wpIFS = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) - ik = SELECTED_INT_KIND(6) !: integer precision -END MODULE par_kind - -MODULE nctools - - ! Utility subroutines for netCDF access - ! Modified : MAB (nf90, handle_error, LINE&FILE) - ! Modifled : KSM (new shorter name) - - USE netcdf - - PUBLIC ldebug_netcdf, nchdlerr - LOGICAL :: ldebug_netcdf = .FALSE. ! Debug switch for netcdf - -CONTAINS - - SUBROUTINE nchdlerr(status,lineno,filename) - - ! Error handler for netCDF access - IMPLICIT NONE - - - INTEGER :: status ! netCDF return status - INTEGER :: lineno ! Line number (usually obtained from - ! preprocessing __LINE__,__MYFILE__) - CHARACTER(len=*),OPTIONAL :: filename - - IF (status/=nf90_noerr) THEN - WRITE(*,*)'Netcdf error, code ',status - IF (PRESENT(filename)) THEN - WRITE(*,*)'In file ',filename,' in line ',lineno - ELSE - WRITE(*,*)'In line ',lineno - END IF - WRITE(*,'(2A)')' Error message : ',nf90_strerror(status) - CALL abort - ENDIF - - END SUBROUTINE nchdlerr - -!---------------------------------------------------------------------- -END MODULE nctools - -MODULE scrippar - INTEGER, PARAMETER :: scripdp = SELECTED_REAL_KIND(12,307) - INTEGER, PARAMETER :: scriplen = 80 -END MODULE scrippar - -MODULE scripgrid - - USE nctools - USE scrippar - - IMPLICIT NONE - - TYPE scripgridtype - INTEGER :: grid_size - INTEGER :: grid_corners - INTEGER :: grid_rank - INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_dims - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lat - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lon - INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_imask - REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lat - REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lon - CHARACTER(len=scriplen) :: grid_center_lat_units - CHARACTER(len=scriplen) :: grid_center_lon_units - CHARACTER(len=scriplen) :: grid_imask_units - CHARACTER(len=scriplen) :: grid_corner_lat_units - CHARACTER(len=scriplen) :: grid_corner_lon_units - CHARACTER(len=scriplen) :: title - END TYPE scripgridtype - -CONTAINS - - SUBROUTINE scripgrid_read( cdfilename, grid ) - - CHARACTER(len=*) :: cdfilename - TYPE(scripgridtype) :: grid - - INTEGER :: ncid, dimid, varid - - CALL scripgrid_init(grid) - - CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'grid_size',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=grid%grid_size),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'grid_corners',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=grid%grid_corners),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'grid_rank',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=grid%grid_rank),& - & __LINE__,__MYFILE__) - - CALL scripgrid_alloc(grid) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_dims',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_imask',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_imask),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',grid%title),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) - - END SUBROUTINE scripgrid_read - - SUBROUTINE scripgrid_write( cdgridfile, grid ) - - CHARACTER(len=*) :: cdgridfile - TYPE(scripgridtype) :: grid - - INTEGER :: ncid - INTEGER :: ioldfill - INTEGER :: idimsize,idimxsize,idimysize,idimcorners,idimrank - INTEGER :: idims1rank(1),idims1size(1),idims2(2) - INTEGER :: iddims,idcentlat,idcentlon,idimask,idcornlat,idcornlon - INTEGER :: igriddims(2) - - ! Setup netcdf file - - CALL nchdlerr(nf90_create(TRIM(cdgridfile),nf90_clobber,ncid),& - & __LINE__,__MYFILE__) - - ! Define dimensions - - CALL nchdlerr(nf90_def_dim(ncid,'grid_size',& - & grid%grid_size,idimsize),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_def_dim(ncid,'grid_corners',& - & grid%grid_corners,idimcorners),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_def_dim(ncid,'grid_rank',& - & grid%grid_rank,idimrank),& - & __LINE__,__MYFILE__) - - idims1rank(1) = idimrank - - idims1size(1) = idimsize - - idims2(1) = idimcorners - idims2(2) = idimsize - - ! Define variables - - CALL nchdlerr(nf90_def_var(ncid,'grid_dims',& - & nf90_int,idims1rank,iddims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_center_lat',& - & nf90_double,idims1size,idcentlat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcentlat,'units',& - & grid%grid_center_lat_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_center_lon',& - & nf90_double,idims1size,idcentlon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcentlon,'units',& - & grid%grid_center_lon_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_imask',& - & nf90_int,idims1size,idimask),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idimask,'units',& - & grid%grid_imask_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lat',& - & nf90_double,idims2,idcornlat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcornlat,'units',& - & grid%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lon',& - & nf90_double,idims2,idcornlon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcornlon,'units',& - & grid%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& - & TRIM(grid%title)),& - & __LINE__,__MYFILE__) - - ! End of netCDF definition phase - - CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) - - ! Write variables - - - CALL nchdlerr(nf90_put_var(ncid,iddims,grid%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcentlat,& - & grid%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcentlon,& - & grid%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idimask,& - & grid%grid_imask), & - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcornlat,& - & grid%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcornlon,& - & grid%grid_corner_lon),& - & __LINE__,__MYFILE__) - - ! Close file - - CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) - - END SUBROUTINE scripgrid_write - - SUBROUTINE scripgrid_init( grid ) - - TYPE(scripgridtype) :: grid - - grid%grid_size=0 - grid%grid_corners=0 - grid%grid_rank=0 - grid%grid_center_lat_units='' - grid%grid_center_lon_units='' - grid%grid_imask_units='' - grid%grid_corner_lat_units='' - grid%grid_corner_lon_units='' - grid%title='' - - END SUBROUTINE scripgrid_init - - SUBROUTINE scripgrid_alloc( grid ) - - TYPE(scripgridtype) :: grid - - IF ( (grid%grid_size == 0) .OR. & - & (grid%grid_corners == 0) .OR. & - & (grid%grid_rank == 0) ) THEN - WRITE(*,*)'scripgridtype not initialized' - CALL abort - ENDIF - - ALLOCATE( & - & grid%grid_dims(grid%grid_rank), & - & grid%grid_center_lat(grid%grid_size), & - & grid%grid_center_lon(grid%grid_size), & - & grid%grid_corner_lat(grid%grid_corners, grid%grid_size), & - & grid%grid_corner_lon(grid%grid_corners, grid%grid_size), & - & grid%grid_imask(grid%grid_size) & - & ) - - END SUBROUTINE scripgrid_alloc - - SUBROUTINE scripgrid_dealloc( grid ) - - TYPE(scripgridtype) :: grid - - DEALLOCATE( & - & grid%grid_dims, & - & grid%grid_center_lat, & - & grid%grid_center_lon, & - & grid%grid_corner_lat, & - & grid%grid_corner_lon, & - & grid%grid_imask & - & ) - - END SUBROUTINE scripgrid_dealloc - -END MODULE scripgrid - -MODULE scripremap - -#if defined key_mpp_mpi - USE mpi -#endif - USE nctools - USE scrippar - USE scripgrid - - IMPLICIT NONE - - TYPE scripremaptype - INTEGER :: num_links - INTEGER :: num_wgts - TYPE(scripgridtype) :: src - TYPE(scripgridtype) :: dst - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_area - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_area - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_frac - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_frac - INTEGER, ALLOCATABLE, DIMENSION(:) :: src_address - INTEGER, ALLOCATABLE, DIMENSION(:) :: dst_address - REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: remap_matrix - CHARACTER(len=scriplen) :: src_grid_area_units - CHARACTER(len=scriplen) :: dst_grid_area_units - CHARACTER(len=scriplen) :: src_grid_frac_units - CHARACTER(len=scriplen) :: dst_grid_frac_units - CHARACTER(len=scriplen) :: title - CHARACTER(len=scriplen) :: normalization - CHARACTER(len=scriplen) :: map_method - CHARACTER(len=scriplen) :: history - CHARACTER(len=scriplen) :: conventions - END TYPE scripremaptype - -CONTAINS - - SUBROUTINE scripremap_read_work(cdfilename,remap) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - - INTEGER :: ncid, dimid, varid - LOGICAL :: lcorners - - lcorners=.TRUE. - - CALL scripremap_init(remap) - - CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_size',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%src%grid_size),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_size',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%dst%grid_size),& - & __LINE__,__MYFILE__) - - - IF (nf90_inq_dimid(ncid,'src_grid_corners',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%src%grid_corners),& - & __LINE__,__MYFILE__) - ELSE - lcorners=.FALSE. - remap%src%grid_corners=1 - ENDIF - - IF (lcorners) THEN - CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_corners',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%dst%grid_corners),& - & __LINE__,__MYFILE__) - ELSE - remap%dst%grid_corners=1 - ENDIF - - CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_rank',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%src%grid_rank),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_rank',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%dst%grid_rank),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'num_links',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%num_links),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%num_wgts),& - & __LINE__,__MYFILE__) - - CALL scripremap_alloc(remap) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_dims',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_dims',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lon),& - & __LINE__,__MYFILE__) - - IF (lcorners) THEN - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lon),& - & __LINE__,__MYFILE__) - - ELSE - - remap%src%grid_corner_lat(:,:) = 0.0 - remap%src%grid_corner_lon(:,:) = 0.0 - remap%dst%grid_corner_lat(:,:) = 0.0 - remap%dst%grid_corner_lon(:,:) = 0.0 - remap%src%grid_corner_lat_units = '' - remap%src%grid_corner_lon_units = '' - remap%dst%grid_corner_lat_units = '' - remap%dst%grid_corner_lon_units = '' - - ENDIF - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_imask',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_imask),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_imask',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_imask),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_area',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_area),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_area',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_area),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_frac',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_frac_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_frac),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_frac',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_frac_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_frac),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%remap_matrix),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',remap%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'normalization',remap%normalization),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'map_method',remap%map_method),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'history',remap%history),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'conventions',remap%conventions),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'dest_grid',remap%dst%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'source_grid',remap%src%title),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) - - END SUBROUTINE scripremap_read_work - - SUBROUTINE scripremap_read(cdfilename,remap) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - - CALL scripremap_read_work(cdfilename,remap) - - END SUBROUTINE scripremap_read - - - SUBROUTINE scripremap_read_sgl(cdfilename,remap,& - & mype,nproc,mycomm,linteronly) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - INTEGER :: mype,nproc,mycomm - LOGICAL :: linteronly - - INTEGER, DIMENSION(8) :: isizes - INTEGER :: ierr, ip - - IF (mype==0) THEN - CALL scripremap_read_work(cdfilename,remap) -#if defined key_mpp_mpi - isizes(1)=remap%src%grid_size - isizes(2)=remap%dst%grid_size - isizes(3)=remap%src%grid_corners - isizes(4)=remap%dst%grid_corners - isizes(5)=remap%src%grid_rank - isizes(6)=remap%dst%grid_rank - isizes(7)=remap%num_links - isizes(8)=remap%num_wgts - CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) - ELSE - CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) - CALL scripremap_init(remap) - remap%src%grid_size=isizes(1) - remap%dst%grid_size=isizes(2) - remap%src%grid_corners=isizes(3) - remap%dst%grid_corners=isizes(4) - remap%src%grid_rank=isizes(5) - remap%dst%grid_rank=isizes(6) - remap%num_links=isizes(7) - remap%num_wgts=isizes(8) - CALL scripremap_alloc(remap) -#endif - ENDIF - -#if defined key_mpp_mpi - - IF (.NOT.linteronly) THEN - - CALL mpi_bcast( remap%src%grid_dims, remap%src%grid_rank, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_center_lat, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_center_lon, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lat, remap%src%grid_corners*remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lon, remap%src%grid_corners*remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - - CALL mpi_bcast( remap%dst%grid_dims, remap%dst%grid_rank, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lat, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lon, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lat, remap%dst%grid_corners*remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lon, remap%dst%grid_corners*remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - - CALL mpi_bcast( remap%src_grid_area, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_area, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src_grid_frac, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_frac, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - - CALL mpi_bcast( remap%src%grid_center_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_center_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_imask_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_imask_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src_grid_area_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_area_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src_grid_frac_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_frac_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%title, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%normalization, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%map_method, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%history, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%conventions, scriplen, & - & mpi_character, 0, mycomm, ierr ) - ENDIF - - CALL mpi_bcast( remap%src_address, remap%num_links, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_address, remap%num_links, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%remap_matrix, remap%num_wgts*remap%num_links, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_imask, remap%src%grid_size, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_imask, remap%dst%grid_size, & - & mpi_integer, 0, mycomm, ierr ) - -#endif - END SUBROUTINE scripremap_read_sgl - - SUBROUTINE scripremap_write(cdfilename,remap) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - - INTEGER :: ncid - INTEGER :: dimsgs,dimdgs,dimsgc,dimdgc,dimsgr,dimdgr,dimnl,dimnw - INTEGER :: dims1(1),dims2(2) - INTEGER :: idsgd,iddgd,idsgea,iddgea,idsgeo,iddgeo - INTEGER :: idsgoa,idsgoo,iddgoa,iddgoo,idsgim,iddgim,idsgar,iddgar - INTEGER :: idsgf,iddgf,idsga,iddga,idsa,idda,idrm - - CALL nchdlerr(nf90_create(TRIM(cdfilename),nf90_clobber,ncid), & - & __LINE__, __MYFILE__ ) - - CALL nchdlerr(nf90_def_dim(ncid,'src_grid_size',& - & remap%src%grid_size,dimsgs),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_size',& - & remap%dst%grid_size,dimdgs),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'src_grid_corners',& - & remap%src%grid_corners,dimsgc),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_corners',& - & remap%dst%grid_corners,dimdgc),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'src_grid_rank',& - & remap%src%grid_rank,dimsgr),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_rank',& - & remap%dst%grid_rank,dimdgr),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'num_links',& - & remap%num_links,dimnl),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& - & remap%num_wgts,dimnw),& - & __LINE__,__MYFILE__) - - dims1(1)=dimsgr - CALL nchdlerr(nf90_def_var(ncid,'src_grid_dims',& - & nf90_int,dims1,idsgd),& - & __LINE__,__MYFILE__) - - dims1(1)=dimdgr - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_dims',& - & nf90_int,dims1,iddgd), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lat',& - & nf90_double,dims1,idsgea), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lat',& - & nf90_double,dims1,iddgea), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lon',& - & nf90_double,dims1,idsgeo), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lon',& - & nf90_double,dims1,iddgeo), & - & __LINE__,__MYFILE__) - - dims2(1)=dimsgc - dims2(2)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lat',& - & nf90_double,dims2,idsgoa), & - & __LINE__,__MYFILE__) - - dims2(1)=dimsgc - dims2(2)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lon',& - & nf90_double,dims2,idsgoo), & - & __LINE__,__MYFILE__) - - dims2(1)=dimdgc - dims2(2)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lat',& - & nf90_double,dims2,iddgoa), & - & __LINE__,__MYFILE__) - - dims2(1)=dimdgc - dims2(2)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lon',& - & nf90_double,dims2,iddgoo), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_imask',& - & nf90_int,dims1,idsgim), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_imask',& - & nf90_int,dims1,iddgim), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_area',& - & nf90_double,dims1,idsga), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_area',& - & nf90_double,dims1,iddga), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_frac',& - & nf90_double,dims1,idsgf), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_frac',& - & nf90_double,dims1,iddgf), & - & __LINE__,__MYFILE__) - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'src_address',& - & nf90_int,dims1,idsa), & - & __LINE__,__MYFILE__) - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'dst_address',& - & nf90_int,dims1,idda), & - & __LINE__,__MYFILE__) - - dims2(1)=dimnw - dims2(2)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& - & nf90_double,dims2,idrm), & - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_att(ncid,idsgea,'units',& - & remap%src%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgea,'units',& - & remap%dst%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgeo,'units',& - & remap%src%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgeo,'units',& - & remap%dst%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgoa,'units',& - & remap%src%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgoo,'units',& - & remap%src%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgoa,'units',& - & remap%dst%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgoo,'units',& - & remap%dst%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgim,'units',& - & remap%src%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgim,'units',& - & remap%dst%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsga,'units',& - & remap%src_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddga,'units',& - & remap%dst_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgf,'units',& - & remap%src_grid_frac_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgf,'units',& - & remap%dst_grid_frac_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& - & remap%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'normalization',& - & remap%normalization),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'map_method',& - & remap%map_method),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'history',& - & remap%history),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'conventions',& - & remap%conventions),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'dest_grid',& - & remap%dst%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'source_grid',& - & remap%src%title),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgd,remap%src%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,iddgd,remap%dst%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgea,remap%src%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,iddgea,remap%dst%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgeo,remap%src%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,iddgeo,remap%dst%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgoa,remap%src%grid_corner_lat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsgoo,remap%src%grid_corner_lon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgoa,remap%dst%grid_corner_lat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgoo,remap%dst%grid_corner_lon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsgim,remap%src%grid_imask),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgim,remap%dst%grid_imask),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsga,remap%src_grid_area),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddga,remap%dst_grid_area),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsgf,remap%src_grid_frac),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgf,remap%dst_grid_frac),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsa,remap%src_address),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idda,remap%dst_address),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idrm,remap%remap_matrix),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) - - END SUBROUTINE scripremap_write - - SUBROUTINE scripremap_init(remap) - - TYPE(scripremaptype) :: remap - - CALL scripgrid_init(remap%src) - CALL scripgrid_init(remap%dst) - remap%num_links = 0 - remap%num_wgts = 0 - remap%title='' - remap%normalization='' - remap%map_method='' - remap%history='' - remap%conventions='' - remap%src_grid_area_units='' - remap%dst_grid_area_units='' - remap%src_grid_frac_units='' - remap%dst_grid_frac_units='' - - END SUBROUTINE scripremap_init - - SUBROUTINE scripremap_alloc(remap) - - TYPE(scripremaptype) :: remap - - IF ( (remap%num_links == 0) .OR. & - & (remap%num_wgts == 0) ) THEN - WRITE(*,*)'scripremaptype not initialized' - CALL abort - ENDIF - - CALL scripgrid_alloc(remap%src) - CALL scripgrid_alloc(remap%dst) - - ALLOCATE( & - & remap%src_grid_area(remap%src%grid_size), & - & remap%dst_grid_area(remap%dst%grid_size), & - & remap%src_grid_frac(remap%src%grid_size), & - & remap%dst_grid_frac(remap%dst%grid_size), & - & remap%src_address(remap%num_links), & - & remap%dst_address(remap%num_links), & - & remap%remap_matrix(remap%num_wgts, remap%num_links) & - & ) - - END SUBROUTINE scripremap_alloc - - SUBROUTINE scripremap_dealloc(remap) - - TYPE(scripremaptype) :: remap - - DEALLOCATE( & - & remap%src_grid_area, & - & remap%dst_grid_area, & - & remap%src_grid_frac, & - & remap%dst_grid_frac, & - & remap%src_address, & - & remap%dst_address, & - & remap%remap_matrix & - & ) - - CALL scripgrid_dealloc(remap%src) - CALL scripgrid_dealloc(remap%dst) - - CALL scripremap_init(remap) - - END SUBROUTINE scripremap_dealloc - -END MODULE scripremap - -MODULE parinter - -#if defined key_mpp_mpi - USE mpi -#endif - USE scripremap - USE scrippar - USE nctools - - IMPLICIT NONE - - ! Type to contains interpolation information - ! (like what is in scripremaptype) and message - ! passing information - - TYPE parinterinfo - ! Number of local links - INTEGER :: num_links - ! Destination side - INTEGER, POINTER, DIMENSION(:) :: dst_address - ! Source addresses and work array - INTEGER, POINTER, DIMENSION(:) :: src_address - ! Local remap matrix - REAL(scripdp), POINTER, DIMENSION(:,:) :: remap_matrix - ! Message passing information - ! Array of local addresses for send buffer - ! packing - INTEGER, POINTER, DIMENSION(:) :: send_address - ! Sending bookkeeping - INTEGER :: nsendtot - INTEGER, POINTER, DIMENSION(:) :: nsend,nsdisp - ! Receiving bookkeeping - INTEGER :: nrecvtot - INTEGER, POINTER, DIMENSION(:) :: nrecv,nrdisp - END TYPE parinterinfo - -CONTAINS - - SUBROUTINE parinter_init( mype, nproc, mpi_comm, & - & nsrclocpoints, nsrcglopoints, srcmask, srcgloind, & - & ndstlocpoints, ndstglopoints, dstmask, dstgloind, & - & remap, pinfo, lcommout, commoutprefix, iunit ) - - ! Setup interpolation based on SCRIP format weights in - ! remap and the source/destination grids information. - - ! Procedure: - - ! 1) A global SCRIP remapping file is read on all processors. - ! 2) Find local destination points in the global grid. - ! 3) Find which processor needs source data and setup buffer - ! information for sending data. - ! 4) Construct new src remapping for buffer received - - ! All information is stored in the TYPE(parinterinfo) output - ! data type - - ! Input arguments. - - ! Message passing information - INTEGER, INTENT(IN) :: mype, nproc, mpi_comm - ! Source grid local and global number of grid points - INTEGER, INTENT(IN) :: nsrclocpoints, nsrcglopoints - ! Source integer mask (0/1) for SCRIP compliance - INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcmask - ! Source global addresses of each local grid point - INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcgloind - ! Destination grid local and global number of grid points - INTEGER, INTENT(IN) :: ndstlocpoints, ndstglopoints - ! Destination integer mask (0/1) for SCRIP compliance - INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstmask - ! Destination global addresses of each local grid point - INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstgloind - ! SCRIP remapping data - TYPE(scripremaptype) :: remap - ! Switch for output communication patterns - LOGICAL :: lcommout - CHARACTER(len=*) :: commoutprefix - ! Unit to use for output - INTEGER :: iunit - - ! Output arguments - - ! Interpolation and message passing information - TYPE(parinterinfo), INTENT(OUT) :: pinfo - - ! Local variable - - ! Variable for glocal <-> local address/pe information - INTEGER, DIMENSION(nsrcglopoints) :: ilsrcmppmap, ilsrclocind - INTEGER, DIMENSION(nsrcglopoints) :: igsrcmppmap, igsrclocind - INTEGER, DIMENSION(ndstglopoints) :: ildstmppmap, ildstlocind - INTEGER, DIMENSION(ndstglopoints) :: igdstmppmap, igdstlocind - INTEGER, DIMENSION(nsrcglopoints) :: isrcpe,isrcpetmp - INTEGER, DIMENSION(nsrcglopoints) :: isrcaddtmp - INTEGER, DIMENSION(0:nproc-1) :: isrcoffset - INTEGER, DIMENSION(nproc) :: isrcno, isrcoff, isrccur - INTEGER, DIMENSION(nproc) :: ircvoff, ircvcur - INTEGER, DIMENSION(:), ALLOCATABLE :: isrctot, ircvtot - - ! Misc variable - INTEGER :: i,n,pe - INTEGER :: istatus - CHARACTER(len=256) :: cdfile - - ! Check that masks are consistent. - - ! Remark: More consistency tests between remapping information - ! and input argument could be code, but for now we settle - ! for checking the masks. - - ! Source grid - - DO i=1,nsrclocpoints - IF (srcmask(i)/=remap%src%grid_imask(srcgloind(i))) THEN - WRITE(iunit,*)'Source imask is inconsistent at ' - WRITE(iunit,*)'global index = ',srcgloind(i) - WRITE(iunit,*)'Source mask = ',srcmask(i) - WRITE(iunit,*)'Remap mask = ',remap%src%grid_imask(srcgloind(i)) - WRITE(iunit,*)'Latitude = ',remap%src%grid_center_lat(srcgloind(i)) - WRITE(iunit,*)'Longitude = ',remap%src%grid_center_lon(srcgloind(i)) - CALL flush(iunit) - CALL abort - ENDIF - ENDDO - - ! Destination grid - - DO i=1,ndstlocpoints - IF (dstmask(i)/=remap%dst%grid_imask(dstgloind(i))) THEN - WRITE(iunit,*)'Destination imask is inconsistent at ' - WRITE(iunit,*)'global index = ',dstgloind(i) - WRITE(iunit,*)'Destin mask = ',dstmask(i) - WRITE(iunit,*)'Remap mask = ',remap%dst%grid_imask(dstgloind(i)) - WRITE(iunit,*)'Latitude = ',remap%dst%grid_center_lat(dstgloind(i)) - WRITE(iunit,*)'Longitude = ',remap%dst%grid_center_lon(dstgloind(i)) - CALL flush(iunit) - CALL abort - ENDIF - ENDDO - - ! Setup global to local and vice versa mappings. - - ilsrcmppmap(:)=-1 - ilsrclocind(:)=0 - ildstmppmap(:)=-1 - ildstlocind(:)=0 - - DO i=1,nsrclocpoints - ilsrcmppmap(srcgloind(i))=mype - ilsrclocind(srcgloind(i))=i - ENDDO - - DO i=1,ndstlocpoints - ildstmppmap(dstgloind(i))=mype - ildstlocind(dstgloind(i))=i - ENDDO - -#if defined key_mpp_mpi - CALL mpi_allreduce(ilsrcmppmap,igsrcmppmap,nsrcglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) - CALL mpi_allreduce(ilsrclocind,igsrclocind,nsrcglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) - CALL mpi_allreduce(ildstmppmap,igdstmppmap,ndstglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) - CALL mpi_allreduce(ildstlocind,igdstlocind,ndstglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) -#else - igsrcmppmap(:)=ilsrcmppmap(:) - igsrclocind(:)=ilsrclocind(:) - igdstmppmap(:)=ildstmppmap(:) - igdstlocind(:)=ildstlocind(:) -#endif - - ! Optionally construct an ascii file listing what src and - ! dest points belongs to which task - - ! Since igsrcmppmap and igdstmppmap are global data only do - ! this for mype==0. - - IF (lcommout.AND.(mype==0)) THEN - WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_srcmppmap_',mype+1,'.dat' - OPEN(9,file=cdfile) - DO i=1,nsrcglopoints - WRITE(9,*)remap%src%grid_center_lat(i),& - & remap%src%grid_center_lon(i), & - & igsrcmppmap(i)+1,remap%src%grid_imask(i) - ENDDO - CLOSE(9) - WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dstmppmap_',mype+1,'.dat' - OPEN(9,file=cdfile) - DO i=1,ndstglopoints - WRITE(9,*)remap%dst%grid_center_lat(i),& - & remap%dst%grid_center_lon(i), & - & igdstmppmap(i)+1,remap%dst%grid_imask(i) - ENDDO - CLOSE(9) - ENDIF - - ! - ! Standard interpolation in serial case is - ! - ! DO n=1,remap%num_links - ! zdst(remap%dst_address(n)) = zdst(remap%dst_address(n)) + & - ! & remap%remap_matrix(1,n)*zsrc(remap%src_address(n)) - ! END DO - ! - - ! In parallel we need to first find local number of links - - pinfo%num_links=0 - DO i=1,remap%num_links - IF (igdstmppmap(remap%dst_address(i))==mype) & - & pinfo%num_links=pinfo%num_links+1 - ENDDO - ALLOCATE(pinfo%dst_address(pinfo%num_links),& - & pinfo%src_address(pinfo%num_links),& - & pinfo%remap_matrix(1,pinfo%num_links)) - - ! Get local destination addresses - - n=0 - DO i=1,remap%num_links - IF (igdstmppmap(remap%dst_address(i))==mype) THEN - n=n+1 - pinfo%dst_address(n)=& - & igdstlocind(remap%dst_address(i)) - pinfo%remap_matrix(:,n)=& - & remap%remap_matrix(:,i) - ENDIF - ENDDO - - ! Get sending processors maps. - - ! The same data point might need to be sent to many processors - ! so first construct a map for processors needing the data - - isrcpe(:)=-1 - DO i=1,remap%num_links - IF (igdstmppmap(remap%dst_address(i))==mype) THEN - isrcpe(remap%src_address(i))=& - & igsrcmppmap(remap%src_address(i)) - ENDIF - ENDDO - - ! Optionally write a set if ascii file listing which tasks - ! mype needs to send to communicate with - - IF (lcommout) THEN - ! Destination processors - WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dsts_',mype+1,'.dat' - OPEN(9,file=cdfile) - DO pe=0,nproc-1 - IF (pe==mype) THEN - isrcpetmp(:)=isrcpe(:) - ENDIF -#if defined key_mpp_mpi - CALL mpi_bcast(isrcpetmp,nsrcglopoints,mpi_integer,pe,mpi_comm,istatus) -#endif - DO i=1,nsrcglopoints - IF (isrcpetmp(i)==mype) THEN - WRITE(9,*)remap%src%grid_center_lat(i),& - & remap%src%grid_center_lon(i), & - & pe+1,mype+1 - ENDIF - ENDDO - ENDDO - CLOSE(9) - ENDIF - - ! Get number of points to send to each processor - - ALLOCATE(pinfo%nsend(0:nproc-1)) - isrcno(:)=0 - DO i=1,nsrcglopoints - IF (isrcpe(i)>=0) THEN - isrcno(isrcpe(i)+1)=isrcno(isrcpe(i)+1)+1 - ENDIF - ENDDO -#if defined key_mpp_mpi - CALL mpi_alltoall(isrcno,1,mpi_integer, & - & pinfo%nsend(0:nproc-1),1,mpi_integer, & - & mpi_comm,istatus) -#else - pinfo%nsend(0:nproc-1) = isrcno(1:nproc) -#endif - pinfo%nsendtot=SUM(pinfo%nsend(0:nproc-1)) - - ! Construct sending buffer mapping. Data is mapping in - ! processor order. - - ALLOCATE(pinfo%send_address(pinfo%nsendtot)) - - ! Temporary arrays for mpi all to all. - - ALLOCATE(isrctot(SUM(isrcno(1:nproc)))) - ALLOCATE(ircvtot(SUM(pinfo%nsend(0:nproc-1)))) - - ! Offset for message parsing - - isrcoff(1)=0 - ircvoff(1)=0 - DO i=1,nproc-1 - isrcoff(i+1) = isrcoff(i) + isrcno(i) - ircvoff(i+1) = pinfo%nsend(i-1) + ircvoff(i) - ENDDO - - ! Pack indices i into a buffer - - isrccur(:)=0 - DO i=1,nsrcglopoints - IF (isrcpe(i)>=0) THEN - isrccur(isrcpe(i)+1)=isrccur(isrcpe(i)+1)+1 - isrctot(isrccur(isrcpe(i)+1)+isrcoff(isrcpe(i)+1)) = i - ENDIF - ENDDO - - ! Send the data - -#if defined key_mpp_mpi - CALL mpi_alltoallv(& - & isrctot,isrccur,isrcoff,mpi_integer, & - & ircvtot,pinfo%nsend(0:nproc-1),ircvoff,mpi_integer, & - & mpi_comm,istatus) -#else - ircvtot(:)=isrctot(:) -#endif - - ! Get the send address. ircvtot will at this point contain the - ! addresses in the global index needed for message passing - - DO i=1,pinfo%nsendtot - pinfo%send_address(i)=igsrclocind(ircvtot(i)) - ENDDO - - ! Deallocate the mpi all to all arrays - - DEALLOCATE(ircvtot,isrctot) - - ! Get number of points to receive to each processor - - ALLOCATE(pinfo%nrecv(0:nproc-1)) - pinfo%nrecv(0:nproc-1)=0 - DO i=1,nsrcglopoints - IF (isrcpe(i)>=0 .AND. isrcpe(i)=0 .AND. isrcpe(i)0) THEN - CALL nchdlerr(nf90_def_dim(ncid,'num_links',& - & pinfo%num_links,dimnl),& - & __LINE__,__MYFILE__) - ENDIF - - CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& - & 1,dimnw),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - CALL nchdlerr(nf90_def_dim(ncid,'nsendtot',& - & pinfo%nsendtot,dimnst),& - & __LINE__,__MYFILE__) - ENDIF - - IF (pinfo%nrecvtot>0) THEN - CALL nchdlerr(nf90_def_dim(ncid,'nrecvtot',& - & pinfo%nrecvtot,dimnrt),& - & __LINE__,__MYFILE__) - ENDIF - - CALL nchdlerr(nf90_def_dim(ncid,'nproc',& - & nproc,dimnpr),& - & __LINE__,__MYFILE__) - - IF (pinfo%num_links>0) THEN - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'dst_address',& - & nf90_int,dims1,idda),& - & __LINE__,__MYFILE__) - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'src_address',& - & nf90_int,dims1,idsa),& - & __LINE__,__MYFILE__) - - dims2(1)=dimnw - dims2(2)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& - & nf90_double,dims2,idrm),& - & __LINE__,__MYFILE__) - - ENDIF - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nsend',& - & nf90_int,dims1,idns),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - - dims1(1)=dimnst - CALL nchdlerr(nf90_def_var(ncid,'send_address',& - & nf90_int,dims1,idsaa),& - & __LINE__,__MYFILE__) - - ENDIF - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nrecv',& - & nf90_int,dims1,idnr),& - & __LINE__,__MYFILE__) - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nsdisp',& - & nf90_int,dims1,idnsp),& - & __LINE__,__MYFILE__) - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nrdisp',& - & nf90_int,dims1,idnrp),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) - - - IF (pinfo%num_links>0) THEN - - CALL nchdlerr(nf90_put_var(ncid,idda,pinfo%dst_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsa,pinfo%src_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idrm,pinfo%remap_matrix),& - & __LINE__,__MYFILE__) - - ENDIF - - CALL nchdlerr(nf90_put_var(ncid,idns,pinfo%nsend(0:nproc-1)),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - - CALL nchdlerr(nf90_put_var(ncid,idsaa,pinfo%send_address),& - & __LINE__,__MYFILE__) - - ENDIF - - CALL nchdlerr(nf90_put_var(ncid,idnr,pinfo%nrecv(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idnsp,pinfo%nsdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idnrp,pinfo%nrdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) - - END SUBROUTINE parinter_write - - SUBROUTINE parinter_read( mype, nproc, & - & nsrcglopoints, ndstglopoints, & - & pinfo, cdpath, cdprefix, lexists ) - - ! Write pinfo information in a netCDF file in order to - ! be able to read it rather than calling parinter_init - - ! Input arguments. - - ! Message passing information - INTEGER, INTENT(IN) :: mype, nproc - ! Source grid local global number of grid points - INTEGER, INTENT(IN) :: nsrcglopoints - ! Destination grid global number of grid points - INTEGER, INTENT(IN) :: ndstglopoints - ! Interpolation and message passing information - TYPE(parinterinfo), INTENT(OUT) :: pinfo - ! Does the information exists - LOGICAL :: lexists - ! Path and file prefix - CHARACTER(len=*) :: cdpath, cdprefix - - ! Local variable - - ! Misc variable - CHARACTER(len=1024) :: cdfile - INTEGER :: ncid, dimid, varid, num_wgts - - WRITE(cdfile,'(A,2(I8.8,A),2(I4.4,A),A)') & - & TRIM(cdpath)//'/'//TRIM(cdprefix)//'_', & - & nsrcglopoints,'_',ndstglopoints,'_',mype,'_',nproc,'.nc' - - - lexists=nf90_open(TRIM(cdfile),nf90_nowrite,ncid)==nf90_noerr - - IF (lexists) THEN - - ! If num_links is not present we assume it to be zero. - - IF (nf90_inq_dimid(ncid,'num_links',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=pinfo%num_links),& - & __LINE__,__MYFILE__) - ELSE - pinfo%num_links=0 - ENDIF - - CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=num_wgts),& - & __LINE__,__MYFILE__) - IF (num_wgts/=1) THEN - WRITE(0,*)'parinter_read: num_wgts has to be 1 for now' - CALL abort - ENDIF - - ! If nsendtot is not present we assume it to be zero. - - IF (nf90_inq_dimid(ncid,'nsendtot',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=pinfo%nsendtot),& - & __LINE__,__MYFILE__) - ELSE - pinfo%nsendtot=0 - ENDIF - - IF(nf90_inq_dimid(ncid,'nrecvtot',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=pinfo%nrecvtot),& - & __LINE__,__MYFILE__) - ELSE - pinfo%nrecvtot=0 - ENDIF - - ALLOCATE(pinfo%dst_address(pinfo%num_links),& - & pinfo%src_address(pinfo%num_links),& - & pinfo%remap_matrix(num_wgts,pinfo%num_links),& - & pinfo%nsend(0:nproc-1),& - & pinfo%send_address(pinfo%nsendtot),& - & pinfo%nrecv(0:nproc-1),& - & pinfo%nsdisp(0:nproc-1),& - & pinfo%nrdisp(0:nproc-1)) - - IF (pinfo%num_links>0) THEN - CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%dst_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%src_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%remap_matrix),& - & __LINE__,__MYFILE__) - ENDIF - - CALL nchdlerr(nf90_inq_varid(ncid,'nsend',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsend(0:nproc-1)),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - - CALL nchdlerr(nf90_inq_varid(ncid,'send_address',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%send_address),& - & __LINE__,__MYFILE__) - - ENDIF - - CALL nchdlerr(nf90_inq_varid(ncid,'nrecv',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrecv(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'nsdisp',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'nrdisp',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) - - ENDIF - - END SUBROUTINE parinter_read - -END MODULE parinter - -MODULE interinfo - - ! Parallel regridding information - - USE parinter - - IMPLICIT NONE - - SAVE - - ! IFS to NEMO - - TYPE(parinterinfo) :: gausstoT,gausstoUV - - ! NEMO to IFS - - TYPE(parinterinfo) :: Ttogauss, UVtogauss - - ! Read parinterinfo on task 0 only and broadcast. - - LOGICAL :: lparbcast = .FALSE. - -END MODULE interinfo -#endif diff --git a/src/ifs_notused.F90 b/src/ifs_notused.F90 deleted file mode 100644 index b483bf962..000000000 --- a/src/ifs_notused.F90 +++ /dev/null @@ -1,362 +0,0 @@ -#if defined (__ifsinterface) -! Routines usually provided by the library that are currently -! not implemented for FESOM2. -! -! -Original code by Kristian Mogensen, ECMWF. - -SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) - - ! Initialize the NEMO mppio server - - IMPLICIT NONE - INTEGER :: icomm - LOGICAL :: lnemoioserver - - WRITE(*,*)'No mpp_ioserver' - CALL abort - -END SUBROUTINE nemogcmcoup_init_ioserver - - -SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) - - ! Initialize the NEMO mppio server - - IMPLICIT NONE - INTEGER :: icomm - - WRITE(*,*)'No mpp_ioserver' - CALL abort - -END SUBROUTINE nemogcmcoup_init_ioserver_2 - - -SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & - & nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d ) - - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the ORCA grid to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - IMPLICIT NONE - - ! Arguments - REAL(wpIFS), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints,nlev - - ! Local variables - - WRITE(0,*)'nemogcmcoup_mlflds_get should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_mlflds_get - - -SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & - & nopoints, pgsst, pgice, pgucur, pgvcur ) - - ! Interpolate sst, ice and currents from the ORCA grid - ! to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - - IMPLICIT NONE - - - ! Arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints - ! Local arrays of sst, ice and currents - REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur - - ! Local variables - - WRITE(0,*)'nemogcmcoup_get should not be called with FESOM' - CALL abort - -END SUBROUTINE nemogcmcoup_get - - -SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & - & nopoints, pgssh, pgmld, pg20d, pgsss, & - & pgtem300, pgsal300 ) - - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the ORCA grid to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - IMPLICIT NONE - - ! Arguments - REAL(wpIFS), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & - & pgtem300, pgsal300 - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints - - ! Local variables - - WRITE(0,*)'nemogcmcoup_exflds_get should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_exflds_get - - -SUBROUTINE nemogcmcoup_get_1way( mype, npes, icomm ) - - ! Interpolate sst, ice and currents from the ORCA grid - ! to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - IMPLICIT NONE - - - ! Arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - - ! Local variables - - WRITE(0,*)'nemogcmcoup_get_1way should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_get_1way - - -SUBROUTINE nemogcmcoup_mlinit( mype, npes, icomm, & - & nlev, nopoints, pdep, pmask ) - - ! Get information about the vertical discretization of the ocean model - - ! nlevs are maximum levels on input and actual number levels on output - - USE par_kind - - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Grid information - INTEGER, INTENT(INOUT) :: nlev, nopoints - REAL(wpIFS), INTENT(OUT), DIMENSION(nlev) :: pdep - REAL(wpIFS), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask - - ! Local variables - - ! dummy argument with explicit INTENT(OUT) declaration needs an explicit value - pdep=0. - pmask=0. - - WRITE(0,*)'nemogcmcoup_mlinit should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_mlinit - - -SUBROUTINE nemogcmcoup_update( mype, npes, icomm, & - & npoints, pgutau, pgvtau, & - & pgqsr, pgqns, pgemp, kt, ldebug ) - - ! Update fluxes in nemogcmcoup_data by parallel - ! interpolation of the input gaussian grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Fluxes on the Gaussian grid. - INTEGER, INTENT(IN) :: npoints - REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & - & pgutau, pgvtau, pgqsr, pgqns, pgemp - ! Current time step - INTEGER, INTENT(in) :: kt - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_update should be called with with.' - CALL abort - -END SUBROUTINE nemogcmcoup_update - -SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & - & npoints, pgsst, pgtsk, kt, ldebug ) - - ! Update addetiona in nemogcmcoup_data by parallel - ! interpolation of the input gaussian grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Input on the Gaussian grid. - INTEGER, INTENT(IN) :: npoints - REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & - & pgsst, pgtsk - ! Current time step - INTEGER, INTENT(in) :: kt - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom. Commented ABORT. Proceeding...' - !CALL abort - - -END SUBROUTINE nemogcmcoup_update_add - - -SUBROUTINE nemogcmcoup_wam_coupinit( mype, npes, icomm, & - & nlocpoints, nglopoints, & - & nlocmsk, ngloind, iunit ) - - ! Initialize single executable coupling between WAM and NEMO - ! This is called from WAM. - - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype,npes,icomm - ! WAM grid information - ! Number of local and global points - INTEGER, INTENT(IN) :: nlocpoints, nglopoints - ! Integer mask and global indices - INTEGER, DIMENSION(nlocpoints), INTENT(IN) :: nlocmsk, ngloind - ! Unit for output in parinter_init - INTEGER :: iunit - - WRITE(0,*)'Wam coupling not implemented for FESOM' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_coupinit - - -SUBROUTINE nemogcmcoup_wam_get( mype, npes, icomm, & - & nopoints, pwsst, pwicecov, pwicethk, & - & pwucur, pwvcur, licethk ) - - ! Interpolate from the ORCA grid - ! to the WAM grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - IMPLICIT NONE - - ! Arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number WAM grid points - INTEGER, INTENT(IN) :: nopoints - ! Local arrays of sst, ice cover, ice thickness and currents - REAL(wpIFS), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur - LOGICAL :: licethk - - ! Local variables - - WRITE(0,*)'nemogcmcoup_wam_get should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_get - - -SUBROUTINE nemogcmcoup_wam_update( mype, npes, icomm, & - & npoints, pwswh, pwmwp, & - & pwphioc, pwtauoc, pwstrn, & - & pwustokes, pwvstokes, & - & cdtpro, ldebug ) - - ! Update fluxes in nemogcmcoup_data by parallel - ! interpolation of the input WAM grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Data on the WAM grid. - INTEGER, INTENT(IN) :: npoints - REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & - & pwswh, pwmwp, pwphioc, pwtauoc, pwstrn, pwustokes, pwvstokes - ! Current time - CHARACTER(len=14), INTENT(IN) :: cdtpro - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_wam_update should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_update - - -SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & - & pwutau, pwvtau, pwuv10n, pwphif,& - & cdtpro, ldebug ) - - ! Update stresses in nemogcmcoup_data by parallel - ! interpolation of the input WAM grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Data on the WAM grid. - INTEGER, INTENT(IN) :: npoints - REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & - & pwutau, pwvtau, pwuv10n, pwphif - ! Current time step - CHARACTER(len=14), INTENT(IN) :: cdtpro - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_wam_update_stress should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_update_stress -#endif From ad1ddd7d29719b629d6630a62af417187bc807f7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 11 Nov 2021 11:00:36 +0100 Subject: [PATCH 545/909] Revert "move the merged ifs_* files to the ifs_interface directory" This reverts commit bca417a6eca26dba4f62432975c9c3029d6e9538. --- src/ifs_interface.F90 | 1506 +++++++++++++++++++++++ src/ifs_interface/ifs_notused.F90 | 11 +- src/ifs_modules.F90 | 1859 +++++++++++++++++++++++++++++ src/ifs_notused.F90 | 362 ++++++ 4 files changed, 3737 insertions(+), 1 deletion(-) create mode 100644 src/ifs_interface.F90 create mode 100644 src/ifs_modules.F90 create mode 100644 src/ifs_notused.F90 diff --git a/src/ifs_interface.F90 b/src/ifs_interface.F90 new file mode 100644 index 000000000..4467dfa9a --- /dev/null +++ b/src/ifs_interface.F90 @@ -0,0 +1,1506 @@ +#if defined (__ifsinterface) +!===================================================== +! IFS interface for calling FESOM2 as a subroutine. +! +! -Original code for NEMO by Kristian Mogensen, ECMWF. +! -Adapted to FESOM2 by Thomas Rackow, AWI, 2018. +!----------------------------------------------------- + +MODULE nemogcmcoup_steps + INTEGER :: substeps !per IFS timestep +END MODULE nemogcmcoup_steps + +SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & + & lwaveonly, iatmunit, lwrite ) + + ! Initialize the FESOM model for single executable coupling + + USE par_kind !in ifs_modules.F90 + USE g_PARSUP, only: MPI_COMM_FESOM, mype + USE g_config, only: dt + USE g_clock, only: timenew, daynew, yearnew, month, day_in_month + USE nemogcmcoup_steps, ONLY : substeps + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: icomm + ! Initial date (e.g. 20170906), time, initial timestep and final time step + INTEGER, INTENT(OUT) :: inidate, initime, itini, itend + ! Length of the time step + REAL(wpIFS), INTENT(OUT) :: zstp + + ! inherited from interface to NEMO, not used here: + ! Coupling to waves only + LOGICAL, INTENT(IN) :: lwaveonly + ! Logfile unit (used if >=0) + INTEGER :: iatmunit + ! Write to this unit + LOGICAL :: lwrite + ! FESOM might perform substeps + INTEGER :: itend_fesom + INTEGER :: i + NAMELIST/namfesomstep/substeps + + ! TODO hard-coded here, put in namelist + substeps=2 + OPEN(9,file='namfesomstep.in') + READ(9,namfesomstep) + CLOSE(9) + + MPI_COMM_FESOM=icomm + itini = 1 + CALL main_initialize(itend_fesom) !also sets mype and npes + itend=itend_fesom/substeps + if(mype==0) then + WRITE(0,*)'!======================================' + WRITE(0,*)'! FESOM is initialized from within IFS.' + WRITE(0,*)'! get MPI_COMM_FESOM. =================' + WRITE(0,*)'! main_initialize done. ===============' + endif + + ! Set more information for the caller + + ! initial date and time (time is not used) + inidate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 + initime = 0 + if(mype==0) then + WRITE(0,*)'! FESOM initial date is ', inidate ,' ======' + WRITE(0,*)'! FESOM substeps are ', substeps ,' ======' + endif + + ! fesom timestep (as seen by IFS) + zstp = REAL(substeps,wpIFS)*dt + if(mype==0) then + WRITE(0,*)'! FESOM timestep as seen by IFS is ', real(zstp,4), 'sec (',substeps,'xdt)' + WRITE(0,*)'!======================================' + endif + +END SUBROUTINE nemogcmcoup_init + + +SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & + & npoints, nlocmsk, ngloind ) + + ! FESOM modules + USE g_PARSUP, only: mype, npes, myDim_nod2D, eDim_nod2D, myDim_elem2D, eDim_elem2D, eXDim_elem2D, & + myDim_edge2D, eDim_edge2D, myList_nod2D, myList_elem2D + USE MOD_MESH + !USE o_MESH, only: nod2D, elem2D + USE g_init2timestepping, only: meshinmod + + ! Initialize single executable coupling + USE parinter + USE scripremap + USE interinfo + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mypeIN,npesIN,icomm + ! Gaussian grid information + ! Number of points + INTEGER, INTENT(IN) :: npoints + ! Integer mask and global indices + INTEGER, DIMENSION(npoints), INTENT(IN) :: nlocmsk, ngloind + INTEGER :: iunit = 0 + + ! Local variables + type(t_mesh), target :: mesh + integer , pointer :: nod2D + integer , pointer :: elem2D + + ! Namelist containing the file names of the weights + CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & + & cdfile_T_to_gauss, cdfile_UV_to_gauss + CHARACTER(len=256) :: cdpathdist + LOGICAL :: lwritedist, lreaddist + LOGICAL :: lcommout + CHARACTER(len=128) :: commoutprefix + NAMELIST/namfesomcoup/cdfile_gauss_to_T,& + & cdfile_gauss_to_UV,& + & cdfile_T_to_gauss,& + & cdfile_UV_to_gauss,& + & cdpathdist, & + & lreaddist, & + & lwritedist, & + & lcommout, & + & commoutprefix,& + & lparbcast + + ! Global number of gaussian gridpoints + INTEGER :: nglopoints + ! Ocean grids accessed with NEMO modules + INTEGER :: noglopoints,nopoints + INTEGER, ALLOCATABLE, DIMENSION(:) :: omask,ogloind + ! SCRIP remapping data structures. + TYPE(scripremaptype) :: remap_gauss_to_T, remap_T_to_gauss, & + & remap_gauss_to_UV, remap_UV_to_gauss + ! Misc variables + INTEGER :: i,j,k,ierr + LOGICAL :: lexists + + ! associate the mesh, only what is needed here + ! #include "associate_mesh.h" + mesh = meshinmod + nod2D => mesh%nod2D + elem2D => mesh%elem2D + + + ! here FESOM knows about the (total number of) MPI tasks + + if(mype==0) then + write(*,*) 'MPI has been initialized in the atmospheric model' + write(*, *) 'Running on ', npes, ' PEs' + end if + + ! Read namelists + + cdfile_gauss_to_T = 'gausstoT.nc' + cdfile_gauss_to_UV = 'gausstoUV.nc' + cdfile_T_to_gauss = 'Ttogauss.nc' + cdfile_UV_to_gauss = 'UVtogauss.nc' + lcommout = .FALSE. + commoutprefix = 'parinter_comm' + cdpathdist = './' + lreaddist = .FALSE. + lwritedist = .FALSE. + + OPEN(9,file='namfesomcoup.in') + READ(9,namfesomcoup) + CLOSE(9) + + ! Global number of Gaussian gridpoints + + CALL mpi_allreduce( npoints, nglopoints, 1, & + & mpi_integer, mpi_sum, icomm, ierr) + + + if(mype==0) then + WRITE(0,*)'!======================================' + WRITE(0,*)'! SCALARS =============================' + + WRITE(0,*)'Update FESOM global scalar points' + endif + + noglopoints=nod2D + nopoints=myDim_nod2d + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + omask(:)= 1 ! all points are ocean points + ogloind(1:myDim_nod2d)= myList_nod2D(1:myDim_nod2d) ! global index for local point number + + ! Could be helpful later: + ! Replace global numbering with a local one + ! tmp(1:nod2d)=0 + ! DO n=1, myDim_nod2D+eDim_nod2D + ! tmp(myList_nod2D(n))=n + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean T-grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_T,remap_gauss_to_T,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_T,remap_gauss_to_T) + ENDIF + CALL parinter_init( mype, npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_T, gausstoT, lcommout, TRIM(commoutprefix)//'_gtoT', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_T) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT') + ENDIF + ENDIF + + ! From ocean T-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_T_to_gauss,remap_T_to_gauss,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_T_to_gauss,remap_T_to_gauss) + ENDIF + + CALL parinter_init( mype, npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_T_to_gauss, Ttogauss, lcommout, TRIM(commoutprefix)//'_Ttog', & + & iunit ) + CALL scripremap_dealloc(remap_T_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + + + if(mype==0) then + WRITE(0,*)'!======================================' + WRITE(0,*)'! VECTORS =============================' + + WRITE(0,*)'Update FESOM global vector points' + endif + noglopoints=elem2D + nopoints=myDim_elem2D + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + + omask(:)= 1 ! all elements are in the ocean + ogloind(1:myDim_elem2D) = myList_elem2D(1:myDim_elem2D) ! global index for local element number + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean UV-grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_UV,remap_gauss_to_UV,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_UV,remap_gauss_to_UV) + ENDIF + CALL parinter_init( mype, npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_UV, gausstoUV, lcommout, TRIM(commoutprefix)//'_gtoUV', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_UV) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV') + ENDIF + ENDIF + + ! From ocean UV-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( mype, npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_UV_to_gauss,remap_UV_to_gauss,& + & mype,npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_UV_to_gauss,remap_UV_to_gauss) + ENDIF + + CALL parinter_init( mype, npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_UV_to_gauss, UVtogauss, lcommout, TRIM(commoutprefix)//'_UVtog', & + & iunit ) + CALL scripremap_dealloc(remap_UV_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( mype, npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + +END SUBROUTINE nemogcmcoup_coupinit + + +SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & + & nopoints, pgsst, pgist, pgalb, & + & pgifr, pghic, pghsn, pgucur, pgvcur, & + & pgistl, licelvls ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the FESOM grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind ! in ifs_modules.F90 + USE o_ARRAYS, ONLY : tr_arr, UV + USE i_arrays, ONLY : m_ice, a_ice, m_snow + USE i_therm_param, ONLY : tmelt + !USE o_PARAM, ONLY : WP + USE g_PARSUP, only: myDim_nod2D,eDim_nod2D, myDim_elem2D,eDim_elem2D,eXDim_elem2D + !USE o_MESH, only: elem2D_nodes, coord_nod2D + USE MOD_MESH + USE g_init2timestepping, only: meshinmod + + USE g_rotate_grid, only: vector_r2g + USE parinter + USE scripremap + USE interinfo + + IMPLICIT NONE + + ! Arguments + REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur + REAL(wpIFS), DIMENSION(nopoints,3) :: pgistl + LOGICAL :: licelvls + + type(t_mesh), target :: mesh + real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D + integer, dimension(:,:) , pointer :: elem2D_nodes + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + REAL(wpIFS), DIMENSION(myDim_nod2D) :: zsend + REAL(wpIFS), DIMENSION(myDim_elem2D) :: zsendU, zsendV + INTEGER :: elnodes(3) + REAL(wpIFS) :: rlon, rlat + + ! Loop variables + INTEGER :: n, elem, ierr + + !#include "associate_mesh.h" + ! associate what is needed only + mesh = meshinmod + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D + elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes + + + ! =================================================================== ! + ! Pack SST data and convert to K. 'pgsst' is on Gauss grid. + do n=1,myDim_nod2D + zsend(n)=tr_arr(1, n, 1)+tmelt ! sea surface temperature [K], + ! (1=surface, n=node, 1/2=T/S) + enddo + + ! Interpolate SST + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgsst ) + + + ! =================================================================== ! + ! Pack ice fraction data [0..1] and interpolate: 'pgifr' on Gauss. + ! zsend(:)=a_ice(:) + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, a_ice, & + & nopoints, pgifr ) + + + ! =================================================================== ! + ! Pack ice temperature data (already in K) + zsend(:)=273.15 + + ! Interpolate ice surface temperature: 'pgist' on Gaussian grid. + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgist ) + + + ! =================================================================== ! + ! Pack ice albedo data and interpolate: 'pgalb' on Gaussian grid. + zsend(:)=0.7 + + ! Interpolate ice albedo + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgalb ) + + + ! =================================================================== ! + ! Pack ice thickness data and interpolate: 'pghic' on Gaussian grid. + zsend(:)=m_ice(:)/max(a_ice(:),0.01) ! ice thickness (mean over ice) + + ! Interpolation of average ice thickness + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pghic ) + + + ! =================================================================== ! + ! Pack snow thickness data and interpolate: 'pghsn' on Gaussian grid. + zsend(:)=m_snow(:)/max(a_ice(:),0.01) ! snow thickness (mean over ice) + + ! Interpolation of snow thickness + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pghsn ) + + + ! =================================================================== ! + ! Surface currents need to be rotated to geographical grid + + ! Pack u(v) surface currents + zsendU(:)=UV(1,1,1:myDim_elem2D) + zsendV(:)=UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here + + do elem=1, myDim_elem2D + + ! compute element midpoints + elnodes=elem2D_nodes(:,elem) + rlon=sum(coord_nod2D(1,elnodes))/3.0_wpIFS + rlat=sum(coord_nod2D(2,elnodes))/3.0_wpIFS + + ! Rotate vectors to geographical coordinates (r2g) + call vector_r2g(zsendU(elem), zsendV(elem), rlon, rlat, 0) ! 0-flag for rot. coord + + end do + +#ifdef FESOM_TODO + + ! We need to sort out the non-unique global index before we + ! can couple currents + + ! Interpolate: 'pgucur' and 'pgvcur' on Gaussian grid. + CALL parinter_fld( mype, npes, icomm, UVtogauss, & + & myDim_elem2D, zsendU, & + & nopoints, pgucur ) + + CALL parinter_fld( mype, npes, icomm, UVtogauss, & + & myDim_elem2D, zsendV, & + & nopoints, pgvcur ) + +#else + + pgucur(:) = 0.0 + pgvcur(:) = 0.0 + +#endif + +#ifndef FESOM_TODO + + if(mype==0) then + WRITE(0,*)'Everything implemented except ice level temperatures (licelvls).' + endif + +#else + + ! Ice level temperatures + + IF (licelvls) THEN + +#if defined key_lim2 + + DO jl = 1, 3 + + ! Pack ice temperatures data at level jl(already in K) + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tbif (ji,jj,jl) + ENDDO + ENDDO + + ! Interpolate ice temperature at level jl + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgistl(:,jl) ) + + ENDDO + +#else + WRITE(0,*)'licelvls needs to be sorted for LIM3' + CALL abort +#endif + + ENDIF + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) + +#endif + +END SUBROUTINE nemogcmcoup_lim2_get + + +SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & + & npoints, & + & taux_oce, tauy_oce, taux_ice, tauy_ice, & + & qs___oce, qs___ice, qns__oce, qns__ice, dqdt_ice, & + & evap_tot, evap_ice, prcp_liq, prcp_sol, & + & runoffIN, ocerunoff, tcc, lcc, tice_atm, & + & kt, ldebug, loceicemix, lqnsicefilt ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind !in ifs_modules.F90 + USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex, eDim_nod2D, eDim_elem2D, eXDim_elem2D, myDim_edge2D, eDim_edge2D + !USE o_MESH, only: coord_nod2D !elem2D_nodes + USE MOD_MESH + USE g_init2timestepping, only: meshinmod + !USE o_PARAM, ONLY : WP, use wpIFS from par_kind (IFS) + USE g_rotate_grid, only: vector_r2g, vector_g2r + USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & + & evap_no_ifrac, sublimation !'longwave' only stand-alone, 'evaporation' filled later + USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, oce_heat_flux, ice_heat_flux + USE o_ARRAYS, only: stress_atmoce_x, stress_atmoce_y + USE g_comm_auto ! exchange_nod does the halo exchange + + ! all needed? + USE parinter + USE scripremap + USE interinfo + + IMPLICIT NONE + + ! =================================================================== ! + ! Arguments ========================================================= ! + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & + & taux_oce, tauy_oce, taux_ice, tauy_ice, & + & qs___oce, qs___ice, qns__oce, qns__ice, & + & dqdt_ice, evap_tot, evap_ice, prcp_liq, prcp_sol, & + & runoffIN, ocerunoff, tcc, lcc, tice_atm + + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + ! QS/QNS mixed switch + LOGICAL, INTENT(IN) :: loceicemix + ! QNS ice filter switch (requires tice_atm to be sent) + LOGICAL, INTENT(IN) :: lqnsicefilt + + type(t_mesh), target :: mesh + + ! Local variables + INTEGER :: n + REAL(wpIFS), parameter :: rhofwt = 1000. ! density of freshwater + + + ! Packed receive buffer + REAL(wpIFS), DIMENSION(myDim_nod2D) :: zrecv + REAL(wpIFS), DIMENSION(myDim_elem2D):: zrecvU, zrecvV + + + !#include "associate_mesh.h" + ! associate only the necessary things + real(kind=WP), dimension(:,:), pointer :: coord_nod2D + mesh = meshinmod + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D + + ! =================================================================== ! + ! Sort out incoming arrays from the IFS and put them on the ocean grid + + ! TODO + shortwave(:)=0. ! Done, updated below. What to do with shortwave over ice?? + !longwave(:)=0. ! Done. Only used in stand-alone mode. + prec_rain(:)=0. ! Done, updated below. + prec_snow(:)=0. ! Done, updated below. + evap_no_ifrac=0. ! Done, updated below. This is evap over ocean, does this correspond to evap_tot? + sublimation=0. ! Done, updated below. + ! + ice_heat_flux=0. ! Done. This is qns__ice currently. Is this the non-solar heat flux? ! non solar heat fluxes below ! (qns) + oce_heat_flux=0. ! Done. This is qns__oce currently. Is this the non-solar heat flux? + ! + runoff(:)=0. ! not used apparently. What is runoffIN, ocerunoff? + !evaporation(:)=0. + !ice_thermo_cpl.F90: !---- total evaporation (needed in oce_salt_balance.F90) + !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice + stress_atmice_x=0. ! Done, taux_ice + stress_atmice_y=0. ! Done, tauy_ice + stress_atmoce_x=0. ! Done, taux_oce + stress_atmoce_y=0. ! Done, tauy_oce + + + ! =================================================================== ! + !1. Interpolate ocean solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & + & myDim_nod2D, zrecv ) + + ! Unpack ocean solar radiation, without halo + shortwave(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(shortwave) + + + ! =================================================================== ! + !2. Interpolate ice solar radiation to T grid + ! DO NOTHING + + + ! =================================================================== ! + !3. Interpolate ocean non-solar radiation to T grid (is this non-solar heat flux?) + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & + & myDim_nod2D, zrecv ) + + ! Unpack ocean non-solar, without halo + oce_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(oce_heat_flux) + + + ! =================================================================== ! + !4. Interpolate non-solar radiation over ice to T grid (is this non-solar heat flux?) + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & + & myDim_nod2D, zrecv ) + + ! Unpack ice non-solar + ice_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(ice_heat_flux) + + + ! =================================================================== ! + !5. D(q)/dT to T grid + ! DO NOTHING + + + ! =================================================================== ! + !6. Interpolate total evaporation to T grid + ! =================================================================== ! + !ice_thermo_cpl.F90: total evaporation (needed in oce_salt_balance.F90) + !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice + ! =================================================================== ! + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & + & myDim_nod2D, zrecv ) + + ! Unpack total evaporation, without halo + evap_no_ifrac(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign + + ! Do the halo exchange + call exchange_nod(evap_no_ifrac) + + !7. Interpolate sublimation (evaporation over ice) to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack sublimation (evaporation over ice), without halo + sublimation(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign + + ! Do the halo exchange + call exchange_nod(sublimation) + ! =================================================================== ! + ! =================================================================== ! + + + ! =================================================================== ! + !8. Interpolate liquid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & + & myDim_nod2D, zrecv ) + + ! Unpack liquid precipitation, without halo + prec_rain(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + + ! Do the halo exchange + call exchange_nod(prec_rain) + + + ! =================================================================== ! + !9. Interpolate solid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & + & myDim_nod2D, zrecv ) + + ! Unpack solid precipitation, without halo + prec_snow(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + + ! Do the halo exchange + call exchange_nod(prec_snow) + + + ! =================================================================== ! + !10. Interpolate runoff to T grid + ! + !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & + ! & myDim_nod2D, zrecv ) + ! + ! Unpack runoff, without halo + !runoff(1:myDim_nod2D)=zrecv(1:myDim_nod2D) !conversion?? + ! + ! Do the halo exchange + !call exchange_nod(runoff) + ! + !11. Interpolate ocean runoff to T grid + ! + !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & + ! & myDim_nod2D, zrecv ) + ! + ! Unpack ocean runoff + ! ?? + + !12. Interpolate total cloud fractions to T grid (tcc) + ! + !13. Interpolate low cloud fractions to T grid (lcc) + + + ! =================================================================== ! + ! STRESSES + + ! OVER OCEAN: + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_oce, & + & myDim_nod2D, zrecv ) + + ! Unpack x stress atm->oce, without halo; then do halo exchange + stress_atmoce_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmoce_x) + + ! + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_oce, & + & myDim_nod2D, zrecv ) + + ! Unpack y stress atm->oce, without halo; then do halo exchange + stress_atmoce_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmoce_y) + + ! =================================================================== ! + ! OVER ICE: + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack x stress atm->ice, without halo; then do halo exchange + stress_atmice_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmice_x) + + ! + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack y stress atm->ice, without halo; then do halo exchange + stress_atmice_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmice_y) + + + ! =================================================================== ! + ! ROTATE VECTORS FROM GEOGRAPHIC TO FESOMS ROTATED GRID + + !if ((do_rotate_oce_wind .AND. do_rotate_ice_wind) .AND. rotated_grid) then + do n=1, myDim_nod2D+eDim_nod2D + call vector_g2r(stress_atmoce_x(n), stress_atmoce_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) !0-flag for rot. coord. + call vector_g2r(stress_atmice_x(n), stress_atmice_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) + end do + !do_rotate_oce_wind=.false. + !do_rotate_ice_wind=.false. + !end if + + +#ifdef FESOM_TODO + + ! Packed receive buffer + REAL(wpIFS), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zrecv + ! Unpacked fields on ORCA grids + REAL(wpIFS), DIMENSION(jpi,jpj) :: zqs___oce, zqs___ice, zqns__oce, zqns__ice + REAL(wpIFS), DIMENSION(jpi,jpj) :: zdqdt_ice, zevap_tot, zevap_ice, zprcp_liq, zprcp_sol + REAL(wpIFS), DIMENSION(jpi,jpj) :: zrunoff, zocerunoff + REAL(wpIFS), DIMENSION(jpi,jpj) :: ztmp, zicefr + ! Arrays for rotation + REAL(wpIFS), DIMENSION(jpi,jpj) :: zuu,zvu,zuv,zvv,zutau,zvtau + ! Lead fraction for both LIM2/LIM3 + REAL(wpIFS), DIMENSION(jpi,jpj) :: zfrld + ! Mask for masking for I grid + REAL(wpIFS) :: zmsksum + ! For summing up LIM3 contributions to ice temperature + REAL(wpIFS) :: zval,zweig + + ! Loop variables + INTEGER :: ji,jj,jk,jl + ! netCDF debugging output variables + CHARACTER(len=128) :: cdoutfile + INTEGER :: inum + REAL(wpIFS) :: zhook_handle ! Dr Hook handle + + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',0,zhook_handle) + IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_update') + + ! Allocate the storage data + + IF (.NOT.lallociceflx) THEN + ALLOCATE( & + & zsqns_tot(jpi,jpj), & + & zsqns_ice(jpi,jpj), & + & zsqsr_tot(jpi,jpj), & + & zsqsr_ice(jpi,jpj), & + & zsemp_tot(jpi,jpj), & + & zsemp_ice(jpi,jpj), & + & zsevap_ice(jpi,jpj), & + & zsdqdns_ice(jpi,jpj), & + & zssprecip(jpi,jpj), & + & zstprecip(jpi,jpj), & + & zstcc(jpi,jpj), & + & zslcc(jpi,jpj), & + & zsatmist(jpi,jpj), & + & zsqns_ice_add(jpi,jpj)& + & ) + lallociceflx = .TRUE. + ENDIF + IF (.NOT.lallocstress) THEN + ALLOCATE( & + & zsutau(jpi,jpj), & + & zsvtau(jpi,jpj), & + & zsutau_ice(jpi,jpj), & + & zsvtau_ice(jpi,jpj) & + & ) + lallocstress = .TRUE. + ENDIF + + ! Sort out incoming arrays from the IFS and put them on the ocean grid + + !1. Interpolate ocean solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean solar radiation + + zqs___oce(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqs___oce(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !2. Interpolate ice solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice solar radiation + + zqs___ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqs___ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !3. Interpolate ocean non-solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean non-solar radiation + + zqns__oce(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqns__oce(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !4. Interpolate ice non-solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice non-solar radiation + + zqns__ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqns__ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !5. Interpolate D(q)/dT to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, dqdt_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack D(q)/D(T) + + zdqdt_ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zdqdt_ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !6. Interpolate total evaporation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack total evaporation + + zevap_tot(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zevap_tot(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !7. Interpolate evaporation over ice to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack evaporation over ice + + zevap_ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zevap_ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !8. Interpolate liquid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack liquid precipitation + + zprcp_liq(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zprcp_liq(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !9. Interpolate solid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack precipitation over ice + + zprcp_sol(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zprcp_sol(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !10. Interpolate runoff to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack runoff + + zrunoff(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zrunoff(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !11. Interpolate ocean runoff to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zocerunoff(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zocerunoff(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !12. Interpolate total cloud fractions to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tcc, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zstcc(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zstcc(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !13. Interpolate low cloud fractions to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, lcc, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zslcc(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zslcc(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! get sea ice fraction and lead fraction + +#if defined key_lim2 + zfrld(:,:) = frld(:,:) + zicefr(:,:) = 1 - zfrld(:,:) +#else + zicefr(:,:) = 0.0_wpIFS + DO jl = 1, jpl + zicefr(:,:) = zicefr(:,:) + a_i(:,:,jl) + ENDDO + zfrld(:,:) = 1 - zicefr(:,:) +#endif + + zsemp_tot(:,:) = zevap_tot(:,:) - zprcp_liq(:,:) - zprcp_sol(:,:) + zstprecip(:,:) = zprcp_liq(:,:) + zprcp_sol(:,:) + ! More consistent with NEMO, but does changes the results, so + ! we don't do it for now. + ! zsemp_tot(:,:) = zevap_tot(:,:) - zstprecip(:,:) + zsemp_ice(:,:) = zevap_ice(:,:) - zprcp_sol(:,:) + zssprecip(:,:) = - zsemp_ice(:,:) + zsemp_tot(:,:) = zsemp_tot(:,:) - zrunoff(:,:) + zsemp_tot(:,:) = zsemp_tot(:,:) - zocerunoff(:,:) + zsevap_ice(:,:) = zevap_ice(:,:) + + ! non solar heat fluxes ! (qns) + IF (loceicemix) THEN + zsqns_tot(:,:) = zqns__oce(:,:) + ELSE + zsqns_tot(:,:) = zfrld(:,:) * zqns__oce(:,:) + zicefr(:,:) * zqns__ice(:,:) + ENDIF + zsqns_ice(:,:) = zqns__ice(:,:) + ztmp(:,:) = zfrld(:,:) * zprcp_sol(:,:) * lfus ! add the latent heat of solid precip. melting + + zsqns_tot(:,:) = zsqns_tot(:,:) - ztmp(:,:) ! over free ocean + ! solar heat fluxes ! (qsr) + + IF (loceicemix) THEN + zsqsr_tot(:,:) = zqs___oce(:,:) + ELSE + zsqsr_tot(:,:) = zfrld(:,:) * zqs___oce(:,:) + zicefr(:,:) * zqs___ice(:,:) + ENDIF + zsqsr_ice(:,:) = zqs___ice(:,:) + + IF( ln_dm2dc ) THEN ! modify qsr to include the diurnal cycle + zsqsr_tot(:,:) = sbc_dcy( zsqsr_tot(:,:) ) + zsqsr_ice(:,:) = sbc_dcy( zsqsr_ice(:,:) ) + ENDIF + + zsdqdns_ice(:,:) = zdqdt_ice(:,:) + + ! Apply lateral boundary condition + + CALL lbc_lnk(zsqns_tot, 'T', 1.0) + CALL lbc_lnk(zsqns_ice, 'T', 1.0) + CALL lbc_lnk(zsqsr_tot, 'T', 1.0) + CALL lbc_lnk(zsqsr_ice, 'T', 1.0) + CALL lbc_lnk(zsemp_tot, 'T', 1.0) + CALL lbc_lnk(zsemp_ice, 'T', 1.0) + CALL lbc_lnk(zsdqdns_ice, 'T', 1.0) + CALL lbc_lnk(zssprecip, 'T', 1.0) + CALL lbc_lnk(zstprecip, 'T', 1.0) + CALL lbc_lnk(zstcc, 'T', 1.0) + CALL lbc_lnk(zslcc, 'T', 1.0) + + ! Interpolate atmospheric ice temperature to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tice_atm, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack atmospheric ice temperature + + zsatmist(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zsatmist(ji,jj) = zrecv(jk) + ENDDO + ENDDO + CALL lbc_lnk(zsatmist, 'T', 1.0) + + zsqns_ice_add(:,:) = 0.0_wpIFS + + ! Use the dqns_ice filter + + IF (lqnsicefilt) THEN + + ! Add filtr to qns_ice + +#if defined key_lim2 + ztmp(:,:) = tn_ice(:,:,1) +#else + DO jj = nldj, nlej + DO ji = nldi, nlei + zval=0.0 + zweig=0.0 + DO jl = 1, jpl + zval = zval + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) + zweig = zweig + a_i(ji,jj,jl) + ENDDO + IF ( zweig > 0.0 ) THEN + ztmp(ji,jj) = zval /zweig + ELSE + ztmp(ji,jj) = rt0 + ENDIF + ENDDO + ENDDO + CALL lbc_lnk(ztmp, 'T', 1.0) +#endif + + WHERE ( zicefr(:,:) > .001_wpIFS ) + zsqns_ice_add(:,:) = zsdqdns_ice(:,:) * ( ztmp(:,:) - zsatmist(:,:) ) + END WHERE + + zsqns_ice(:,:) = zsqns_ice(:,:) + zsqns_ice_add(:,:) + + ENDIF + + ! Interpolate u-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack u stress on U grid + + zuu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate v-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack v stress on U grid + + zvu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate u-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack u stress on V grid + + zuv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate v-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack v stress on V grid + + zvv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Rotate stresses from en to ij and put u,v stresses on U,V grids + + CALL repcmo( zuu, zvu, zuv, zvv, zsutau, zsvtau ) + + ! Apply lateral boundary condition on u,v stresses on the U,V grids + + CALL lbc_lnk( zsutau, 'U', -1.0 ) + CALL lbc_lnk( zsvtau, 'V', -1.0 ) + + ! Interpolate ice u-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice u stress on U grid + + zuu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice v-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice v stress on U grid + + zvu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice u-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice u stress on V grid + + zuv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice v-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice v stress on V grid + + zvv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Rotate stresses from en to ij and put u,v stresses on U,V grids + + CALL repcmo( zuu, zvu, zuv, zvv, zutau, zvtau ) + + ! Apply lateral boundary condition on u,v stresses on the U,V grids + + CALL lbc_lnk( zutau, 'U', -1.0 ) + CALL lbc_lnk( zvtau, 'V', -1.0 ) + +#if defined key_lim2_vp + + ! Convert to I grid for LIM2 for key_lim_vp + DO jj = 2, jpjm1 ! (U,V) ==> I + DO ji = 2, jpim1 ! NO vector opt. + zmsksum = umask(ji-1,jj,1) + umask(ji-1,jj-1,1) + zsutau_ice(ji,jj) = ( umask(ji-1,jj,1) * zutau(ji-1,jj) + & + & umask(ji-1,jj-1,1) * zutau(ji-1,jj-1) ) + IF ( zmsksum > 0.0 ) THEN + zsutau_ice(ji,jj) = zsutau_ice(ji,jj) / zmsksum + ENDIF + zmsksum = vmask(ji,jj-1,1) + vmask(ji-1,jj-1,1) + zsvtau_ice(ji,jj) = ( vmask(ji,jj-1,1) * zvtau(ji,jj-1) + & + & vmask(ji-1,jj-1,1) * zvtau(ji-1,jj-1) ) + IF ( zmsksum > 0.0 ) THEN + zsvtau_ice(ji,jj) = zsvtau_ice(ji,jj) / zmsksum + ENDIF + END DO + END DO + +#else + + zsutau_ice(:,:) = zutau(:,:) + zsvtau_ice(:,:) = zvtau(:,:) + +#endif + + CALL lbc_lnk( zsutau_ice, 'I', -1.0 ) + CALL lbc_lnk( zsvtau_ice, 'I', -1.0 ) + + ! Optionally write files write the data on the ORCA grid via IOM. + + IF (ldebug) THEN + WRITE(cdoutfile,'(A,I8.8)') 'zsutau_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsutau' , zsutau ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsvtau' , zsvtau ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsutau_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsutau_ice' , zsutau_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsvtau_ice' , zsvtau_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_tot' , zsqns_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_ice' , zsqns_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqsr_tot' , zsqsr_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqsr_ice' , zsqsr_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsemp_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsemp_tot' , zsemp_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsemp_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsemp_ice' , zsemp_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsdqdns_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsdqdns_ice' , zsdqdns_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zssprecip_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zssprecip' , zssprecip ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zstprecip_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zstprecip' , zstprecip ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsevap_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsevap_ice' , zsevap_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zstcc_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zstcc' , zstcc ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zslcc_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zslcc' , zslcc ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsatmist_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsatmist' , zsatmist ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_add_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_ice_add' , zsqns_ice_add ) + CALL iom_close( inum ) + ENDIF + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_update') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',1,zhook_handle) + +#else + + !FESOM part + !WRITE(0,*)'nemogcmcoup_lim2_update partially implemented. Proceeding...' + !CALL par_ex + +#endif + +END SUBROUTINE nemogcmcoup_lim2_update + + +SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) + + USE g_clock, only: yearnew, month, day_in_month + USE g_PARSUP, only: mype + USE nemogcmcoup_steps, ONLY : substeps + IMPLICIT NONE + + ! Arguments + + ! Time step + INTEGER, INTENT(IN) :: istp + + ! Data and time from NEMO + INTEGER, INTENT(OUT) :: icdate, ictime + + if(mype==0) then + WRITE(0,*)'! IFS at timestep ', istp, '. Do ', substeps , 'FESOM timesteps...' + endif + CALL main_timestepping(substeps) + + ! Compute date and time at the end of the time step + + icdate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 + ictime = 0 ! (time is not used) + + if(mype==0) then + WRITE(0,*)'! FESOM date at end of timestep is ', icdate ,' ======' + endif + +#ifdef FESOM_TODO + iye = ndastp / 10000 + imo = ndastp / 100 - iye * 100 + ida = MOD( ndastp, 100 ) + CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) + zjul = zjul + ( nsec_day + 0.5_wpIFS * rdttra(1) ) / 86400.0_wpIFS + CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) + icdate = iye * 10000 + imo * 100 + ida + ictime = ihh * 10000 + imm * 100 + iss +#endif + +END SUBROUTINE nemogcmcoup_step + + +SUBROUTINE nemogcmcoup_final + + USE g_PARSUP, only: mype + + ! Finalize the FESOM model + + IMPLICIT NONE + + if(mype==0) then + WRITE(*,*)'Finalization of FESOM from IFS.' + endif + CALL main_finalize + +END SUBROUTINE nemogcmcoup_final +#endif diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 index b483bf962..d596169c4 100644 --- a/src/ifs_interface/ifs_notused.F90 +++ b/src/ifs_interface/ifs_notused.F90 @@ -4,6 +4,15 @@ ! ! -Original code by Kristian Mogensen, ECMWF. +SUBROUTINE nemogcmcoup_end_ioserver + +! End the NEMO mppio server + + WRITE(*,*)'No mpp_ioserver used' +! CALL abort + +END SUBROUTINE nemogcmcoup_end_ioserver + SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) ! Initialize the NEMO mppio server @@ -13,7 +22,7 @@ SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) LOGICAL :: lnemoioserver WRITE(*,*)'No mpp_ioserver' - CALL abort + !CALL abort END SUBROUTINE nemogcmcoup_init_ioserver diff --git a/src/ifs_modules.F90 b/src/ifs_modules.F90 new file mode 100644 index 000000000..8f52ee153 --- /dev/null +++ b/src/ifs_modules.F90 @@ -0,0 +1,1859 @@ +#if defined (__ifsinterface) +#define __MYFILE__ 'ifs_modules.F90' +#define key_mpp_mpi +! Set of modules needed by the interface to IFS. +! +! -Original code by Kristian Mogensen, ECMWF. + +MODULE par_kind + IMPLICIT NONE + INTEGER, PUBLIC, PARAMETER :: & !: Floating point section + sp = SELECTED_REAL_KIND( 6, 37), & !: single precision (real 4) + dp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + wpIFS = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + ik = SELECTED_INT_KIND(6) !: integer precision +END MODULE par_kind + +MODULE nctools + + ! Utility subroutines for netCDF access + ! Modified : MAB (nf90, handle_error, LINE&FILE) + ! Modifled : KSM (new shorter name) + + USE netcdf + + PUBLIC ldebug_netcdf, nchdlerr + LOGICAL :: ldebug_netcdf = .FALSE. ! Debug switch for netcdf + +CONTAINS + + SUBROUTINE nchdlerr(status,lineno,filename) + + ! Error handler for netCDF access + IMPLICIT NONE + + + INTEGER :: status ! netCDF return status + INTEGER :: lineno ! Line number (usually obtained from + ! preprocessing __LINE__,__MYFILE__) + CHARACTER(len=*),OPTIONAL :: filename + + IF (status/=nf90_noerr) THEN + WRITE(*,*)'Netcdf error, code ',status + IF (PRESENT(filename)) THEN + WRITE(*,*)'In file ',filename,' in line ',lineno + ELSE + WRITE(*,*)'In line ',lineno + END IF + WRITE(*,'(2A)')' Error message : ',nf90_strerror(status) + CALL abort + ENDIF + + END SUBROUTINE nchdlerr + +!---------------------------------------------------------------------- +END MODULE nctools + +MODULE scrippar + INTEGER, PARAMETER :: scripdp = SELECTED_REAL_KIND(12,307) + INTEGER, PARAMETER :: scriplen = 80 +END MODULE scrippar + +MODULE scripgrid + + USE nctools + USE scrippar + + IMPLICIT NONE + + TYPE scripgridtype + INTEGER :: grid_size + INTEGER :: grid_corners + INTEGER :: grid_rank + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_dims + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lon + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_imask + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lon + CHARACTER(len=scriplen) :: grid_center_lat_units + CHARACTER(len=scriplen) :: grid_center_lon_units + CHARACTER(len=scriplen) :: grid_imask_units + CHARACTER(len=scriplen) :: grid_corner_lat_units + CHARACTER(len=scriplen) :: grid_corner_lon_units + CHARACTER(len=scriplen) :: title + END TYPE scripgridtype + +CONTAINS + + SUBROUTINE scripgrid_read( cdfilename, grid ) + + CHARACTER(len=*) :: cdfilename + TYPE(scripgridtype) :: grid + + INTEGER :: ncid, dimid, varid + + CALL scripgrid_init(grid) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_corners),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_rank),& + & __LINE__,__MYFILE__) + + CALL scripgrid_alloc(grid) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',grid%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_read + + SUBROUTINE scripgrid_write( cdgridfile, grid ) + + CHARACTER(len=*) :: cdgridfile + TYPE(scripgridtype) :: grid + + INTEGER :: ncid + INTEGER :: ioldfill + INTEGER :: idimsize,idimxsize,idimysize,idimcorners,idimrank + INTEGER :: idims1rank(1),idims1size(1),idims2(2) + INTEGER :: iddims,idcentlat,idcentlon,idimask,idcornlat,idcornlon + INTEGER :: igriddims(2) + + ! Setup netcdf file + + CALL nchdlerr(nf90_create(TRIM(cdgridfile),nf90_clobber,ncid),& + & __LINE__,__MYFILE__) + + ! Define dimensions + + CALL nchdlerr(nf90_def_dim(ncid,'grid_size',& + & grid%grid_size,idimsize),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_corners',& + & grid%grid_corners,idimcorners),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_rank',& + & grid%grid_rank,idimrank),& + & __LINE__,__MYFILE__) + + idims1rank(1) = idimrank + + idims1size(1) = idimsize + + idims2(1) = idimcorners + idims2(2) = idimsize + + ! Define variables + + CALL nchdlerr(nf90_def_var(ncid,'grid_dims',& + & nf90_int,idims1rank,iddims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lat',& + & nf90_double,idims1size,idcentlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlat,'units',& + & grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lon',& + & nf90_double,idims1size,idcentlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlon,'units',& + & grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_imask',& + & nf90_int,idims1size,idimask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idimask,'units',& + & grid%grid_imask_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lat',& + & nf90_double,idims2,idcornlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlat,'units',& + & grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lon',& + & nf90_double,idims2,idcornlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlon,'units',& + & grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & TRIM(grid%title)),& + & __LINE__,__MYFILE__) + + ! End of netCDF definition phase + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + ! Write variables + + + CALL nchdlerr(nf90_put_var(ncid,iddims,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlat,& + & grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlon,& + & grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idimask,& + & grid%grid_imask), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlat,& + & grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlon,& + & grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ! Close file + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_write + + SUBROUTINE scripgrid_init( grid ) + + TYPE(scripgridtype) :: grid + + grid%grid_size=0 + grid%grid_corners=0 + grid%grid_rank=0 + grid%grid_center_lat_units='' + grid%grid_center_lon_units='' + grid%grid_imask_units='' + grid%grid_corner_lat_units='' + grid%grid_corner_lon_units='' + grid%title='' + + END SUBROUTINE scripgrid_init + + SUBROUTINE scripgrid_alloc( grid ) + + TYPE(scripgridtype) :: grid + + IF ( (grid%grid_size == 0) .OR. & + & (grid%grid_corners == 0) .OR. & + & (grid%grid_rank == 0) ) THEN + WRITE(*,*)'scripgridtype not initialized' + CALL abort + ENDIF + + ALLOCATE( & + & grid%grid_dims(grid%grid_rank), & + & grid%grid_center_lat(grid%grid_size), & + & grid%grid_center_lon(grid%grid_size), & + & grid%grid_corner_lat(grid%grid_corners, grid%grid_size), & + & grid%grid_corner_lon(grid%grid_corners, grid%grid_size), & + & grid%grid_imask(grid%grid_size) & + & ) + + END SUBROUTINE scripgrid_alloc + + SUBROUTINE scripgrid_dealloc( grid ) + + TYPE(scripgridtype) :: grid + + DEALLOCATE( & + & grid%grid_dims, & + & grid%grid_center_lat, & + & grid%grid_center_lon, & + & grid%grid_corner_lat, & + & grid%grid_corner_lon, & + & grid%grid_imask & + & ) + + END SUBROUTINE scripgrid_dealloc + +END MODULE scripgrid + +MODULE scripremap + +#if defined key_mpp_mpi + USE mpi +#endif + USE nctools + USE scrippar + USE scripgrid + + IMPLICIT NONE + + TYPE scripremaptype + INTEGER :: num_links + INTEGER :: num_wgts + TYPE(scripgridtype) :: src + TYPE(scripgridtype) :: dst + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_frac + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_frac + INTEGER, ALLOCATABLE, DIMENSION(:) :: src_address + INTEGER, ALLOCATABLE, DIMENSION(:) :: dst_address + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: remap_matrix + CHARACTER(len=scriplen) :: src_grid_area_units + CHARACTER(len=scriplen) :: dst_grid_area_units + CHARACTER(len=scriplen) :: src_grid_frac_units + CHARACTER(len=scriplen) :: dst_grid_frac_units + CHARACTER(len=scriplen) :: title + CHARACTER(len=scriplen) :: normalization + CHARACTER(len=scriplen) :: map_method + CHARACTER(len=scriplen) :: history + CHARACTER(len=scriplen) :: conventions + END TYPE scripremaptype + +CONTAINS + + SUBROUTINE scripremap_read_work(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid, dimid, varid + LOGICAL :: lcorners + + lcorners=.TRUE. + + CALL scripremap_init(remap) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_size),& + & __LINE__,__MYFILE__) + + + IF (nf90_inq_dimid(ncid,'src_grid_corners',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + lcorners=.FALSE. + remap%src%grid_corners=1 + ENDIF + + IF (lcorners) THEN + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + remap%dst%grid_corners=1 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_links',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_links),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_wgts),& + & __LINE__,__MYFILE__) + + CALL scripremap_alloc(remap) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + IF (lcorners) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ELSE + + remap%src%grid_corner_lat(:,:) = 0.0 + remap%src%grid_corner_lon(:,:) = 0.0 + remap%dst%grid_corner_lat(:,:) = 0.0 + remap%dst%grid_corner_lon(:,:) = 0.0 + remap%src%grid_corner_lat_units = '' + remap%src%grid_corner_lon_units = '' + remap%dst%grid_corner_lat_units = '' + remap%dst%grid_corner_lon_units = '' + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'normalization',remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'map_method',remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'history',remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'conventions',remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'dest_grid',remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'source_grid',remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripremap_read_work + + SUBROUTINE scripremap_read(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + CALL scripremap_read_work(cdfilename,remap) + + END SUBROUTINE scripremap_read + + + SUBROUTINE scripremap_read_sgl(cdfilename,remap,& + & mype,nproc,mycomm,linteronly) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + INTEGER :: mype,nproc,mycomm + LOGICAL :: linteronly + + INTEGER, DIMENSION(8) :: isizes + INTEGER :: ierr, ip + + IF (mype==0) THEN + CALL scripremap_read_work(cdfilename,remap) +#if defined key_mpp_mpi + isizes(1)=remap%src%grid_size + isizes(2)=remap%dst%grid_size + isizes(3)=remap%src%grid_corners + isizes(4)=remap%dst%grid_corners + isizes(5)=remap%src%grid_rank + isizes(6)=remap%dst%grid_rank + isizes(7)=remap%num_links + isizes(8)=remap%num_wgts + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + ELSE + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + CALL scripremap_init(remap) + remap%src%grid_size=isizes(1) + remap%dst%grid_size=isizes(2) + remap%src%grid_corners=isizes(3) + remap%dst%grid_corners=isizes(4) + remap%src%grid_rank=isizes(5) + remap%dst%grid_rank=isizes(6) + remap%num_links=isizes(7) + remap%num_wgts=isizes(8) + CALL scripremap_alloc(remap) +#endif + ENDIF + +#if defined key_mpp_mpi + + IF (.NOT.linteronly) THEN + + CALL mpi_bcast( remap%src%grid_dims, remap%src%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lat, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%dst%grid_dims, remap%dst%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src_grid_area, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%title, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%normalization, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%map_method, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%history, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%conventions, scriplen, & + & mpi_character, 0, mycomm, ierr ) + ENDIF + + CALL mpi_bcast( remap%src_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%remap_matrix, remap%num_wgts*remap%num_links, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask, remap%src%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask, remap%dst%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + +#endif + END SUBROUTINE scripremap_read_sgl + + SUBROUTINE scripremap_write(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid + INTEGER :: dimsgs,dimdgs,dimsgc,dimdgc,dimsgr,dimdgr,dimnl,dimnw + INTEGER :: dims1(1),dims2(2) + INTEGER :: idsgd,iddgd,idsgea,iddgea,idsgeo,iddgeo + INTEGER :: idsgoa,idsgoo,iddgoa,iddgoo,idsgim,iddgim,idsgar,iddgar + INTEGER :: idsgf,iddgf,idsga,iddga,idsa,idda,idrm + + CALL nchdlerr(nf90_create(TRIM(cdfilename),nf90_clobber,ncid), & + & __LINE__, __MYFILE__ ) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_size',& + & remap%src%grid_size,dimsgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_size',& + & remap%dst%grid_size,dimdgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_corners',& + & remap%src%grid_corners,dimsgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_corners',& + & remap%dst%grid_corners,dimdgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_rank',& + & remap%src%grid_rank,dimsgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_rank',& + & remap%dst%grid_rank,dimdgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & remap%num_links,dimnl),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & remap%num_wgts,dimnw),& + & __LINE__,__MYFILE__) + + dims1(1)=dimsgr + CALL nchdlerr(nf90_def_var(ncid,'src_grid_dims',& + & nf90_int,dims1,idsgd),& + & __LINE__,__MYFILE__) + + dims1(1)=dimdgr + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_dims',& + & nf90_int,dims1,iddgd), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lat',& + & nf90_double,dims1,idsgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lat',& + & nf90_double,dims1,iddgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lon',& + & nf90_double,dims1,idsgeo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lon',& + & nf90_double,dims1,iddgeo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lat',& + & nf90_double,dims2,idsgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lon',& + & nf90_double,dims2,idsgoo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lat',& + & nf90_double,dims2,iddgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lon',& + & nf90_double,dims2,iddgoo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_imask',& + & nf90_int,dims1,idsgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_imask',& + & nf90_int,dims1,iddgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_area',& + & nf90_double,dims1,idsga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_area',& + & nf90_double,dims1,iddga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_frac',& + & nf90_double,dims1,idsgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_frac',& + & nf90_double,dims1,iddgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda), & + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,idsgea,'units',& + & remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgea,'units',& + & remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgeo,'units',& + & remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgeo,'units',& + & remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoa,'units',& + & remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoo,'units',& + & remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoa,'units',& + & remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoo,'units',& + & remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgim,'units',& + & remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgim,'units',& + & remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsga,'units',& + & remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddga,'units',& + & remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgf,'units',& + & remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgf,'units',& + & remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'normalization',& + & remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'map_method',& + & remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'history',& + & remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'conventions',& + & remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'dest_grid',& + & remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'source_grid',& + & remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgd,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgd,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgea,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgea,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgeo,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgeo,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgoa,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgoo,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoa,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoo,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgim,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgim,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsga,remap%src_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddga,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgf,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgf,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsa,remap%src_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idda,remap%dst_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idrm,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE scripremap_write + + SUBROUTINE scripremap_init(remap) + + TYPE(scripremaptype) :: remap + + CALL scripgrid_init(remap%src) + CALL scripgrid_init(remap%dst) + remap%num_links = 0 + remap%num_wgts = 0 + remap%title='' + remap%normalization='' + remap%map_method='' + remap%history='' + remap%conventions='' + remap%src_grid_area_units='' + remap%dst_grid_area_units='' + remap%src_grid_frac_units='' + remap%dst_grid_frac_units='' + + END SUBROUTINE scripremap_init + + SUBROUTINE scripremap_alloc(remap) + + TYPE(scripremaptype) :: remap + + IF ( (remap%num_links == 0) .OR. & + & (remap%num_wgts == 0) ) THEN + WRITE(*,*)'scripremaptype not initialized' + CALL abort + ENDIF + + CALL scripgrid_alloc(remap%src) + CALL scripgrid_alloc(remap%dst) + + ALLOCATE( & + & remap%src_grid_area(remap%src%grid_size), & + & remap%dst_grid_area(remap%dst%grid_size), & + & remap%src_grid_frac(remap%src%grid_size), & + & remap%dst_grid_frac(remap%dst%grid_size), & + & remap%src_address(remap%num_links), & + & remap%dst_address(remap%num_links), & + & remap%remap_matrix(remap%num_wgts, remap%num_links) & + & ) + + END SUBROUTINE scripremap_alloc + + SUBROUTINE scripremap_dealloc(remap) + + TYPE(scripremaptype) :: remap + + DEALLOCATE( & + & remap%src_grid_area, & + & remap%dst_grid_area, & + & remap%src_grid_frac, & + & remap%dst_grid_frac, & + & remap%src_address, & + & remap%dst_address, & + & remap%remap_matrix & + & ) + + CALL scripgrid_dealloc(remap%src) + CALL scripgrid_dealloc(remap%dst) + + CALL scripremap_init(remap) + + END SUBROUTINE scripremap_dealloc + +END MODULE scripremap + +MODULE parinter + +#if defined key_mpp_mpi + USE mpi +#endif + USE scripremap + USE scrippar + USE nctools + + IMPLICIT NONE + + ! Type to contains interpolation information + ! (like what is in scripremaptype) and message + ! passing information + + TYPE parinterinfo + ! Number of local links + INTEGER :: num_links + ! Destination side + INTEGER, POINTER, DIMENSION(:) :: dst_address + ! Source addresses and work array + INTEGER, POINTER, DIMENSION(:) :: src_address + ! Local remap matrix + REAL(scripdp), POINTER, DIMENSION(:,:) :: remap_matrix + ! Message passing information + ! Array of local addresses for send buffer + ! packing + INTEGER, POINTER, DIMENSION(:) :: send_address + ! Sending bookkeeping + INTEGER :: nsendtot + INTEGER, POINTER, DIMENSION(:) :: nsend,nsdisp + ! Receiving bookkeeping + INTEGER :: nrecvtot + INTEGER, POINTER, DIMENSION(:) :: nrecv,nrdisp + END TYPE parinterinfo + +CONTAINS + + SUBROUTINE parinter_init( mype, nproc, mpi_comm, & + & nsrclocpoints, nsrcglopoints, srcmask, srcgloind, & + & ndstlocpoints, ndstglopoints, dstmask, dstgloind, & + & remap, pinfo, lcommout, commoutprefix, iunit ) + + ! Setup interpolation based on SCRIP format weights in + ! remap and the source/destination grids information. + + ! Procedure: + + ! 1) A global SCRIP remapping file is read on all processors. + ! 2) Find local destination points in the global grid. + ! 3) Find which processor needs source data and setup buffer + ! information for sending data. + ! 4) Construct new src remapping for buffer received + + ! All information is stored in the TYPE(parinterinfo) output + ! data type + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc, mpi_comm + ! Source grid local and global number of grid points + INTEGER, INTENT(IN) :: nsrclocpoints, nsrcglopoints + ! Source integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcmask + ! Source global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcgloind + ! Destination grid local and global number of grid points + INTEGER, INTENT(IN) :: ndstlocpoints, ndstglopoints + ! Destination integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstmask + ! Destination global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstgloind + ! SCRIP remapping data + TYPE(scripremaptype) :: remap + ! Switch for output communication patterns + LOGICAL :: lcommout + CHARACTER(len=*) :: commoutprefix + ! Unit to use for output + INTEGER :: iunit + + ! Output arguments + + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + + ! Local variable + + ! Variable for glocal <-> local address/pe information + INTEGER, DIMENSION(nsrcglopoints) :: ilsrcmppmap, ilsrclocind + INTEGER, DIMENSION(nsrcglopoints) :: igsrcmppmap, igsrclocind + INTEGER, DIMENSION(ndstglopoints) :: ildstmppmap, ildstlocind + INTEGER, DIMENSION(ndstglopoints) :: igdstmppmap, igdstlocind + INTEGER, DIMENSION(nsrcglopoints) :: isrcpe,isrcpetmp + INTEGER, DIMENSION(nsrcglopoints) :: isrcaddtmp + INTEGER, DIMENSION(0:nproc-1) :: isrcoffset + INTEGER, DIMENSION(nproc) :: isrcno, isrcoff, isrccur + INTEGER, DIMENSION(nproc) :: ircvoff, ircvcur + INTEGER, DIMENSION(:), ALLOCATABLE :: isrctot, ircvtot + + ! Misc variable + INTEGER :: i,n,pe + INTEGER :: istatus + CHARACTER(len=256) :: cdfile + + ! Check that masks are consistent. + + ! Remark: More consistency tests between remapping information + ! and input argument could be code, but for now we settle + ! for checking the masks. + + ! Source grid + + DO i=1,nsrclocpoints + IF (srcmask(i)/=remap%src%grid_imask(srcgloind(i))) THEN + WRITE(iunit,*)'Source imask is inconsistent at ' + WRITE(iunit,*)'global index = ',srcgloind(i) + WRITE(iunit,*)'Source mask = ',srcmask(i) + WRITE(iunit,*)'Remap mask = ',remap%src%grid_imask(srcgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%src%grid_center_lat(srcgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%src%grid_center_lon(srcgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Destination grid + + DO i=1,ndstlocpoints + IF (dstmask(i)/=remap%dst%grid_imask(dstgloind(i))) THEN + WRITE(iunit,*)'Destination imask is inconsistent at ' + WRITE(iunit,*)'global index = ',dstgloind(i) + WRITE(iunit,*)'Destin mask = ',dstmask(i) + WRITE(iunit,*)'Remap mask = ',remap%dst%grid_imask(dstgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%dst%grid_center_lat(dstgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%dst%grid_center_lon(dstgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Setup global to local and vice versa mappings. + + ilsrcmppmap(:)=-1 + ilsrclocind(:)=0 + ildstmppmap(:)=-1 + ildstlocind(:)=0 + + DO i=1,nsrclocpoints + ilsrcmppmap(srcgloind(i))=mype + ilsrclocind(srcgloind(i))=i + ENDDO + + DO i=1,ndstlocpoints + ildstmppmap(dstgloind(i))=mype + ildstlocind(dstgloind(i))=i + ENDDO + +#if defined key_mpp_mpi + CALL mpi_allreduce(ilsrcmppmap,igsrcmppmap,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ilsrclocind,igsrclocind,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstmppmap,igdstmppmap,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstlocind,igdstlocind,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) +#else + igsrcmppmap(:)=ilsrcmppmap(:) + igsrclocind(:)=ilsrclocind(:) + igdstmppmap(:)=ildstmppmap(:) + igdstlocind(:)=ildstlocind(:) +#endif + + ! Optionally construct an ascii file listing what src and + ! dest points belongs to which task + + ! Since igsrcmppmap and igdstmppmap are global data only do + ! this for mype==0. + + IF (lcommout.AND.(mype==0)) THEN + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_srcmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,nsrcglopoints + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & igsrcmppmap(i)+1,remap%src%grid_imask(i) + ENDDO + CLOSE(9) + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dstmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,ndstglopoints + WRITE(9,*)remap%dst%grid_center_lat(i),& + & remap%dst%grid_center_lon(i), & + & igdstmppmap(i)+1,remap%dst%grid_imask(i) + ENDDO + CLOSE(9) + ENDIF + + ! + ! Standard interpolation in serial case is + ! + ! DO n=1,remap%num_links + ! zdst(remap%dst_address(n)) = zdst(remap%dst_address(n)) + & + ! & remap%remap_matrix(1,n)*zsrc(remap%src_address(n)) + ! END DO + ! + + ! In parallel we need to first find local number of links + + pinfo%num_links=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) & + & pinfo%num_links=pinfo%num_links+1 + ENDDO + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(1,pinfo%num_links)) + + ! Get local destination addresses + + n=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + n=n+1 + pinfo%dst_address(n)=& + & igdstlocind(remap%dst_address(i)) + pinfo%remap_matrix(:,n)=& + & remap%remap_matrix(:,i) + ENDIF + ENDDO + + ! Get sending processors maps. + + ! The same data point might need to be sent to many processors + ! so first construct a map for processors needing the data + + isrcpe(:)=-1 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + isrcpe(remap%src_address(i))=& + & igsrcmppmap(remap%src_address(i)) + ENDIF + ENDDO + + ! Optionally write a set if ascii file listing which tasks + ! mype needs to send to communicate with + + IF (lcommout) THEN + ! Destination processors + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dsts_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO pe=0,nproc-1 + IF (pe==mype) THEN + isrcpetmp(:)=isrcpe(:) + ENDIF +#if defined key_mpp_mpi + CALL mpi_bcast(isrcpetmp,nsrcglopoints,mpi_integer,pe,mpi_comm,istatus) +#endif + DO i=1,nsrcglopoints + IF (isrcpetmp(i)==mype) THEN + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & pe+1,mype+1 + ENDIF + ENDDO + ENDDO + CLOSE(9) + ENDIF + + ! Get number of points to send to each processor + + ALLOCATE(pinfo%nsend(0:nproc-1)) + isrcno(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrcno(isrcpe(i)+1)=isrcno(isrcpe(i)+1)+1 + ENDIF + ENDDO +#if defined key_mpp_mpi + CALL mpi_alltoall(isrcno,1,mpi_integer, & + & pinfo%nsend(0:nproc-1),1,mpi_integer, & + & mpi_comm,istatus) +#else + pinfo%nsend(0:nproc-1) = isrcno(1:nproc) +#endif + pinfo%nsendtot=SUM(pinfo%nsend(0:nproc-1)) + + ! Construct sending buffer mapping. Data is mapping in + ! processor order. + + ALLOCATE(pinfo%send_address(pinfo%nsendtot)) + + ! Temporary arrays for mpi all to all. + + ALLOCATE(isrctot(SUM(isrcno(1:nproc)))) + ALLOCATE(ircvtot(SUM(pinfo%nsend(0:nproc-1)))) + + ! Offset for message parsing + + isrcoff(1)=0 + ircvoff(1)=0 + DO i=1,nproc-1 + isrcoff(i+1) = isrcoff(i) + isrcno(i) + ircvoff(i+1) = pinfo%nsend(i-1) + ircvoff(i) + ENDDO + + ! Pack indices i into a buffer + + isrccur(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrccur(isrcpe(i)+1)=isrccur(isrcpe(i)+1)+1 + isrctot(isrccur(isrcpe(i)+1)+isrcoff(isrcpe(i)+1)) = i + ENDIF + ENDDO + + ! Send the data + +#if defined key_mpp_mpi + CALL mpi_alltoallv(& + & isrctot,isrccur,isrcoff,mpi_integer, & + & ircvtot,pinfo%nsend(0:nproc-1),ircvoff,mpi_integer, & + & mpi_comm,istatus) +#else + ircvtot(:)=isrctot(:) +#endif + + ! Get the send address. ircvtot will at this point contain the + ! addresses in the global index needed for message passing + + DO i=1,pinfo%nsendtot + pinfo%send_address(i)=igsrclocind(ircvtot(i)) + ENDDO + + ! Deallocate the mpi all to all arrays + + DEALLOCATE(ircvtot,isrctot) + + ! Get number of points to receive to each processor + + ALLOCATE(pinfo%nrecv(0:nproc-1)) + pinfo%nrecv(0:nproc-1)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0 .AND. isrcpe(i)=0 .AND. isrcpe(i)0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & pinfo%num_links,dimnl),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & 1,dimnw),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nsendtot',& + & pinfo%nsendtot,dimnst),& + & __LINE__,__MYFILE__) + ENDIF + + IF (pinfo%nrecvtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nrecvtot',& + & pinfo%nrecvtot,dimnrt),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'nproc',& + & nproc,dimnpr),& + & __LINE__,__MYFILE__) + + IF (pinfo%num_links>0) THEN + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa),& + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsend',& + & nf90_int,dims1,idns),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + dims1(1)=dimnst + CALL nchdlerr(nf90_def_var(ncid,'send_address',& + & nf90_int,dims1,idsaa),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrecv',& + & nf90_int,dims1,idnr),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsdisp',& + & nf90_int,dims1,idnsp),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrdisp',& + & nf90_int,dims1,idnrp),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + + IF (pinfo%num_links>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idda,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsa,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idrm,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idns,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idsaa,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idnr,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnsp,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnrp,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE parinter_write + + SUBROUTINE parinter_read( mype, nproc, & + & nsrcglopoints, ndstglopoints, & + & pinfo, cdpath, cdprefix, lexists ) + + ! Write pinfo information in a netCDF file in order to + ! be able to read it rather than calling parinter_init + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc + ! Source grid local global number of grid points + INTEGER, INTENT(IN) :: nsrcglopoints + ! Destination grid global number of grid points + INTEGER, INTENT(IN) :: ndstglopoints + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + ! Does the information exists + LOGICAL :: lexists + ! Path and file prefix + CHARACTER(len=*) :: cdpath, cdprefix + + ! Local variable + + ! Misc variable + CHARACTER(len=1024) :: cdfile + INTEGER :: ncid, dimid, varid, num_wgts + + WRITE(cdfile,'(A,2(I8.8,A),2(I4.4,A),A)') & + & TRIM(cdpath)//'/'//TRIM(cdprefix)//'_', & + & nsrcglopoints,'_',ndstglopoints,'_',mype,'_',nproc,'.nc' + + + lexists=nf90_open(TRIM(cdfile),nf90_nowrite,ncid)==nf90_noerr + + IF (lexists) THEN + + ! If num_links is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'num_links',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%num_links),& + & __LINE__,__MYFILE__) + ELSE + pinfo%num_links=0 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=num_wgts),& + & __LINE__,__MYFILE__) + IF (num_wgts/=1) THEN + WRITE(0,*)'parinter_read: num_wgts has to be 1 for now' + CALL abort + ENDIF + + ! If nsendtot is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'nsendtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nsendtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nsendtot=0 + ENDIF + + IF(nf90_inq_dimid(ncid,'nrecvtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nrecvtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nrecvtot=0 + ENDIF + + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(num_wgts,pinfo%num_links),& + & pinfo%nsend(0:nproc-1),& + & pinfo%send_address(pinfo%nsendtot),& + & pinfo%nrecv(0:nproc-1),& + & pinfo%nsdisp(0:nproc-1),& + & pinfo%nrdisp(0:nproc-1)) + + IF (pinfo%num_links>0) THEN + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nsend',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'send_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nrecv',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nsdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nrdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + ENDIF + + END SUBROUTINE parinter_read + +END MODULE parinter + +MODULE interinfo + + ! Parallel regridding information + + USE parinter + + IMPLICIT NONE + + SAVE + + ! IFS to NEMO + + TYPE(parinterinfo) :: gausstoT,gausstoUV + + ! NEMO to IFS + + TYPE(parinterinfo) :: Ttogauss, UVtogauss + + ! Read parinterinfo on task 0 only and broadcast. + + LOGICAL :: lparbcast = .FALSE. + +END MODULE interinfo +#endif diff --git a/src/ifs_notused.F90 b/src/ifs_notused.F90 new file mode 100644 index 000000000..b483bf962 --- /dev/null +++ b/src/ifs_notused.F90 @@ -0,0 +1,362 @@ +#if defined (__ifsinterface) +! Routines usually provided by the library that are currently +! not implemented for FESOM2. +! +! -Original code by Kristian Mogensen, ECMWF. + +SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + LOGICAL :: lnemoioserver + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver + + +SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver_2 + + +SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & + & nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wpIFS), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints,nlev + + ! Local variables + + WRITE(0,*)'nemogcmcoup_mlflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlflds_get + + +SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & + & nopoints, pgsst, pgice, pgucur, pgvcur ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice and currents + REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get should not be called with FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_get + + +SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & + & nopoints, pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wpIFS), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + + WRITE(0,*)'nemogcmcoup_exflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_exflds_get + + +SUBROUTINE nemogcmcoup_get_1way( mype, npes, icomm ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get_1way should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_get_1way + + +SUBROUTINE nemogcmcoup_mlinit( mype, npes, icomm, & + & nlev, nopoints, pdep, pmask ) + + ! Get information about the vertical discretization of the ocean model + + ! nlevs are maximum levels on input and actual number levels on output + + USE par_kind + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Grid information + INTEGER, INTENT(INOUT) :: nlev, nopoints + REAL(wpIFS), INTENT(OUT), DIMENSION(nlev) :: pdep + REAL(wpIFS), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask + + ! Local variables + + ! dummy argument with explicit INTENT(OUT) declaration needs an explicit value + pdep=0. + pmask=0. + + WRITE(0,*)'nemogcmcoup_mlinit should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlinit + + +SUBROUTINE nemogcmcoup_update( mype, npes, icomm, & + & npoints, pgutau, pgvtau, & + & pgqsr, pgqns, pgemp, kt, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & + & pgutau, pgvtau, pgqsr, pgqns, pgemp + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update should be called with with.' + CALL abort + +END SUBROUTINE nemogcmcoup_update + +SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & + & npoints, pgsst, pgtsk, kt, ldebug ) + + ! Update addetiona in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Input on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & + & pgsst, pgtsk + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom. Commented ABORT. Proceeding...' + !CALL abort + + +END SUBROUTINE nemogcmcoup_update_add + + +SUBROUTINE nemogcmcoup_wam_coupinit( mype, npes, icomm, & + & nlocpoints, nglopoints, & + & nlocmsk, ngloind, iunit ) + + ! Initialize single executable coupling between WAM and NEMO + ! This is called from WAM. + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! WAM grid information + ! Number of local and global points + INTEGER, INTENT(IN) :: nlocpoints, nglopoints + ! Integer mask and global indices + INTEGER, DIMENSION(nlocpoints), INTENT(IN) :: nlocmsk, ngloind + ! Unit for output in parinter_init + INTEGER :: iunit + + WRITE(0,*)'Wam coupling not implemented for FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_coupinit + + +SUBROUTINE nemogcmcoup_wam_get( mype, npes, icomm, & + & nopoints, pwsst, pwicecov, pwicethk, & + & pwucur, pwvcur, licethk ) + + ! Interpolate from the ORCA grid + ! to the WAM grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number WAM grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice cover, ice thickness and currents + REAL(wpIFS), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur + LOGICAL :: licethk + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_get + + +SUBROUTINE nemogcmcoup_wam_update( mype, npes, icomm, & + & npoints, pwswh, pwmwp, & + & pwphioc, pwtauoc, pwstrn, & + & pwustokes, pwvstokes, & + & cdtpro, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & + & pwswh, pwmwp, pwphioc, pwtauoc, pwstrn, pwustokes, pwvstokes + ! Current time + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update + + +SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & + & pwutau, pwvtau, pwuv10n, pwphif,& + & cdtpro, ldebug ) + + ! Update stresses in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & + & pwutau, pwvtau, pwuv10n, pwphif + ! Current time step + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update_stress should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update_stress +#endif From 151189539eba176315d9658dc2665189a2da658e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 11 Nov 2021 11:07:35 +0100 Subject: [PATCH 546/909] remove files before overwriting with identical ones (ifs_notused without nemogcmcoup_end_ioserver) from another branch to keep the git history --- src/ifs_interface/ifs_interface.F90 | 1506 ---------------------- src/ifs_interface/ifs_modules.F90 | 1859 --------------------------- src/ifs_interface/ifs_notused.F90 | 371 ------ 3 files changed, 3736 deletions(-) delete mode 100644 src/ifs_interface/ifs_interface.F90 delete mode 100644 src/ifs_interface/ifs_modules.F90 delete mode 100644 src/ifs_interface/ifs_notused.F90 diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 deleted file mode 100644 index 4467dfa9a..000000000 --- a/src/ifs_interface/ifs_interface.F90 +++ /dev/null @@ -1,1506 +0,0 @@ -#if defined (__ifsinterface) -!===================================================== -! IFS interface for calling FESOM2 as a subroutine. -! -! -Original code for NEMO by Kristian Mogensen, ECMWF. -! -Adapted to FESOM2 by Thomas Rackow, AWI, 2018. -!----------------------------------------------------- - -MODULE nemogcmcoup_steps - INTEGER :: substeps !per IFS timestep -END MODULE nemogcmcoup_steps - -SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & - & lwaveonly, iatmunit, lwrite ) - - ! Initialize the FESOM model for single executable coupling - - USE par_kind !in ifs_modules.F90 - USE g_PARSUP, only: MPI_COMM_FESOM, mype - USE g_config, only: dt - USE g_clock, only: timenew, daynew, yearnew, month, day_in_month - USE nemogcmcoup_steps, ONLY : substeps - - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: icomm - ! Initial date (e.g. 20170906), time, initial timestep and final time step - INTEGER, INTENT(OUT) :: inidate, initime, itini, itend - ! Length of the time step - REAL(wpIFS), INTENT(OUT) :: zstp - - ! inherited from interface to NEMO, not used here: - ! Coupling to waves only - LOGICAL, INTENT(IN) :: lwaveonly - ! Logfile unit (used if >=0) - INTEGER :: iatmunit - ! Write to this unit - LOGICAL :: lwrite - ! FESOM might perform substeps - INTEGER :: itend_fesom - INTEGER :: i - NAMELIST/namfesomstep/substeps - - ! TODO hard-coded here, put in namelist - substeps=2 - OPEN(9,file='namfesomstep.in') - READ(9,namfesomstep) - CLOSE(9) - - MPI_COMM_FESOM=icomm - itini = 1 - CALL main_initialize(itend_fesom) !also sets mype and npes - itend=itend_fesom/substeps - if(mype==0) then - WRITE(0,*)'!======================================' - WRITE(0,*)'! FESOM is initialized from within IFS.' - WRITE(0,*)'! get MPI_COMM_FESOM. =================' - WRITE(0,*)'! main_initialize done. ===============' - endif - - ! Set more information for the caller - - ! initial date and time (time is not used) - inidate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 - initime = 0 - if(mype==0) then - WRITE(0,*)'! FESOM initial date is ', inidate ,' ======' - WRITE(0,*)'! FESOM substeps are ', substeps ,' ======' - endif - - ! fesom timestep (as seen by IFS) - zstp = REAL(substeps,wpIFS)*dt - if(mype==0) then - WRITE(0,*)'! FESOM timestep as seen by IFS is ', real(zstp,4), 'sec (',substeps,'xdt)' - WRITE(0,*)'!======================================' - endif - -END SUBROUTINE nemogcmcoup_init - - -SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & - & npoints, nlocmsk, ngloind ) - - ! FESOM modules - USE g_PARSUP, only: mype, npes, myDim_nod2D, eDim_nod2D, myDim_elem2D, eDim_elem2D, eXDim_elem2D, & - myDim_edge2D, eDim_edge2D, myList_nod2D, myList_elem2D - USE MOD_MESH - !USE o_MESH, only: nod2D, elem2D - USE g_init2timestepping, only: meshinmod - - ! Initialize single executable coupling - USE parinter - USE scripremap - USE interinfo - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mypeIN,npesIN,icomm - ! Gaussian grid information - ! Number of points - INTEGER, INTENT(IN) :: npoints - ! Integer mask and global indices - INTEGER, DIMENSION(npoints), INTENT(IN) :: nlocmsk, ngloind - INTEGER :: iunit = 0 - - ! Local variables - type(t_mesh), target :: mesh - integer , pointer :: nod2D - integer , pointer :: elem2D - - ! Namelist containing the file names of the weights - CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & - & cdfile_T_to_gauss, cdfile_UV_to_gauss - CHARACTER(len=256) :: cdpathdist - LOGICAL :: lwritedist, lreaddist - LOGICAL :: lcommout - CHARACTER(len=128) :: commoutprefix - NAMELIST/namfesomcoup/cdfile_gauss_to_T,& - & cdfile_gauss_to_UV,& - & cdfile_T_to_gauss,& - & cdfile_UV_to_gauss,& - & cdpathdist, & - & lreaddist, & - & lwritedist, & - & lcommout, & - & commoutprefix,& - & lparbcast - - ! Global number of gaussian gridpoints - INTEGER :: nglopoints - ! Ocean grids accessed with NEMO modules - INTEGER :: noglopoints,nopoints - INTEGER, ALLOCATABLE, DIMENSION(:) :: omask,ogloind - ! SCRIP remapping data structures. - TYPE(scripremaptype) :: remap_gauss_to_T, remap_T_to_gauss, & - & remap_gauss_to_UV, remap_UV_to_gauss - ! Misc variables - INTEGER :: i,j,k,ierr - LOGICAL :: lexists - - ! associate the mesh, only what is needed here - ! #include "associate_mesh.h" - mesh = meshinmod - nod2D => mesh%nod2D - elem2D => mesh%elem2D - - - ! here FESOM knows about the (total number of) MPI tasks - - if(mype==0) then - write(*,*) 'MPI has been initialized in the atmospheric model' - write(*, *) 'Running on ', npes, ' PEs' - end if - - ! Read namelists - - cdfile_gauss_to_T = 'gausstoT.nc' - cdfile_gauss_to_UV = 'gausstoUV.nc' - cdfile_T_to_gauss = 'Ttogauss.nc' - cdfile_UV_to_gauss = 'UVtogauss.nc' - lcommout = .FALSE. - commoutprefix = 'parinter_comm' - cdpathdist = './' - lreaddist = .FALSE. - lwritedist = .FALSE. - - OPEN(9,file='namfesomcoup.in') - READ(9,namfesomcoup) - CLOSE(9) - - ! Global number of Gaussian gridpoints - - CALL mpi_allreduce( npoints, nglopoints, 1, & - & mpi_integer, mpi_sum, icomm, ierr) - - - if(mype==0) then - WRITE(0,*)'!======================================' - WRITE(0,*)'! SCALARS =============================' - - WRITE(0,*)'Update FESOM global scalar points' - endif - - noglopoints=nod2D - nopoints=myDim_nod2d - - ! Ocean mask and global indicies - - ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) - omask(:)= 1 ! all points are ocean points - ogloind(1:myDim_nod2d)= myList_nod2D(1:myDim_nod2d) ! global index for local point number - - ! Could be helpful later: - ! Replace global numbering with a local one - ! tmp(1:nod2d)=0 - ! DO n=1, myDim_nod2D+eDim_nod2D - ! tmp(myList_nod2D(n))=n - - ! Read the interpolation weights and setup the parallel interpolation - ! from atmosphere Gaussian grid to ocean T-grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoT, & - & cdpathdist,'ifs_to_fesom_gridT',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_gauss_to_T,remap_gauss_to_T,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_gauss_to_T,remap_gauss_to_T) - ENDIF - CALL parinter_init( mype, npes, icomm, & - & npoints, nglopoints, nlocmsk, ngloind, & - & nopoints, noglopoints, omask, ogloind, & - & remap_gauss_to_T, gausstoT, lcommout, TRIM(commoutprefix)//'_gtoT', & - & iunit ) - CALL scripremap_dealloc(remap_gauss_to_T) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoT, & - & cdpathdist,'ifs_to_fesom_gridT') - ENDIF - ENDIF - - ! From ocean T-grid to atmosphere Gaussian grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, noglopoints, nglopoints, Ttogauss, & - & cdpathdist,'fesom_gridT_to_ifs',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_T_to_gauss,remap_T_to_gauss,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_T_to_gauss,remap_T_to_gauss) - ENDIF - - CALL parinter_init( mype, npes, icomm, & - & nopoints, noglopoints, omask, ogloind, & - & npoints, nglopoints, nlocmsk, ngloind, & - & remap_T_to_gauss, Ttogauss, lcommout, TRIM(commoutprefix)//'_Ttog', & - & iunit ) - CALL scripremap_dealloc(remap_T_to_gauss) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, noglopoints, nglopoints, Ttogauss, & - & cdpathdist,'fesom_gridT_to_ifs') - ENDIF - ENDIF - - DEALLOCATE(omask,ogloind) - - - if(mype==0) then - WRITE(0,*)'!======================================' - WRITE(0,*)'! VECTORS =============================' - - WRITE(0,*)'Update FESOM global vector points' - endif - noglopoints=elem2D - nopoints=myDim_elem2D - - ! Ocean mask and global indicies - - ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) - - omask(:)= 1 ! all elements are in the ocean - ogloind(1:myDim_elem2D) = myList_elem2D(1:myDim_elem2D) ! global index for local element number - - ! Read the interpolation weights and setup the parallel interpolation - ! from atmosphere Gaussian grid to ocean UV-grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoUV, & - & cdpathdist,'ifs_to_fesom_gridUV',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_gauss_to_UV,remap_gauss_to_UV,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_gauss_to_UV,remap_gauss_to_UV) - ENDIF - CALL parinter_init( mype, npes, icomm, & - & npoints, nglopoints, nlocmsk, ngloind, & - & nopoints, noglopoints, omask, ogloind, & - & remap_gauss_to_UV, gausstoUV, lcommout, TRIM(commoutprefix)//'_gtoUV', & - & iunit ) - CALL scripremap_dealloc(remap_gauss_to_UV) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoUV, & - & cdpathdist,'ifs_to_fesom_gridUV') - ENDIF - ENDIF - - ! From ocean UV-grid to atmosphere Gaussian grid - - IF (lreaddist) THEN - CALL parinter_read( mype, npes, noglopoints, nglopoints, UVtogauss, & - & cdpathdist,'fesom_gridUV_to_ifs',lexists) - ENDIF - IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN - IF (lparbcast) THEN - CALL scripremap_read_sgl(cdfile_UV_to_gauss,remap_UV_to_gauss,& - & mype,npes,icomm,.TRUE.) - ELSE - CALL scripremap_read(cdfile_UV_to_gauss,remap_UV_to_gauss) - ENDIF - - CALL parinter_init( mype, npes, icomm, & - & nopoints, noglopoints, omask, ogloind, & - & npoints, nglopoints, nlocmsk, ngloind, & - & remap_UV_to_gauss, UVtogauss, lcommout, TRIM(commoutprefix)//'_UVtog', & - & iunit ) - CALL scripremap_dealloc(remap_UV_to_gauss) - IF (lwritedist) THEN - CALL parinter_write( mype, npes, noglopoints, nglopoints, UVtogauss, & - & cdpathdist,'fesom_gridUV_to_ifs') - ENDIF - ENDIF - - DEALLOCATE(omask,ogloind) - -END SUBROUTINE nemogcmcoup_coupinit - - -SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & - & nopoints, pgsst, pgist, pgalb, & - & pgifr, pghic, pghsn, pgucur, pgvcur, & - & pgistl, licelvls ) - - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the FESOM grid to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind ! in ifs_modules.F90 - USE o_ARRAYS, ONLY : tr_arr, UV - USE i_arrays, ONLY : m_ice, a_ice, m_snow - USE i_therm_param, ONLY : tmelt - !USE o_PARAM, ONLY : WP - USE g_PARSUP, only: myDim_nod2D,eDim_nod2D, myDim_elem2D,eDim_elem2D,eXDim_elem2D - !USE o_MESH, only: elem2D_nodes, coord_nod2D - USE MOD_MESH - USE g_init2timestepping, only: meshinmod - - USE g_rotate_grid, only: vector_r2g - USE parinter - USE scripremap - USE interinfo - - IMPLICIT NONE - - ! Arguments - REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur - REAL(wpIFS), DIMENSION(nopoints,3) :: pgistl - LOGICAL :: licelvls - - type(t_mesh), target :: mesh - real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D - integer, dimension(:,:) , pointer :: elem2D_nodes - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints - - ! Local variables - REAL(wpIFS), DIMENSION(myDim_nod2D) :: zsend - REAL(wpIFS), DIMENSION(myDim_elem2D) :: zsendU, zsendV - INTEGER :: elnodes(3) - REAL(wpIFS) :: rlon, rlat - - ! Loop variables - INTEGER :: n, elem, ierr - - !#include "associate_mesh.h" - ! associate what is needed only - mesh = meshinmod - coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D - elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes - - - ! =================================================================== ! - ! Pack SST data and convert to K. 'pgsst' is on Gauss grid. - do n=1,myDim_nod2D - zsend(n)=tr_arr(1, n, 1)+tmelt ! sea surface temperature [K], - ! (1=surface, n=node, 1/2=T/S) - enddo - - ! Interpolate SST - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pgsst ) - - - ! =================================================================== ! - ! Pack ice fraction data [0..1] and interpolate: 'pgifr' on Gauss. - ! zsend(:)=a_ice(:) - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, a_ice, & - & nopoints, pgifr ) - - - ! =================================================================== ! - ! Pack ice temperature data (already in K) - zsend(:)=273.15 - - ! Interpolate ice surface temperature: 'pgist' on Gaussian grid. - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pgist ) - - - ! =================================================================== ! - ! Pack ice albedo data and interpolate: 'pgalb' on Gaussian grid. - zsend(:)=0.7 - - ! Interpolate ice albedo - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pgalb ) - - - ! =================================================================== ! - ! Pack ice thickness data and interpolate: 'pghic' on Gaussian grid. - zsend(:)=m_ice(:)/max(a_ice(:),0.01) ! ice thickness (mean over ice) - - ! Interpolation of average ice thickness - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pghic ) - - - ! =================================================================== ! - ! Pack snow thickness data and interpolate: 'pghsn' on Gaussian grid. - zsend(:)=m_snow(:)/max(a_ice(:),0.01) ! snow thickness (mean over ice) - - ! Interpolation of snow thickness - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pghsn ) - - - ! =================================================================== ! - ! Surface currents need to be rotated to geographical grid - - ! Pack u(v) surface currents - zsendU(:)=UV(1,1,1:myDim_elem2D) - zsendV(:)=UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here - - do elem=1, myDim_elem2D - - ! compute element midpoints - elnodes=elem2D_nodes(:,elem) - rlon=sum(coord_nod2D(1,elnodes))/3.0_wpIFS - rlat=sum(coord_nod2D(2,elnodes))/3.0_wpIFS - - ! Rotate vectors to geographical coordinates (r2g) - call vector_r2g(zsendU(elem), zsendV(elem), rlon, rlat, 0) ! 0-flag for rot. coord - - end do - -#ifdef FESOM_TODO - - ! We need to sort out the non-unique global index before we - ! can couple currents - - ! Interpolate: 'pgucur' and 'pgvcur' on Gaussian grid. - CALL parinter_fld( mype, npes, icomm, UVtogauss, & - & myDim_elem2D, zsendU, & - & nopoints, pgucur ) - - CALL parinter_fld( mype, npes, icomm, UVtogauss, & - & myDim_elem2D, zsendV, & - & nopoints, pgvcur ) - -#else - - pgucur(:) = 0.0 - pgvcur(:) = 0.0 - -#endif - -#ifndef FESOM_TODO - - if(mype==0) then - WRITE(0,*)'Everything implemented except ice level temperatures (licelvls).' - endif - -#else - - ! Ice level temperatures - - IF (licelvls) THEN - -#if defined key_lim2 - - DO jl = 1, 3 - - ! Pack ice temperatures data at level jl(already in K) - - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = tbif (ji,jj,jl) - ENDDO - ENDDO - - ! Interpolate ice temperature at level jl - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgistl(:,jl) ) - - ENDDO - -#else - WRITE(0,*)'licelvls needs to be sorted for LIM3' - CALL abort -#endif - - ENDIF - - IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) - -#endif - -END SUBROUTINE nemogcmcoup_lim2_get - - -SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & - & npoints, & - & taux_oce, tauy_oce, taux_ice, tauy_ice, & - & qs___oce, qs___ice, qns__oce, qns__ice, dqdt_ice, & - & evap_tot, evap_ice, prcp_liq, prcp_sol, & - & runoffIN, ocerunoff, tcc, lcc, tice_atm, & - & kt, ldebug, loceicemix, lqnsicefilt ) - - ! Update fluxes in nemogcmcoup_data by parallel - ! interpolation of the input gaussian grid data - - USE par_kind !in ifs_modules.F90 - USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex, eDim_nod2D, eDim_elem2D, eXDim_elem2D, myDim_edge2D, eDim_edge2D - !USE o_MESH, only: coord_nod2D !elem2D_nodes - USE MOD_MESH - USE g_init2timestepping, only: meshinmod - !USE o_PARAM, ONLY : WP, use wpIFS from par_kind (IFS) - USE g_rotate_grid, only: vector_r2g, vector_g2r - USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & - & evap_no_ifrac, sublimation !'longwave' only stand-alone, 'evaporation' filled later - USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, oce_heat_flux, ice_heat_flux - USE o_ARRAYS, only: stress_atmoce_x, stress_atmoce_y - USE g_comm_auto ! exchange_nod does the halo exchange - - ! all needed? - USE parinter - USE scripremap - USE interinfo - - IMPLICIT NONE - - ! =================================================================== ! - ! Arguments ========================================================= ! - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Fluxes on the Gaussian grid. - INTEGER, INTENT(IN) :: npoints - REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & - & taux_oce, tauy_oce, taux_ice, tauy_ice, & - & qs___oce, qs___ice, qns__oce, qns__ice, & - & dqdt_ice, evap_tot, evap_ice, prcp_liq, prcp_sol, & - & runoffIN, ocerunoff, tcc, lcc, tice_atm - - ! Current time step - INTEGER, INTENT(in) :: kt - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - ! QS/QNS mixed switch - LOGICAL, INTENT(IN) :: loceicemix - ! QNS ice filter switch (requires tice_atm to be sent) - LOGICAL, INTENT(IN) :: lqnsicefilt - - type(t_mesh), target :: mesh - - ! Local variables - INTEGER :: n - REAL(wpIFS), parameter :: rhofwt = 1000. ! density of freshwater - - - ! Packed receive buffer - REAL(wpIFS), DIMENSION(myDim_nod2D) :: zrecv - REAL(wpIFS), DIMENSION(myDim_elem2D):: zrecvU, zrecvV - - - !#include "associate_mesh.h" - ! associate only the necessary things - real(kind=WP), dimension(:,:), pointer :: coord_nod2D - mesh = meshinmod - coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D - - ! =================================================================== ! - ! Sort out incoming arrays from the IFS and put them on the ocean grid - - ! TODO - shortwave(:)=0. ! Done, updated below. What to do with shortwave over ice?? - !longwave(:)=0. ! Done. Only used in stand-alone mode. - prec_rain(:)=0. ! Done, updated below. - prec_snow(:)=0. ! Done, updated below. - evap_no_ifrac=0. ! Done, updated below. This is evap over ocean, does this correspond to evap_tot? - sublimation=0. ! Done, updated below. - ! - ice_heat_flux=0. ! Done. This is qns__ice currently. Is this the non-solar heat flux? ! non solar heat fluxes below ! (qns) - oce_heat_flux=0. ! Done. This is qns__oce currently. Is this the non-solar heat flux? - ! - runoff(:)=0. ! not used apparently. What is runoffIN, ocerunoff? - !evaporation(:)=0. - !ice_thermo_cpl.F90: !---- total evaporation (needed in oce_salt_balance.F90) - !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice - stress_atmice_x=0. ! Done, taux_ice - stress_atmice_y=0. ! Done, tauy_ice - stress_atmoce_x=0. ! Done, taux_oce - stress_atmoce_y=0. ! Done, tauy_oce - - - ! =================================================================== ! - !1. Interpolate ocean solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & - & myDim_nod2D, zrecv ) - - ! Unpack ocean solar radiation, without halo - shortwave(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - - ! Do the halo exchange - call exchange_nod(shortwave) - - - ! =================================================================== ! - !2. Interpolate ice solar radiation to T grid - ! DO NOTHING - - - ! =================================================================== ! - !3. Interpolate ocean non-solar radiation to T grid (is this non-solar heat flux?) - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & - & myDim_nod2D, zrecv ) - - ! Unpack ocean non-solar, without halo - oce_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - - ! Do the halo exchange - call exchange_nod(oce_heat_flux) - - - ! =================================================================== ! - !4. Interpolate non-solar radiation over ice to T grid (is this non-solar heat flux?) - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & - & myDim_nod2D, zrecv ) - - ! Unpack ice non-solar - ice_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - - ! Do the halo exchange - call exchange_nod(ice_heat_flux) - - - ! =================================================================== ! - !5. D(q)/dT to T grid - ! DO NOTHING - - - ! =================================================================== ! - !6. Interpolate total evaporation to T grid - ! =================================================================== ! - !ice_thermo_cpl.F90: total evaporation (needed in oce_salt_balance.F90) - !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice - ! =================================================================== ! - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & - & myDim_nod2D, zrecv ) - - ! Unpack total evaporation, without halo - evap_no_ifrac(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign - - ! Do the halo exchange - call exchange_nod(evap_no_ifrac) - - !7. Interpolate sublimation (evaporation over ice) to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & - & myDim_nod2D, zrecv ) - - ! Unpack sublimation (evaporation over ice), without halo - sublimation(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign - - ! Do the halo exchange - call exchange_nod(sublimation) - ! =================================================================== ! - ! =================================================================== ! - - - ! =================================================================== ! - !8. Interpolate liquid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & - & myDim_nod2D, zrecv ) - - ! Unpack liquid precipitation, without halo - prec_rain(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s - - ! Do the halo exchange - call exchange_nod(prec_rain) - - - ! =================================================================== ! - !9. Interpolate solid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & - & myDim_nod2D, zrecv ) - - ! Unpack solid precipitation, without halo - prec_snow(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s - - ! Do the halo exchange - call exchange_nod(prec_snow) - - - ! =================================================================== ! - !10. Interpolate runoff to T grid - ! - !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & - ! & myDim_nod2D, zrecv ) - ! - ! Unpack runoff, without halo - !runoff(1:myDim_nod2D)=zrecv(1:myDim_nod2D) !conversion?? - ! - ! Do the halo exchange - !call exchange_nod(runoff) - ! - !11. Interpolate ocean runoff to T grid - ! - !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & - ! & myDim_nod2D, zrecv ) - ! - ! Unpack ocean runoff - ! ?? - - !12. Interpolate total cloud fractions to T grid (tcc) - ! - !13. Interpolate low cloud fractions to T grid (lcc) - - - ! =================================================================== ! - ! STRESSES - - ! OVER OCEAN: - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_oce, & - & myDim_nod2D, zrecv ) - - ! Unpack x stress atm->oce, without halo; then do halo exchange - stress_atmoce_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmoce_x) - - ! - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_oce, & - & myDim_nod2D, zrecv ) - - ! Unpack y stress atm->oce, without halo; then do halo exchange - stress_atmoce_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmoce_y) - - ! =================================================================== ! - ! OVER ICE: - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_ice, & - & myDim_nod2D, zrecv ) - - ! Unpack x stress atm->ice, without halo; then do halo exchange - stress_atmice_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmice_x) - - ! - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_ice, & - & myDim_nod2D, zrecv ) - - ! Unpack y stress atm->ice, without halo; then do halo exchange - stress_atmice_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmice_y) - - - ! =================================================================== ! - ! ROTATE VECTORS FROM GEOGRAPHIC TO FESOMS ROTATED GRID - - !if ((do_rotate_oce_wind .AND. do_rotate_ice_wind) .AND. rotated_grid) then - do n=1, myDim_nod2D+eDim_nod2D - call vector_g2r(stress_atmoce_x(n), stress_atmoce_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) !0-flag for rot. coord. - call vector_g2r(stress_atmice_x(n), stress_atmice_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) - end do - !do_rotate_oce_wind=.false. - !do_rotate_ice_wind=.false. - !end if - - -#ifdef FESOM_TODO - - ! Packed receive buffer - REAL(wpIFS), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zrecv - ! Unpacked fields on ORCA grids - REAL(wpIFS), DIMENSION(jpi,jpj) :: zqs___oce, zqs___ice, zqns__oce, zqns__ice - REAL(wpIFS), DIMENSION(jpi,jpj) :: zdqdt_ice, zevap_tot, zevap_ice, zprcp_liq, zprcp_sol - REAL(wpIFS), DIMENSION(jpi,jpj) :: zrunoff, zocerunoff - REAL(wpIFS), DIMENSION(jpi,jpj) :: ztmp, zicefr - ! Arrays for rotation - REAL(wpIFS), DIMENSION(jpi,jpj) :: zuu,zvu,zuv,zvv,zutau,zvtau - ! Lead fraction for both LIM2/LIM3 - REAL(wpIFS), DIMENSION(jpi,jpj) :: zfrld - ! Mask for masking for I grid - REAL(wpIFS) :: zmsksum - ! For summing up LIM3 contributions to ice temperature - REAL(wpIFS) :: zval,zweig - - ! Loop variables - INTEGER :: ji,jj,jk,jl - ! netCDF debugging output variables - CHARACTER(len=128) :: cdoutfile - INTEGER :: inum - REAL(wpIFS) :: zhook_handle ! Dr Hook handle - - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',0,zhook_handle) - IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_update') - - ! Allocate the storage data - - IF (.NOT.lallociceflx) THEN - ALLOCATE( & - & zsqns_tot(jpi,jpj), & - & zsqns_ice(jpi,jpj), & - & zsqsr_tot(jpi,jpj), & - & zsqsr_ice(jpi,jpj), & - & zsemp_tot(jpi,jpj), & - & zsemp_ice(jpi,jpj), & - & zsevap_ice(jpi,jpj), & - & zsdqdns_ice(jpi,jpj), & - & zssprecip(jpi,jpj), & - & zstprecip(jpi,jpj), & - & zstcc(jpi,jpj), & - & zslcc(jpi,jpj), & - & zsatmist(jpi,jpj), & - & zsqns_ice_add(jpi,jpj)& - & ) - lallociceflx = .TRUE. - ENDIF - IF (.NOT.lallocstress) THEN - ALLOCATE( & - & zsutau(jpi,jpj), & - & zsvtau(jpi,jpj), & - & zsutau_ice(jpi,jpj), & - & zsvtau_ice(jpi,jpj) & - & ) - lallocstress = .TRUE. - ENDIF - - ! Sort out incoming arrays from the IFS and put them on the ocean grid - - !1. Interpolate ocean solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean solar radiation - - zqs___oce(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqs___oce(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !2. Interpolate ice solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice solar radiation - - zqs___ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqs___ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !3. Interpolate ocean non-solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean non-solar radiation - - zqns__oce(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqns__oce(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !4. Interpolate ice non-solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice non-solar radiation - - zqns__ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqns__ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !5. Interpolate D(q)/dT to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, dqdt_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack D(q)/D(T) - - zdqdt_ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zdqdt_ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !6. Interpolate total evaporation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack total evaporation - - zevap_tot(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zevap_tot(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !7. Interpolate evaporation over ice to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack evaporation over ice - - zevap_ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zevap_ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !8. Interpolate liquid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack liquid precipitation - - zprcp_liq(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zprcp_liq(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !9. Interpolate solid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack precipitation over ice - - zprcp_sol(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zprcp_sol(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !10. Interpolate runoff to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack runoff - - zrunoff(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zrunoff(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !11. Interpolate ocean runoff to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean runoff - - zocerunoff(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zocerunoff(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !12. Interpolate total cloud fractions to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tcc, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean runoff - - zstcc(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zstcc(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !13. Interpolate low cloud fractions to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, lcc, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean runoff - - zslcc(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zslcc(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! get sea ice fraction and lead fraction - -#if defined key_lim2 - zfrld(:,:) = frld(:,:) - zicefr(:,:) = 1 - zfrld(:,:) -#else - zicefr(:,:) = 0.0_wpIFS - DO jl = 1, jpl - zicefr(:,:) = zicefr(:,:) + a_i(:,:,jl) - ENDDO - zfrld(:,:) = 1 - zicefr(:,:) -#endif - - zsemp_tot(:,:) = zevap_tot(:,:) - zprcp_liq(:,:) - zprcp_sol(:,:) - zstprecip(:,:) = zprcp_liq(:,:) + zprcp_sol(:,:) - ! More consistent with NEMO, but does changes the results, so - ! we don't do it for now. - ! zsemp_tot(:,:) = zevap_tot(:,:) - zstprecip(:,:) - zsemp_ice(:,:) = zevap_ice(:,:) - zprcp_sol(:,:) - zssprecip(:,:) = - zsemp_ice(:,:) - zsemp_tot(:,:) = zsemp_tot(:,:) - zrunoff(:,:) - zsemp_tot(:,:) = zsemp_tot(:,:) - zocerunoff(:,:) - zsevap_ice(:,:) = zevap_ice(:,:) - - ! non solar heat fluxes ! (qns) - IF (loceicemix) THEN - zsqns_tot(:,:) = zqns__oce(:,:) - ELSE - zsqns_tot(:,:) = zfrld(:,:) * zqns__oce(:,:) + zicefr(:,:) * zqns__ice(:,:) - ENDIF - zsqns_ice(:,:) = zqns__ice(:,:) - ztmp(:,:) = zfrld(:,:) * zprcp_sol(:,:) * lfus ! add the latent heat of solid precip. melting - - zsqns_tot(:,:) = zsqns_tot(:,:) - ztmp(:,:) ! over free ocean - ! solar heat fluxes ! (qsr) - - IF (loceicemix) THEN - zsqsr_tot(:,:) = zqs___oce(:,:) - ELSE - zsqsr_tot(:,:) = zfrld(:,:) * zqs___oce(:,:) + zicefr(:,:) * zqs___ice(:,:) - ENDIF - zsqsr_ice(:,:) = zqs___ice(:,:) - - IF( ln_dm2dc ) THEN ! modify qsr to include the diurnal cycle - zsqsr_tot(:,:) = sbc_dcy( zsqsr_tot(:,:) ) - zsqsr_ice(:,:) = sbc_dcy( zsqsr_ice(:,:) ) - ENDIF - - zsdqdns_ice(:,:) = zdqdt_ice(:,:) - - ! Apply lateral boundary condition - - CALL lbc_lnk(zsqns_tot, 'T', 1.0) - CALL lbc_lnk(zsqns_ice, 'T', 1.0) - CALL lbc_lnk(zsqsr_tot, 'T', 1.0) - CALL lbc_lnk(zsqsr_ice, 'T', 1.0) - CALL lbc_lnk(zsemp_tot, 'T', 1.0) - CALL lbc_lnk(zsemp_ice, 'T', 1.0) - CALL lbc_lnk(zsdqdns_ice, 'T', 1.0) - CALL lbc_lnk(zssprecip, 'T', 1.0) - CALL lbc_lnk(zstprecip, 'T', 1.0) - CALL lbc_lnk(zstcc, 'T', 1.0) - CALL lbc_lnk(zslcc, 'T', 1.0) - - ! Interpolate atmospheric ice temperature to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tice_atm, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack atmospheric ice temperature - - zsatmist(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zsatmist(ji,jj) = zrecv(jk) - ENDDO - ENDDO - CALL lbc_lnk(zsatmist, 'T', 1.0) - - zsqns_ice_add(:,:) = 0.0_wpIFS - - ! Use the dqns_ice filter - - IF (lqnsicefilt) THEN - - ! Add filtr to qns_ice - -#if defined key_lim2 - ztmp(:,:) = tn_ice(:,:,1) -#else - DO jj = nldj, nlej - DO ji = nldi, nlei - zval=0.0 - zweig=0.0 - DO jl = 1, jpl - zval = zval + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) - zweig = zweig + a_i(ji,jj,jl) - ENDDO - IF ( zweig > 0.0 ) THEN - ztmp(ji,jj) = zval /zweig - ELSE - ztmp(ji,jj) = rt0 - ENDIF - ENDDO - ENDDO - CALL lbc_lnk(ztmp, 'T', 1.0) -#endif - - WHERE ( zicefr(:,:) > .001_wpIFS ) - zsqns_ice_add(:,:) = zsdqdns_ice(:,:) * ( ztmp(:,:) - zsatmist(:,:) ) - END WHERE - - zsqns_ice(:,:) = zsqns_ice(:,:) + zsqns_ice_add(:,:) - - ENDIF - - ! Interpolate u-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack u stress on U grid - - zuu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate v-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack v stress on U grid - - zvu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate u-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack u stress on V grid - - zuv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate v-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack v stress on V grid - - zvv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Rotate stresses from en to ij and put u,v stresses on U,V grids - - CALL repcmo( zuu, zvu, zuv, zvv, zsutau, zsvtau ) - - ! Apply lateral boundary condition on u,v stresses on the U,V grids - - CALL lbc_lnk( zsutau, 'U', -1.0 ) - CALL lbc_lnk( zsvtau, 'V', -1.0 ) - - ! Interpolate ice u-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice u stress on U grid - - zuu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate ice v-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice v stress on U grid - - zvu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate ice u-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice u stress on V grid - - zuv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate ice v-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice v stress on V grid - - zvv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Rotate stresses from en to ij and put u,v stresses on U,V grids - - CALL repcmo( zuu, zvu, zuv, zvv, zutau, zvtau ) - - ! Apply lateral boundary condition on u,v stresses on the U,V grids - - CALL lbc_lnk( zutau, 'U', -1.0 ) - CALL lbc_lnk( zvtau, 'V', -1.0 ) - -#if defined key_lim2_vp - - ! Convert to I grid for LIM2 for key_lim_vp - DO jj = 2, jpjm1 ! (U,V) ==> I - DO ji = 2, jpim1 ! NO vector opt. - zmsksum = umask(ji-1,jj,1) + umask(ji-1,jj-1,1) - zsutau_ice(ji,jj) = ( umask(ji-1,jj,1) * zutau(ji-1,jj) + & - & umask(ji-1,jj-1,1) * zutau(ji-1,jj-1) ) - IF ( zmsksum > 0.0 ) THEN - zsutau_ice(ji,jj) = zsutau_ice(ji,jj) / zmsksum - ENDIF - zmsksum = vmask(ji,jj-1,1) + vmask(ji-1,jj-1,1) - zsvtau_ice(ji,jj) = ( vmask(ji,jj-1,1) * zvtau(ji,jj-1) + & - & vmask(ji-1,jj-1,1) * zvtau(ji-1,jj-1) ) - IF ( zmsksum > 0.0 ) THEN - zsvtau_ice(ji,jj) = zsvtau_ice(ji,jj) / zmsksum - ENDIF - END DO - END DO - -#else - - zsutau_ice(:,:) = zutau(:,:) - zsvtau_ice(:,:) = zvtau(:,:) - -#endif - - CALL lbc_lnk( zsutau_ice, 'I', -1.0 ) - CALL lbc_lnk( zsvtau_ice, 'I', -1.0 ) - - ! Optionally write files write the data on the ORCA grid via IOM. - - IF (ldebug) THEN - WRITE(cdoutfile,'(A,I8.8)') 'zsutau_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsutau' , zsutau ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsvtau' , zsvtau ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsutau_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsutau_ice' , zsutau_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsvtau_ice' , zsvtau_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqns_tot_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqns_tot' , zsqns_tot ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqns_ice' , zsqns_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_tot_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqsr_tot' , zsqsr_tot ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqsr_ice' , zsqsr_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsemp_tot_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsemp_tot' , zsemp_tot ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsemp_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsemp_ice' , zsemp_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsdqdns_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsdqdns_ice' , zsdqdns_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zssprecip_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zssprecip' , zssprecip ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zstprecip_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zstprecip' , zstprecip ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsevap_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsevap_ice' , zsevap_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zstcc_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zstcc' , zstcc ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zslcc_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zslcc' , zslcc ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsatmist_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsatmist' , zsatmist ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_add_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqns_ice_add' , zsqns_ice_add ) - CALL iom_close( inum ) - ENDIF - - IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_update') - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',1,zhook_handle) - -#else - - !FESOM part - !WRITE(0,*)'nemogcmcoup_lim2_update partially implemented. Proceeding...' - !CALL par_ex - -#endif - -END SUBROUTINE nemogcmcoup_lim2_update - - -SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) - - USE g_clock, only: yearnew, month, day_in_month - USE g_PARSUP, only: mype - USE nemogcmcoup_steps, ONLY : substeps - IMPLICIT NONE - - ! Arguments - - ! Time step - INTEGER, INTENT(IN) :: istp - - ! Data and time from NEMO - INTEGER, INTENT(OUT) :: icdate, ictime - - if(mype==0) then - WRITE(0,*)'! IFS at timestep ', istp, '. Do ', substeps , 'FESOM timesteps...' - endif - CALL main_timestepping(substeps) - - ! Compute date and time at the end of the time step - - icdate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 - ictime = 0 ! (time is not used) - - if(mype==0) then - WRITE(0,*)'! FESOM date at end of timestep is ', icdate ,' ======' - endif - -#ifdef FESOM_TODO - iye = ndastp / 10000 - imo = ndastp / 100 - iye * 100 - ida = MOD( ndastp, 100 ) - CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) - zjul = zjul + ( nsec_day + 0.5_wpIFS * rdttra(1) ) / 86400.0_wpIFS - CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) - icdate = iye * 10000 + imo * 100 + ida - ictime = ihh * 10000 + imm * 100 + iss -#endif - -END SUBROUTINE nemogcmcoup_step - - -SUBROUTINE nemogcmcoup_final - - USE g_PARSUP, only: mype - - ! Finalize the FESOM model - - IMPLICIT NONE - - if(mype==0) then - WRITE(*,*)'Finalization of FESOM from IFS.' - endif - CALL main_finalize - -END SUBROUTINE nemogcmcoup_final -#endif diff --git a/src/ifs_interface/ifs_modules.F90 b/src/ifs_interface/ifs_modules.F90 deleted file mode 100644 index 8f52ee153..000000000 --- a/src/ifs_interface/ifs_modules.F90 +++ /dev/null @@ -1,1859 +0,0 @@ -#if defined (__ifsinterface) -#define __MYFILE__ 'ifs_modules.F90' -#define key_mpp_mpi -! Set of modules needed by the interface to IFS. -! -! -Original code by Kristian Mogensen, ECMWF. - -MODULE par_kind - IMPLICIT NONE - INTEGER, PUBLIC, PARAMETER :: & !: Floating point section - sp = SELECTED_REAL_KIND( 6, 37), & !: single precision (real 4) - dp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) - wpIFS = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) - ik = SELECTED_INT_KIND(6) !: integer precision -END MODULE par_kind - -MODULE nctools - - ! Utility subroutines for netCDF access - ! Modified : MAB (nf90, handle_error, LINE&FILE) - ! Modifled : KSM (new shorter name) - - USE netcdf - - PUBLIC ldebug_netcdf, nchdlerr - LOGICAL :: ldebug_netcdf = .FALSE. ! Debug switch for netcdf - -CONTAINS - - SUBROUTINE nchdlerr(status,lineno,filename) - - ! Error handler for netCDF access - IMPLICIT NONE - - - INTEGER :: status ! netCDF return status - INTEGER :: lineno ! Line number (usually obtained from - ! preprocessing __LINE__,__MYFILE__) - CHARACTER(len=*),OPTIONAL :: filename - - IF (status/=nf90_noerr) THEN - WRITE(*,*)'Netcdf error, code ',status - IF (PRESENT(filename)) THEN - WRITE(*,*)'In file ',filename,' in line ',lineno - ELSE - WRITE(*,*)'In line ',lineno - END IF - WRITE(*,'(2A)')' Error message : ',nf90_strerror(status) - CALL abort - ENDIF - - END SUBROUTINE nchdlerr - -!---------------------------------------------------------------------- -END MODULE nctools - -MODULE scrippar - INTEGER, PARAMETER :: scripdp = SELECTED_REAL_KIND(12,307) - INTEGER, PARAMETER :: scriplen = 80 -END MODULE scrippar - -MODULE scripgrid - - USE nctools - USE scrippar - - IMPLICIT NONE - - TYPE scripgridtype - INTEGER :: grid_size - INTEGER :: grid_corners - INTEGER :: grid_rank - INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_dims - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lat - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lon - INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_imask - REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lat - REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lon - CHARACTER(len=scriplen) :: grid_center_lat_units - CHARACTER(len=scriplen) :: grid_center_lon_units - CHARACTER(len=scriplen) :: grid_imask_units - CHARACTER(len=scriplen) :: grid_corner_lat_units - CHARACTER(len=scriplen) :: grid_corner_lon_units - CHARACTER(len=scriplen) :: title - END TYPE scripgridtype - -CONTAINS - - SUBROUTINE scripgrid_read( cdfilename, grid ) - - CHARACTER(len=*) :: cdfilename - TYPE(scripgridtype) :: grid - - INTEGER :: ncid, dimid, varid - - CALL scripgrid_init(grid) - - CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'grid_size',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=grid%grid_size),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'grid_corners',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=grid%grid_corners),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'grid_rank',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=grid%grid_rank),& - & __LINE__,__MYFILE__) - - CALL scripgrid_alloc(grid) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_dims',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'grid_imask',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_imask),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',grid%title),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) - - END SUBROUTINE scripgrid_read - - SUBROUTINE scripgrid_write( cdgridfile, grid ) - - CHARACTER(len=*) :: cdgridfile - TYPE(scripgridtype) :: grid - - INTEGER :: ncid - INTEGER :: ioldfill - INTEGER :: idimsize,idimxsize,idimysize,idimcorners,idimrank - INTEGER :: idims1rank(1),idims1size(1),idims2(2) - INTEGER :: iddims,idcentlat,idcentlon,idimask,idcornlat,idcornlon - INTEGER :: igriddims(2) - - ! Setup netcdf file - - CALL nchdlerr(nf90_create(TRIM(cdgridfile),nf90_clobber,ncid),& - & __LINE__,__MYFILE__) - - ! Define dimensions - - CALL nchdlerr(nf90_def_dim(ncid,'grid_size',& - & grid%grid_size,idimsize),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_def_dim(ncid,'grid_corners',& - & grid%grid_corners,idimcorners),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_def_dim(ncid,'grid_rank',& - & grid%grid_rank,idimrank),& - & __LINE__,__MYFILE__) - - idims1rank(1) = idimrank - - idims1size(1) = idimsize - - idims2(1) = idimcorners - idims2(2) = idimsize - - ! Define variables - - CALL nchdlerr(nf90_def_var(ncid,'grid_dims',& - & nf90_int,idims1rank,iddims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_center_lat',& - & nf90_double,idims1size,idcentlat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcentlat,'units',& - & grid%grid_center_lat_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_center_lon',& - & nf90_double,idims1size,idcentlon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcentlon,'units',& - & grid%grid_center_lon_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_imask',& - & nf90_int,idims1size,idimask),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idimask,'units',& - & grid%grid_imask_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lat',& - & nf90_double,idims2,idcornlat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcornlat,'units',& - & grid%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lon',& - & nf90_double,idims2,idcornlon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idcornlon,'units',& - & grid%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& - & TRIM(grid%title)),& - & __LINE__,__MYFILE__) - - ! End of netCDF definition phase - - CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) - - ! Write variables - - - CALL nchdlerr(nf90_put_var(ncid,iddims,grid%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcentlat,& - & grid%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcentlon,& - & grid%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idimask,& - & grid%grid_imask), & - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcornlat,& - & grid%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idcornlon,& - & grid%grid_corner_lon),& - & __LINE__,__MYFILE__) - - ! Close file - - CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) - - END SUBROUTINE scripgrid_write - - SUBROUTINE scripgrid_init( grid ) - - TYPE(scripgridtype) :: grid - - grid%grid_size=0 - grid%grid_corners=0 - grid%grid_rank=0 - grid%grid_center_lat_units='' - grid%grid_center_lon_units='' - grid%grid_imask_units='' - grid%grid_corner_lat_units='' - grid%grid_corner_lon_units='' - grid%title='' - - END SUBROUTINE scripgrid_init - - SUBROUTINE scripgrid_alloc( grid ) - - TYPE(scripgridtype) :: grid - - IF ( (grid%grid_size == 0) .OR. & - & (grid%grid_corners == 0) .OR. & - & (grid%grid_rank == 0) ) THEN - WRITE(*,*)'scripgridtype not initialized' - CALL abort - ENDIF - - ALLOCATE( & - & grid%grid_dims(grid%grid_rank), & - & grid%grid_center_lat(grid%grid_size), & - & grid%grid_center_lon(grid%grid_size), & - & grid%grid_corner_lat(grid%grid_corners, grid%grid_size), & - & grid%grid_corner_lon(grid%grid_corners, grid%grid_size), & - & grid%grid_imask(grid%grid_size) & - & ) - - END SUBROUTINE scripgrid_alloc - - SUBROUTINE scripgrid_dealloc( grid ) - - TYPE(scripgridtype) :: grid - - DEALLOCATE( & - & grid%grid_dims, & - & grid%grid_center_lat, & - & grid%grid_center_lon, & - & grid%grid_corner_lat, & - & grid%grid_corner_lon, & - & grid%grid_imask & - & ) - - END SUBROUTINE scripgrid_dealloc - -END MODULE scripgrid - -MODULE scripremap - -#if defined key_mpp_mpi - USE mpi -#endif - USE nctools - USE scrippar - USE scripgrid - - IMPLICIT NONE - - TYPE scripremaptype - INTEGER :: num_links - INTEGER :: num_wgts - TYPE(scripgridtype) :: src - TYPE(scripgridtype) :: dst - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_area - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_area - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_frac - REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_frac - INTEGER, ALLOCATABLE, DIMENSION(:) :: src_address - INTEGER, ALLOCATABLE, DIMENSION(:) :: dst_address - REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: remap_matrix - CHARACTER(len=scriplen) :: src_grid_area_units - CHARACTER(len=scriplen) :: dst_grid_area_units - CHARACTER(len=scriplen) :: src_grid_frac_units - CHARACTER(len=scriplen) :: dst_grid_frac_units - CHARACTER(len=scriplen) :: title - CHARACTER(len=scriplen) :: normalization - CHARACTER(len=scriplen) :: map_method - CHARACTER(len=scriplen) :: history - CHARACTER(len=scriplen) :: conventions - END TYPE scripremaptype - -CONTAINS - - SUBROUTINE scripremap_read_work(cdfilename,remap) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - - INTEGER :: ncid, dimid, varid - LOGICAL :: lcorners - - lcorners=.TRUE. - - CALL scripremap_init(remap) - - CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_size',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%src%grid_size),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_size',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%dst%grid_size),& - & __LINE__,__MYFILE__) - - - IF (nf90_inq_dimid(ncid,'src_grid_corners',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%src%grid_corners),& - & __LINE__,__MYFILE__) - ELSE - lcorners=.FALSE. - remap%src%grid_corners=1 - ENDIF - - IF (lcorners) THEN - CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_corners',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%dst%grid_corners),& - & __LINE__,__MYFILE__) - ELSE - remap%dst%grid_corners=1 - ENDIF - - CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_rank',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%src%grid_rank),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_rank',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%dst%grid_rank),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'num_links',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%num_links),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=remap%num_wgts),& - & __LINE__,__MYFILE__) - - CALL scripremap_alloc(remap) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_dims',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_dims',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lon),& - & __LINE__,__MYFILE__) - - IF (lcorners) THEN - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lat',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lon',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lon),& - & __LINE__,__MYFILE__) - - ELSE - - remap%src%grid_corner_lat(:,:) = 0.0 - remap%src%grid_corner_lon(:,:) = 0.0 - remap%dst%grid_corner_lat(:,:) = 0.0 - remap%dst%grid_corner_lon(:,:) = 0.0 - remap%src%grid_corner_lat_units = '' - remap%src%grid_corner_lon_units = '' - remap%dst%grid_corner_lat_units = '' - remap%dst%grid_corner_lon_units = '' - - ENDIF - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_imask',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_imask),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_imask',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_imask),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_area',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_area),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_area',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_area),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_frac',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_frac_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_frac),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_frac',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_frac_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_frac),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid), & - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,remap%remap_matrix),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',remap%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'normalization',remap%normalization),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'map_method',remap%map_method),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'history',remap%history),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'conventions',remap%conventions),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'dest_grid',remap%dst%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_att(ncid,nf90_global,'source_grid',remap%src%title),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) - - END SUBROUTINE scripremap_read_work - - SUBROUTINE scripremap_read(cdfilename,remap) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - - CALL scripremap_read_work(cdfilename,remap) - - END SUBROUTINE scripremap_read - - - SUBROUTINE scripremap_read_sgl(cdfilename,remap,& - & mype,nproc,mycomm,linteronly) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - INTEGER :: mype,nproc,mycomm - LOGICAL :: linteronly - - INTEGER, DIMENSION(8) :: isizes - INTEGER :: ierr, ip - - IF (mype==0) THEN - CALL scripremap_read_work(cdfilename,remap) -#if defined key_mpp_mpi - isizes(1)=remap%src%grid_size - isizes(2)=remap%dst%grid_size - isizes(3)=remap%src%grid_corners - isizes(4)=remap%dst%grid_corners - isizes(5)=remap%src%grid_rank - isizes(6)=remap%dst%grid_rank - isizes(7)=remap%num_links - isizes(8)=remap%num_wgts - CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) - ELSE - CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) - CALL scripremap_init(remap) - remap%src%grid_size=isizes(1) - remap%dst%grid_size=isizes(2) - remap%src%grid_corners=isizes(3) - remap%dst%grid_corners=isizes(4) - remap%src%grid_rank=isizes(5) - remap%dst%grid_rank=isizes(6) - remap%num_links=isizes(7) - remap%num_wgts=isizes(8) - CALL scripremap_alloc(remap) -#endif - ENDIF - -#if defined key_mpp_mpi - - IF (.NOT.linteronly) THEN - - CALL mpi_bcast( remap%src%grid_dims, remap%src%grid_rank, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_center_lat, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_center_lon, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lat, remap%src%grid_corners*remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lon, remap%src%grid_corners*remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - - CALL mpi_bcast( remap%dst%grid_dims, remap%dst%grid_rank, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lat, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lon, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lat, remap%dst%grid_corners*remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lon, remap%dst%grid_corners*remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - - CALL mpi_bcast( remap%src_grid_area, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_area, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src_grid_frac, remap%src%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_frac, remap%dst%grid_size, & - & mpi_double_precision, 0, mycomm, ierr ) - - CALL mpi_bcast( remap%src%grid_center_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_center_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_center_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_corner_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lat_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_corner_lon_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_imask_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_imask_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src_grid_area_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_area_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src_grid_frac_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_grid_frac_units, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%title, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%normalization, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%map_method, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%history, scriplen, & - & mpi_character, 0, mycomm, ierr ) - CALL mpi_bcast( remap%conventions, scriplen, & - & mpi_character, 0, mycomm, ierr ) - ENDIF - - CALL mpi_bcast( remap%src_address, remap%num_links, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst_address, remap%num_links, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%remap_matrix, remap%num_wgts*remap%num_links, & - & mpi_double_precision, 0, mycomm, ierr ) - CALL mpi_bcast( remap%src%grid_imask, remap%src%grid_size, & - & mpi_integer, 0, mycomm, ierr ) - CALL mpi_bcast( remap%dst%grid_imask, remap%dst%grid_size, & - & mpi_integer, 0, mycomm, ierr ) - -#endif - END SUBROUTINE scripremap_read_sgl - - SUBROUTINE scripremap_write(cdfilename,remap) - - CHARACTER(len=*) :: cdfilename - TYPE(scripremaptype) :: remap - - INTEGER :: ncid - INTEGER :: dimsgs,dimdgs,dimsgc,dimdgc,dimsgr,dimdgr,dimnl,dimnw - INTEGER :: dims1(1),dims2(2) - INTEGER :: idsgd,iddgd,idsgea,iddgea,idsgeo,iddgeo - INTEGER :: idsgoa,idsgoo,iddgoa,iddgoo,idsgim,iddgim,idsgar,iddgar - INTEGER :: idsgf,iddgf,idsga,iddga,idsa,idda,idrm - - CALL nchdlerr(nf90_create(TRIM(cdfilename),nf90_clobber,ncid), & - & __LINE__, __MYFILE__ ) - - CALL nchdlerr(nf90_def_dim(ncid,'src_grid_size',& - & remap%src%grid_size,dimsgs),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_size',& - & remap%dst%grid_size,dimdgs),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'src_grid_corners',& - & remap%src%grid_corners,dimsgc),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_corners',& - & remap%dst%grid_corners,dimdgc),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'src_grid_rank',& - & remap%src%grid_rank,dimsgr),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_rank',& - & remap%dst%grid_rank,dimdgr),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'num_links',& - & remap%num_links,dimnl),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& - & remap%num_wgts,dimnw),& - & __LINE__,__MYFILE__) - - dims1(1)=dimsgr - CALL nchdlerr(nf90_def_var(ncid,'src_grid_dims',& - & nf90_int,dims1,idsgd),& - & __LINE__,__MYFILE__) - - dims1(1)=dimdgr - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_dims',& - & nf90_int,dims1,iddgd), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lat',& - & nf90_double,dims1,idsgea), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lat',& - & nf90_double,dims1,iddgea), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lon',& - & nf90_double,dims1,idsgeo), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lon',& - & nf90_double,dims1,iddgeo), & - & __LINE__,__MYFILE__) - - dims2(1)=dimsgc - dims2(2)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lat',& - & nf90_double,dims2,idsgoa), & - & __LINE__,__MYFILE__) - - dims2(1)=dimsgc - dims2(2)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lon',& - & nf90_double,dims2,idsgoo), & - & __LINE__,__MYFILE__) - - dims2(1)=dimdgc - dims2(2)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lat',& - & nf90_double,dims2,iddgoa), & - & __LINE__,__MYFILE__) - - dims2(1)=dimdgc - dims2(2)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lon',& - & nf90_double,dims2,iddgoo), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_imask',& - & nf90_int,dims1,idsgim), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_imask',& - & nf90_int,dims1,iddgim), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_area',& - & nf90_double,dims1,idsga), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_area',& - & nf90_double,dims1,iddga), & - & __LINE__,__MYFILE__) - - dims1(1)=dimsgs - CALL nchdlerr(nf90_def_var(ncid,'src_grid_frac',& - & nf90_double,dims1,idsgf), & - & __LINE__,__MYFILE__) - - dims1(1)=dimdgs - CALL nchdlerr(nf90_def_var(ncid,'dst_grid_frac',& - & nf90_double,dims1,iddgf), & - & __LINE__,__MYFILE__) - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'src_address',& - & nf90_int,dims1,idsa), & - & __LINE__,__MYFILE__) - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'dst_address',& - & nf90_int,dims1,idda), & - & __LINE__,__MYFILE__) - - dims2(1)=dimnw - dims2(2)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& - & nf90_double,dims2,idrm), & - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_att(ncid,idsgea,'units',& - & remap%src%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgea,'units',& - & remap%dst%grid_center_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgeo,'units',& - & remap%src%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgeo,'units',& - & remap%dst%grid_center_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgoa,'units',& - & remap%src%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgoo,'units',& - & remap%src%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgoa,'units',& - & remap%dst%grid_corner_lat_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgoo,'units',& - & remap%dst%grid_corner_lon_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgim,'units',& - & remap%src%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgim,'units',& - & remap%dst%grid_imask_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsga,'units',& - & remap%src_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddga,'units',& - & remap%dst_grid_area_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,idsgf,'units',& - & remap%src_grid_frac_units),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,iddgf,'units',& - & remap%dst_grid_frac_units),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& - & remap%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'normalization',& - & remap%normalization),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'map_method',& - & remap%map_method),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'history',& - & remap%history),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'conventions',& - & remap%conventions),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'dest_grid',& - & remap%dst%title),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_att(ncid,nf90_global,'source_grid',& - & remap%src%title),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgd,remap%src%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,iddgd,remap%dst%grid_dims),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgea,remap%src%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,iddgea,remap%dst%grid_center_lat),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgeo,remap%src%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,iddgeo,remap%dst%grid_center_lon),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsgoa,remap%src%grid_corner_lat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsgoo,remap%src%grid_corner_lon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgoa,remap%dst%grid_corner_lat),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgoo,remap%dst%grid_corner_lon),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsgim,remap%src%grid_imask),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgim,remap%dst%grid_imask),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsga,remap%src_grid_area),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddga,remap%dst_grid_area),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsgf,remap%src_grid_frac),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,iddgf,remap%dst_grid_frac),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idsa,remap%src_address),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idda,remap%dst_address),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_put_var(ncid,idrm,remap%remap_matrix),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) - - END SUBROUTINE scripremap_write - - SUBROUTINE scripremap_init(remap) - - TYPE(scripremaptype) :: remap - - CALL scripgrid_init(remap%src) - CALL scripgrid_init(remap%dst) - remap%num_links = 0 - remap%num_wgts = 0 - remap%title='' - remap%normalization='' - remap%map_method='' - remap%history='' - remap%conventions='' - remap%src_grid_area_units='' - remap%dst_grid_area_units='' - remap%src_grid_frac_units='' - remap%dst_grid_frac_units='' - - END SUBROUTINE scripremap_init - - SUBROUTINE scripremap_alloc(remap) - - TYPE(scripremaptype) :: remap - - IF ( (remap%num_links == 0) .OR. & - & (remap%num_wgts == 0) ) THEN - WRITE(*,*)'scripremaptype not initialized' - CALL abort - ENDIF - - CALL scripgrid_alloc(remap%src) - CALL scripgrid_alloc(remap%dst) - - ALLOCATE( & - & remap%src_grid_area(remap%src%grid_size), & - & remap%dst_grid_area(remap%dst%grid_size), & - & remap%src_grid_frac(remap%src%grid_size), & - & remap%dst_grid_frac(remap%dst%grid_size), & - & remap%src_address(remap%num_links), & - & remap%dst_address(remap%num_links), & - & remap%remap_matrix(remap%num_wgts, remap%num_links) & - & ) - - END SUBROUTINE scripremap_alloc - - SUBROUTINE scripremap_dealloc(remap) - - TYPE(scripremaptype) :: remap - - DEALLOCATE( & - & remap%src_grid_area, & - & remap%dst_grid_area, & - & remap%src_grid_frac, & - & remap%dst_grid_frac, & - & remap%src_address, & - & remap%dst_address, & - & remap%remap_matrix & - & ) - - CALL scripgrid_dealloc(remap%src) - CALL scripgrid_dealloc(remap%dst) - - CALL scripremap_init(remap) - - END SUBROUTINE scripremap_dealloc - -END MODULE scripremap - -MODULE parinter - -#if defined key_mpp_mpi - USE mpi -#endif - USE scripremap - USE scrippar - USE nctools - - IMPLICIT NONE - - ! Type to contains interpolation information - ! (like what is in scripremaptype) and message - ! passing information - - TYPE parinterinfo - ! Number of local links - INTEGER :: num_links - ! Destination side - INTEGER, POINTER, DIMENSION(:) :: dst_address - ! Source addresses and work array - INTEGER, POINTER, DIMENSION(:) :: src_address - ! Local remap matrix - REAL(scripdp), POINTER, DIMENSION(:,:) :: remap_matrix - ! Message passing information - ! Array of local addresses for send buffer - ! packing - INTEGER, POINTER, DIMENSION(:) :: send_address - ! Sending bookkeeping - INTEGER :: nsendtot - INTEGER, POINTER, DIMENSION(:) :: nsend,nsdisp - ! Receiving bookkeeping - INTEGER :: nrecvtot - INTEGER, POINTER, DIMENSION(:) :: nrecv,nrdisp - END TYPE parinterinfo - -CONTAINS - - SUBROUTINE parinter_init( mype, nproc, mpi_comm, & - & nsrclocpoints, nsrcglopoints, srcmask, srcgloind, & - & ndstlocpoints, ndstglopoints, dstmask, dstgloind, & - & remap, pinfo, lcommout, commoutprefix, iunit ) - - ! Setup interpolation based on SCRIP format weights in - ! remap and the source/destination grids information. - - ! Procedure: - - ! 1) A global SCRIP remapping file is read on all processors. - ! 2) Find local destination points in the global grid. - ! 3) Find which processor needs source data and setup buffer - ! information for sending data. - ! 4) Construct new src remapping for buffer received - - ! All information is stored in the TYPE(parinterinfo) output - ! data type - - ! Input arguments. - - ! Message passing information - INTEGER, INTENT(IN) :: mype, nproc, mpi_comm - ! Source grid local and global number of grid points - INTEGER, INTENT(IN) :: nsrclocpoints, nsrcglopoints - ! Source integer mask (0/1) for SCRIP compliance - INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcmask - ! Source global addresses of each local grid point - INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcgloind - ! Destination grid local and global number of grid points - INTEGER, INTENT(IN) :: ndstlocpoints, ndstglopoints - ! Destination integer mask (0/1) for SCRIP compliance - INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstmask - ! Destination global addresses of each local grid point - INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstgloind - ! SCRIP remapping data - TYPE(scripremaptype) :: remap - ! Switch for output communication patterns - LOGICAL :: lcommout - CHARACTER(len=*) :: commoutprefix - ! Unit to use for output - INTEGER :: iunit - - ! Output arguments - - ! Interpolation and message passing information - TYPE(parinterinfo), INTENT(OUT) :: pinfo - - ! Local variable - - ! Variable for glocal <-> local address/pe information - INTEGER, DIMENSION(nsrcglopoints) :: ilsrcmppmap, ilsrclocind - INTEGER, DIMENSION(nsrcglopoints) :: igsrcmppmap, igsrclocind - INTEGER, DIMENSION(ndstglopoints) :: ildstmppmap, ildstlocind - INTEGER, DIMENSION(ndstglopoints) :: igdstmppmap, igdstlocind - INTEGER, DIMENSION(nsrcglopoints) :: isrcpe,isrcpetmp - INTEGER, DIMENSION(nsrcglopoints) :: isrcaddtmp - INTEGER, DIMENSION(0:nproc-1) :: isrcoffset - INTEGER, DIMENSION(nproc) :: isrcno, isrcoff, isrccur - INTEGER, DIMENSION(nproc) :: ircvoff, ircvcur - INTEGER, DIMENSION(:), ALLOCATABLE :: isrctot, ircvtot - - ! Misc variable - INTEGER :: i,n,pe - INTEGER :: istatus - CHARACTER(len=256) :: cdfile - - ! Check that masks are consistent. - - ! Remark: More consistency tests between remapping information - ! and input argument could be code, but for now we settle - ! for checking the masks. - - ! Source grid - - DO i=1,nsrclocpoints - IF (srcmask(i)/=remap%src%grid_imask(srcgloind(i))) THEN - WRITE(iunit,*)'Source imask is inconsistent at ' - WRITE(iunit,*)'global index = ',srcgloind(i) - WRITE(iunit,*)'Source mask = ',srcmask(i) - WRITE(iunit,*)'Remap mask = ',remap%src%grid_imask(srcgloind(i)) - WRITE(iunit,*)'Latitude = ',remap%src%grid_center_lat(srcgloind(i)) - WRITE(iunit,*)'Longitude = ',remap%src%grid_center_lon(srcgloind(i)) - CALL flush(iunit) - CALL abort - ENDIF - ENDDO - - ! Destination grid - - DO i=1,ndstlocpoints - IF (dstmask(i)/=remap%dst%grid_imask(dstgloind(i))) THEN - WRITE(iunit,*)'Destination imask is inconsistent at ' - WRITE(iunit,*)'global index = ',dstgloind(i) - WRITE(iunit,*)'Destin mask = ',dstmask(i) - WRITE(iunit,*)'Remap mask = ',remap%dst%grid_imask(dstgloind(i)) - WRITE(iunit,*)'Latitude = ',remap%dst%grid_center_lat(dstgloind(i)) - WRITE(iunit,*)'Longitude = ',remap%dst%grid_center_lon(dstgloind(i)) - CALL flush(iunit) - CALL abort - ENDIF - ENDDO - - ! Setup global to local and vice versa mappings. - - ilsrcmppmap(:)=-1 - ilsrclocind(:)=0 - ildstmppmap(:)=-1 - ildstlocind(:)=0 - - DO i=1,nsrclocpoints - ilsrcmppmap(srcgloind(i))=mype - ilsrclocind(srcgloind(i))=i - ENDDO - - DO i=1,ndstlocpoints - ildstmppmap(dstgloind(i))=mype - ildstlocind(dstgloind(i))=i - ENDDO - -#if defined key_mpp_mpi - CALL mpi_allreduce(ilsrcmppmap,igsrcmppmap,nsrcglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) - CALL mpi_allreduce(ilsrclocind,igsrclocind,nsrcglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) - CALL mpi_allreduce(ildstmppmap,igdstmppmap,ndstglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) - CALL mpi_allreduce(ildstlocind,igdstlocind,ndstglopoints, & - & mpi_integer,mpi_max,mpi_comm,istatus) -#else - igsrcmppmap(:)=ilsrcmppmap(:) - igsrclocind(:)=ilsrclocind(:) - igdstmppmap(:)=ildstmppmap(:) - igdstlocind(:)=ildstlocind(:) -#endif - - ! Optionally construct an ascii file listing what src and - ! dest points belongs to which task - - ! Since igsrcmppmap and igdstmppmap are global data only do - ! this for mype==0. - - IF (lcommout.AND.(mype==0)) THEN - WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_srcmppmap_',mype+1,'.dat' - OPEN(9,file=cdfile) - DO i=1,nsrcglopoints - WRITE(9,*)remap%src%grid_center_lat(i),& - & remap%src%grid_center_lon(i), & - & igsrcmppmap(i)+1,remap%src%grid_imask(i) - ENDDO - CLOSE(9) - WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dstmppmap_',mype+1,'.dat' - OPEN(9,file=cdfile) - DO i=1,ndstglopoints - WRITE(9,*)remap%dst%grid_center_lat(i),& - & remap%dst%grid_center_lon(i), & - & igdstmppmap(i)+1,remap%dst%grid_imask(i) - ENDDO - CLOSE(9) - ENDIF - - ! - ! Standard interpolation in serial case is - ! - ! DO n=1,remap%num_links - ! zdst(remap%dst_address(n)) = zdst(remap%dst_address(n)) + & - ! & remap%remap_matrix(1,n)*zsrc(remap%src_address(n)) - ! END DO - ! - - ! In parallel we need to first find local number of links - - pinfo%num_links=0 - DO i=1,remap%num_links - IF (igdstmppmap(remap%dst_address(i))==mype) & - & pinfo%num_links=pinfo%num_links+1 - ENDDO - ALLOCATE(pinfo%dst_address(pinfo%num_links),& - & pinfo%src_address(pinfo%num_links),& - & pinfo%remap_matrix(1,pinfo%num_links)) - - ! Get local destination addresses - - n=0 - DO i=1,remap%num_links - IF (igdstmppmap(remap%dst_address(i))==mype) THEN - n=n+1 - pinfo%dst_address(n)=& - & igdstlocind(remap%dst_address(i)) - pinfo%remap_matrix(:,n)=& - & remap%remap_matrix(:,i) - ENDIF - ENDDO - - ! Get sending processors maps. - - ! The same data point might need to be sent to many processors - ! so first construct a map for processors needing the data - - isrcpe(:)=-1 - DO i=1,remap%num_links - IF (igdstmppmap(remap%dst_address(i))==mype) THEN - isrcpe(remap%src_address(i))=& - & igsrcmppmap(remap%src_address(i)) - ENDIF - ENDDO - - ! Optionally write a set if ascii file listing which tasks - ! mype needs to send to communicate with - - IF (lcommout) THEN - ! Destination processors - WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dsts_',mype+1,'.dat' - OPEN(9,file=cdfile) - DO pe=0,nproc-1 - IF (pe==mype) THEN - isrcpetmp(:)=isrcpe(:) - ENDIF -#if defined key_mpp_mpi - CALL mpi_bcast(isrcpetmp,nsrcglopoints,mpi_integer,pe,mpi_comm,istatus) -#endif - DO i=1,nsrcglopoints - IF (isrcpetmp(i)==mype) THEN - WRITE(9,*)remap%src%grid_center_lat(i),& - & remap%src%grid_center_lon(i), & - & pe+1,mype+1 - ENDIF - ENDDO - ENDDO - CLOSE(9) - ENDIF - - ! Get number of points to send to each processor - - ALLOCATE(pinfo%nsend(0:nproc-1)) - isrcno(:)=0 - DO i=1,nsrcglopoints - IF (isrcpe(i)>=0) THEN - isrcno(isrcpe(i)+1)=isrcno(isrcpe(i)+1)+1 - ENDIF - ENDDO -#if defined key_mpp_mpi - CALL mpi_alltoall(isrcno,1,mpi_integer, & - & pinfo%nsend(0:nproc-1),1,mpi_integer, & - & mpi_comm,istatus) -#else - pinfo%nsend(0:nproc-1) = isrcno(1:nproc) -#endif - pinfo%nsendtot=SUM(pinfo%nsend(0:nproc-1)) - - ! Construct sending buffer mapping. Data is mapping in - ! processor order. - - ALLOCATE(pinfo%send_address(pinfo%nsendtot)) - - ! Temporary arrays for mpi all to all. - - ALLOCATE(isrctot(SUM(isrcno(1:nproc)))) - ALLOCATE(ircvtot(SUM(pinfo%nsend(0:nproc-1)))) - - ! Offset for message parsing - - isrcoff(1)=0 - ircvoff(1)=0 - DO i=1,nproc-1 - isrcoff(i+1) = isrcoff(i) + isrcno(i) - ircvoff(i+1) = pinfo%nsend(i-1) + ircvoff(i) - ENDDO - - ! Pack indices i into a buffer - - isrccur(:)=0 - DO i=1,nsrcglopoints - IF (isrcpe(i)>=0) THEN - isrccur(isrcpe(i)+1)=isrccur(isrcpe(i)+1)+1 - isrctot(isrccur(isrcpe(i)+1)+isrcoff(isrcpe(i)+1)) = i - ENDIF - ENDDO - - ! Send the data - -#if defined key_mpp_mpi - CALL mpi_alltoallv(& - & isrctot,isrccur,isrcoff,mpi_integer, & - & ircvtot,pinfo%nsend(0:nproc-1),ircvoff,mpi_integer, & - & mpi_comm,istatus) -#else - ircvtot(:)=isrctot(:) -#endif - - ! Get the send address. ircvtot will at this point contain the - ! addresses in the global index needed for message passing - - DO i=1,pinfo%nsendtot - pinfo%send_address(i)=igsrclocind(ircvtot(i)) - ENDDO - - ! Deallocate the mpi all to all arrays - - DEALLOCATE(ircvtot,isrctot) - - ! Get number of points to receive to each processor - - ALLOCATE(pinfo%nrecv(0:nproc-1)) - pinfo%nrecv(0:nproc-1)=0 - DO i=1,nsrcglopoints - IF (isrcpe(i)>=0 .AND. isrcpe(i)=0 .AND. isrcpe(i)0) THEN - CALL nchdlerr(nf90_def_dim(ncid,'num_links',& - & pinfo%num_links,dimnl),& - & __LINE__,__MYFILE__) - ENDIF - - CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& - & 1,dimnw),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - CALL nchdlerr(nf90_def_dim(ncid,'nsendtot',& - & pinfo%nsendtot,dimnst),& - & __LINE__,__MYFILE__) - ENDIF - - IF (pinfo%nrecvtot>0) THEN - CALL nchdlerr(nf90_def_dim(ncid,'nrecvtot',& - & pinfo%nrecvtot,dimnrt),& - & __LINE__,__MYFILE__) - ENDIF - - CALL nchdlerr(nf90_def_dim(ncid,'nproc',& - & nproc,dimnpr),& - & __LINE__,__MYFILE__) - - IF (pinfo%num_links>0) THEN - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'dst_address',& - & nf90_int,dims1,idda),& - & __LINE__,__MYFILE__) - - dims1(1)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'src_address',& - & nf90_int,dims1,idsa),& - & __LINE__,__MYFILE__) - - dims2(1)=dimnw - dims2(2)=dimnl - CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& - & nf90_double,dims2,idrm),& - & __LINE__,__MYFILE__) - - ENDIF - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nsend',& - & nf90_int,dims1,idns),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - - dims1(1)=dimnst - CALL nchdlerr(nf90_def_var(ncid,'send_address',& - & nf90_int,dims1,idsaa),& - & __LINE__,__MYFILE__) - - ENDIF - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nrecv',& - & nf90_int,dims1,idnr),& - & __LINE__,__MYFILE__) - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nsdisp',& - & nf90_int,dims1,idnsp),& - & __LINE__,__MYFILE__) - - dims1(1)=dimnpr - CALL nchdlerr(nf90_def_var(ncid,'nrdisp',& - & nf90_int,dims1,idnrp),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) - - - IF (pinfo%num_links>0) THEN - - CALL nchdlerr(nf90_put_var(ncid,idda,pinfo%dst_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idsa,pinfo%src_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idrm,pinfo%remap_matrix),& - & __LINE__,__MYFILE__) - - ENDIF - - CALL nchdlerr(nf90_put_var(ncid,idns,pinfo%nsend(0:nproc-1)),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - - CALL nchdlerr(nf90_put_var(ncid,idsaa,pinfo%send_address),& - & __LINE__,__MYFILE__) - - ENDIF - - CALL nchdlerr(nf90_put_var(ncid,idnr,pinfo%nrecv(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idnsp,pinfo%nsdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_put_var(ncid,idnrp,pinfo%nrdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) - - END SUBROUTINE parinter_write - - SUBROUTINE parinter_read( mype, nproc, & - & nsrcglopoints, ndstglopoints, & - & pinfo, cdpath, cdprefix, lexists ) - - ! Write pinfo information in a netCDF file in order to - ! be able to read it rather than calling parinter_init - - ! Input arguments. - - ! Message passing information - INTEGER, INTENT(IN) :: mype, nproc - ! Source grid local global number of grid points - INTEGER, INTENT(IN) :: nsrcglopoints - ! Destination grid global number of grid points - INTEGER, INTENT(IN) :: ndstglopoints - ! Interpolation and message passing information - TYPE(parinterinfo), INTENT(OUT) :: pinfo - ! Does the information exists - LOGICAL :: lexists - ! Path and file prefix - CHARACTER(len=*) :: cdpath, cdprefix - - ! Local variable - - ! Misc variable - CHARACTER(len=1024) :: cdfile - INTEGER :: ncid, dimid, varid, num_wgts - - WRITE(cdfile,'(A,2(I8.8,A),2(I4.4,A),A)') & - & TRIM(cdpath)//'/'//TRIM(cdprefix)//'_', & - & nsrcglopoints,'_',ndstglopoints,'_',mype,'_',nproc,'.nc' - - - lexists=nf90_open(TRIM(cdfile),nf90_nowrite,ncid)==nf90_noerr - - IF (lexists) THEN - - ! If num_links is not present we assume it to be zero. - - IF (nf90_inq_dimid(ncid,'num_links',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=pinfo%num_links),& - & __LINE__,__MYFILE__) - ELSE - pinfo%num_links=0 - ENDIF - - CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=num_wgts),& - & __LINE__,__MYFILE__) - IF (num_wgts/=1) THEN - WRITE(0,*)'parinter_read: num_wgts has to be 1 for now' - CALL abort - ENDIF - - ! If nsendtot is not present we assume it to be zero. - - IF (nf90_inq_dimid(ncid,'nsendtot',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=pinfo%nsendtot),& - & __LINE__,__MYFILE__) - ELSE - pinfo%nsendtot=0 - ENDIF - - IF(nf90_inq_dimid(ncid,'nrecvtot',dimid)==nf90_noerr) THEN - CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& - & len=pinfo%nrecvtot),& - & __LINE__,__MYFILE__) - ELSE - pinfo%nrecvtot=0 - ENDIF - - ALLOCATE(pinfo%dst_address(pinfo%num_links),& - & pinfo%src_address(pinfo%num_links),& - & pinfo%remap_matrix(num_wgts,pinfo%num_links),& - & pinfo%nsend(0:nproc-1),& - & pinfo%send_address(pinfo%nsendtot),& - & pinfo%nrecv(0:nproc-1),& - & pinfo%nsdisp(0:nproc-1),& - & pinfo%nrdisp(0:nproc-1)) - - IF (pinfo%num_links>0) THEN - CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%dst_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%src_address),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%remap_matrix),& - & __LINE__,__MYFILE__) - ENDIF - - CALL nchdlerr(nf90_inq_varid(ncid,'nsend',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsend(0:nproc-1)),& - & __LINE__,__MYFILE__) - - IF (pinfo%nsendtot>0) THEN - - CALL nchdlerr(nf90_inq_varid(ncid,'send_address',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%send_address),& - & __LINE__,__MYFILE__) - - ENDIF - - CALL nchdlerr(nf90_inq_varid(ncid,'nrecv',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrecv(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'nsdisp',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_inq_varid(ncid,'nrdisp',varid),& - & __LINE__,__MYFILE__) - CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrdisp(0:nproc-1)),& - & __LINE__,__MYFILE__) - - CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) - - ENDIF - - END SUBROUTINE parinter_read - -END MODULE parinter - -MODULE interinfo - - ! Parallel regridding information - - USE parinter - - IMPLICIT NONE - - SAVE - - ! IFS to NEMO - - TYPE(parinterinfo) :: gausstoT,gausstoUV - - ! NEMO to IFS - - TYPE(parinterinfo) :: Ttogauss, UVtogauss - - ! Read parinterinfo on task 0 only and broadcast. - - LOGICAL :: lparbcast = .FALSE. - -END MODULE interinfo -#endif diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 deleted file mode 100644 index d596169c4..000000000 --- a/src/ifs_interface/ifs_notused.F90 +++ /dev/null @@ -1,371 +0,0 @@ -#if defined (__ifsinterface) -! Routines usually provided by the library that are currently -! not implemented for FESOM2. -! -! -Original code by Kristian Mogensen, ECMWF. - -SUBROUTINE nemogcmcoup_end_ioserver - -! End the NEMO mppio server - - WRITE(*,*)'No mpp_ioserver used' -! CALL abort - -END SUBROUTINE nemogcmcoup_end_ioserver - -SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) - - ! Initialize the NEMO mppio server - - IMPLICIT NONE - INTEGER :: icomm - LOGICAL :: lnemoioserver - - WRITE(*,*)'No mpp_ioserver' - !CALL abort - -END SUBROUTINE nemogcmcoup_init_ioserver - - -SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) - - ! Initialize the NEMO mppio server - - IMPLICIT NONE - INTEGER :: icomm - - WRITE(*,*)'No mpp_ioserver' - CALL abort - -END SUBROUTINE nemogcmcoup_init_ioserver_2 - - -SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & - & nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d ) - - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the ORCA grid to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - IMPLICIT NONE - - ! Arguments - REAL(wpIFS), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints,nlev - - ! Local variables - - WRITE(0,*)'nemogcmcoup_mlflds_get should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_mlflds_get - - -SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & - & nopoints, pgsst, pgice, pgucur, pgvcur ) - - ! Interpolate sst, ice and currents from the ORCA grid - ! to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - - IMPLICIT NONE - - - ! Arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints - ! Local arrays of sst, ice and currents - REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur - - ! Local variables - - WRITE(0,*)'nemogcmcoup_get should not be called with FESOM' - CALL abort - -END SUBROUTINE nemogcmcoup_get - - -SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & - & nopoints, pgssh, pgmld, pg20d, pgsss, & - & pgtem300, pgsal300 ) - - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the ORCA grid to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - IMPLICIT NONE - - ! Arguments - REAL(wpIFS), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & - & pgtem300, pgsal300 - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints - - ! Local variables - - WRITE(0,*)'nemogcmcoup_exflds_get should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_exflds_get - - -SUBROUTINE nemogcmcoup_get_1way( mype, npes, icomm ) - - ! Interpolate sst, ice and currents from the ORCA grid - ! to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - IMPLICIT NONE - - - ! Arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - - ! Local variables - - WRITE(0,*)'nemogcmcoup_get_1way should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_get_1way - - -SUBROUTINE nemogcmcoup_mlinit( mype, npes, icomm, & - & nlev, nopoints, pdep, pmask ) - - ! Get information about the vertical discretization of the ocean model - - ! nlevs are maximum levels on input and actual number levels on output - - USE par_kind - - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Grid information - INTEGER, INTENT(INOUT) :: nlev, nopoints - REAL(wpIFS), INTENT(OUT), DIMENSION(nlev) :: pdep - REAL(wpIFS), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask - - ! Local variables - - ! dummy argument with explicit INTENT(OUT) declaration needs an explicit value - pdep=0. - pmask=0. - - WRITE(0,*)'nemogcmcoup_mlinit should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_mlinit - - -SUBROUTINE nemogcmcoup_update( mype, npes, icomm, & - & npoints, pgutau, pgvtau, & - & pgqsr, pgqns, pgemp, kt, ldebug ) - - ! Update fluxes in nemogcmcoup_data by parallel - ! interpolation of the input gaussian grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Fluxes on the Gaussian grid. - INTEGER, INTENT(IN) :: npoints - REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & - & pgutau, pgvtau, pgqsr, pgqns, pgemp - ! Current time step - INTEGER, INTENT(in) :: kt - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_update should be called with with.' - CALL abort - -END SUBROUTINE nemogcmcoup_update - -SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & - & npoints, pgsst, pgtsk, kt, ldebug ) - - ! Update addetiona in nemogcmcoup_data by parallel - ! interpolation of the input gaussian grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Input on the Gaussian grid. - INTEGER, INTENT(IN) :: npoints - REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & - & pgsst, pgtsk - ! Current time step - INTEGER, INTENT(in) :: kt - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom. Commented ABORT. Proceeding...' - !CALL abort - - -END SUBROUTINE nemogcmcoup_update_add - - -SUBROUTINE nemogcmcoup_wam_coupinit( mype, npes, icomm, & - & nlocpoints, nglopoints, & - & nlocmsk, ngloind, iunit ) - - ! Initialize single executable coupling between WAM and NEMO - ! This is called from WAM. - - IMPLICIT NONE - - ! Input arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype,npes,icomm - ! WAM grid information - ! Number of local and global points - INTEGER, INTENT(IN) :: nlocpoints, nglopoints - ! Integer mask and global indices - INTEGER, DIMENSION(nlocpoints), INTENT(IN) :: nlocmsk, ngloind - ! Unit for output in parinter_init - INTEGER :: iunit - - WRITE(0,*)'Wam coupling not implemented for FESOM' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_coupinit - - -SUBROUTINE nemogcmcoup_wam_get( mype, npes, icomm, & - & nopoints, pwsst, pwicecov, pwicethk, & - & pwucur, pwvcur, licethk ) - - ! Interpolate from the ORCA grid - ! to the WAM grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - IMPLICIT NONE - - ! Arguments - - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number WAM grid points - INTEGER, INTENT(IN) :: nopoints - ! Local arrays of sst, ice cover, ice thickness and currents - REAL(wpIFS), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur - LOGICAL :: licethk - - ! Local variables - - WRITE(0,*)'nemogcmcoup_wam_get should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_get - - -SUBROUTINE nemogcmcoup_wam_update( mype, npes, icomm, & - & npoints, pwswh, pwmwp, & - & pwphioc, pwtauoc, pwstrn, & - & pwustokes, pwvstokes, & - & cdtpro, ldebug ) - - ! Update fluxes in nemogcmcoup_data by parallel - ! interpolation of the input WAM grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Data on the WAM grid. - INTEGER, INTENT(IN) :: npoints - REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & - & pwswh, pwmwp, pwphioc, pwtauoc, pwstrn, pwustokes, pwvstokes - ! Current time - CHARACTER(len=14), INTENT(IN) :: cdtpro - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_wam_update should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_update - - -SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & - & pwutau, pwvtau, pwuv10n, pwphif,& - & cdtpro, ldebug ) - - ! Update stresses in nemogcmcoup_data by parallel - ! interpolation of the input WAM grid data - - USE par_kind - - IMPLICIT NONE - - ! Arguments - - ! MPI communications - INTEGER, INTENT(IN) :: mype,npes,icomm - ! Data on the WAM grid. - INTEGER, INTENT(IN) :: npoints - REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & - & pwutau, pwvtau, pwuv10n, pwphif - ! Current time step - CHARACTER(len=14), INTENT(IN) :: cdtpro - ! Write debugging fields in netCDF - LOGICAL, INTENT(IN) :: ldebug - - ! Local variables - - WRITE(0,*)'nemogcmcoup_wam_update_stress should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_wam_update_stress -#endif From 8a069459df704e2a9827ede2b107332ab614ee73 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 11 Nov 2021 11:08:27 +0100 Subject: [PATCH 547/909] move the merged ifs_* files to the ifs_interface directory --- src/{ => ifs_interface}/ifs_interface.F90 | 0 src/{ => ifs_interface}/ifs_modules.F90 | 0 src/{ => ifs_interface}/ifs_notused.F90 | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename src/{ => ifs_interface}/ifs_interface.F90 (100%) rename src/{ => ifs_interface}/ifs_modules.F90 (100%) rename src/{ => ifs_interface}/ifs_notused.F90 (100%) diff --git a/src/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 similarity index 100% rename from src/ifs_interface.F90 rename to src/ifs_interface/ifs_interface.F90 diff --git a/src/ifs_modules.F90 b/src/ifs_interface/ifs_modules.F90 similarity index 100% rename from src/ifs_modules.F90 rename to src/ifs_interface/ifs_modules.F90 diff --git a/src/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 similarity index 100% rename from src/ifs_notused.F90 rename to src/ifs_interface/ifs_notused.F90 From 6d6580204443f79128fba914dd91eb0449a62968 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 11 Nov 2021 12:14:34 +0100 Subject: [PATCH 548/909] OPENMP for fill_up_dn_grad --- src/oce_muscl_adv.F90 | 115 +++++++++++++++++++++++++++++++++--------- 1 file changed, 91 insertions(+), 24 deletions(-) diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index 10507e9dd..e4a995569 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -41,7 +41,6 @@ subroutine muscl_adv_init(twork, partit, mesh) use find_up_downwind_triangles_interface IMPLICIT NONE integer :: n, k, n1, n2 - integer :: nz type(t_mesh), intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit @@ -58,6 +57,8 @@ subroutine muscl_adv_init(twork, partit, mesh) !___________________________________________________________________________ nn_size=0 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, k) +!$OMP DO do n=1, myDim_nod2D ! get number of neighbouring nodes from sparse stiffness matrix ! stiffnes matrix filled up in subroutine init_stiff_mat_ale @@ -68,39 +69,63 @@ subroutine muscl_adv_init(twork, partit, mesh) ! --> SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) gives maximum number of ! neighbouring nodes within a single row of the sparse matrix k=SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) - if(k>nn_size) nn_size=k ! nnum maximum number of neighbouring nodes + if (k > nn_size) then +#if defined(_OPENMP) + call omp_set_lock(partit%plock(n)) +#endif + nn_size=k ! nnum maximum number of neighbouring nodes +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(n)) +#endif + end if end do - +!$OMP END DO +!$OMP END PARALLEL !___________________________________________________________________________ allocate(mesh%nn_num(myDim_nod2D), mesh%nn_pos(nn_size,myDim_nod2D)) nn_num(1:myDim_nod2D) => mesh%nn_num nn_pos(1:nn_size, 1:myDim_nod2D) => mesh%nn_pos ! These are the same arrays that we also use in quadratic reconstruction !MOVE IT TO SOMEWHERE ELSE - do n=1,myDim_nod2d +!$OMP PARALLEL DO + do n=1, myDim_nod2d ! number of neigbouring nodes to node n nn_num(n)=1 ! local position of neigbouring nodes nn_pos(1,n)=n end do - +!$OMP END PARALLEL DO !___________________________________________________________________________ allocate(twork%nboundary_lay(myDim_nod2D+eDim_nod2D)) !node n becomes a boundary node after layer twork%nboundary_lay(n) twork%nboundary_lay=nl-1 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, k, n1, n2) +!$OMP DO do n=1, myDim_edge2D ! n1 and n2 are local indices n1=edges(1,n) n2=edges(2,n) ! ... if(n1<=myDim_nod2D) --> because dont use extended nodes if(n1<=myDim_nod2D) then +#if defined(_OPENMP) + call omp_set_lock(partit%plock(n1)) +#endif nn_pos(nn_num(n1)+1,n1)=n2 nn_num(n1)=nn_num(n1)+1 +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(n1)) +#endif end if ! ... if(n2<=myDim_nod2D) --> because dont use extended nodes if(n2<=myDim_nod2D) then +#if defined(_OPENMP) + call omp_set_lock(partit%plock(n2)) +#endif nn_pos(nn_num(n2)+1,n2)=n1 nn_num(n2)=nn_num(n2)+1 +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(n2)) +#endif end if if (any(edge_tri(:,n)<=0)) then @@ -112,10 +137,22 @@ subroutine muscl_adv_init(twork, partit, mesh) ! this edge nodes become boundary edge with increasing depth due to bottom topography ! at the depth twork%nboundary_lay the edge (edgepoints) still has two valid ocean triangles ! below that depth, edge becomes boundary edge +#if defined(_OPENMP) + call omp_set_lock (partit%plock(edges(1,n))) +#endif twork%nboundary_lay(edges(1,n))=min(twork%nboundary_lay(edges(1,n)), minval(nlevels(edge_tri(:,n)))-1) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(edges(1,n))) + call omp_set_lock (partit%plock(edges(2,n))) +#endif twork%nboundary_lay(edges(2,n))=min(twork%nboundary_lay(edges(2,n)), minval(nlevels(edge_tri(:,n)))-1) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(edges(2,n))) +#endif end if end do +!$OMP END DO +!$OMP END PARALLEL end SUBROUTINE muscl_adv_init ! ! @@ -150,15 +187,26 @@ SUBROUTINE find_up_downwind_triangles(twork, partit, mesh) ! In order that this procedure works, we need to know nodes and their coordinates ! on the extended set of elements (not only my, but myDim+eDim+eXDim) ! ===== -allocate(coord_elem(2,3,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) +allocate(coord_elem(2, 3, myDim_elem2D+eDim_elem2D+eXDim_elem2D)) allocate(temp(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) DO n=1,3 DO k=1,2 - do el=1,myDim_elem2D +!$OMP PARALLEL +!$OMP DO + DO el=1,myDim_elem2D temp(el)=coord_nod2D(k,elem2D_nodes(n,el)) - end do + END DO +!$OMP END DO +!$OMP MASTER call exchange_elem(temp, partit) - coord_elem(k,n,:)=temp(:) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO + DO el=1, myDim_elem2D+eDim_elem2D+eXDim_elem2D + coord_elem(k,n,el)=temp(el) + END DO +!$OMP END DO +!$OMP END PARALLEL END DO END DO deallocate(temp) @@ -166,15 +214,27 @@ SUBROUTINE find_up_downwind_triangles(twork, partit, mesh) allocate(e_nodes(3,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) allocate(temp_i(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) DO n=1,3 +!$OMP PARALLEL +!$OMP DO do el=1,myDim_elem2D temp_i(el)=myList_nod2D(elem2D_nodes(n,el)) end do +!$OMP END DO +!$OMP MASTER call exchange_elem(temp_i, partit) - e_nodes(n,:)=temp_i(:) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO + DO el=1, myDim_elem2D+eDim_elem2D+eXDim_elem2D + e_nodes(n, el)=temp_i(el) + END DO +!$OMP END DO +!$OMP END PARALLEL END DO deallocate(temp_i) - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, k, ednodes, elem, el, x,b, c, cr, bx, by, xx, xy, ab, ax) +!$OMP DO DO n=1, myDim_edge2d ednodes=edges(:,n) x=coord_nod2D(:,ednodes(2))-coord_nod2D(:,ednodes(1)) @@ -224,7 +284,7 @@ SUBROUTINE find_up_downwind_triangles(twork, partit, mesh) twork%edge_up_dn_tri(1,n)=elem cycle endif -END DO + END DO ! Find downwind element x=-x DO k=1,nod_in_elem2D_num(ednodes(2)) @@ -269,20 +329,24 @@ SUBROUTINE find_up_downwind_triangles(twork, partit, mesh) endif END DO END DO +!$OMP END DO +!$OMP END PARALLEL + ! For edges touching the boundary --- up or downwind elements may be absent. ! We return to the standard Miura at nodes that ! belong to such edges. Same at the depth. ! Count the number of 'good' edges: -k=0 -DO n=1,myDim_edge2D - if((twork%edge_up_dn_tri(1,n).ne.0).and.(twork%edge_up_dn_tri(2,n).ne.0)) k=k+1 -END DO +!k=0 +!DO n=1, myDim_edge2D +! if((twork%edge_up_dn_tri(1,n).ne.0).and.(twork%edge_up_dn_tri(2,n).ne.0)) k=k+1 +!END DO +!$OMP PARALLEL DO +DO n=1, myDim_edge2D + twork%edge_up_dn_grad(:, :, n)=0.0_WP +END DO +!$OMP END PARALLEL DO deallocate(e_nodes, coord_elem) - - -twork%edge_up_dn_grad=0.0_WP - end SUBROUTINE find_up_downwind_triangles ! ! @@ -296,7 +360,7 @@ SUBROUTINE fill_up_dn_grad(twork, partit, mesh) USE MOD_TRACER USE o_ARRAYS IMPLICIT NONE -integer :: n, nz, elem, k, edge, ednodes(2), nzmin, nzmax +integer :: edge, n, nz, elem, k, ednodes(2), nzmin, nzmax real(kind=WP) :: tvol, tx, ty type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -305,10 +369,11 @@ SUBROUTINE fill_up_dn_grad(twork, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - !___________________________________________________________________________ ! loop over edge segments - DO edge=1,myDim_edge2D +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, n, nz, elem, k, ednodes, nzmin, nzmax, tvol, tx, ty) +!$OMP DO + DO edge=1, myDim_edge2D ednodes=edges(:,edge) !_______________________________________________________________________ ! case when edge has upwind and downwind triangle on the surface @@ -452,5 +517,7 @@ SUBROUTINE fill_up_dn_grad(twork, partit, mesh) twork%edge_up_dn_grad(4,nz,edge)=ty/tvol END DO end if - END DO + END DO +!$OMP END DO +!$OMP END PARALLEL END SUBROUTINE fill_up_dn_grad From 3342bb70d733480975d6102f9ed8910da9683059 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 11 Nov 2021 15:40:44 +0000 Subject: [PATCH 549/909] ADD defined (__ifsinterface) case for evap_no_ifrac. Maybe get rid of these later if possible. --- src/gen_forcing_init.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_forcing_init.F90 b/src/gen_forcing_init.F90 index 95dd108a7..3ca30cf64 100755 --- a/src/gen_forcing_init.F90 +++ b/src/gen_forcing_init.F90 @@ -75,7 +75,7 @@ subroutine forcing_array_setup(partit, mesh) evaporation = 0.0_WP ice_sublimation = 0.0_WP -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) allocate(tmp_sublimation(n2),tmp_evap_no_ifrac(n2), tmp_shortwave(n2)) allocate(sublimation(n2),evap_no_ifrac(n2)) allocate(atm_net_fluxes_north(nrecv), atm_net_fluxes_south(nrecv)) From 27d588c995f91f5822f9c7315ba8d094cbc56419 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 11 Nov 2021 15:48:15 +0000 Subject: [PATCH 550/909] ADD defined (__ifsinterface) case for definition of evap_no_ifrac etc. Maybe get rid of these later if possible. --- src/gen_modules_forcing.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_modules_forcing.F90 b/src/gen_modules_forcing.F90 index 2e2f67dca..d8a99b704 100755 --- a/src/gen_modules_forcing.F90 +++ b/src/gen_modules_forcing.F90 @@ -59,7 +59,7 @@ module g_forcing_arrays real(kind=WP), allocatable, dimension(:) :: runoff, evaporation, ice_sublimation real(kind=WP), allocatable, dimension(:) :: cloudiness, press_air -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) real(kind=WP), target, allocatable, dimension(:) :: sublimation, evap_no_ifrac real(kind=WP), target, allocatable, dimension(:) :: tmp_sublimation, tmp_evap_no_ifrac !temporary flux fields real(kind=WP), target, allocatable, dimension(:) :: tmp_shortwave !(for flux correction) From e0847d945a2a887a48817f527aa5cc0cfebfeaea Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 11 Nov 2021 15:55:17 +0000 Subject: [PATCH 551/909] treat __oasis and __ifsinterface case differently --- src/gen_forcing_init.F90 | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/gen_forcing_init.F90 b/src/gen_forcing_init.F90 index 3ca30cf64..4c7d14263 100755 --- a/src/gen_forcing_init.F90 +++ b/src/gen_forcing_init.F90 @@ -76,8 +76,12 @@ subroutine forcing_array_setup(partit, mesh) ice_sublimation = 0.0_WP #if defined (__oasis) || defined (__ifsinterface) + allocate(sublimation(n2), evap_no_ifrac(n2)) + sublimation=0.0_WP + evap_no_ifrac=0.0_WP +#endif +#if defined (__oasis) allocate(tmp_sublimation(n2),tmp_evap_no_ifrac(n2), tmp_shortwave(n2)) - allocate(sublimation(n2),evap_no_ifrac(n2)) allocate(atm_net_fluxes_north(nrecv), atm_net_fluxes_south(nrecv)) allocate(oce_net_fluxes_north(nrecv), oce_net_fluxes_south(nrecv)) allocate(flux_correction_north(nrecv), flux_correction_south(nrecv)) @@ -92,11 +96,10 @@ subroutine forcing_array_setup(partit, mesh) flux_correction_north=0.0_WP flux_correction_south=0.0_WP flux_correction_total=0.0_WP - evap_no_ifrac=0.0_WP - sublimation=0.0_WP #endif + ! Temp storage for averaging !!PS allocate(aver_temp(n2)) From 8bcdab024c326e57005cb3cd4d5a4d0e3fc29ae0 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 11 Nov 2021 16:03:02 +0000 Subject: [PATCH 552/909] ADD defined (__ifsinterface) case for oce_heat_flux, ice_heat_flux etc. Maybe get rid of these later if possible. --- src/ice_modules.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 4300aee6d..c7366c028 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -71,7 +71,7 @@ MODULE i_ARRAYS REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux real(kind=WP),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux @@ -81,7 +81,7 @@ MODULE i_ARRAYS #if defined (__oifs) real(kind=WP),target, allocatable, dimension(:) :: enthalpyoffuse #endif -#endif /* (__oasis) */ +#endif /* (__oasis) || defined (__ifsinterface)*/ REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: S_oc_array, T_oc_array REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_x From 9da7b856d4ee532f16bdf4017e2bb0859f511d96 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 11 Nov 2021 16:49:49 +0000 Subject: [PATCH 553/909] Updated ifs-interface to latest and greatest fesom. Compiles successfully --- src/ifs_interface/ifs_interface.F90 | 170 +++++++++++++++------------- 1 file changed, 90 insertions(+), 80 deletions(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 4467dfa9a..8bf150ba2 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -16,7 +16,7 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & ! Initialize the FESOM model for single executable coupling USE par_kind !in ifs_modules.F90 - USE g_PARSUP, only: MPI_COMM_FESOM, mype + USE fesom_main_storage_module, only: fesom => f ! only: MPI_COMM_FESOM, mype (previously in g_parsup) USE g_config, only: dt USE g_clock, only: timenew, daynew, yearnew, month, day_in_month USE nemogcmcoup_steps, ONLY : substeps @@ -44,17 +44,17 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & INTEGER :: i NAMELIST/namfesomstep/substeps - ! TODO hard-coded here, put in namelist + ! overwritten from value namelist substeps=2 OPEN(9,file='namfesomstep.in') READ(9,namfesomstep) CLOSE(9) - MPI_COMM_FESOM=icomm + fesom%MPI_COMM_FESOM=icomm itini = 1 CALL main_initialize(itend_fesom) !also sets mype and npes itend=itend_fesom/substeps - if(mype==0) then + if(fesom%mype==0) then WRITE(0,*)'!======================================' WRITE(0,*)'! FESOM is initialized from within IFS.' WRITE(0,*)'! get MPI_COMM_FESOM. =================' @@ -66,14 +66,14 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & ! initial date and time (time is not used) inidate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 initime = 0 - if(mype==0) then + if(fesom%mype==0) then WRITE(0,*)'! FESOM initial date is ', inidate ,' ======' WRITE(0,*)'! FESOM substeps are ', substeps ,' ======' endif ! fesom timestep (as seen by IFS) zstp = REAL(substeps,wpIFS)*dt - if(mype==0) then + if(fesom%mype==0) then WRITE(0,*)'! FESOM timestep as seen by IFS is ', real(zstp,4), 'sec (',substeps,'xdt)' WRITE(0,*)'!======================================' endif @@ -85,11 +85,8 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & & npoints, nlocmsk, ngloind ) ! FESOM modules - USE g_PARSUP, only: mype, npes, myDim_nod2D, eDim_nod2D, myDim_elem2D, eDim_elem2D, eXDim_elem2D, & - myDim_edge2D, eDim_edge2D, myList_nod2D, myList_elem2D - USE MOD_MESH - !USE o_MESH, only: nod2D, elem2D - USE g_init2timestepping, only: meshinmod + USE fesom_main_storage_module, only: fesom => f ! only: mype, npes, myDim_nod2D, eDim_nod2D, myDim_elem2D, eDim_elem2D, eXDim_elem2D, & + ! myDim_edge2D, eDim_edge2D, myList_nod2D, myList_elem2D ! Initialize single executable coupling USE parinter @@ -109,9 +106,13 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & INTEGER :: iunit = 0 ! Local variables - type(t_mesh), target :: mesh + !type(t_mesh), target :: mesh integer , pointer :: nod2D integer , pointer :: elem2D + integer, pointer :: myDim_nod2D, eDim_nod2D + integer, dimension(:), pointer :: myList_nod2D + integer, pointer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D + integer, dimension(:), pointer :: myList_elem2D ! Namelist containing the file names of the weights CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & @@ -145,16 +146,21 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & ! associate the mesh, only what is needed here ! #include "associate_mesh.h" - mesh = meshinmod - nod2D => mesh%nod2D - elem2D => mesh%elem2D - + nod2D => fesom%mesh%nod2D + elem2D => fesom%mesh%elem2D + myDim_nod2D => fesom%partit%myDim_nod2D + eDim_nod2D => fesom%partit%eDim_nod2D + myList_nod2D(1:myDim_nod2D+eDim_nod2D) => fesom%partit%myList_nod2D + myDim_elem2D => fesom%partit%myDim_elem2D + eDim_elem2D => fesom%partit%eDim_elem2D + eXDim_elem2D => fesom%partit%eXDim_elem2D + myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => fesom%partit%myList_elem2D ! here FESOM knows about the (total number of) MPI tasks - if(mype==0) then + if(fesom%mype==0) then write(*,*) 'MPI has been initialized in the atmospheric model' - write(*, *) 'Running on ', npes, ' PEs' + write(*, *) 'Running on ', fesom%npes, ' PEs' end if ! Read namelists @@ -179,7 +185,7 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & & mpi_integer, mpi_sum, icomm, ierr) - if(mype==0) then + if(fesom%mype==0) then WRITE(0,*)'!======================================' WRITE(0,*)'! SCALARS =============================' @@ -205,24 +211,24 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & ! from atmosphere Gaussian grid to ocean T-grid IF (lreaddist) THEN - CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoT, & + CALL parinter_read( fesom%mype, fesom%npes, nglopoints, noglopoints, gausstoT, & & cdpathdist,'ifs_to_fesom_gridT',lexists) ENDIF IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN IF (lparbcast) THEN CALL scripremap_read_sgl(cdfile_gauss_to_T,remap_gauss_to_T,& - & mype,npes,icomm,.TRUE.) + & fesom%mype,fesom%npes,icomm,.TRUE.) ELSE CALL scripremap_read(cdfile_gauss_to_T,remap_gauss_to_T) ENDIF - CALL parinter_init( mype, npes, icomm, & + CALL parinter_init( fesom%mype, fesom%npes, icomm, & & npoints, nglopoints, nlocmsk, ngloind, & & nopoints, noglopoints, omask, ogloind, & & remap_gauss_to_T, gausstoT, lcommout, TRIM(commoutprefix)//'_gtoT', & & iunit ) CALL scripremap_dealloc(remap_gauss_to_T) IF (lwritedist) THEN - CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoT, & + CALL parinter_write( fesom%mype, fesom%npes, nglopoints, noglopoints, gausstoT, & & cdpathdist,'ifs_to_fesom_gridT') ENDIF ENDIF @@ -230,25 +236,25 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & ! From ocean T-grid to atmosphere Gaussian grid IF (lreaddist) THEN - CALL parinter_read( mype, npes, noglopoints, nglopoints, Ttogauss, & + CALL parinter_read( fesom%mype, fesom%npes, noglopoints, nglopoints, Ttogauss, & & cdpathdist,'fesom_gridT_to_ifs',lexists) ENDIF IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN IF (lparbcast) THEN CALL scripremap_read_sgl(cdfile_T_to_gauss,remap_T_to_gauss,& - & mype,npes,icomm,.TRUE.) + & fesom%mype,fesom%npes,icomm,.TRUE.) ELSE CALL scripremap_read(cdfile_T_to_gauss,remap_T_to_gauss) ENDIF - CALL parinter_init( mype, npes, icomm, & + CALL parinter_init( fesom%mype, fesom%npes, icomm, & & nopoints, noglopoints, omask, ogloind, & & npoints, nglopoints, nlocmsk, ngloind, & & remap_T_to_gauss, Ttogauss, lcommout, TRIM(commoutprefix)//'_Ttog', & & iunit ) CALL scripremap_dealloc(remap_T_to_gauss) IF (lwritedist) THEN - CALL parinter_write( mype, npes, noglopoints, nglopoints, Ttogauss, & + CALL parinter_write( fesom%mype, fesom%npes, noglopoints, nglopoints, Ttogauss, & & cdpathdist,'fesom_gridT_to_ifs') ENDIF ENDIF @@ -256,7 +262,7 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & DEALLOCATE(omask,ogloind) - if(mype==0) then + if(fesom%mype==0) then WRITE(0,*)'!======================================' WRITE(0,*)'! VECTORS =============================' @@ -276,24 +282,24 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & ! from atmosphere Gaussian grid to ocean UV-grid IF (lreaddist) THEN - CALL parinter_read( mype, npes, nglopoints, noglopoints, gausstoUV, & + CALL parinter_read( fesom%mype, fesom%npes, nglopoints, noglopoints, gausstoUV, & & cdpathdist,'ifs_to_fesom_gridUV',lexists) ENDIF IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN IF (lparbcast) THEN CALL scripremap_read_sgl(cdfile_gauss_to_UV,remap_gauss_to_UV,& - & mype,npes,icomm,.TRUE.) + & fesom%mype,fesom%npes,icomm,.TRUE.) ELSE CALL scripremap_read(cdfile_gauss_to_UV,remap_gauss_to_UV) ENDIF - CALL parinter_init( mype, npes, icomm, & + CALL parinter_init( fesom%mype, fesom%npes, icomm, & & npoints, nglopoints, nlocmsk, ngloind, & & nopoints, noglopoints, omask, ogloind, & & remap_gauss_to_UV, gausstoUV, lcommout, TRIM(commoutprefix)//'_gtoUV', & & iunit ) CALL scripremap_dealloc(remap_gauss_to_UV) IF (lwritedist) THEN - CALL parinter_write( mype, npes, nglopoints, noglopoints, gausstoUV, & + CALL parinter_write( fesom%mype, fesom%npes, nglopoints, noglopoints, gausstoUV, & & cdpathdist,'ifs_to_fesom_gridUV') ENDIF ENDIF @@ -301,25 +307,25 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & ! From ocean UV-grid to atmosphere Gaussian grid IF (lreaddist) THEN - CALL parinter_read( mype, npes, noglopoints, nglopoints, UVtogauss, & + CALL parinter_read( fesom%mype, fesom%npes, noglopoints, nglopoints, UVtogauss, & & cdpathdist,'fesom_gridUV_to_ifs',lexists) ENDIF IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN IF (lparbcast) THEN CALL scripremap_read_sgl(cdfile_UV_to_gauss,remap_UV_to_gauss,& - & mype,npes,icomm,.TRUE.) + & fesom%mype,fesom%npes,icomm,.TRUE.) ELSE CALL scripremap_read(cdfile_UV_to_gauss,remap_UV_to_gauss) ENDIF - CALL parinter_init( mype, npes, icomm, & + CALL parinter_init( fesom%mype, fesom%npes, icomm, & & nopoints, noglopoints, omask, ogloind, & & npoints, nglopoints, nlocmsk, ngloind, & & remap_UV_to_gauss, UVtogauss, lcommout, TRIM(commoutprefix)//'_UVtog', & & iunit ) CALL scripremap_dealloc(remap_UV_to_gauss) IF (lwritedist) THEN - CALL parinter_write( mype, npes, noglopoints, nglopoints, UVtogauss, & + CALL parinter_write( fesom%mype, fesom%npes, noglopoints, nglopoints, UVtogauss, & & cdpathdist,'fesom_gridUV_to_ifs') ENDIF ENDIF @@ -334,22 +340,17 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & & pgifr, pghic, pghsn, pgucur, pgvcur, & & pgistl, licelvls ) - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! Interpolate sst, ice; surf T; albedo; concentration; thickness, ! snow thickness and currents from the FESOM grid to the Gaussian grid. ! This routine can be called at any point in time since it does ! the necessary message passing in parinter_fld. USE par_kind ! in ifs_modules.F90 - USE o_ARRAYS, ONLY : tr_arr, UV + USE fesom_main_storage_module, only: fesom => f + USE o_ARRAYS, ONLY : UV ! tr_arr is now tracers USE i_arrays, ONLY : m_ice, a_ice, m_snow USE i_therm_param, ONLY : tmelt - !USE o_PARAM, ONLY : WP - USE g_PARSUP, only: myDim_nod2D,eDim_nod2D, myDim_elem2D,eDim_elem2D,eXDim_elem2D - !USE o_MESH, only: elem2D_nodes, coord_nod2D - USE MOD_MESH - USE g_init2timestepping, only: meshinmod - USE g_rotate_grid, only: vector_r2g USE parinter USE scripremap @@ -362,9 +363,12 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & REAL(wpIFS), DIMENSION(nopoints,3) :: pgistl LOGICAL :: licelvls - type(t_mesh), target :: mesh + !type(t_mesh), target :: mesh real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D - integer, dimension(:,:) , pointer :: elem2D_nodes + integer, dimension(:,:) , pointer :: elem2D_nodes + integer, pointer :: myDim_nod2D, eDim_nod2D + integer, pointer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D + ! Message passing information INTEGER, INTENT(IN) :: mype, npes, icomm @@ -372,8 +376,8 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & INTEGER, INTENT(IN) :: nopoints ! Local variables - REAL(wpIFS), DIMENSION(myDim_nod2D) :: zsend - REAL(wpIFS), DIMENSION(myDim_elem2D) :: zsendU, zsendV + REAL(wpIFS), DIMENSION(fesom%partit%myDim_nod2D) :: zsend + REAL(wpIFS), DIMENSION(fesom%partit%myDim_elem2D) :: zsendU, zsendV INTEGER :: elnodes(3) REAL(wpIFS) :: rlon, rlat @@ -382,16 +386,23 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & !#include "associate_mesh.h" ! associate what is needed only - mesh = meshinmod - coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D - elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes + myDim_nod2D => fesom%partit%myDim_nod2D + eDim_nod2D => fesom%partit%eDim_nod2D + + myDim_elem2D => fesom%partit%myDim_elem2D + eDim_elem2D => fesom%partit%eDim_elem2D + eXDim_elem2D => fesom%partit%eXDim_elem2D + + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D + elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => fesom%mesh%elem2D_nodes + ! =================================================================== ! ! Pack SST data and convert to K. 'pgsst' is on Gauss grid. do n=1,myDim_nod2D - zsend(n)=tr_arr(1, n, 1)+tmelt ! sea surface temperature [K], - ! (1=surface, n=node, 1/2=T/S) + zsend(n)=fesom%tracers%data(1)%values(1, n) +tmelt ! sea surface temperature [K], + ! (1=surface, n=node, data(1/2)=T/S) enddo ! Interpolate SST @@ -549,10 +560,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! interpolation of the input gaussian grid data USE par_kind !in ifs_modules.F90 - USE g_PARSUP, only: myDim_nod2D, myDim_elem2D, par_ex, eDim_nod2D, eDim_elem2D, eXDim_elem2D, myDim_edge2D, eDim_edge2D - !USE o_MESH, only: coord_nod2D !elem2D_nodes - USE MOD_MESH - USE g_init2timestepping, only: meshinmod + USE fesom_main_storage_module, only: fesom => f !USE o_PARAM, ONLY : WP, use wpIFS from par_kind (IFS) USE g_rotate_grid, only: vector_r2g, vector_g2r USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & @@ -590,23 +598,25 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! QNS ice filter switch (requires tice_atm to be sent) LOGICAL, INTENT(IN) :: lqnsicefilt - type(t_mesh), target :: mesh + !type(t_mesh), target :: mesh ! Local variables INTEGER :: n + integer, pointer :: myDim_nod2D, eDim_nod2D REAL(wpIFS), parameter :: rhofwt = 1000. ! density of freshwater ! Packed receive buffer - REAL(wpIFS), DIMENSION(myDim_nod2D) :: zrecv - REAL(wpIFS), DIMENSION(myDim_elem2D):: zrecvU, zrecvV + REAL(wpIFS), DIMENSION(fesom%partit%myDim_nod2D) :: zrecv + REAL(wpIFS), DIMENSION(fesom%partit%myDim_elem2D):: zrecvU, zrecvV !#include "associate_mesh.h" ! associate only the necessary things - real(kind=WP), dimension(:,:), pointer :: coord_nod2D - mesh = meshinmod - coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D + real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D + myDim_nod2D => fesom%partit%myDim_nod2D + eDim_nod2D => fesom%partit%eDim_nod2D + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D ! =================================================================== ! ! Sort out incoming arrays from the IFS and put them on the ocean grid @@ -642,7 +652,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & shortwave(1:myDim_nod2D)=zrecv(1:myDim_nod2D) ! Do the halo exchange - call exchange_nod(shortwave) + call exchange_nod(shortwave,fesom%partit) ! =================================================================== ! @@ -660,7 +670,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & oce_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) ! Do the halo exchange - call exchange_nod(oce_heat_flux) + call exchange_nod(oce_heat_flux,fesom%partit) ! =================================================================== ! @@ -673,7 +683,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ice_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) ! Do the halo exchange - call exchange_nod(ice_heat_flux) + call exchange_nod(ice_heat_flux,fesom%partit) ! =================================================================== ! @@ -695,7 +705,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & evap_no_ifrac(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign ! Do the halo exchange - call exchange_nod(evap_no_ifrac) + call exchange_nod(evap_no_ifrac,fesom%partit) !7. Interpolate sublimation (evaporation over ice) to T grid @@ -706,7 +716,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & sublimation(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign ! Do the halo exchange - call exchange_nod(sublimation) + call exchange_nod(sublimation,fesom%partit) ! =================================================================== ! ! =================================================================== ! @@ -721,7 +731,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & prec_rain(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s ! Do the halo exchange - call exchange_nod(prec_rain) + call exchange_nod(prec_rain,fesom%partit) ! =================================================================== ! @@ -734,7 +744,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & prec_snow(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s ! Do the halo exchange - call exchange_nod(prec_snow) + call exchange_nod(prec_snow,fesom%partit) ! =================================================================== ! @@ -747,7 +757,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & !runoff(1:myDim_nod2D)=zrecv(1:myDim_nod2D) !conversion?? ! ! Do the halo exchange - !call exchange_nod(runoff) + !call exchange_nod(runoff,fesom%partit) ! !11. Interpolate ocean runoff to T grid ! @@ -772,7 +782,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! Unpack x stress atm->oce, without halo; then do halo exchange stress_atmoce_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmoce_x) + call exchange_nod(stress_atmoce_x,fesom%partit) ! CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_oce, & @@ -780,7 +790,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! Unpack y stress atm->oce, without halo; then do halo exchange stress_atmoce_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmoce_y) + call exchange_nod(stress_atmoce_y,fesom%partit) ! =================================================================== ! ! OVER ICE: @@ -790,7 +800,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! Unpack x stress atm->ice, without halo; then do halo exchange stress_atmice_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmice_x) + call exchange_nod(stress_atmice_x,fesom%partit) ! CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_ice, & @@ -798,7 +808,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! Unpack y stress atm->ice, without halo; then do halo exchange stress_atmice_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) - call exchange_nod(stress_atmice_y) + call exchange_nod(stress_atmice_y,fesom%partit) ! =================================================================== ! @@ -1449,7 +1459,7 @@ END SUBROUTINE nemogcmcoup_lim2_update SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) USE g_clock, only: yearnew, month, day_in_month - USE g_PARSUP, only: mype + USE fesom_main_storage_module, only: fesom => f ! mype USE nemogcmcoup_steps, ONLY : substeps IMPLICIT NONE @@ -1461,7 +1471,7 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) ! Data and time from NEMO INTEGER, INTENT(OUT) :: icdate, ictime - if(mype==0) then + if(fesom%mype==0) then WRITE(0,*)'! IFS at timestep ', istp, '. Do ', substeps , 'FESOM timesteps...' endif CALL main_timestepping(substeps) @@ -1471,7 +1481,7 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) icdate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 ictime = 0 ! (time is not used) - if(mype==0) then + if(fesom%mype==0) then WRITE(0,*)'! FESOM date at end of timestep is ', icdate ,' ======' endif @@ -1491,13 +1501,13 @@ END SUBROUTINE nemogcmcoup_step SUBROUTINE nemogcmcoup_final - USE g_PARSUP, only: mype + USE fesom_main_storage_module, only: fesom => f ! mype ! Finalize the FESOM model IMPLICIT NONE - if(mype==0) then + if(fesom%mype==0) then WRITE(*,*)'Finalization of FESOM from IFS.' endif CALL main_finalize From ee843a4bab6bd0cbc0e898b46be6aae0b36305c4 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Thu, 11 Nov 2021 16:57:26 +0000 Subject: [PATCH 554/909] Change CALL of main_init, main_timestepping, and main_finalize to the new fesom_init(), fesom_runloop(), and fesom_finalize() routines in latest fesom2. --- src/ifs_interface/ifs_interface.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 8bf150ba2..8ad96b3df 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -52,7 +52,7 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & fesom%MPI_COMM_FESOM=icomm itini = 1 - CALL main_initialize(itend_fesom) !also sets mype and npes + CALL fesom_init(itend_fesom) !also sets mype and npes itend=itend_fesom/substeps if(fesom%mype==0) then WRITE(0,*)'!======================================' @@ -1474,7 +1474,7 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) if(fesom%mype==0) then WRITE(0,*)'! IFS at timestep ', istp, '. Do ', substeps , 'FESOM timesteps...' endif - CALL main_timestepping(substeps) + CALL fesom_runloop(substeps) ! Compute date and time at the end of the time step @@ -1510,7 +1510,7 @@ SUBROUTINE nemogcmcoup_final if(fesom%mype==0) then WRITE(*,*)'Finalization of FESOM from IFS.' endif - CALL main_finalize + CALL fesom_finalize END SUBROUTINE nemogcmcoup_final #endif From bfc448c3508bba86dc972aed093e9d88158fbcf3 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 11 Nov 2021 15:17:55 +0100 Subject: [PATCH 555/909] ONPENMP in ../src/oce_ale_pressure_bv.F90. Efficiently speeds the runtime for "oce. mix,pres." --- src/oce_ale_pressure_bv.F90 | 146 ++++++++++++++++++++++++++---------- 1 file changed, 105 insertions(+), 41 deletions(-) diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index b0ee29d27..39d25a18c 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -217,7 +217,7 @@ subroutine pressure_bv(tracers, partit, mesh) type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers - real(kind=WP) :: dz_inv, bv, a, rho_up, rho_dn, t, s + real(kind=WP) :: dz_inv, bv, a, a_loc, rho_up, rho_dn, t, s integer :: node, nz, nl1, nzmax, nzmin real(kind=WP) :: rhopot(mesh%nl), bulk_0(mesh%nl), bulk_pz(mesh%nl), bulk_pz2(mesh%nl), rho(mesh%nl), dbsfc1(mesh%nl), db_max real(kind=WP) :: bulk_up, bulk_dn, smallvalue, buoyancy_crit, rho_surf, aux_rho, aux_rho1 @@ -232,28 +232,34 @@ subroutine pressure_bv(tracers, partit, mesh) salt=>tracers%data(2)%values(:,:) smallvalue=1.0e-20 buoyancy_crit=0.0003_WP - mixing_kpp = (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) ! NR Evaluate string comparison outside the loop. It is expensive. -!!PS mixing_kpp = (trim(mix_scheme)=='KPP' .or. trim(mix_scheme)=='cvmix_KPP') ! NR Evaluate string comparison outside the loop. It is expensive. + mixing_kpp = (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) !___________________________________________________________________________ ! Screen salinity - a=0.0_WP + a_loc=0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, a_loc) +!$OMP DO do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) - !!PS do nz=1,nlevels_nod2d(node)-1 do nz=nzmin,nzmax-1 - a=min(a,salt(nz,node)) + a_loc=min(a_loc,salt(nz,node)) enddo enddo - +!$OMP END DO + a=0.0_WP +!$OMP CRITICAL + a=min(a, a_loc) +!$OMP END CRITICAL +!$OMP END PARALLEL + !___________________________________________________________________________ - if(a<0.0_WP) then + ! model explodes, no OpenMP parallelization ! + if( a < 0.0_WP ) then write (*,*)' --> pressure_bv: s<0 happens!', a pe_status=1 do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) - !!PS do nz=1, nlevels_nod2d(node)-1 do nz=nzmin, nzmax-1 if (salt(nz, node) < 0) write (*,*) 'the model blows up at n=', mylist_nod2D(node), ' ; ', 'nz=', nz end do @@ -261,6 +267,11 @@ subroutine pressure_bv(tracers, partit, mesh) endif !___________________________________________________________________________ + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(dz_inv, bv, a, a_loc, rho_up, rho_dn, t, s, node, nz, nl1, nzmax, nzmin, & +!$OMP rhopot, bulk_0, bulk_pz, bulk_pz2, rho, dbsfc1, db_max, bulk_up, bulk_dn, & +!$OMP rho_surf, aux_rho, aux_rho1, flag1, flag2) +!$OMP DO do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) @@ -463,9 +474,11 @@ subroutine pressure_bv(tracers, partit, mesh) ! The mixed layer depth ! mixlay_depth ! bv_ref + !_______________________________________________________________________ + ! BV is defined on full levels except for the first and the last ones. end do - !_______________________________________________________________________ - ! BV is defined on full levels except for the first and the last ones. +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_bv ! ! @@ -558,6 +571,7 @@ subroutine pressure_force_4_linfs_fullcell(partit, mesh) #include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz) do elem=1, myDim_elem2D !_______________________________________________________________________ ! number of levels at elem @@ -577,6 +591,7 @@ subroutine pressure_force_4_linfs_fullcell(partit, mesh) pgf_y(nlz,elem) = sum(gradient_sca(4:6,elem)*hpressure(nlz,elnodes)/density_0) end do end do !-->do elem=1, myDim_elem2D +!$OMP END PARALLEL DO end subroutine pressure_force_4_linfs_fullcell ! ! @@ -610,8 +625,8 @@ subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) real(kind=WP) :: interp_n_dens(3), interp_n_temp, interp_n_salt, & dZn, dZn_i, dh, dval, mean_e_rho,dZn_rho_grad(2) real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 - real(kind=WP), dimension(:,:), pointer :: temp, salt real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -620,6 +635,11 @@ subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, nln, uln, ni, nlc, nlce, hpress_n_bottom, & +!$OMP interp_n_dens, interp_n_temp, interp_n_salt, dZn, dZn_i, dh, dval, & +!$OMP mean_e_rho, dZn_rho_grad, rhopot, bulk_0, bulk_pz, bulk_pz2, & +!$OMP zbar_n, z_n) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -756,6 +776,8 @@ subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) pgf_y(nle,elem) = sum(gradient_sca(4:6,elem)*hpress_n_bottom)/density_0 end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_linfs_nemo ! ! @@ -778,7 +800,7 @@ subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) implicit none type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - integer :: elem, elnodes(3), nle, ule, nlz, idx(3),ni + integer :: elem, elnodes(3), nle, ule, nlz, idx(3), ni real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) @@ -788,6 +810,9 @@ subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) #include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, idx, ni, int_dp_dx, drho_dx, dz_dx, aux_sum, & +!$OMP dx10, dx20, dx21, df10, df21, drho_dz, zbar_n, z_n) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -1017,6 +1042,8 @@ subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) pgf_y(nlz,elem) = int_dp_dx(2) + aux_sum*0.5_WP end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_linfs_shchepetkin ! ! @@ -1057,6 +1084,10 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, idx, ni, int_dp_dx, drho_dx, aux_sum, dx10, dx20, dx21, t0, dt10, dt21, s0, ds10, ds21, & +!$OMP rho_at_Zn, temp_at_Zn, salt_at_Zn, drho_dz, aux_dref, rhopot, bulk_0, bulk_pz, bulk_pz2, dref_rhopot, dref_bulk_0, & +!$OMP dref_bulk_pz, dref_bulk_pz2, zbar_n, z_n ) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -1381,6 +1412,8 @@ subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) pgf_y(nlz,elem) = int_dp_dx(2) + aux_sum*0.5_WP end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_linfs_easypgf ! ! @@ -1412,6 +1445,10 @@ subroutine pressure_force_4_linfs_cubicspline(partit, mesh) #include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, nlc, ni, node, nln, uln, dd, int_dp_dx, drho_dx, dz_dx, drho_dz, auxp, & +!$OMP dx10, dx20, dx21, df10, df21, interp_n_dens, s_ind, s_z, s_dens, s_H, aux1, aux2, s_dup, s_dlo, & +!$OMP a, b, c, d, dz, zbar_n, z_n ) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -1585,6 +1622,8 @@ subroutine pressure_force_4_linfs_cubicspline(partit, mesh) int_dp_dx(2) = int_dp_dx(2) + auxp end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_linfs_cubicspline ! ! @@ -1614,6 +1653,9 @@ subroutine pressure_force_4_linfs_cavity(partit, mesh) !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, idx, ni, int_dp_dx, drho_dx, dz_dx, aux_sum, & +!$OMP dx10, dx20, dx21, df10, df21, drho_dz, zbar_n, z_n ) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! number of levels at elem @@ -1801,6 +1843,8 @@ subroutine pressure_force_4_linfs_cavity(partit, mesh) end if end do !-->do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_linfs_cavity ! ! @@ -1836,7 +1880,7 @@ subroutine pressure_force_4_zxxxx(tracers, partit, mesh) write(*,*) ' shchepetkin, cubicspline, easypgf ' write(*,*) '________________________________________________________' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) - end if + end if end subroutine pressure_force_4_zxxxx ! ! @@ -1857,8 +1901,8 @@ subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) implicit none type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - integer :: elem, elnodes(3), nle, ule, nln(3), uln(3), nlz, nlc,dd - integer :: ni, node, dens_ind,kk + integer :: elem, elnodes(3), nle, ule, nln(3), uln(3), nlz, nlc, dd + integer :: ni, node, dens_ind, kk real(kind=WP) :: ze integer :: s_ind(4) real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, aux(2), s_dup, s_dlo @@ -1870,6 +1914,9 @@ subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) #include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nln, uln, nlz, nlc, dd, ni, node, dens_ind, kk, ze, s_ind, s_z, s_dens, s_H, & +!$OMP aux1, aux2, aux, s_dup, s_dlo, a, b, c, d, dz, rho_n, rhograd_e, p_grad, zbar_n, z_n ) +!$OMP DO do elem=1, myDim_elem2D ule = ulevels(elem) nle = nlevels(elem)-1 @@ -2021,6 +2068,8 @@ subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) end do ! --> do nlz=1,nle end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_zxxxx_cubicspline ! ! @@ -2045,7 +2094,7 @@ subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) implicit none type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - integer :: elem, elnodes(3), nle,ule, nlz, nln(3), ni, nlc, nlce, idx(3) + integer :: elem, elnodes(3), nle, ule, nlz, nln(3), ni, nlc, nlce, idx(3) real(kind=WP) :: int_dp_dx(2), drho_dx, drho_dy, drho_dz(3), dz_dx, dz_dy, aux_sum real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3) real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) @@ -2057,6 +2106,9 @@ subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, nln, ni, nlc, nlce, idx, int_dp_dx, drho_dx, drho_dy, drho_dz, dz_dx, dz_dy, aux_sum, & +!$OMP dx10, dx20, dx21, df10, df21, rhopot, bulk_0, bulk_pz, bulk_pz2, zbar_n, z_n) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -2263,7 +2315,8 @@ subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) pgf_y(nlz,elem) = int_dp_dx(2) + aux_sum*0.5_WP end do ! --> do elem=1, myDim_elem2D - +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_zxxxx_shchepetkin ! ! @@ -2290,8 +2343,8 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers - integer :: elem, elnodes(3), nle,ule, nlz, nln(3), ni, nlc, nlce, idx(3) - real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, drho_dy, dz_dy,aux_sum + integer :: elem, elnodes(3), nle, ule, nlz, nln(3), ni, nlc, nlce, idx(3) + real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, drho_dy, dz_dy, aux_sum real(kind=WP) :: dx10(3), dx20(3), dx21(3) real(kind=WP) :: f0(3), df10(3), df21(3) real(kind=WP) :: t0(3), dt10(3), dt21(3) @@ -2309,6 +2362,10 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, nln, ni, nlc, nlce, idx, int_dp_dx, drho_dx, drho_dy, dz_dx, dz_dy, aux_sum, dx10, dx20, dx21, & +!$OMP f0, df10, df21, t0, dt10, dt21, s0, ds10, ds21, rho_at_Zn, temp_at_Zn, salt_at_Zn, drho_dz, aux_dref, rhopot, & +!$OMP bulk_0, bulk_pz, bulk_pz2, dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, zbar_n, z_n ) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -2716,6 +2773,8 @@ subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) pgf_y(nlz,elem) = int_dp_dx(2) + aux_sum*0.5_WP end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_zxxxx_easypgf ! ! @@ -2950,7 +3009,7 @@ subroutine sw_alpha_beta(TF1,SF1, partit, mesh) type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit integer :: n, nz, nzmin, nzmax - real(kind=WP) :: t1,t1_2,t1_3,t1_4,p1,p1_2,p1_3,s1,s35,s35_2 + real(kind=WP) :: t1, t1_2, t1_3, t1_4, p1, p1_2, p1_3, s1, s35, s35_2 real(kind=WP) :: a_over_b real(kind=WP) :: TF1(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D),SF1(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) @@ -2959,6 +3018,8 @@ subroutine sw_alpha_beta(TF1,SF1, partit, mesh) #include "associate_part_ass.h" #include "associate_mesh_ass.h" +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, t1, t1_2, t1_3, t1_4, p1, p1_2, p1_3, s1, s35, s35_2, a_over_b) +!$OMP DO do n = 1,myDim_nod2d nzmin = ulevels_nod2d(n) nzmax = nlevels_nod2d(n) @@ -3003,6 +3064,8 @@ subroutine sw_alpha_beta(TF1,SF1, partit, mesh) sw_alpha(nz,n) = a_over_b*sw_beta(nz,n) end do end do +!$OMP END DO +!$OMP END PARALLEL call exchange_nod(sw_alpha, partit) call exchange_nod(sw_beta, partit) end subroutine sw_alpha_beta @@ -3041,7 +3104,9 @@ subroutine compute_sigma_xy(TF1,SF1, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - ! + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tx, ty, sx, sy, vol, testino, n, nz, elnodes, el, k, nln, uln, nle, ule) +!$OMP DO DO n=1, myDim_nod2D nln = nlevels_nod2D(n)-1 uln = ulevels_nod2D(n) @@ -3087,7 +3152,8 @@ subroutine compute_sigma_xy(TF1,SF1, partit, mesh) sigma_xy(1,uln:nln,n) = (-sw_alpha(uln:nln,n)*tx(uln:nln)+sw_beta(uln:nln,n)*sx(uln:nln))/vol(uln:nln)*density_0 sigma_xy(2,uln:nln,n) = (-sw_alpha(uln:nln,n)*ty(uln:nln)+sw_beta(uln:nln,n)*sy(uln:nln))/vol(uln:nln)*density_0 END DO - +!$OMP END DO +!$OMP END PARALLEL call exchange_nod(sigma_xy, partit) end subroutine compute_sigma_xy ! @@ -3105,10 +3171,10 @@ subroutine compute_neutral_slope(partit, mesh) IMPLICIT NONE type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - real(kind=WP) :: deltaX1,deltaY1,deltaX2,deltaY2 integer :: edge - integer :: n,nz,nl1,ul1,el(2),elnodes(3),enodes(2) - real(kind=WP) :: c, ro_z_inv,eps,S_cr,S_d + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 + integer :: n, nz, nl1, ul1, el(2), elnodes(3), enodes(2) + real(kind=WP) :: c, ro_z_inv, eps, S_cr, S_d #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -3118,12 +3184,13 @@ subroutine compute_neutral_slope(partit, mesh) eps=5.0e-6_WP S_cr=1.0e-2_WP S_d=1.0e-3_WP - slope_tapered=0._WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, n, nz, nl1, ul1, el, elnodes, enodes, c, ro_z_inv) +!$OMP DO do n=1, myDim_nod2D + slope_tapered(: , :, n)=0._WP nl1=nlevels_nod2d(n)-1 ul1=ulevels_nod2d(n) - !!PS do nz = 2,nl1 - do nz = ul1+1,nl1 + do nz = ul1+1, nl1 ro_z_inv=2._WP*g/density_0/max(bvfreq(nz,n)+bvfreq(nz+1,n), eps**2) !without minus, because neutral slope S=-(nabla\rho)/(d\rho/dz) neutral_slope(1,nz,n)=sigma_xy(1,nz,n)*ro_z_inv neutral_slope(2,nz,n)=sigma_xy(2,nz,n)*ro_z_inv @@ -3133,11 +3200,10 @@ subroutine compute_neutral_slope(partit, mesh) c=0.5_WP*(1.0_WP + tanh((S_cr - neutral_slope(3,nz,n))/S_d)) if ((bvfreq(nz,n) <= 0.0_WP) .or. (bvfreq(nz+1,n) <= 0.0_WP)) c=0.0_WP slope_tapered(:,nz,n)=neutral_slope(:,nz,n)*c -! slope_tapered(:,nl1-1:nl1,n)=0. -! slope_tapered(:,1:2,n) =0. enddo enddo - +!$OMP END DO +!$OMP END PARALLEL call exchange_nod(neutral_slope, partit) call exchange_nod(slope_tapered, partit) end subroutine compute_neutral_slope @@ -3161,7 +3227,7 @@ subroutine insitu2pot(tracers, partit, mesh) type(t_tracer), intent(in), target :: tracers real(kind=WP), external :: ptheta real(kind=WP) :: pp, pr, tt, ss - integer :: n, nz, nzmin,nzmax + integer :: n, nz, nzmin, nzmax real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -3171,6 +3237,7 @@ subroutine insitu2pot(tracers, partit, mesh) salt=>tracers%data(2)%values(:,:) ! Convert in situ temperature into potential temperature pr=0.0_WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, tt, ss, pp) do n=1,myDim_nod2d+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) @@ -3189,6 +3256,7 @@ subroutine insitu2pot(tracers, partit, mesh) temp(nz,n)=ptheta(ss, tt, pp, pr) end do end do +!$OMP END PARALLEL DO end subroutine insitu2pot ! ! @@ -3254,15 +3322,10 @@ subroutine init_ref_density(partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - - !___________________________________________________________________________ -!!PS S=34. -!!PS T=2.0 - !___________________________________________________________________________ - density_ref = 0.0_WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, rhopot, bulk_0, bulk_pz, bulk_pz2, rho, T, S, auxz) do node=1,myDim_nod2d+eDim_nod2d - !!PS nzmin = ulevels_nod2d(node) + density_ref(:, node) = 0.0_WP nzmin = 1 nzmax = nlevels_nod2d(node)-1 auxz=min(0.0,Z_3d_n(nzmin,node)) @@ -3270,7 +3333,7 @@ subroutine init_ref_density(partit, mesh) !_______________________________________________________________________ call densityJM_components(density_ref_T, density_ref_S, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) rho = bulk_0 + auxz*bulk_pz + auxz*bulk_pz2 - density_ref(nzmin,node) = rho*rhopot/(rho+0.1_WP*auxz) + density_ref(nzmin, node) = rho*rhopot/(rho+0.1_WP*auxz) !_______________________________________________________________________ do nz=nzmin+1,nzmax @@ -3279,6 +3342,7 @@ subroutine init_ref_density(partit, mesh) density_ref(nz,node) = rho*rhopot/(rho+0.1_WP*auxz) end do end do +!$OMP END PARALLEL DO if(mype==0) write(*,*) ' --> compute reference density' end subroutine init_ref_density From 64e164899dadbe59551cba58e59885dc32c096d9 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Thu, 11 Nov 2021 23:27:30 +0100 Subject: [PATCH 556/909] indicate how many OpenMP threads can be used --- src/gen_modules_partitioning.F90 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 6522a8b2d..b9666a7b8 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -72,7 +72,10 @@ subroutine par_init(partit) ! initializes MPI end if write(*,*) 'MPI has been initialized, provided MPI thread support level: ', & provided_mpi_thread_support_level_name,provided_mpi_thread_support_level - write(*, *) 'Running on ', partit%npes, ' PEs' + write(*, *) 'Running on ', partit%npes, ' PEs' +#if defined(_OPENMP) + write(*, *) 'This is MPI/OpenMP run, with ', OMP_GET_MAX_THREADS(), ' threads per PE' +#endif end if end subroutine par_init !================================================================= From 4da51de02954eee6437c11a36482465b713d983e Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Thu, 11 Nov 2021 23:54:09 +0100 Subject: [PATCH 557/909] add OMP_NUM_THREADS to test jobscript --- work/job_ubuntu | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/work/job_ubuntu b/work/job_ubuntu index 34b9e6535..5930bf2c6 100755 --- a/work/job_ubuntu +++ b/work/job_ubuntu @@ -2,7 +2,7 @@ ulimit -s unlimited -# determine JOBID +export OMP_NUM_THREADS=1 ln -s ../bin/fesom.x . # cp -n ../bin/fesom.x cp -n ../config/namelist.config . @@ -12,7 +12,7 @@ cp -n ../config/namelist.ice . cp -n ../config/namelist.io . date -mpirun --allow-run-as-root --mca btl_vader_single_copy_mechanism none -n 2 fesom.x +time mpirun --allow-run-as-root --mca btl_vader_single_copy_mechanism none -n 2 fesom.x date From faa929db5b0d6e3a51c60fdb15c66d8c9aa65330 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Thu, 11 Nov 2021 23:54:32 +0100 Subject: [PATCH 558/909] add OpenMP testing --- .github/workflows/fesom2_openmp.yml | 51 +++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 .github/workflows/fesom2_openmp.yml diff --git a/.github/workflows/fesom2_openmp.yml b/.github/workflows/fesom2_openmp.yml new file mode 100644 index 000000000..5913836fc --- /dev/null +++ b/.github/workflows/fesom2_openmp.yml @@ -0,0 +1,51 @@ + +name: FESOM2 OpenMP test + +# Controls when the action will run. Triggers the workflow on push or pull request. + +on: [push, pull_request] + + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + openmp_test: + # Containers must run in Linux based operating systems + runs-on: ubuntu-latest + # Docker Hub image that `container-job` executes in + container: koldunovn/fesom2_test:refactoring2 + + # Service containers to run with `gfortran_ubuntu` + steps: + # NK: this changes working directory to fesom2 + - uses: actions/checkout@v2 + + - name: switch OpenMP ON + cd ./src/ + sed -i 's/with OpenMP\" OFF/with OpenMP\" ON/g' CMakeLists.txt + cd ../ + + - name: Compile model + run: | + bash -l configure.sh ubuntu + + - name: Create global test run with 4 OpenMP threads + run: | + mkrun pi test_pi -m docker + cd work_pi + sed -i 's/THREADS=1/THREADS=4/g' job_docker_new + cd ../ + + - name: FESOM2 global test run + run: | + cd work_pi + chmod +x job_docker_new + ./job_docker_new + + - name: Check global results with large tollerance + run: | + cd work_pi + fcheck -a 1e-9 . + + + + From 2efebeb21fb518d517dfbba0331518310f28830b Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Thu, 11 Nov 2021 23:56:56 +0100 Subject: [PATCH 559/909] fis syntax in openmp testing --- .github/workflows/fesom2_openmp.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/fesom2_openmp.yml b/.github/workflows/fesom2_openmp.yml index 5913836fc..4184b3310 100644 --- a/.github/workflows/fesom2_openmp.yml +++ b/.github/workflows/fesom2_openmp.yml @@ -20,6 +20,7 @@ jobs: - uses: actions/checkout@v2 - name: switch OpenMP ON + run: | cd ./src/ sed -i 's/with OpenMP\" OFF/with OpenMP\" ON/g' CMakeLists.txt cd ../ From fe841d7e5cfa3f792ab42c9354cfee7ea3b6af89 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Fri, 12 Nov 2021 00:15:24 +0100 Subject: [PATCH 560/909] add info about number of OpenMP threads to the end of the log --- src/fvom.F90 | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 82752d507..9d5f09541 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -434,8 +434,11 @@ subroutine fesom_finalize() write(*,*) write(*,*) '============================================' write(*,*) '=========== BENCHMARK RUNTIME ==============' - write(*,*) ' Number of cores : ',f%npes - write(*,*) ' Runtime for all timesteps : ',f%runtime_alltimesteps,' sec' + write(*,*) ' Number of cores : ',f%npes +#if defined(_OPENMP) + write(*,*) ' Max OpenMP threads : ',OMP_GET_MAX_THREADS() +#endif + write(*,*) ' Runtime for all timesteps : ',f%runtime_alltimesteps,' sec' write(*,*) '============================================' write(*,*) end if From d8e95e4db728ac5b515a2d5bc0eb222997abab4e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 9 Nov 2021 12:18:02 +0100 Subject: [PATCH 561/909] force newer required cmake version to make sure we can use the OpenMP::OpenMP_Fortran setting --- CMakeLists.txt | 2 +- src/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 95b7e7b78..4feed315f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.4) +cmake_minimum_required(VERSION 3.9) # set default build type cache entry (do so before project(...) is called, which would create this cache entry on its own) if(NOT CMAKE_BUILD_TYPE) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d38e57edd..c6d2576d8 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.4) +cmake_minimum_required(VERSION 3.9) project(fesom C Fortran) From 92aa3da4d2278807facb6c8adf901f1fc95696df Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 12 Nov 2021 15:23:34 +0100 Subject: [PATCH 562/909] OMP for KPP. Some implicit loops are still missing. Need to find them! --- src/oce_ale_mixing_kpp.F90 | 458 +++++++++++++++++-------------------- src/oce_mo_conv.F90 | 9 +- 2 files changed, 220 insertions(+), 247 deletions(-) diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index cc2a54890..555bf838f 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -50,7 +50,7 @@ MODULE o_mixing_KPP_mod real(KIND=WP), parameter :: epsln = 1.0e-40_WP ! a small value - real(KIND=WP), parameter :: epsilon_kpp = 0.1_WP + real(KIND=WP), parameter :: epsilon_kpp = 0.1_WP real(KIND=WP), parameter :: vonk = 0.4_WP real(KIND=WP), parameter :: conc1 = 5.0_WP @@ -71,12 +71,14 @@ MODULE o_mixing_KPP_mod integer, parameter :: nnj = 480 ! number of values for ustar in the look up table real(KIND=WP), dimension(0:nni+1,0:nnj+1) :: wmt ! lookup table for wm, the turbulent velocity scale for momentum real(KIND=WP), dimension(0:nni+1,0:nnj+1) :: wst ! lookup table for ws, the turbulent velocity scale scalars - logical :: smooth_blmc=.true. - logical :: smooth_hbl =.false. - logical :: smooth_Ri_hor =.false. - logical :: smooth_Ri_ver =.false. + logical :: smooth_blmc =.true. + logical :: smooth_hbl =.false. + logical :: smooth_Ri_hor =.false. + logical :: smooth_Ri_ver =.false. logical :: limit_hbl_ekmmob =.false. !.true. + integer :: n ! to perform loop iterations + contains !####################################################################### @@ -124,45 +126,47 @@ subroutine oce_mixing_kpp_init(partit, mesh) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - allocate ( ghats ( nl-1, myDim_nod2D+eDim_nod2D )) ! nonlocal transport (s/m^2) - allocate ( hbl ( myDim_nod2D+eDim_nod2D )) ! boundary layer depth - ghats = 0.0_WP - hbl = 0.0_WP - - allocate ( bfsfc ( myDim_nod2D+eDim_nod2D )) ! surface buoyancy forcing (m^2/s^3) + allocate ( ghats ( nl-1, myDim_nod2D+eDim_nod2D )) ! nonlocal transport (s/m^2) + allocate ( hbl ( myDim_nod2D+eDim_nod2D )) ! boundary layer depth + allocate ( bfsfc ( myDim_nod2D+eDim_nod2D )) ! surface buoyancy forcing (m^2/s^3) allocate ( caseA ( myDim_nod2D+eDim_nod2D )) ! = 1 in case A; =0 in case B allocate ( stable ( myDim_nod2D+eDim_nod2D )) ! = 1 in stable forcing; =0 in unstable - allocate ( dkm1 ( myDim_nod2D+eDim_nod2D, 3 )) ! boundary layer diff at kbl-1 level - allocate ( blmc ( nl, myDim_nod2D+eDim_nod2D, 3 )) ! boundary layer mixing coefficients + allocate ( dkm1 ( myDim_nod2D+eDim_nod2D, 3 )) ! boundary layer diff at kbl-1 level + allocate ( blmc ( nl, myDim_nod2D+eDim_nod2D, 3 )) ! boundary layer mixing coefficients allocate ( ustar ( myDim_nod2D+eDim_nod2D )) ! surface friction velocity (m/s) allocate ( Bo ( myDim_nod2D+eDim_nod2D )) ! surface turb buoy. forcing (m^2/s^3) allocate ( dVsq ( nl, myDim_nod2D+eDim_nod2D )) ! (velocity shear re sfc)^2 (m/s)^2 allocate ( dbsfc ( nl, myDim_nod2D+eDim_nod2D )) ! buoyancy re sfc - allocate ( kbl ( myDim_nod2D+eDim_nod2D )) ! index of first grid level below hbl - - bfsfc = 0.0_WP - caseA = 0.0_WP - stable= 0.0_WP - dkm1 = 0.0_WP - blmc = 0.0_WP - ustar = 0.0_WP - Bo = 0.0_WP - dVsq = 0.0_WP - dbsfc = 0.0_WP - kbl = 0.0_WP + allocate ( kbl ( myDim_nod2D+eDim_nod2D )) ! index of first grid level below hbl + +!$OMP PARALLEL DO + DO n=1, myDim_nod2D+eDim_nod2D + ghats (:, n ) = 0.0_WP + hbl ( n ) = 0.0_WP + bfsfc ( n ) = 0.0_WP + caseA ( n ) = 0.0_WP + stable ( n ) = 0.0_WP + dkm1 ( n, :) = 0.0_WP + blmc (:, n, :) = 0.0_WP + ustar ( n ) = 0.0_WP + Bo ( n ) = 0.0_WP + dVsq (:, n ) = 0.0_WP + dbsfc (:, n ) = 0.0_WP + kbl ( n ) = 0.0_WP + END DO +!$OMP END PARALLEL DO ! ******************************************************************* ! Initialize some constants for kmix subroutines, and initialize ! for kmix subroutine "wscale" the 2D-lookup table for wm and ws ! as functions of ustar and zetahat (=vonk*sigma*hbl*bfsfc). ! ******************************************************************* - ! ******************************************************************* ! Define some non-dimensional constants (recall epsilon_kpp=0.1) ! ******************************************************************* -! Vtc used in eqn. 23 - Vtc = concv * sqrt(0.2_WP/concs/epsilon_kpp) / vonk**2 / Ricr - +! Vtc used in eqn. 23 + Vtc = concv * sqrt(0.2_WP/concs/epsilon_kpp) / vonk**2 / Ricr + ! ******************************************************************* ! The nonlocal transport term is nonzero ONLY FOR SCALARS in ! unstable (convective) forcing conditions where it has been @@ -172,7 +176,7 @@ subroutine oce_mixing_kpp_init(partit, mesh) ! cg = cs in eqn. 20 ! ******************************************************************* - cg = cstar * vonk * (concs * vonk * epsilon_kpp)**(1._WP/3._WP) + cg = cstar * vonk * (concs * vonk * epsilon_kpp)**(1._WP/3._WP) ! ******************************************************************* ! Construct the wm and ws lookup tables (eqn. 13 & B1) @@ -243,7 +247,7 @@ end subroutine oce_mixing_kpp_init ! diffK = diffusion coefficient (m^2/s) ! !--------------------------------------------------------------- - subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) + SUBROUTINE oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) IMPLICIT NONE @@ -274,10 +278,16 @@ subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) #include "associate_mesh_ass.h" UVnode=>dynamics%uvnode(:,:,:) - ViscA=0.0_WP - DO node=1, myDim_nod2D !+eDim_nod2D - nzmin = ulevels_nod2D(node) - nzmax = nlevels_nod2D(node) +!$OMP PARALLEL DO + DO node=1, myDim_nod2D+eDim_nod2D + ViscA(:, node) = 0.0_WP + END DO +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, usurf, vsurf, u_loc, v_loc) + DO node=1, myDim_nod2D !+eDim_nod2D + nzmin = ulevels_nod2D(node) + nzmax = nlevels_nod2D(node) ! ******************************************************************* ! Eqn. 21 @@ -295,30 +305,24 @@ subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) ! is calculated in a separate routine ! ******************************************************************* -! Surface layer is our reference dVsq(m2/s2) & dbsfc(m/s2) - !!PS dVsq (1,node) = 0.0_WP - !!PS dbsfc(1,node) = 0.0_WP - dVsq (nzmin,node) = 0.0_WP - dbsfc(nzmin,node) = 0.0_WP - -! Surface velocity - usurf = UVnode(1,nzmin,node) - vsurf = UVnode(2,nzmin,node) - - !!PS DO nz=2, nlevels_nod2d(node)-1 - DO nz=nzmin+1, nzmax-1 - -! Squared velocity shear referenced to surface (@ Z) - u_loc = 0.5_WP * ( UVnode(1,nz-1,node) + UVnode(1,nz,node) ) - v_loc = 0.5_WP * ( UVnode(2,nz-1,node) + UVnode(2,nz,node) ) - - dVsq(nz,node) = ( usurf - u_loc )**2 + ( vsurf - v_loc )**2 - -! dbsfc (buoyancy difference with respect to the surface (m/s2)) is now computed in oce_ale_pressure_bv.F90 + ! Surface layer is our reference dVsq(m2/s2) & dbsfc(m/s2) + dVsq (nzmin,node) = 0.0_WP + dbsfc(nzmin,node) = 0.0_WP + + ! Surface velocity + usurf = UVnode(1,nzmin,node) + vsurf = UVnode(2,nzmin,node) + DO nz=nzmin+1, nzmax-1 + + ! Squared velocity shear referenced to surface (@ Z) + u_loc = 0.5_WP * ( UVnode(1,nz-1,node) + UVnode(1,nz,node) ) + v_loc = 0.5_WP * ( UVnode(2,nz-1,node) + UVnode(2,nz,node) ) + dVsq(nz,node) = ( usurf - u_loc )**2 + ( vsurf - v_loc )**2 + ! dbsfc (buoyancy difference with respect to the surface (m/s2)) is now computed in oce_ale_pressure_bv.F90 + END DO + dVsq ( nzmax, node ) = dVsq ( nzmax-1, node ) END DO - !!PS dVsq ( nlevels_nod2d(node), node ) = dVsq ( nlevels_nod2d(node)-1, node ) - dVsq ( nzmax, node ) = dVsq ( nzmax-1, node ) - END DO +!$OMP END PARALLEL DO ! ******************************************************************* ! compute thermal and haline expansion coefficients (without factor of rho). @@ -336,54 +340,52 @@ subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) ! ustar = sqrt( sqrt( stress_atmoce_x^2 + stress_atmoce_y^2 ) / rho ) (m/s) ! bo = -g * ( Talpha*heat_flux/vcpw + Sbeta * salinity*water_flux ) (m^2/s^3) ! ******************************************************************* +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nzmin) + DO node=1, myDim_nod2D + nzmin = ulevels_nod2D(node) + ustar(node) = sqrt( sqrt( stress_atmoce_x(node)**2 + stress_atmoce_y(node)**2 )*density_0_r ) ! @ the surface (eqn. 2) + ! Surface buoyancy forcing (eqns. A2c & A2d & A3b & A3d) + Bo(node) = -g * ( sw_alpha(nzmin,node) * heat_flux(node) / vcpw & !heat_flux & water_flux: positive up + + sw_beta (nzmin,node) * water_flux(node) * tracers%data(2)%values(nzmin,node)) + END DO +!$OMP END PARALLEL DO - DO node=1, myDim_nod2D !+eDim_nod2D - nzmin = ulevels_nod2D(node) - ustar(node) = sqrt( sqrt( stress_atmoce_x(node)**2 + stress_atmoce_y(node)**2 )*density_0_r ) ! @ the surface (eqn. 2) -!!PS ustar(node) = sqrt( sqrt( stress_node_surf(1,node)**2 + stress_node_surf(2,node)**2 )*density_0_r ) ! @ the surface (eqn. 2) - -! Surface buoyancy forcing (eqns. A2c & A2d & A3b & A3d) - Bo(node) = -g * ( sw_alpha(nzmin,node) * heat_flux(node) / vcpw & !heat_flux & water_flux: positive up - + sw_beta (nzmin,node) * water_flux(node) * tracers%data(2)%values(nzmin,node)) - END DO - ! compute interior mixing coefficients everywhere, due to constant ! internal wave activity, static instability, and local shear ! instability. - CALL ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) + CALL ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) ! add double diffusion - IF (double_diffusion) then - CALL ddmix(diffK, tracers, partit, mesh) - END IF + IF (double_diffusion) then + CALL ddmix(diffK, tracers, partit, mesh) + END IF ! boundary layer mixing coefficients: diagnose new b.l. depth - CALL bldepth(partit, mesh) + CALL bldepth(partit, mesh) ! boundary layer diffusivities - CALL blmix_kpp(viscA, diffK, partit, mesh) + CALL blmix_kpp(viscA, diffK, partit, mesh) ! enhance diffusivity at interface kbl - 1 - CALL enhance(viscA, diffK, partit, mesh) + CALL enhance(viscA, diffK, partit, mesh) - if (smooth_blmc) then - call exchange_nod(blmc(:,:,1), partit) - call exchange_nod(blmc(:,:,2), partit) - call exchange_nod(blmc(:,:,3), partit) - do j=1, 3 + if (smooth_blmc) then + call exchange_nod(blmc(:,:,1), partit) + call exchange_nod(blmc(:,:,2), partit) + call exchange_nod(blmc(:,:,3), partit) + do j=1, 3 !_____________________________________________________________________ ! all loops go over myDim_nod2D so no halo information --> for smoothing ! haloinfo is required --> therefor exchange_nod call smooth_nod(blmc(:,:,j), 3, partit, mesh) - end do - end if - + end do + end if +!$OMP BARRIER ! then combine blmc and viscA/diffK - - DO node=1, myDim_nod2D - nzmin = ulevels_nod2D(node) - nzmax = nlevels_nod2D(node) - !!PS DO nz=2,nlevels_nod2d(node)-1 - DO nz=nzmin+1,nzmax-1 +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax) + DO node=1, myDim_nod2D + nzmin = ulevels_nod2D(node) + nzmax = nlevels_nod2D(node) + DO nz=nzmin+1,nzmax-1 IF (nz < kbl(node)) then ! within the bounday layer viscA(nz,node ) = MAX(viscA(nz,node ), blmc(nz,node,1)) diffK(nz,node,1) = MAX(diffK(nz,node,1), blmc(nz,node,2)) @@ -391,47 +393,36 @@ subroutine oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) ELSE ghats(nz,node)=0.0_WP ! outside the boundary layer set nonlocal terms to zero ENDIF - END DO - END DO - - !_____________________________________________________________________________ - ! do all node loops only over myDim_nod2D --> therefore do an halo exchange - ! only at the end should save some time - call exchange_nod(diffK(:,:,1), partit) - call exchange_nod(diffK(:,:,2), partit) - call exchange_nod(ghats, partit) + END DO + END DO +!$OMP END PARALLEL DO + !_____________________________________________________________________________ + ! do all node loops only over myDim_nod2D --> therefore do an halo exchange + ! only at the end should save some time + call exchange_nod(diffK(:,:,1), partit) + call exchange_nod(diffK(:,:,2), partit) + call exchange_nod(ghats, partit) -! OVER ELEMENTS - call exchange_nod(viscA, partit) !Warning: don't forget to communicate before averaging on elements!!! - minmix=3.0e-3_WP - DO elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - viscAE(nz,elem) = SUM(viscA(nz,elnodes))/3.0_WP ! (elementwise) + ! OVER ELEMENTS + call exchange_nod(viscA, partit) !Warning: don't forget to communicate before averaging on elements!!! +!$OMP BARRIER + minmix=3.0e-3_WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nz, nzmin, nzmax) + DO elem = 1, myDim_elem2D + elnodes = elem2D_nodes(:,elem) + nzmin = ulevels(elem) + nzmax = nlevels(elem) + DO nz=nzmin,nzmax-1 + viscAE(nz, elem) = SUM(viscA(nz, elnodes))/3.0_WP ! (elementwise) + END DO + viscAE( nlevels(elem), elem ) = viscAE( nlevels(elem)-1, elem) + ! Set the mixing coeff. in the first layer above some limiting value + ! this is very helpful to avoid huge surface velocity when vertical + ! viscosity is very small derived from the KPP scheme. + ! I strongly recommend this trick, at least in the current FESOM version. + IF (viscAE(nzmin,elem) < minmix) viscAE(nzmin,elem) = minmix END DO - viscAE( nlevels(elem), elem ) = viscAE( nlevels(elem)-1, elem ) - - ! Set the mixing coeff. in the first layer above some limiting value - ! this is very helpful to avoid huge surface velocity when vertical - ! viscosity is very small derived from the KPP scheme. - ! I strongly recommend this trick, at least in the current FESOM version. - if (viscAE(nzmin,elem) < minmix) viscAE(nzmin,elem) = minmix - - END DO - -!!PS ! Set the mixing coeff. in the first layer above some limiting value -!!PS ! this is very helpful to avoid huge surface velocity when vertical -!!PS ! viscosity is very small derived from the KPP scheme. -!!PS ! I strongly recommend this trick, at least in the current FESOM version. -!!PS minmix=3.0e-3_WP -!!PS WHERE(viscAE(nzmin,:) < minmix) -!!PS viscAE(nzmin,:) = minmix -!!PS END WHERE - -! non-local contribution will be added to oce_tracer_mod directly +!$OMP END PARALLEL DO END SUBROUTINE oce_mixing_kpp @@ -496,31 +487,33 @@ SUBROUTINE bldepth(partit, mesh) #include "associate_part_ass.h" #include "associate_mesh_ass.h" -! Initialize hbl and kbl to bottomed out values +!$OMP PARALLEL DO + ! Initialize hbl and kbl to bottomed out values DO node=1, myDim_nod2D !+eDim_nod2D -! Index of first grid level below hbl + ! Index of first grid level below hbl kbl(node) = nlevels_nod2D(node) -! Boundary layer depth + ! Boundary layer depth hbl(node) = ABS( zbar_3d_n( nlevels_nod2d(node),node ) ) END DO +!$OMP END PARALLEL DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, coeff_sw, Rib_km1, zk, zkm1, sigma, zehat, & +!$OMP wm, ws, bvsq, Vtsq, Ritop, Rib_k, dzup, hekman, hmonob, hlimit) +!$OMP DO DO node=1, myDim_nod2D !+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) IF (use_sw_pene) THEN - !!PS coeff_sw = g * sw_alpha(1,node) ! @ the surface @ Z (m/s2/K) coeff_sw = g * sw_alpha(nzmin,node) ! @ the surface @ Z (m/s2/K) END IF Rib_km1 = 0.0_WP - !!PS nk = nlevels_nod2D(node) bfsfc(node) = Bo(node) - !!PS DO nz=2,nk DO nz=nzmin+1,nzmax - zk = ABS( zbar_3d_n(nz,node) ) + zk = ABS( zbar_3d_n(nz, node) ) zkm1 = ABS( zbar_3d_n(nz-1,node) ) ! bfsfc = Bo + sw contribution @@ -602,14 +595,17 @@ SUBROUTINE bldepth(partit, mesh) hbl(node) = MAX( hbl(node), ABS(zbar_3d_n(2,node)) ) END IF END DO +!$OMP END DO +!$OMP END PARALLEL if (smooth_hbl) then call exchange_nod(hbl, partit) call smooth_nod(hbl, 3, partit, mesh) end if +!$OMP BARRIER - DO node=1, myDim_nod2D !+eDim_nod2D - !!PS nk = nlevels_nod2D(node) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, dzup) + DO node=1, myDim_nod2D nzmax = nlevels_nod2D(node) nzmin = ulevels_nod2D(node) !----------------------------------------------------------------------- @@ -628,7 +624,7 @@ SUBROUTINE bldepth(partit, mesh) ! find stability and buoyancy forcing for final hbl values !----------------------------------------------------------------------- IF (use_sw_pene) THEN - ! Linear interpolation of sw_3d to depth of hbl + ! Linear interpolation of sw_3d to depth of hbl bfsfc(node) = Bo(node) + & coeff_sw * & ( sw_3d(nzmin,node) - & @@ -644,12 +640,10 @@ SUBROUTINE bldepth(partit, mesh) ! (if hbl is below (deeper than) the mid point of level kbl ! then caseA=0 else caseA=1) !----------------------------------------------------------------------- - -! nz=kbl(node) dzup = zbar_3d_n(kbl(node)-1,node) - zbar_3d_n(kbl(node),node) caseA(node) = 0.5_WP + SIGN( 0.5_WP, ABS( zbar_3d_n(kbl(node),node) ) - 0.5_WP * dzup - hbl(node) ) - - END DO + END DO +!$OMP END PARALLEL DO END SUBROUTINE bldepth @@ -757,7 +751,8 @@ subroutine ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) #include "associate_mesh_ass.h" UVnode=>dynamics%uvnode(:,:,:) -! Compute Richardson number and store it as diffK to save memory + ! Compute Richardson number and store it as diffK to save memory +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, dz_inv, shear, mr, ri_prev, tmp) DO node=1, myDim_nod2D! +eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) @@ -765,92 +760,81 @@ subroutine ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) DO nz=nzmin+1,nzmax-1 dz_inv = 1.0_WP / (Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) ! > 0 shear = ( UVnode(1, nz-1, node) - UVnode(1, nz, node) )**2 + & - ( UVnode(2, nz-1, node) - UVnode(2, nz, node) )**2 + ( UVnode(2, nz-1, node) - UVnode(2, nz, node) )**2 shear = shear * dz_inv * dz_inv diffK(nz,node,1) = MAX( bvfreq(nz,node), 0.0_WP ) / (shear + epsln) ! To avoid NaNs at start END DO ! minimum Richardson number is 0 - ! ******************************************************************* ! No need to set Richardson number for the surface and bottom layers ! diffK @ zbar. Model do not use these levels !!!!!!! ! ******************************************************************* - - !!PS diffK(1,node,1)=diffK(2,node,1) - !!PS diffK(nlevels_nod2d(node),node,1)=diffK(nlevels_nod2d(node)-1,node,1) diffK(nzmin,node,1)=diffK(nzmin+1,node,1) diffK(nzmax,node,1)=diffK(nzmax-1,node,1) -! smooth Richardson number in the vertical using a 1-2-1 filter - !!PS IF(smooth_richardson_number .and. nlevels_nod2d(node)>2) then - IF(smooth_Ri_ver .and. nzmax>2) then - DO mr=1,num_smoothings + ! smooth Richardson number in the vertical using a 1-2-1 filter + IF(smooth_Ri_ver .and. nzmax > 2) then + DO mr=1, num_smoothings ri_prev = 0.25_WP * diffK(1, node, 1) - !!PS DO nz=2,nlevels_nod2d(node)-1 - DO nz=nzmin+1,nzmax-1 - tmp = diffK(nz,node,1) - diffK(nz,node,1) = ri_prev + 0.5_WP * diffK(nz,node,1) + 0.25_WP * diffK(nz+1,node,1) - ri_prev = 0.25_WP * tmp + DO nz=nzmin+1, nzmax-1 + tmp = diffK(nz,node,1) + diffK(nz,node,1) = ri_prev + 0.5_WP * diffK(nz,node,1) + 0.25_WP * diffK(nz+1,node,1) + ri_prev = 0.25_WP * tmp END DO END DO END IF END DO - - if (smooth_Ri_hor) then +!$OMP END PARALLEL DO + IF (smooth_Ri_hor) then call smooth_nod(diffK(:,:,1), 3, partit, mesh) - end if - + END IF +!$OMP BARRIER !___________________________________________________________________________ ! compute viscA and diffK - do node=1, myDim_nod2D !+eDim_nod2D - nzmin = ulevels_nod2D(node) - nzmax = nlevels_nod2D(node) - !!PS do nz=2,nlevels_nod2d(node)-1 - do nz=nzmin+1,nzmax-1 - !___________________________________________________________________ - ! evaluate function of Ri# for shear instability eqn. (28b&c) - Rigg = AMAX1( diffK(nz,node,1) , 0.0_WP) - ratio = AMIN1( Rigg/Riinfty , 1.0_WP ) - frit = (1.0_WP - ratio*ratio) - frit = frit*frit*frit - !___________________________________________________________________ - ! viscosity - viscA(nz,node) = visc_sh_limit * frit + A_ver ! A_ver= 1.e-4 Vertical harm. visc. +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, Rigg, ratio, frit) + do node=1, myDim_nod2D + nzmin = ulevels_nod2D(node) + nzmax = nlevels_nod2D(node) + do nz=nzmin+1, nzmax-1 + !___________________________________________________________________ + ! evaluate function of Ri# for shear instability eqn. (28b&c) + Rigg = AMAX1( diffK(nz,node,1) , 0.0_WP) + ratio = AMIN1( Rigg/Riinfty , 1.0_WP ) + frit = (1.0_WP - ratio*ratio) + frit = frit*frit*frit + !___________________________________________________________________ + ! viscosity + viscA(nz,node) = visc_sh_limit * frit + A_ver ! A_ver= 1.e-4 Vertical harm. visc. - !___________________________________________________________________ - ! diffusivity - ! set constant background diffusivity with namelist value K_ver - if(Kv0_const) then - diffK(nz,node,1) = diff_sh_limit * frit + K_ver - - ! set latitudinal and depth dependent background diffusivity after Qiangs - ! FESOM1.4 approach - else - ! --> see in oce_ale_mixing_pp.F90 --> there are different - ! schemes of the vertical background diffusivity possible strongly - ! depending on purpos and tuning especially with arctic focus - call Kv0_background_qiang(Kv0_b,geo_coord_nod2D(2,node)/rad,abs(zbar_3d_n(nz,node))) - diffK(nz,node,1) = diff_sh_limit * frit + Kv0_b - end if - diffK(nz,node,2) = diffK(nz,node,1) - end do ! --> do nz=2,nlevels_nod2d(node)-1 + !___________________________________________________________________ + ! diffusivity + ! set constant background diffusivity with namelist value K_ver + if (Kv0_const) then + diffK(nz,node,1) = diff_sh_limit * frit + K_ver + ! set latitudinal and depth dependent background diffusivity after Qiangs + ! FESOM1.4 approach + else + ! --> see in oce_ale_mixing_pp.F90 --> there are different + ! schemes of the vertical background diffusivity possible strongly + ! depending on purpos and tuning especially with arctic focus + call Kv0_background_qiang(Kv0_b,geo_coord_nod2D(2,node)/rad,abs(zbar_3d_n(nz,node))) + diffK(nz,node,1) = diff_sh_limit * frit + Kv0_b + end if + diffK(nz,node,2) = diffK(nz,node,1) + end do ! --> do nz=2,nlevels_nod2d(node)-1 !_______________________________________________________________________ !!! No need to set surface and bottom diffusivity. diffK @ zbar !!! !!! Model do not use these levels !!!!!!! !!! - !!PS viscA( 1, node ) = viscA(2, node ) - !!PS diffK( 1, node, 1 ) = diffK(2, node, 1) - !!PS diffK( 1, node, 2 ) = diffK(2, node, 2) - viscA( nzmin, node ) = viscA(nzmin+1, node ) - diffK( nzmin, node, 1 ) = diffK(nzmin+1, node, 1) - diffK( nzmin, node, 2 ) = diffK(nzmin+1, node, 2) - !!PS viscA( nlevels_nod2d(node), node ) = viscA( nlevels_nod2d(node)-1, node ) - !!PS diffK( nlevels_nod2d(node), node, 1 ) = diffK( nlevels_nod2d(node)-1, node, 1 ) - !!PS diffK( nlevels_nod2d(node), node, 2 ) = diffK( nlevels_nod2d(node)-1, node, 2 ) + viscA( nzmin, node ) = viscA( nzmin+1, node ) + diffK( nzmin, node, 1 ) = diffK( nzmin+1, node, 1 ) + diffK( nzmin, node, 2 ) = diffK( nzmin+1, node, 2 ) + viscA( nzmax, node ) = viscA( nzmax-1, node ) diffK( nzmax, node, 1 ) = diffK( nzmax-1, node, 1 ) diffK( nzmax, node, 2 ) = diffK( nzmax-1, node, 2 ) end do !-->do node=1, myDim_nod2D+eDim_nod2D +!$OMP END PARALLEL DO end subroutine ri_iwmix !####################################################################### @@ -885,13 +869,11 @@ subroutine ddmix(diffK, tracers, partit, mesh) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - DO node=1, myDim_nod2D!+eDim_nod2D +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, alphaDT, betaDS, Rrho, diffdd, prandtl) + DO node=1, myDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) - !!PS DO nz=2,nlevels_nod2d(node)-1 DO nz=nzmin+1,nzmax-1 - - ! alphaDT and betaDS @Z alphaDT = sw_alpha(nz-1,node) * tracers%data(1)%values(nz-1,node) betaDS = sw_beta (nz-1,node) * tracers%data(2)%values(nz-1,node) @@ -904,9 +886,7 @@ subroutine ddmix(diffK, tracers, partit, mesh) ! ******************************************************************* Rrho = MIN(alphaDT / betaDS, Rrho0) - ! diffdd = dsfmax*(1.0-((Rrho-1)/(Rrho0-1))**2)**pexp2 ! (very old code) - ! diffdd = 1.0-((Rrho-1)/(Rrho0-1))**2 ! (less old code) - diffdd = 1.0_WP -( (Rrho-1.0_WP) / (Rrho0-1.0_WP) ) ! (new code) + diffdd = 1.0_WP -( (Rrho-1.0_WP) / (Rrho0-1.0_WP) ) diffdd = dsfmax * diffdd * diffdd * diffdd diffK(nz,node,1) = diffK(nz,node,1) + 0.7_WP * diffdd ! for temperature @@ -935,17 +915,12 @@ subroutine ddmix(diffK, tracers, partit, mesh) ! No need to set surface and bottom diffusivity. diffK @ zbar ! Model do not use these levels !!!!!!! ! ******************************************************************* - - !!PS diffK( 1, node, 1 ) = diffK( 2, node, 1 ) - !!PS diffK( 1, node, 2 ) = diffK( 2, node, 2 ) diffK( nzmin, node, 1 ) = diffK( nzmin+1, node, 1 ) diffK( nzmin, node, 2 ) = diffK( nzmin+1, node, 2 ) - !!PS diffK( nlevels_nod2d(node), node, 1 ) = diffK( nlevels_nod2d(node)-1, node, 1 ) - !!PS diffK( nlevels_nod2d(node), node, 2 ) = diffK( nlevels_nod2d(node)-1, node, 2 ) diffK( nzmax, node, 1 ) = diffK( nzmax-1, node, 1 ) diffK( nzmax, node, 2 ) = diffK( nzmax-1, node, 2 ) - END DO +!$OMP END PARALLEL DO end subroutine ddmix !####################################################################### @@ -975,14 +950,14 @@ subroutine blmix_kpp(viscA,diffK, partit, mesh) IMPLICIT NONE type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - integer :: node, nz, kn, elem, elnodes(3), knm1, knp1, nl1, nu1 + integer :: node, nz, kn, knm1, knp1, nl1, nu1 real(KIND=WP) :: delhat, R, dvdzup, dvdzdn real(KIND=WP) :: viscp, difsp, diftp, visch, difsh, difth, f1 real(KIND=WP) :: sig, a1, a2, a3, Gm, Gs, Gt real(KIND=WP) :: sigma, zehat, wm, ws real(KIND=WP) :: gat1m, gat1t, gat1s, dat1m, dat1s, dat1t - real(KIND=WP) :: dthick(mesh%nl), diff_col(mesh%nl,3), diff_colE(mesh%nl) + real(KIND=WP) :: dthick(mesh%nl), diff_col(mesh%nl,3) real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D ), intent(inout) :: viscA ! for momentum (nodes) real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D, 2 ), intent(inout) :: diffK ! for T and S @@ -992,30 +967,29 @@ subroutine blmix_kpp(viscA,diffK, partit, mesh) #include "associate_part_ass.h" #include "associate_mesh_ass.h" +!$OMP PARALLEL DO + DO node=1, myDim_nod2D+eDim_nod2D + blmc (:, n, :) = 0.0_WP + END DO +!$OMP END PARALLEL DO blmc = 0.0_WP - -! ******************************************************************* -! Kv over the NODE -! ******************************************************************* - DO node=1, myDim_nod2D !+eDim_nod2D +! ******************************************************************* +! Kv over the NODE +! ******************************************************************* +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, nz, kn, knm1, knp1, nl1, nu1, delhat, R, dvdzup, dvdzdn, viscp, difsp, diftp, visch, difsh, difth, f1, sig, & +!$OMP a1, a2, a3, Gm, Gs, Gt, sigma, zehat, wm, ws, gat1m, gat1t, gat1s, dat1m, dat1s, dat1t, dthick, diff_col) +!$OMP DO + DO node=1, myDim_nod2D nl1=nlevels_nod2d(node) nu1=ulevels_nod2d(node) - if(nl1<3) cycle ! a temporary solution + if(nl1 < 3) cycle ! a temporary solution if(nl1-nu1 < 2) cycle - ! level thickness - !!PS dthick(2:nl1-1)=0.5_WP*(ABS(zbar_3d_n(3:nl1,node))-ABS(zbar_3d_n(1:nl1-2,node))) - !!PS dthick(1)=dthick(2) - !!PS dthick(nu1+1:nl1-1)=0.5_WP*(ABS(zbar_3d_n(nu1+2:nl1,node))-ABS(zbar_3d_n(nu1:nl1-2,node))) - !!PS dthick(nu1)=dthick(nu1+1) - !!PS dthick(nl1)=dthick(nl1-1) dthick(nu1+1:nl1-1)=0.5_WP*(hnode(nu1:nl1-2,node)+hnode(nu1+1:nl1-1,node) ) dthick(nu1)=hnode(nu1,node)*0.5_WP dthick(nl1)=hnode(nl1-1,node)*0.5_WP - !!PS diff_col(1:nl1-1,1)=viscA(1:nl1-1,node) - !!PS diff_col(1:nl1-1,2:3)=diffK(1:nl1-1,node,:) diff_col(nu1:nl1-1,1)=viscA(nu1:nl1-1,node) diff_col(nu1:nl1-1,2:3)=diffK(nu1:nl1-1,node,:) diff_col(nl1,:)=diff_col(nl1-1,:) @@ -1030,8 +1004,7 @@ subroutine blmix_kpp(viscA,diffK, partit, mesh) kn = INT(caseA(node)+epsln) *(kbl(node) -1) + & (1-INT(caseA(node)+epsln)) * kbl(node) - kn = min(kn,nl1-1) - !!PS knm1 = MAX(kn-1,1) + kn = MIN(kn,nl1-1) knm1 = MAX(kn-1,nu1) knp1 = MIN(kn+1,nl1) @@ -1039,8 +1012,6 @@ subroutine blmix_kpp(viscA,diffK, partit, mesh) ! Find the interior viscosities and derivatives at hbl(i) ! eqn. (18) ! ******************************************************************* - -!!PS delhat = ABS(Z(kn))-hbl(node) delhat = ABS(Z_3d_n(kn,node))-hbl(node) R = 1.0_WP - delhat / dthick(kn) @@ -1087,8 +1058,6 @@ subroutine blmix_kpp(viscA,diffK, partit, mesh) ! ******************************************************************* ! Compute turbulent velocity scales on the interfaces ! ******************************************************************* - -!!PS sig = ABS(Z(nz)) / (hbl(node)+epsln) sig = ABS(Z_3d_n(nz,node)) / (hbl(node)+epsln) sigma = stable(node) * sig & + (1.0_WP - stable(node)) * AMIN1(sig, epsilon_kpp) @@ -1147,8 +1116,9 @@ subroutine blmix_kpp(viscA,diffK, partit, mesh) dkm1(node,1) = hbl(node) * wm * sig * (1.0_WP + sig * Gm) dkm1(node,2) = hbl(node) * ws * sig * (1.0_WP + sig * Gt) dkm1(node,3) = hbl(node) * ws * sig * (1.0_WP + sig * Gs) - END DO +!$OMP END DO +!$OMP END PARALLEL end subroutine blmix_kpp !####################################################################### @@ -1170,11 +1140,10 @@ end subroutine blmix_kpp ! subroutine enhance(viscA, diffK, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D), intent(inout) :: viscA !for momentum (nodes) - real(kind=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D,2), intent(inout) :: diffK !for T and S - + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D), intent(inout) :: viscA !for momentum (nodes) + real(kind=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D,2), intent(inout) :: diffK !for T and S integer :: nz, node, k real(kind=WP) :: delta, dkmp5, dstar @@ -1183,7 +1152,8 @@ subroutine enhance(viscA, diffK, partit, mesh) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - DO node=1, myDim_nod2D !+eDim_nod2D +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(nz, node, k, delta, dkmp5, dstar) + DO node=1, myDim_nod2D k = kbl(node) - 1 delta = (hbl(node) + zbar_3d_n(k,node)) / (zbar_3d_n(k,node) - zbar_3d_n(k+1,node)) @@ -1211,6 +1181,6 @@ subroutine enhance(viscA, diffK, partit, mesh) ghats(k,node) = (1.0_WP-caseA(node)) * ghats(k,node) ! plot ghats END DO +!$OMP END PARALLEL DO end subroutine enhance - END MODULE o_mixing_KPP_mod diff --git a/src/oce_mo_conv.F90 b/src/oce_mo_conv.F90 index f8866f633..221301801 100644 --- a/src/oce_mo_conv.F90 +++ b/src/oce_mo_conv.F90 @@ -27,6 +27,7 @@ subroutine mo_convect(partit, mesh) ! Computes the mixing length derived from the Monin if (use_momix) then mo = 0._WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax) do node=1, myDim_nod2D+eDim_nod2D nzmax = nlevels_nod2d(node) nzmin = ulevels_nod2d(node) @@ -59,11 +60,12 @@ subroutine mo_convect(partit, mesh) end if end do end do +!$OMP END PARALLEL DO end if - ! !___________________________________________________________________________ ! apply mixing enhancements to vertical diffusivity +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax) do node=1, myDim_nod2D+eDim_nod2D nzmax = nlevels_nod2d(node) nzmin = ulevels_nod2d(node) @@ -79,11 +81,12 @@ subroutine mo_convect(partit, mesh) end do end do - +!$OMP END PARALLEL DO ! !___________________________________________________________________________ ! apply mixing enhancements to vertical viscosity ! elem2D_nodes has no dimension until +eDim_elem2D +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nz, nzmin, nzmax) do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) nzmax = nlevels(elem) @@ -104,7 +107,7 @@ subroutine mo_convect(partit, mesh) if (use_windmix .and. nz<=windmix_nl+1) Av(nz,elem)=max(Av(nz,elem), windmix_kv) end do end do - !!PS call exchange_elem(Av) +!$OMP END PARALLEL DO end subroutine mo_convect ! ! From 235b3a8df72fbdc813c63970f13962bf3060a201 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 12 Nov 2021 19:03:39 +0100 Subject: [PATCH 563/909] slow down in OMP was due to mooth_blmc -> smooth_nod. Fixed! --- src/gen_support.F90 | 85 +++++++++++++++++++++++++++++---------------- 1 file changed, 55 insertions(+), 30 deletions(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index 9c0f11681..4322df76c 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -60,6 +60,7 @@ subroutine smooth_nod2D(arr, N, partit, mesh) allocate(work_array(myDim_nod2D)) DO q=1, N !apply mass matrix N times to smooth the field +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, elem, j, q, elnodes, vol) DO node=1, myDim_nod2D vol=0._WP work_array(node)=0._WP @@ -68,13 +69,20 @@ subroutine smooth_nod2D(arr, N, partit, mesh) elnodes=elem2D_nodes(:,elem) work_array(node)=work_array(node)+sum(arr(elnodes))/3._WP*elem_area(elem) vol=vol+elem_area(elem) - END DO - work_array(node)=work_array(node)/vol + END DO + work_array(node)=work_array(node)/vol END DO +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO DO node=1,myDim_nod2D arr(node)=work_array(node) ENDDO +!$OMP END PARALLEL DO +!$OMP MASTER call exchange_nod(arr, partit) +!$OMP END MASTER +!$OMP BARRIER END DO deallocate(work_array) end subroutine smooth_nod2D @@ -104,20 +112,17 @@ subroutine smooth_nod3D(arr, N_smooth, partit, mesh) ! Precompute area of patches on all levels (at the bottom, some neighbouring ! nodes may vanish in the bathymetry) in the first smoothing step +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, el, nz, j, q, num_el, nlev, nl_loc, nu_loc, uln, nln, ule, nle) +!$OMP DO DO n=1, myDim_nod2D uln = ulevels_nod2d(n) nln = min(nlev,nlevels_nod2d(n)) vol( 1:nln,n) = 0._WP work_array(1:nln,n) = 0._WP - !!PS vol( 1:min(nlev, nlevels_nod2d(n)),n) = 0._WP - !!PS work_array(1:min(nlev, nlevels_nod2d(n)),n) = 0._WP DO j=1, nod_in_elem2D_num(n) el = nod_in_elem2D(j,n) -!!PS nl_loc = min(nlev, minval(nlevels_nod2d(elem2D_nodes(1:3,el)))) -!!PS nu_loc = maxval(ulevels_nod2D(elem2D_nodes(1:3,el))) ule = max( uln, ulevels(el) ) nle = min( nln, min(nlev,nlevels(el)) ) - !!PS DO nz=1, nl_loc DO nz=ule, nle vol(nz,n) = vol(nz,n) + elem_area(el) work_array(nz,n) = work_array(nz,n) + elem_area(el) * (arr(nz, elem2D_nodes(1,el)) & @@ -125,46 +130,34 @@ subroutine smooth_nod3D(arr, N_smooth, partit, mesh) + arr(nz, elem2D_nodes(3,el))) END DO ENDDO - !!PS DO nz=1,nlevels_nod2d(n) DO nz=uln,nln vol(nz,n) = 1._WP / (3._WP * vol(nz,n)) ! Here, we need the inverse and scale by 1/3 END DO END DO - +!$OMP END DO ! combined: scale by patch volume + copy back to original field +!$OMP DO DO n=1, myDim_nod2D - !!PS DO nz=1, min(nlev, nlevels_nod2d(n)) uln = ulevels_nod2d(n) nln = min(nlev,nlevels_nod2d(n)) DO nz=uln,nln arr(nz, n) = work_array(nz, n) *vol(nz,n) -!!PS if (arr(nz,n)/=arr(nz,n)) then -!!PS write(*,*) ' --> found NaN in smoothing' -!!PS write(*,*) ' mype = ', mype -!!PS write(*,*) ' n = ', n -!!PS write(*,*) ' nz,uln,nln = ', nz,uln,nln -!!PS write(*,*) ' arr(nz,n) = ', arr(nz,n) -!!PS write(*,*) ' work_array(nz,n)= ', work_array(nz,n) -!!PS write(*,*) ' vol(nz,n) = ', vol(nz,n) -!!PS endif END DO - end DO - + END DO +!$OMP END DO +!$OMP MASTER call exchange_nod(arr, partit) - +!$OMP END MASTER +!$OMP BARRIER ! And the remaining smoothing sweeps - DO q=1,N_smooth-1 +!$OMP DO DO n=1, myDim_nod2D uln = ulevels_nod2d(n) nln = min(nlev,nlevels_nod2d(n)) - !!PS work_array(1:min(nlev, nlevels_nod2d(n)),n) = 0._WP work_array(1:nln,n) = 0._WP DO j=1,nod_in_elem2D_num(n) el = nod_in_elem2D(j,n) - !!PS nl_loc = min(nlev, minval(nlevels_nod2d(elem2D_nodes(1:3,el)))) - !!PS nu_loc = maxval(ulevels_nod2D(elem2D_nodes(1:3,el))) - !!PS DO nz=1, ule = max( uln, ulevels(el) ) nle = min( nln, min(nlev,nlevels(el)) ) DO nz=ule,nle @@ -174,7 +167,9 @@ subroutine smooth_nod3D(arr, N_smooth, partit, mesh) END DO ENDDO ENDDO +!$OMP END DO ! combined: scale by patch volume + copy back to original field +!$OMP DO DO n=1, myDim_nod2D !!PS DO nz=1, min(nlev, nlevels_nod2d(n)) uln = ulevels_nod2d(n) @@ -182,10 +177,14 @@ subroutine smooth_nod3D(arr, N_smooth, partit, mesh) DO nz=uln,nln arr(nz, n) = work_array(nz, n) *vol(nz,n) END DO - end DO + END DO +!$OMP END DO +!$OMP MASTER call exchange_nod(arr, partit) +!$OMP END MASTER +!$OMP BARRIER enddo - +!$OMP END PARALLEL deallocate(work_array) end subroutine smooth_nod3D @@ -206,6 +205,8 @@ subroutine smooth_elem2D(arr, N, partit, mesh) #include "associate_mesh_ass.h" allocate(work_array(myDim_nod2D+eDim_nod2D)) DO q=1, N !apply mass matrix N times to smooth the field +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, elem, j, q, elnodes, vol) +!$OMP DO DO node=1, myDim_nod2D vol=0._WP work_array(node)=0._WP @@ -217,12 +218,22 @@ subroutine smooth_elem2D(arr, N, partit, mesh) END DO work_array(node)=work_array(node)/vol END DO +!$OMP END DO +!$OMP MASTER call exchange_nod(work_array, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:, elem) arr(elem)=sum(work_array(elnodes))/3.0_WP ! Here, we need the inverse and scale by 1/3 ENDDO +!$OMP END DO +!$OMP MASTER call exchange_elem(arr, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP END PARALLEL END DO deallocate(work_array) end subroutine smooth_elem2D @@ -247,7 +258,13 @@ subroutine smooth_elem3D(arr, N, partit, mesh) my_nl=ubound(arr,1) DO q=1, N !apply mass matrix N times to smooth the field DO nz=1, my_nl - work_array = 0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, elem, j, q, elnodes, vol) +!$OMP DO + DO node=1, myDim_nod2D+eDim_nod2D + work_array(node) = 0.0_WP + END DO +!$OMP END DO +!$OMP DO DO node=1, myDim_nod2D vol=0._WP if (nz > nlevels_nod2d(node)) CYCLE @@ -263,15 +280,23 @@ subroutine smooth_elem3D(arr, N, partit, mesh) END DO work_array(node)=work_array(node)/vol END DO +!$OMP END DO +!$OMP MASTER call exchange_nod(work_array, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO DO elem=1, myDim_elem2D if (nz>nlevels(elem) ) CYCLE if (nz Date: Fri, 12 Nov 2021 18:39:30 +0000 Subject: [PATCH 564/909] Some bug fixes relating to the introduction of dynamics derived type. For example, the oce_fluxes under cavities are U-dependent, UV is now in dynamics, and the interface definition was not consistent with the subroutine definitions. --- src/cavity_param.F90 | 4 +++- src/fvom.F90 | 2 +- src/ice_oce_coupling.F90 | 14 +++++++++----- src/ifs_interface/ifs_interface.F90 | 6 +++--- src/oce_setup_step.F90 | 2 +- 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 35ed3bdf5..cb2ec329c 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -1,13 +1,15 @@ module cavity_heat_water_fluxes_3eq_interface interface - subroutine cavity_heat_water_fluxes_3eq(tracers, partit, mesh) + subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + use MOD_DYN use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(in), target :: tracers + type(t_dyn), intent(in), target :: dynamics end subroutine end interface end module diff --git a/src/fvom.F90 b/src/fvom.F90 index 82752d507..c18561297 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -342,7 +342,7 @@ subroutine fesom_runloop(current_nsteps) !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' call oce_fluxes_mom(f%dynamics, f%partit, f%mesh) ! momentum only - call oce_fluxes(f%tracers, f%partit, f%mesh) + call oce_fluxes(f%dynamics, f%tracers, f%partit, f%mesh) end if call before_oce_step(f%dynamics, f%tracers, f%partit, f%mesh) ! prepare the things if required f%t2 = MPI_Wtime() diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 9bbff8834..ce4fd3eb9 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -7,7 +7,7 @@ subroutine ocean2ice(dynamics, tracers, partit, mesh) use mod_tracer use MOD_DYN type(t_dyn) , intent(in) , target :: dynamics - type(t_tracer), intent(in) , target :: tracers + type(t_tracer), intent(inout), target :: tracers type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh @@ -17,14 +17,16 @@ subroutine ocean2ice(dynamics, tracers, partit, mesh) module oce_fluxes_interface interface - subroutine oce_fluxes(tracers, partit, mesh) + subroutine oce_fluxes(dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT + use MOD_DYN USE MOD_PARSUP use mod_tracer type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - type(t_tracer), intent(in) , target :: tracers + type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(in) , target :: dynamics end subroutine end interface end module @@ -201,9 +203,10 @@ end subroutine ocean2ice ! ! !_______________________________________________________________________________ -subroutine oce_fluxes(tracers, partit, mesh) +subroutine oce_fluxes(dynamics, tracers, partit, mesh) use MOD_MESH + use MOD_DYN use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP @@ -225,6 +228,7 @@ subroutine oce_fluxes(tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(inout), target :: tracers + type(t_dyn), intent(in), target :: dynamics integer :: n, elem, elnodes(3),n1 real(kind=WP) :: rsss, net real(kind=WP), allocatable :: flux(:) @@ -276,7 +280,7 @@ subroutine oce_fluxes(tracers, partit, mesh) water_flux = -fresh_wa_flux #endif heat_flux_in=heat_flux ! sw_pene will change the heat_flux - if (use_cavity) call cavity_heat_water_fluxes_3eq(tracers, partit, mesh) + if (use_cavity) call cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) !!PS if (use_cavity) call cavity_heat_water_fluxes_2eq(mesh) !!PS where(ulevels_nod2D>1) heat_flux=0.0_WP diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 8ad96b3df..e1f40a06f 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -348,7 +348,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & USE par_kind ! in ifs_modules.F90 USE fesom_main_storage_module, only: fesom => f - USE o_ARRAYS, ONLY : UV ! tr_arr is now tracers + !USE o_ARRAYS, ONLY : UV ! tr_arr is now tracers, UV in dynamics derived type USE i_arrays, ONLY : m_ice, a_ice, m_snow USE i_therm_param, ONLY : tmelt USE g_rotate_grid, only: vector_r2g @@ -463,8 +463,8 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & ! Surface currents need to be rotated to geographical grid ! Pack u(v) surface currents - zsendU(:)=UV(1,1,1:myDim_elem2D) - zsendV(:)=UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here + zsendU(:)=fesom%dynamics%UV(1,1,1:myDim_elem2D) + zsendV(:)=fesom%dynamics%UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here do elem=1, myDim_elem2D diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index c54745e87..36f18b0eb 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -51,7 +51,7 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh) , intent(inout) , target :: mesh end subroutine end interface end module From 9200f16a699d53228b05d0a186789f4a4e49fc2b Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 13 Nov 2021 11:08:40 +0000 Subject: [PATCH 565/909] add automatic test for successful compilation of FESOM as a library (with ifs_interface) --- .gitignore | 2 ++ test/ifs_interface/configure_lib.sh | 28 ++++++++++++++++++++++++++++ test/run_tests.sh | 17 ++++++++++++++++- 3 files changed, 46 insertions(+), 1 deletion(-) create mode 100755 test/ifs_interface/configure_lib.sh diff --git a/.gitignore b/.gitignore index f68261b2e..7ad900ed3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ /build +/build.lib *.o *.mod *.x @@ -6,5 +7,6 @@ *~ *.swp src/icepack_drivers/Icepack +lib/libfesom.a /work_* Makefile.in diff --git a/test/ifs_interface/configure_lib.sh b/test/ifs_interface/configure_lib.sh new file mode 100755 index 000000000..cf1609bcb --- /dev/null +++ b/test/ifs_interface/configure_lib.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# custom build script in use at ECMWF + +set -e + +LIB=no +while getopts "l" OPT +do + case "$OPT" in + l) LIB=yes;; + esac +done +shift $((OPTIND-1)) + +#cd ../../ +source env.sh ubuntu # source this from your run script too + +if [[ ${LIB} = yes ]]; then + mkdir build.lib || true # build dir for library + cd build.lib + cmake -DBUILD_FESOM_AS_LIBRARY=ON .. # not required when re-compiling +else + mkdir build || true # build dir for binary + cd build + cmake .. # not required when re-compiling +fi +make install -j`nproc --all` diff --git a/test/run_tests.sh b/test/run_tests.sh index 78f72fbaa..8ab8ee7b7 100755 --- a/test/run_tests.sh +++ b/test/run_tests.sh @@ -3,7 +3,8 @@ set -e cd ../ machine="docker" -tests="test_pi test_souf test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7 test_pi_zstar" +tests="test_pi" +#tests="test_pi test_souf test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7 test_pi_zstar" ./configure.sh ubuntu @@ -20,3 +21,17 @@ echo $test done +othertest="test_lib_compiles" + +for test in $othertest; do + + echo $othertest + ./test/ifs_interface/configure_lib.sh -l + + FILE=./lib/libfesom.a + if [ -f "$FILE" ]; then + echo "$FILE compiled and linked." + else + echo "$FILE does not exist." + fi +done From a065d46374426d89eb426ada6ca539f0cdca3bc5 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 13 Nov 2021 12:27:34 +0100 Subject: [PATCH 566/909] accidentially commented the other tests --- test/run_tests.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/run_tests.sh b/test/run_tests.sh index 8ab8ee7b7..347bb5002 100755 --- a/test/run_tests.sh +++ b/test/run_tests.sh @@ -3,8 +3,7 @@ set -e cd ../ machine="docker" -tests="test_pi" -#tests="test_pi test_souf test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7 test_pi_zstar" +tests="test_pi test_souf test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7 test_pi_zstar" ./configure.sh ubuntu From 5d83f770b9fe5e287a7db055a103319b9483af6c Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 13 Nov 2021 22:41:10 +0100 Subject: [PATCH 567/909] add test case in fesom2.1.yml for compilation as library (#231) * add test case in fesom2.1.yml for compilation as library --- .github/workflows/fesom2.1.yml | 11 +++++++++-- test/ifs_interface/check_exist.sh | 12 ++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) create mode 100755 test/ifs_interface/check_exist.sh diff --git a/.github/workflows/fesom2.1.yml b/.github/workflows/fesom2.1.yml index abcabbe97..b95d274f5 100644 --- a/.github/workflows/fesom2.1.yml +++ b/.github/workflows/fesom2.1.yml @@ -19,10 +19,17 @@ jobs: # NK: this changes working directory to fesom2 - uses: actions/checkout@v2 - - name: Compile model + - name: Compile model (binary) run: | bash -l configure.sh ubuntu - + + - name: Compile model (library) + run: | + bash ./test/ifs_interface/configure_lib.sh -l + - name: Library exists + run: | + bash ./test/ifs_interface/check_exist.sh + - name: Create global test run run: | mkrun pi test_pi -m docker diff --git a/test/ifs_interface/check_exist.sh b/test/ifs_interface/check_exist.sh new file mode 100755 index 000000000..e89a674e1 --- /dev/null +++ b/test/ifs_interface/check_exist.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e + +FILE=./lib/libfesom.a +if [ -f "$FILE" ]; then + echo "$FILE compiled and linked." + exit 0 +else + echo "$FILE does not exist." + exit 1 +fi From 3aed3cb68506fcfc88b212ea0105f5369bdbfe7a Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Mon, 15 Nov 2021 11:52:42 +0100 Subject: [PATCH 568/909] Update fesom2_openmp.yml Don't check results. --- .github/workflows/fesom2_openmp.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/fesom2_openmp.yml b/.github/workflows/fesom2_openmp.yml index 4184b3310..fd1e8cafa 100644 --- a/.github/workflows/fesom2_openmp.yml +++ b/.github/workflows/fesom2_openmp.yml @@ -42,10 +42,7 @@ jobs: chmod +x job_docker_new ./job_docker_new - - name: Check global results with large tollerance - run: | - cd work_pi - fcheck -a 1e-9 . + From 33c78e9ff24be826c407efdc092c24d9bc21ebd7 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 15 Nov 2021 11:21:19 +0000 Subject: [PATCH 569/909] add custom configure script that is used in automatic test on fesom github in test/ifs_interface --- configure_any.sh | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100755 configure_any.sh diff --git a/configure_any.sh b/configure_any.sh new file mode 100755 index 000000000..7c63dae66 --- /dev/null +++ b/configure_any.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# custom build script in use at ECMWF + +set -e + +LIB=no +while getopts "l" OPT +do + case "$OPT" in + l) LIB=yes;; + esac +done +shift $((OPTIND-1)) + +source env.sh # source this from your run script too + +if [[ ${LIB} = yes ]]; then + mkdir build.lib || true # build dir for library + cd build.lib + cmake -DBUILD_FESOM_AS_LIBRARY=ON .. # not required when re-compiling + sed -i -e 's/-lFALSE//g' src/CMakeFiles/fesom.dir/link.txt # workaround for the moment on cray +else + mkdir build || true # build dir for binary + cd build + cmake .. # not required when re-compiling +fi +make install -j`nproc --all` From f61af680f33fb27a1ec2fbfd038d8b5dc707a034 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 15 Nov 2021 12:41:45 +0100 Subject: [PATCH 570/909] $OMP BARRIER after exchange --- src/oce_adv_tra_driver.F90 | 1 + src/oce_adv_tra_fct.F90 | 6 +----- src/oce_ale_pressure_bv.F90 | 3 +++ src/oce_ale_tracer.F90 | 1 + 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index be59aea1f..87336224a 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -178,6 +178,7 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, call adv_tra_ver_upw1(w, ttf, partit, mesh, adv_flux_ver, o_init_zero=.true.) end if call exchange_nod(fct_LO, partit) +!$OMP BARRIER end if do_zero_flux=.true. if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') do_zero_flux=.false. diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 3b176d1ab..6ee03a739 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -263,7 +263,7 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, !$OMP MASTER call exchange_nod(fct_plus, fct_minus, partit) !$OMP END MASTER -!!$OMP BARRIER +!$OMP BARRIER !___________________________________________________________________________ ! b3. Limiting !Vertical @@ -299,10 +299,6 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, ! the bottom flux is always zero end do !$OMP END DO -!!$OMP MASTER -! call exchange_nod_end(partit) ! fct_plus, fct_minus -!!$OMP END MASTER -!!$OMP BARRIER !Horizontal !$OMP DO do edge=1, myDim_edge2D diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 39d25a18c..74a0d82fb 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -3068,6 +3068,7 @@ subroutine sw_alpha_beta(TF1,SF1, partit, mesh) !$OMP END PARALLEL call exchange_nod(sw_alpha, partit) call exchange_nod(sw_beta, partit) +!$OMP BARRIER end subroutine sw_alpha_beta ! ! @@ -3155,6 +3156,7 @@ subroutine compute_sigma_xy(TF1,SF1, partit, mesh) !$OMP END DO !$OMP END PARALLEL call exchange_nod(sigma_xy, partit) +!$OMP BARRIER end subroutine compute_sigma_xy ! ! @@ -3206,6 +3208,7 @@ subroutine compute_neutral_slope(partit, mesh) !$OMP END PARALLEL call exchange_nod(neutral_slope, partit) call exchange_nod(slope_tapered, partit) +!$OMP BARRIER end subroutine compute_neutral_slope ! ! diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 5bca9cd74..83c57d42e 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -228,6 +228,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) call relax_to_clim(tr_num, tracers, partit, mesh) end if call exchange_nod(tracers%data(tr_num)%values(:,:), partit) +!$OMP BARRIER end do !___________________________________________________________________________ From 080277f9ab95ae3b7e7acd4274dd2c3b70abfff3 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 15 Nov 2021 13:26:19 +0100 Subject: [PATCH 571/909] more OMP --- src/oce_ale.F90 | 7 ++++- src/oce_ale_vel_rhs.F90 | 67 ++++++++++++++++++++++++++--------------- 2 files changed, 49 insertions(+), 25 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 24767c543..c8ec718b7 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2844,7 +2844,12 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_KPP'//achar(27)//'[0m' call oce_mixing_KPP(Av, Kv_double, dynamics, tracers, partit, mesh) - Kv=Kv_double(:,:,1) +!$OMP PARALLEL DO + DO node=1, myDim_nod2D+eDim_nod2D + Kv(:, node)=Kv_double(:, node, 1) + END DO +!$OMP END PARALLEL DO + call mo_convect(partit, mesh) ! use FESOM2.0 tuned pacanowski & philander parameterization for vertical diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 5bed6b618..87679efc5 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -56,7 +56,6 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) real(kind=WP) :: ff, mm real(kind=WP) :: Fx, Fy, pre(3) logical, save :: lfirst=.true. - real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice !___________________________________________________________________________ @@ -73,10 +72,10 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) eta_n =>dynamics%eta_n(:) !___________________________________________________________________________ - t1=MPI_Wtime() use_pice=0 if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 - + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, nz, nzmin, nzmax, elnodes, ff, mm, Fx, Fy, pre, p_ice, p_air, p_eta) do elem=1, myDim_elem2D nzmax = nlevels(elem) nzmin = ulevels(elem) @@ -132,7 +131,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) Fx = sum(gradient_sca(1:3,elem)*pre) Fy = sum(gradient_sca(4:6,elem)*pre) - !!PS do nz=1,nlevels(elem)-1 + do nz=nzmin,nzmax-1 ! add pressure gradient terms UV_rhs(1,nz,elem) = UV_rhs(1,nz,elem) + (Fx-pgf_x(nz,elem))*elem_area(elem) @@ -143,8 +142,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) UV_rhsAB(2,nz,elem) =-UV(1,nz,elem)*ff! - mm*UV(1,nz,elem)*UV(2,nz,elem) end do end do - - t2=MPI_Wtime() +!$OMP END PARALLEL DO !___________________________________________________________________________ ! advection if (dynamics%momadv_opt==1) then @@ -153,8 +151,6 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) elseif (dynamics%momadv_opt==2) then call momentum_adv_scalar(dynamics, partit, mesh) end if - t3=MPI_Wtime() - !___________________________________________________________________________ ! Update the rhs ff=(1.5_WP+epsilon) @@ -162,26 +158,19 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) ff=1.0_WP lfirst=.false. end if - +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, nz, nzmin, nzmax) do elem=1, myDim_elem2D nzmax = nlevels(elem) nzmin = ulevels(elem) - !!PS do nz=1,nlevels(elem)-1 do nz=nzmin,nzmax-1 UV_rhs(1,nz,elem)=dt*(UV_rhs(1,nz,elem)+UV_rhsAB(1,nz,elem)*ff)/elem_area(elem) UV_rhs(2,nz,elem)=dt*(UV_rhs(2,nz,elem)+UV_rhsAB(2,nz,elem)*ff)/elem_area(elem) end do end do +!$OMP END PARALLEL DO ! ======================= ! U_rhs contains all contributions to velocity from old time steps ! ======================= - t4=MPI_Wtime() - ! if (mod(mstep,logfile_outfreq)==0 .and. mype==0) then - ! write(*,*) 'Momentum: ', t4-t1 - ! write(*,*) 'pres., Cor: ', t2-t1 - ! write(*,*) 'h adv ', t3-t2 - ! write(*,*) 'vert. part ', t4-t3 - ! end if END SUBROUTINE compute_vel_rhs ! @@ -219,6 +208,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) !___________________________________________________________________________ ! 1st. compute vertical momentum advection component: w * du/dz, w*dv/dz +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, el1, el2, nl1, nl2, ul1, ul2, nod, el, ed, k, nle, ule, un1, un2, wu, wv) +!$OMP DO do n=1,myDim_nod2d nl1 = nlevels_nod2D(n)-1 ul1 = ulevels_nod2D(n) @@ -259,7 +250,6 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) !_______________________________________________________________________ ! compute w*du/dz, w*dv/dz do nz=ul1,nl1 -!!PS if (ul1>1) write(*,*) mype, wu(ul1:nl1) ! Here 1/3 because 1/3 of the area is related to the node --> comes from ! averaging the elemental velocities UVnode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) @@ -272,11 +262,12 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) UVnode_rhs(1:2,nl1+1:nl-1,n) = 0._WP UVnode_rhs(1:2,1:ul1-1 ,n) = 0._WP end do - +!$OMP END DO !___________________________________________________________________________ ! 2nd. compute horizontal advection component: u*du/dx, u*dv/dx & v*du/dy, v*dv/dy ! loop over triangle edges +!$OMP DO do ed=1, myDim_edge2D nod = edges(:,ed) el1 = edge_tri(1,ed) @@ -328,8 +319,14 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(1) <= myDim_nod2d) then do nz=min(ul1,ul2), max(nl1,nl2) ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(nod(1))) +#endif UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(nod(1))) +#endif end do endif @@ -337,8 +334,14 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(2) <= myDim_nod2d) then do nz=min(ul1,ul2), max(nl1,nl2) ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(nod(2))) +#endif UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(nod(2))) +#endif end do endif @@ -347,39 +350,53 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) if (nod(1) <= myDim_nod2d) then do nz=ul1, nl1 ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(nod(1))) +#endif UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(nod(1))) +#endif end do ! --> do nz=ul1, nl1 endif ! second edge node if (nod(2) <= myDim_nod2d) then - !!PS do nz=1, nl1 do nz=ul1, nl1 ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(nod(2))) +#endif UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(nod(2))) +#endif end do ! --> do nz=ul1, nl1 endif endif ! --> if (el2>0) then end do ! --> do ed=1, myDim_edge2D +!$OMP END DO !___________________________________________________________________________ ! divide total nodal advection by scalar area +!$OMP DO do n=1,myDim_nod2d nl1 = nlevels_nod2D(n)-1 ul1 = ulevels_nod2D(n) -!!PS UVnode_rhs(1,ul1:nl1,n) = UVnode_rhs(1,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity -!!PS UVnode_rhs(2,ul1:nl1,n) = UVnode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) ! --> TEST_cavity UVnode_rhs(1,ul1:nl1,n) = UVnode_rhs(1,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) UVnode_rhs(2,ul1:nl1,n) = UVnode_rhs(2,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) end do !-->do n=1,myDim_nod2d - +!$OMP END DO !___________________________________________________________________________ +!$OMP MASTER call exchange_nod(UVnode_rhs, partit) - +!$OMP END MASTER +!$OMP BARRIER !___________________________________________________________________________ ! convert total nodal advection from vertice --> elements +!$OMP DO do el=1, myDim_elem2D nl1 = nlevels(el)-1 ul1 = ulevels(el) @@ -389,6 +406,8 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) + UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP end do ! --> do el=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine momentum_adv_scalar From 6c434bc81261e00a2667760ec0ed1317aa9c003f Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 15 Nov 2021 13:47:30 +0100 Subject: [PATCH 572/909] OMP for backstatter --- src/oce_dyn.F90 | 45 +++++++++++++++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 5ff34e70e..5985e814e 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -71,7 +71,7 @@ SUBROUTINE update_vel(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - integer :: elem, elnodes(3), nz, m, nzmax, nzmin + integer :: n, elem, elnodes(3), nz, nzmin, nzmax real(kind=WP) :: eta(3) real(kind=WP) :: Fx, Fy !___________________________________________________________________________ @@ -88,6 +88,7 @@ SUBROUTINE update_vel(dynamics, partit, mesh) d_eta => dynamics%d_eta(:) !___________________________________________________________________________ +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nz, nzmin, nzmax, eta, Fx, Fy) DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) eta=-g*theta*dt*d_eta(elnodes) @@ -100,8 +101,15 @@ SUBROUTINE update_vel(dynamics, partit, mesh) UV(2,nz,elem)= UV(2,nz,elem) + UV_rhs(2,nz,elem) + Fy END DO END DO - eta_n=eta_n+d_eta +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO + DO n=1, myDim_nod2D+eDim_nod2D + eta_n(n)=eta_n(n)+d_eta(n) + END DO +!$OMP END PARALLEL DO call exchange_elem(UV, partit) +!$OMP BARRIER end subroutine update_vel ! ! @@ -129,8 +137,8 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) #include "associate_mesh_ass.h" UV=>dynamics%uv(:,:,:) UVnode=>dynamics%uvnode(:,:,:) - !___________________________________________________________________________ +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, k, elem, nln, uln, nle, ule, tx, ty, tvol) DO n=1, myDim_nod2D uln = ulevels_nod2D(n) nln = nlevels_nod2D(n) @@ -152,7 +160,9 @@ subroutine compute_vel_nodes(dynamics, partit, mesh) UVnode(2,nz,n)=ty/tvol END DO END DO +!$OMP END PARALLEL DO call exchange_nod(UVnode, partit) +!$OMP BARRIER end subroutine compute_vel_nodes ! ! @@ -239,11 +249,18 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. ! Here the contribution from squared velocities is added to the viscosity. - ! The contribution from boundary edges is neglected (free slip). - U_b = 0.0_WP - V_b = 0.0_WP - U_c = 0.0_WP - V_c = 0.0_WP + ! The contribution from boundary edges is neglected (free slip). +!$OMP PARALLEL DO + DO elem=1, myDim_elem2D+eDim_elem2D + U_b(:, elem) = 0.0_WP + V_b(:, elem) = 0.0_WP + U_c(:, elem) = 0.0_WP + V_c(:, elem) = 0.0_WP + END DO +!$OMP END PARALLEL DO + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(u1, v1, len, vi, nz, ed, el, nelem, k, elem, nzmin, nzmax) +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) @@ -267,11 +284,16 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) V_b(nz,el(2))=V_b(nz,el(2))+v1/elem_area(el(2)) END DO END DO +!$OMP END DO +!$OMP MASTER call exchange_elem(U_b, partit) call exchange_elem(V_b, partit) +!$OMP END MASTER +!$OMP BARRIER ! =========== ! Compute smoothed viscous term: ! =========== +!$OMP DO DO ed=1, myDim_nod2D nzmin = ulevels_nod2D(ed) nzmax = nlevels_nod2D(ed) @@ -289,8 +311,13 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) V_c(nz,ed)=v1/vi END DO END DO +!$OMP END DO +!$OMP MASTER call exchange_nod(U_c, partit) call exchange_nod(V_c, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO do ed=1, myDim_elem2D nelem=elem2D_nodes(:,ed) nzmin = ulevels(ed) @@ -300,6 +327,8 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -dynamics%visc_easybsreturn*sum(V_c(nz,nelem))/3.0_WP END DO end do +!$OMP END DO +!$OMP END PARALLEL end subroutine visc_filt_bcksct ! ! From 67eb7ec07490cc402a77c5c70d5697d063efb516 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 15 Nov 2021 14:15:38 +0100 Subject: [PATCH 573/909] OMP for the rest of viscosity filters. dimention of partit%plock has been extended to myDim_elem2D+eDim_elem2D --- src/gen_modules_partitioning.F90 | 4 +- src/oce_dyn.F90 | 90 ++++++++++++++++++++++++++------ 2 files changed, 75 insertions(+), 19 deletions(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 6522a8b2d..950c4c845 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -508,8 +508,8 @@ subroutine init_gatherLists(partit) endif !$OMP MASTER #if defined(_OPENMP) - allocate(partit%plock(partit%myDim_nod2D+partit%eDim_nod2D)) - do n=1, myDim_nod2D+partit%eDim_nod2D + allocate(partit%plock(myDim_elem2D+eDim_elem2D)) !allocate with maximum dimention (nELEM> nNODE) + do n=1, myDim_elem2D+eDim_elem2D !experiments showd that OPENMP5 implementation of the lock (201811) is >10% more efficient !make sure you use OPENMP v. 5.0 #if _OPENMP >= 201811 diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 5985e814e..446e2a656 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -228,7 +228,7 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - real(kind=8) :: u1, v1, len, vi + real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax !___________________________________________________________________________ ! pointer on necessary derived types @@ -278,10 +278,20 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) !here dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. u1=u1*vi v1=v1*vi +#if defined(_OPENMP) + call omp_set_lock(partit%plock(el(1))) +#endif U_b(nz,el(1))=U_b(nz,el(1))-u1/elem_area(el(1)) - U_b(nz,el(2))=U_b(nz,el(2))+u1/elem_area(el(2)) V_b(nz,el(1))=V_b(nz,el(1))-v1/elem_area(el(1)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock(partit%plock(el(2))) +#endif + U_b(nz,el(2))=U_b(nz,el(2))+u1/elem_area(el(2)) V_b(nz,el(2))=V_b(nz,el(2))+v1/elem_area(el(2)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(2))) +#endif END DO END DO !$OMP END DO @@ -353,7 +363,7 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ real(kind=8) :: u1, v1, vi, len - integer :: ed, el(2), nz, nzmin, nzmax + integer :: ed, el(2), elem, nz, nzmin, nzmax !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs @@ -368,8 +378,15 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) V_c => dynamics%work%v_c(:,:) !___________________________________________________________________________ - U_c = 0.0_WP - V_c = 0.0_WP +!$OMP PARALLEL DO + DO elem=1, myDim_elem2D+eDim_elem2D + U_c(:, elem) = 0.0_WP + V_c(:, elem) = 0.0_WP + END DO +!$OMP END PARALLEL DO + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(u1, v1, len, vi, ed, el, nz, nzmin, nzmax) +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) @@ -384,8 +401,9 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) V_c(nz,el(2))=V_c(nz,el(2))+v1 END DO END DO - - Do ed=1,myDim_elem2D +!$OMP END DO +!$OMP DO + DO ed=1,myDim_elem2D len=sqrt(elem_area(ed)) nzmin = ulevels(ed) nzmax = nlevels(ed) @@ -400,10 +418,14 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) U_c(nz,ed)=-U_c(nz,ed)*vi V_c(nz,ed)=-V_c(nz,ed)*vi END DO - end do - + END DO +!$OMP END DO +!$OMP MASTER call exchange_elem(U_c, partit) call exchange_elem(V_c, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) @@ -412,13 +434,24 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) DO nz=nzmin,nzmax-1 u1=(U_c(nz,el(1))-U_c(nz,el(2))) v1=(V_c(nz,el(1))-V_c(nz,el(2))) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(el(1))) +#endif UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock(partit%plock(el(2))) +#endif + UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(2))) +#endif END DO END DO - +!$OMP END DO +!$OMP END PARALLEL end subroutine visc_filt_bilapl ! ! @@ -442,8 +475,8 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - real(kind=8) :: u1, v1, vi, len - integer :: ed, el(2), nz, nzmin, nzmax + real(kind=8) :: u1, v1, len, vi + integer :: ed, el(2), nz, nzmin, nzmax, elem !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs @@ -458,8 +491,14 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) V_c => dynamics%work%v_c(:,:) !___________________________________________________________________________ - U_c = 0.0_WP - V_c = 0.0_WP +!$OMP PARALLEL DO + DO elem=1, myDim_elem2D+eDim_elem2D + U_c(:, elem) = 0.0_WP + V_c(:, elem) = 0.0_WP + END DO +!$OMP END PARALLEL DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(u1, v1, len, vi, ed, el, nz, nzmin, nzmax) +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) @@ -483,9 +522,13 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) V_c(nz,el(2))=V_c(nz,el(2))+v1 END DO END DO - +!$OMP END DO +!$OMP MASTER call exchange_elem(U_c, partit) call exchange_elem(V_c, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) @@ -503,11 +546,24 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) ! vi=-dt*sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) u1=vi*(U_c(nz,el(1))-U_c(nz,el(2))) v1=vi*(V_c(nz,el(1))-V_c(nz,el(2))) + +#if defined(_OPENMP) + call omp_set_lock(partit%plock(el(1))) +#endif UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock(partit%plock(el(2))) +#endif + UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(2))) +#endif END DO END DO +!$OMP END DO +!$OMP END PARALLEL end subroutine visc_filt_bidiff From ed34dd2c0937af0152c65a9559cf28056fcf937a Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Mon, 15 Nov 2021 15:18:07 +0000 Subject: [PATCH 574/909] Changes to be able for IFS CY47R3.2 to link fesom2. --- env.sh | 2 ++ env/wsecmwf/shell | 13 +++++++++++++ lib/metis-5.1.0/GKlib/GKlibSystem.cmake | 2 ++ lib/parms/CMakeLists.txt | 3 +++ src/CMakeLists.txt | 4 ++++ src/async_threads_cpp/CMakeLists.txt | 3 +++ src/ifs_interface/ifs_interface.F90 | 3 +++ src/ifs_interface/ifs_notused.F90 | 14 ++++++++++++++ 8 files changed, 44 insertions(+) create mode 100644 env/wsecmwf/shell diff --git a/env.sh b/env.sh index f568651e3..ecb576453 100755 --- a/env.sh +++ b/env.sh @@ -47,6 +47,8 @@ elif [[ $LOGINHOST =~ ^jwlogin[0-9][0-9].juwels$ ]]; then STRATEGY="juwels" elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then STRATEGY="ecaccess.ecmwf.int" +elif [[ $LOGINHOST =~ ^[A-Za-z0-9]+\.ecmwf\.int$ ]]; then + STRATEGY="wsecmwf" else echo "can not determine environment for host: "$LOGINHOST [ $BEING_EXECUTED = true ] && exit 1 diff --git a/env/wsecmwf/shell b/env/wsecmwf/shell new file mode 100644 index 000000000..ddcd1c13f --- /dev/null +++ b/env/wsecmwf/shell @@ -0,0 +1,13 @@ +# used at ECMWF + +module unload openmpi +module unload eccodes +module unload netcdf4 +module unload hdf5 +module switch gnu/7.3.0 +module load netcdf4/4.4.1 +module load hdf5/1.8.17 +module load openmpi/2.1.3 + +export FC=mpif90 CC=mpicc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly +#export FC=mpif90 CC=gcc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly diff --git a/lib/metis-5.1.0/GKlib/GKlibSystem.cmake b/lib/metis-5.1.0/GKlib/GKlibSystem.cmake index 3fcc29108..6993c7c69 100644 --- a/lib/metis-5.1.0/GKlib/GKlibSystem.cmake +++ b/lib/metis-5.1.0/GKlib/GKlibSystem.cmake @@ -37,6 +37,8 @@ if(CMAKE_COMPILER_IS_GNUCC) elseif(${CMAKE_C_COMPILER_ID} MATCHES "Sun") # Sun insists on -xc99. set(GKlib_COPTIONS "${GKlib_COPTIONS} -xc99") +elseif(${CMAKE_C_COMPILER_ID} MATCHES "Cray") + set(GKlib_COPTIONS "${GKlib_COPTIONS} -fPIC") endif(CMAKE_COMPILER_IS_GNUCC) # Find OpenMP if it is requested. diff --git a/lib/parms/CMakeLists.txt b/lib/parms/CMakeLists.txt index 6959573f5..a1a901341 100644 --- a/lib/parms/CMakeLists.txt +++ b/lib/parms/CMakeLists.txt @@ -19,3 +19,6 @@ target_link_libraries(${PROJECT_NAME} INTERFACE ${BLAS_C_LIBRARIES} $ENV{UBUNTU_ if(${CMAKE_C_COMPILER_ID} STREQUAL "Intel") target_compile_options(${PROJECT_NAME} PRIVATE -no-prec-div -no-prec-sqrt -fast-transcendentals -fp-model precise) endif() +if(${BUILD_FESOM_AS_LIBRARY}) + target_compile_options(parms PRIVATE -fPIC) +endif() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c6d2576d8..4fa60d6bc 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -105,6 +105,10 @@ else() target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) endif() endif() +if(${BUILD_FESOM_AS_LIBRARY}) + target_compile_options(${PROJECT_NAME} PRIVATE -fPIC) + target_compile_options(${PROJECT_NAME}_C PRIVATE -fPIC) +endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${SCRIP_Fortran_INCLUDE_DIRECTORIES}) diff --git a/src/async_threads_cpp/CMakeLists.txt b/src/async_threads_cpp/CMakeLists.txt index 1af6c8fff..fcae15177 100644 --- a/src/async_threads_cpp/CMakeLists.txt +++ b/src/async_threads_cpp/CMakeLists.txt @@ -18,3 +18,6 @@ if(${CMAKE_CXX_COMPILER_ID} STREQUAL Cray ) else() target_compile_options(${PROJECT_NAME} PRIVATE -std=c++11) endif() +if(${BUILD_FESOM_AS_LIBRARY}) + target_compile_options(${PROJECT_NAME} PRIVATE -fPIC) +endif() diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index e1f40a06f..5cf35a935 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -20,6 +20,7 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & USE g_config, only: dt USE g_clock, only: timenew, daynew, yearnew, month, day_in_month USE nemogcmcoup_steps, ONLY : substeps + USE fvom_module, only: fesom_init IMPLICIT NONE @@ -1461,6 +1462,7 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) USE g_clock, only: yearnew, month, day_in_month USE fesom_main_storage_module, only: fesom => f ! mype USE nemogcmcoup_steps, ONLY : substeps + USE fvom_module, only: fesom_runloop IMPLICIT NONE ! Arguments @@ -1502,6 +1504,7 @@ END SUBROUTINE nemogcmcoup_step SUBROUTINE nemogcmcoup_final USE fesom_main_storage_module, only: fesom => f ! mype + USE fvom_module, only: fesom_finalize ! Finalize the FESOM model diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 index b483bf962..760a58f4c 100644 --- a/src/ifs_interface/ifs_notused.F90 +++ b/src/ifs_interface/ifs_notused.F90 @@ -359,4 +359,18 @@ SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & CALL abort END SUBROUTINE nemogcmcoup_wam_update_stress + +SUBROUTINE nemogcmcoup_end_ioserver + + ! Close io servers + + IMPLICIT NONE + INTEGER :: icomm + LOGICAL :: lnemoioserver + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_end_ioserver + #endif From 2cfbca7ad56b9de769e6fe6ee58fc5cbcb285869 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 15 Nov 2021 17:38:58 +0100 Subject: [PATCH 575/909] rename FESOM main source file and the FESOM module file --- src/CMakeLists.txt | 4 ++-- src/{fvom_main.F90 => fesom_main.F90} | 2 +- src/{fvom.F90 => fesom_module.F90} | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) rename src/{fvom_main.F90 => fesom_main.F90} (96%) rename src/{fvom.F90 => fesom_module.F90} (99%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c6d2576d8..efd831979 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -44,7 +44,7 @@ add_custom_command(OUTPUT 5303B6F4_E4F4_45B2_A6E5_8E2B9FB5CDC4 ${FESOM_GENERATED #endif() list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90) list(REMOVE_ITEM sources_C ${src_home}/fort_part.c) -list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_main.F90) +list(REMOVE_ITEM sources_Fortran ${src_home}/fesom_main.F90) # depends on the metis library #add_subdirectory(../lib/metis-5.1.0 ${PROJECT_BINARY_DIR}/metis) @@ -67,7 +67,7 @@ target_link_libraries(${PROJECT_NAME}_C parms) #metis if(${BUILD_FESOM_AS_LIBRARY}) add_library(${PROJECT_NAME} ${sources_Fortran}) else() - add_executable(${PROJECT_NAME} ${sources_Fortran} ${src_home}/fvom_main.F90) + add_executable(${PROJECT_NAME} ${sources_Fortran} ${src_home}/fesom_main.F90) endif() target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS -DMETIS_VERSION=5 -DPART_WEIGHTED -DMETISRANDOMSEED=35243) if(${DISABLE_MULTITHREADING}) diff --git a/src/fvom_main.F90 b/src/fesom_main.F90 similarity index 96% rename from src/fvom_main.F90 rename to src/fesom_main.F90 index a48953ed2..562fb6e1b 100755 --- a/src/fvom_main.F90 +++ b/src/fesom_main.F90 @@ -7,7 +7,7 @@ !=============================================================================! program main - use fvom_module + use fesom_module integer nsteps diff --git a/src/fvom.F90 b/src/fesom_module.F90 similarity index 99% rename from src/fvom.F90 rename to src/fesom_module.F90 index c18561297..c4c7f0fd5 100755 --- a/src/fvom.F90 +++ b/src/fesom_module.F90 @@ -76,7 +76,7 @@ module fesom_main_storage_module ! synopsis: main FESOM program split into 3 parts ! this way FESOM can e.g. be used as a library with an external time loop driver ! used with IFS-FESOM -module fvom_module +module fesom_module implicit none public fesom_init, fesom_runloop, fesom_finalize private From 8c52991a5623bf88a12bdf4a1608b7f78e85c7a8 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 15 Nov 2021 17:49:36 +0100 Subject: [PATCH 576/909] removed some bugs in OMP --- src/oce_ale_mixing_kpp.F90 | 10 ++-------- src/oce_ale_pressure_bv.F90 | 6 +++--- src/oce_muscl_adv.F90 | 4 ++-- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 555bf838f..53c177384 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -283,7 +283,6 @@ SUBROUTINE oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) ViscA(:, node) = 0.0_WP END DO !$OMP END PARALLEL DO - !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, usurf, vsurf, u_loc, v_loc) DO node=1, myDim_nod2D !+eDim_nod2D nzmin = ulevels_nod2D(node) @@ -349,7 +348,6 @@ SUBROUTINE oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) + sw_beta (nzmin,node) * water_flux(node) * tracers%data(2)%values(nzmin,node)) END DO !$OMP END PARALLEL DO - ! compute interior mixing coefficients everywhere, due to constant ! internal wave activity, static instability, and local shear ! instability. @@ -361,10 +359,8 @@ SUBROUTINE oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) ! boundary layer mixing coefficients: diagnose new b.l. depth CALL bldepth(partit, mesh) - ! boundary layer diffusivities CALL blmix_kpp(viscA, diffK, partit, mesh) - ! enhance diffusivity at interface kbl - 1 CALL enhance(viscA, diffK, partit, mesh) @@ -376,7 +372,7 @@ SUBROUTINE oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) !_____________________________________________________________________ ! all loops go over myDim_nod2D so no halo information --> for smoothing ! haloinfo is required --> therefor exchange_nod - call smooth_nod(blmc(:,:,j), 3, partit, mesh) + call smooth_nod(blmc(:,:,j), 3, partit, mesh) end do end if !$OMP BARRIER @@ -966,13 +962,11 @@ subroutine blmix_kpp(viscA,diffK, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - !$OMP PARALLEL DO DO node=1, myDim_nod2D+eDim_nod2D - blmc (:, n, :) = 0.0_WP + blmc (:, node, :) = 0.0_WP END DO !$OMP END PARALLEL DO - blmc = 0.0_WP ! ******************************************************************* ! Kv over the NODE ! ******************************************************************* diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 74a0d82fb..d4d482171 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -235,18 +235,18 @@ subroutine pressure_bv(tracers, partit, mesh) mixing_kpp = (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) !___________________________________________________________________________ ! Screen salinity - a_loc=0.0_WP + a =0.0_WP !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, a_loc) + a_loc=0.0_WP !$OMP DO do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) do nz=nzmin,nzmax-1 - a_loc=min(a_loc,salt(nz,node)) + a_loc=min(a_loc, salt(nz,node)) enddo enddo !$OMP END DO - a=0.0_WP !$OMP CRITICAL a=min(a, a_loc) !$OMP END CRITICAL diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index e4a995569..610b0c6e6 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -69,15 +69,15 @@ subroutine muscl_adv_init(twork, partit, mesh) ! --> SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) gives maximum number of ! neighbouring nodes within a single row of the sparse matrix k=SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) - if (k > nn_size) then #if defined(_OPENMP) call omp_set_lock(partit%plock(n)) #endif + if (k > nn_size) then nn_size=k ! nnum maximum number of neighbouring nodes + end if #if defined(_OPENMP) call omp_unset_lock(partit%plock(n)) #endif - end if end do !$OMP END DO !$OMP END PARALLEL From 916580a2e478e2ef803a766fe922043e1ee9055d Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 15 Nov 2021 17:49:36 +0100 Subject: [PATCH 577/909] removed some bugs in OMP --- src/oce_ale_mixing_kpp.F90 | 10 ++-------- src/oce_ale_pressure_bv.F90 | 6 +++--- src/oce_muscl_adv.F90 | 4 ++-- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 555bf838f..53c177384 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -283,7 +283,6 @@ SUBROUTINE oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) ViscA(:, node) = 0.0_WP END DO !$OMP END PARALLEL DO - !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, usurf, vsurf, u_loc, v_loc) DO node=1, myDim_nod2D !+eDim_nod2D nzmin = ulevels_nod2D(node) @@ -349,7 +348,6 @@ SUBROUTINE oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) + sw_beta (nzmin,node) * water_flux(node) * tracers%data(2)%values(nzmin,node)) END DO !$OMP END PARALLEL DO - ! compute interior mixing coefficients everywhere, due to constant ! internal wave activity, static instability, and local shear ! instability. @@ -361,10 +359,8 @@ SUBROUTINE oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) ! boundary layer mixing coefficients: diagnose new b.l. depth CALL bldepth(partit, mesh) - ! boundary layer diffusivities CALL blmix_kpp(viscA, diffK, partit, mesh) - ! enhance diffusivity at interface kbl - 1 CALL enhance(viscA, diffK, partit, mesh) @@ -376,7 +372,7 @@ SUBROUTINE oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) !_____________________________________________________________________ ! all loops go over myDim_nod2D so no halo information --> for smoothing ! haloinfo is required --> therefor exchange_nod - call smooth_nod(blmc(:,:,j), 3, partit, mesh) + call smooth_nod(blmc(:,:,j), 3, partit, mesh) end do end if !$OMP BARRIER @@ -966,13 +962,11 @@ subroutine blmix_kpp(viscA,diffK, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - !$OMP PARALLEL DO DO node=1, myDim_nod2D+eDim_nod2D - blmc (:, n, :) = 0.0_WP + blmc (:, node, :) = 0.0_WP END DO !$OMP END PARALLEL DO - blmc = 0.0_WP ! ******************************************************************* ! Kv over the NODE ! ******************************************************************* diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 74a0d82fb..d4d482171 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -235,18 +235,18 @@ subroutine pressure_bv(tracers, partit, mesh) mixing_kpp = (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) !___________________________________________________________________________ ! Screen salinity - a_loc=0.0_WP + a =0.0_WP !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, a_loc) + a_loc=0.0_WP !$OMP DO do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) do nz=nzmin,nzmax-1 - a_loc=min(a_loc,salt(nz,node)) + a_loc=min(a_loc, salt(nz,node)) enddo enddo !$OMP END DO - a=0.0_WP !$OMP CRITICAL a=min(a, a_loc) !$OMP END CRITICAL diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index e4a995569..610b0c6e6 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -69,15 +69,15 @@ subroutine muscl_adv_init(twork, partit, mesh) ! --> SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) gives maximum number of ! neighbouring nodes within a single row of the sparse matrix k=SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) - if (k > nn_size) then #if defined(_OPENMP) call omp_set_lock(partit%plock(n)) #endif + if (k > nn_size) then nn_size=k ! nnum maximum number of neighbouring nodes + end if #if defined(_OPENMP) call omp_unset_lock(partit%plock(n)) #endif - end if end do !$OMP END DO !$OMP END PARALLEL From 753dd7e8719580fbc2a9fa9e58030ac12c662d94 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Mon, 15 Nov 2021 17:46:09 +0000 Subject: [PATCH 578/909] Install all lib.a files needed for coupling to IFS in lib. --- .gitignore | 2 +- lib/parms/CMakeLists.txt | 3 ++- src/CMakeLists.txt | 4 +++- src/async_threads_cpp/CMakeLists.txt | 1 + 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 7ad900ed3..3c80e52ca 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,6 @@ *~ *.swp src/icepack_drivers/Icepack -lib/libfesom.a +lib/*.a /work_* Makefile.in diff --git a/lib/parms/CMakeLists.txt b/lib/parms/CMakeLists.txt index a1a901341..85f40ec0e 100644 --- a/lib/parms/CMakeLists.txt +++ b/lib/parms/CMakeLists.txt @@ -20,5 +20,6 @@ if(${CMAKE_C_COMPILER_ID} STREQUAL "Intel") target_compile_options(${PROJECT_NAME} PRIVATE -no-prec-div -no-prec-sqrt -fast-transcendentals -fp-model precise) endif() if(${BUILD_FESOM_AS_LIBRARY}) - target_compile_options(parms PRIVATE -fPIC) + target_compile_options(${PROJECT_NAME} PRIVATE -fPIC) + install(TARGETS ${PROJECT_NAME} DESTINATION "${FESOM_INSTALL_PREFIX}/lib") endif() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 4fa60d6bc..077d1ce5e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -2,6 +2,8 @@ cmake_minimum_required(VERSION 3.9) project(fesom C Fortran) +set(FESOM_INSTALL_PREFIX "${CMAKE_CURRENT_LIST_DIR}/.." CACHE FILEPATH "directory where FESOM will be installed to via 'make install'") + option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) option(ENABLE_OPENMP "build FESOM with OpenMP" OFF) @@ -121,9 +123,9 @@ if(${ENABLE_OPENMP} AND NOT ${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray) endif() -set(FESOM_INSTALL_PREFIX "${CMAKE_CURRENT_LIST_DIR}/.." CACHE FILEPATH "directory where FESOM will be installed to via 'make install'") if(${BUILD_FESOM_AS_LIBRARY}) install(TARGETS ${PROJECT_NAME} DESTINATION "${FESOM_INSTALL_PREFIX}/lib") + install(TARGETS ${PROJECT_NAME}_C DESTINATION "${FESOM_INSTALL_PREFIX}/lib") else() set(FESOM_INSTALL_FILEPATH "${FESOM_INSTALL_PREFIX}/bin/fesom.x") get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) diff --git a/src/async_threads_cpp/CMakeLists.txt b/src/async_threads_cpp/CMakeLists.txt index fcae15177..d911b00b4 100644 --- a/src/async_threads_cpp/CMakeLists.txt +++ b/src/async_threads_cpp/CMakeLists.txt @@ -20,4 +20,5 @@ else() endif() if(${BUILD_FESOM_AS_LIBRARY}) target_compile_options(${PROJECT_NAME} PRIVATE -fPIC) + install(TARGETS ${PROJECT_NAME} DESTINATION "${FESOM_INSTALL_PREFIX}/lib") endif() From d30206386047ba7b39fd134b3d867fd395d5775e Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Mon, 15 Nov 2021 19:27:49 +0000 Subject: [PATCH 579/909] Change Cray compiler to 8.7.7. --- env/ecaccess.ecmwf.int/shell | 47 +++++++++++++++++++++++++++++++----- 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/env/ecaccess.ecmwf.int/shell b/env/ecaccess.ecmwf.int/shell index 743116a12..6a28b2f1c 100644 --- a/env/ecaccess.ecmwf.int/shell +++ b/env/ecaccess.ecmwf.int/shell @@ -1,11 +1,46 @@ -export PATH=/home/rd/natr/cmake-3.11.2-Linux-x86_64/bin:$PATH - +module unload grib_api +module unload eccodes +module unload emos +module unload cmake +module unload fftw +module unload fcm +module unload netcdf4 +module unload netcdf4-parallel +module unload hdf5-parallel +module unload cray-netcdf-hdf5parallel +module unload cray-hdf5-parallel +module unload cray-netcdf module unload cray-hdf5 -module load cray-netcdf -module load cray-hdf5 +module unload python +module unload python3 +module unload boost +module unload ecbuild +module unload ifs-support +module unload fcm +module unload cdt +module unload cmake +module unload gcc + +export EC_CRAYPE_INTEGRATION=off + +# Load modules +module load cdt/18.12 +module load gcc/6.3.0 +module load fftw/3.3.4.5 +module load netcdf4-parallel/4.6.2 +module load hdf5-parallel/1.10.4 +module load fcm/2015.02.0 +module load python/2.7.12-01 +module load python3/3.6.8-01 +module load boost/1.61.0 +module load eigen/3.2.0 +module load nag +module load parmetis +module load cray-snplauncher +module load atp +module load ninja +module load cmake/3.15.0 #export CRAYPE_LINK_TYPE=dynamic -# enable full MPI thread support level (MPI_THREAD_MULTIPLE) -export MPICH_MAX_THREAD_SAFETY=multiple # to also switch to an alternative (probably with faster locking) multi threading implementation of the cray-mpich library, use the compiler flag -craympich-mt export FC=ftn CC=cc CXX=CC From 519986bdf1b7b7ed6ddbe6a42158417690f6d8c4 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Mon, 15 Nov 2021 19:39:18 +0000 Subject: [PATCH 580/909] Use shared libraries. --- .gitignore | 1 + lib/parms/CMakeLists.txt | 6 +++++- lib/parms/src/DDPQ/misc.c | 2 ++ src/CMakeLists.txt | 9 +++++++-- src/async_threads_cpp/CMakeLists.txt | 6 +++++- 5 files changed, 20 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index 3c80e52ca..b47b73763 100644 --- a/.gitignore +++ b/.gitignore @@ -8,5 +8,6 @@ *.swp src/icepack_drivers/Icepack lib/*.a +lib/*.so /work_* Makefile.in diff --git a/lib/parms/CMakeLists.txt b/lib/parms/CMakeLists.txt index 85f40ec0e..2ae7547eb 100644 --- a/lib/parms/CMakeLists.txt +++ b/lib/parms/CMakeLists.txt @@ -9,7 +9,11 @@ file(GLOB all_sources ${src_home}/src/*.c ${src_home}/src/DDPQ/*.c) include("${CMAKE_CURRENT_LIST_DIR}/../../cmake/FindBLAS.cmake") # create our library (set its name to name of this project) -add_library(${PROJECT_NAME} ${all_sources}) +if(${BUILD_FESOM_AS_LIBRARY}) + add_library(${PROJECT_NAME} SHARED ${all_sources}) +else() + add_library(${PROJECT_NAME} ${all_sources}) +endif() target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS USE_MPI REAL=double DBL FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 HAS_BLAS) target_include_directories(${PROJECT_NAME} PRIVATE ${src_home}/src/../include ${src_home}/src/include diff --git a/lib/parms/src/DDPQ/misc.c b/lib/parms/src/DDPQ/misc.c index 1713c8e00..b17aef663 100755 --- a/lib/parms/src/DDPQ/misc.c +++ b/lib/parms/src/DDPQ/misc.c @@ -10,6 +10,7 @@ #define DBL_EPSILON 2.2204460492503131e-16 // double epsilon +#if defined USE_DUBLICATE int qsplitC(FLOAT *a, int *ind, int n, int ncut) { /*---------------------------------------------------------------------- @@ -58,6 +59,7 @@ int qsplitC(FLOAT *a, int *ind, int n, int ncut) goto label1; } /*--------------- end of zqsplitC ----------------------------------------*/ +#endif int SparTran(csptr amat, csptr bmat, int job, int flag) { diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 077d1ce5e..5c974ee29 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -30,6 +30,7 @@ else() endif() #list(REMOVE_ITEM sources_Fortran ${src_home}/fesom_partition_init.F90) file(GLOB sources_C ${src_home}/*.c) +list(REMOVE_ITEM sources_C ${src_home}/psolve_feom.c) # generate a custom file from fesom_version_info.F90 which includes the current git SHA set(FESOM_ORIGINAL_VERSION_FILE ${src_home}/fesom_version_info.F90) @@ -58,7 +59,11 @@ add_subdirectory(async_threads_cpp) include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindNETCDF.cmake) -add_library(${PROJECT_NAME}_C ${sources_C}) +if(${BUILD_FESOM_AS_LIBRARY}) + add_library(${PROJECT_NAME}_C SHARED ${sources_C}) +else() + add_library(${PROJECT_NAME}_C ${sources_C}) +endif() target_compile_definitions(${PROJECT_NAME}_C PRIVATE PARMS USE_MPI REAL=double DBL HAS_BLAS FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 SGI LINUX UNDER_ MPI2) target_link_libraries(${PROJECT_NAME}_C parms) #metis @@ -67,7 +72,7 @@ target_link_libraries(${PROJECT_NAME}_C parms) #metis # we do not always build the library along with the executable to avoid having two targets here in the CMakeLists.txt # two targets would allow e.g. setting different compiler options or preprocessor definition, which would be error prone. if(${BUILD_FESOM_AS_LIBRARY}) - add_library(${PROJECT_NAME} ${sources_Fortran}) + add_library(${PROJECT_NAME} SHARED ${sources_Fortran}) else() add_executable(${PROJECT_NAME} ${sources_Fortran} ${src_home}/fvom_main.F90) endif() diff --git a/src/async_threads_cpp/CMakeLists.txt b/src/async_threads_cpp/CMakeLists.txt index d911b00b4..9dfbf2fb9 100644 --- a/src/async_threads_cpp/CMakeLists.txt +++ b/src/async_threads_cpp/CMakeLists.txt @@ -8,7 +8,11 @@ file(GLOB sources_CXX ${CMAKE_CURRENT_LIST_DIR}/*.cpp) include(FortranCInterface) FortranCInterface_HEADER(ThreadsManagerFCMacros.h MACRO_NAMESPACE "ThreadsManagerFCMacros_" SYMBOLS init_ccall begin_ccall end_ccall) -add_library(${PROJECT_NAME} ${sources_CXX}) +if(${BUILD_FESOM_AS_LIBRARY}) + add_library(${PROJECT_NAME} SHARED ${sources_CXX}) +else() + add_library(${PROJECT_NAME} ${sources_CXX}) +endif() target_include_directories(${PROJECT_NAME} INTERFACE ${CMAKE_CURRENT_LIST_DIR} PUBLIC ${CMAKE_CURRENT_BINARY_DIR} From a7ef1dae83c044adabc45de3a1af33d6b7cfc804 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Tue, 16 Nov 2021 11:30:10 +0000 Subject: [PATCH 581/909] Workaround for missing set_timer. --- lib/parms/src/DDPQ/protos.h | 2 +- lib/parms/src/DDPQ/setblks.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/parms/src/DDPQ/protos.h b/lib/parms/src/DDPQ/protos.h index 22434772a..9f9de8ce9 100755 --- a/lib/parms/src/DDPQ/protos.h +++ b/lib/parms/src/DDPQ/protos.h @@ -128,6 +128,6 @@ extern int init_blocks( csptr csmat, int *pnBlock, int **pnB, int *t_angle ); /* systimer.c */ -extern double sys_timer(); +//extern double sys_timer(); #endif diff --git a/lib/parms/src/DDPQ/setblks.c b/lib/parms/src/DDPQ/setblks.c index f9bae1db6..2a5352360 100755 --- a/lib/parms/src/DDPQ/setblks.c +++ b/lib/parms/src/DDPQ/setblks.c @@ -70,7 +70,8 @@ int init_blocks( csptr csmat, int *pnBlock, int **pnB, int **pperm, int nextBlockID, nextBlockPos, belongTo, grp; double eps_2 = eps * eps, t1, t2; - t1 = sys_timer(); /* begin Hash method timer */ + // t1 = sys_timer(); /* begin Hash method timer */ + t1 = 0.0; group = (KeyType *)Malloc( n*sizeof(KeyType), "init_blocks" ); compress = (CompressType *)Malloc( n*sizeof(CompressType), "init_blocks" ); perm = (int *)Malloc( n * sizeof(int), "init_blocks" ); @@ -130,10 +131,12 @@ int init_blocks( csptr csmat, int *pnBlock, int **pnB, int **pperm, } } } - t2 = sys_timer(); /* end Hash method timer */ + //t2 = sys_timer(); /* end Hash method timer */ + t2 = 0.0; *t_hash = t2 - t1; - t1 = sys_timer(); /* begin angle method timer */ + //t1 = sys_timer(); /* begin angle method timer */ + t1 = 0.0; /* begin angle method timer */ nB = (int *)Malloc( n * sizeof(int), "init_blocks" ); jbuf = (int *)Malloc( n * sizeof(int), "init_blocks" ); @@ -232,7 +235,8 @@ int init_blocks( csptr csmat, int *pnBlock, int **pnB, int **pperm, nB[belongTo]++; } } - t2 = sys_timer(); /* end angle method timer */ + //t2 = sys_timer(); /* end angle method timer */ + t2 = 0.0; *t_angle = t2 - t1; *pperm = perm; From aca0d0fda8b440d8ae7f6e5569c9695b334c669b Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 16 Nov 2021 14:12:21 +0100 Subject: [PATCH 582/909] merge remaining ifsinterface definitions --- src/gen_forcing_couple.F90 | 3 ++- src/gen_modules_forcing.F90 | 4 +++- src/gen_modules_partitioning.F90 | 12 ++++++------ src/ice_modules.F90 | 2 +- src/ice_setup_step.F90 | 8 ++++---- src/ice_thermo_cpl.F90 | 2 +- src/ice_thermo_oce.F90 | 4 ++-- src/ifs_interface/ifs_interface.F90 | 2 -- src/ifs_interface/ifs_modules.F90 | 2 -- src/ifs_interface/ifs_notused.F90 | 2 -- 10 files changed, 19 insertions(+), 22 deletions(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 5aacb7f93..684b8a2cd 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -291,6 +291,7 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) do_rotate_ice_wind=.false. end if #else +#ifndef __ifsinterface call sbc_do(partit, mesh) u_wind = atmdata(i_xwind,:) v_wind = atmdata(i_ywind,:) @@ -301,7 +302,7 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) prec_rain = atmdata(i_prec ,:)/1000._WP prec_snow = atmdata(i_snow ,:)/1000._WP press_air = atmdata(i_mslp ,:) ! unit should be Pa - +#endif if (use_cavity) then do i=1,myDim_nod2d+eDim_nod2d diff --git a/src/gen_modules_forcing.F90 b/src/gen_modules_forcing.F90 index d8a99b704..25c99ee99 100755 --- a/src/gen_modules_forcing.F90 +++ b/src/gen_modules_forcing.F90 @@ -59,8 +59,10 @@ module g_forcing_arrays real(kind=WP), allocatable, dimension(:) :: runoff, evaporation, ice_sublimation real(kind=WP), allocatable, dimension(:) :: cloudiness, press_air -#if defined (__oasis) || defined (__ifsinterface) +#if defined (__oasis) || defined (__ifsinterface) /* todo: use a single shared definition */ real(kind=WP), target, allocatable, dimension(:) :: sublimation, evap_no_ifrac +#endif +#if defined (__oasis) real(kind=WP), target, allocatable, dimension(:) :: tmp_sublimation, tmp_evap_no_ifrac !temporary flux fields real(kind=WP), target, allocatable, dimension(:) :: tmp_shortwave !(for flux correction) real(kind=WP), allocatable, dimension(:) :: atm_net_fluxes_north, atm_net_fluxes_south diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index a038dbf74..658573fd3 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -48,14 +48,14 @@ subroutine par_init(partit) ! initializes MPI integer :: provided_mpi_thread_support_level character(:), allocatable :: provided_mpi_thread_support_level_name -#ifndef __oasis - call MPI_Comm_Size(MPI_COMM_WORLD,partit%npes,i) - call MPI_Comm_Rank(MPI_COMM_WORLD,partit%mype,i) - partit%MPI_COMM_FESOM=MPI_COMM_WORLD +#if defined __oasis || defined __ifsinterface + ! use comm from coupler or ifs #else - call MPI_Comm_Size(MPI_COMM_FESOM,partit%npes,i) - call MPI_Comm_Rank(MPI_COMM_FESOM,partit%mype,i) + partit%MPI_COMM_FESOM=MPI_COMM_WORLD ! use global comm if not coupled (e.g. no __oasis or __ifsinterface) #endif + call MPI_Comm_Size(partit%MPI_COMM_FESOM,partit%npes,i) + call MPI_Comm_Rank(partit%MPI_COMM_FESOM,partit%mype,i) + if(partit%mype==0) then call MPI_Query_thread(provided_mpi_thread_support_level, i) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index c7366c028..9b31e31e7 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -78,7 +78,7 @@ MODULE i_ARRAYS !temporary flux fields !(for flux correction) REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_temp, m_templ, dm_temp, rhs_tempdiv -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) real(kind=WP),target, allocatable, dimension(:) :: enthalpyoffuse #endif #endif /* (__oasis) || defined (__ifsinterface)*/ diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index fb0bddf10..aa244b212 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -172,10 +172,10 @@ subroutine ice_array_setup(partit, mesh) stress_iceoce_x = 0.0_WP stress_iceoce_y = 0.0_WP allocate(U_w(n_size), V_w(n_size)) ! =uf and vf of ocean at surface nodes -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) allocate(ice_alb(n_size), ice_temp(n_size), enthalpyoffuse(n_size)) allocate(rhs_tempdiv(n_size), rhs_temp(n_size)) ice_alb=0.6_WP @@ -183,12 +183,12 @@ subroutine ice_array_setup(partit, mesh) rhs_tempdiv=0._WP rhs_temp=0._WP enthalpyoffuse=0._WP -#endif /* (__oifs) */ +#endif /* (__oifs) || defined (__ifsinterface) */ oce_heat_flux=0._WP ice_heat_flux=0._WP tmp_oce_heat_flux=0._WP tmp_ice_heat_flux=0._WP -#endif /* (__oasis) */ +#endif /* (__oasis) || defined (__ifsinterface) */ end subroutine ice_array_setup ! ! diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 2c5fc650e..7709c0ccf 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -1,4 +1,4 @@ -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) subroutine thermodynamics(partit, mesh) !=================================================================== diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index b2f196770..3eb97d14c 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -66,7 +66,7 @@ subroutine cut_off(partit, mesh) #endif /* (__oifs) */ end subroutine cut_off -#if !defined (__oasis) +#if !defined (__oasis) && !defined (__ifsinterface) !=================================================================== ! Sea-ice thermodynamics routines ! @@ -669,4 +669,4 @@ end function TFrez ! !====================================================================================== ! -#endif +#endif /* #if !defined (__oasis) && !defined (__ifsinterface) */ diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index e1f40a06f..78d758d86 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -1,4 +1,3 @@ -#if defined (__ifsinterface) !===================================================== ! IFS interface for calling FESOM2 as a subroutine. ! @@ -1513,4 +1512,3 @@ SUBROUTINE nemogcmcoup_final CALL fesom_finalize END SUBROUTINE nemogcmcoup_final -#endif diff --git a/src/ifs_interface/ifs_modules.F90 b/src/ifs_interface/ifs_modules.F90 index 8f52ee153..5e18ad10e 100644 --- a/src/ifs_interface/ifs_modules.F90 +++ b/src/ifs_interface/ifs_modules.F90 @@ -1,4 +1,3 @@ -#if defined (__ifsinterface) #define __MYFILE__ 'ifs_modules.F90' #define key_mpp_mpi ! Set of modules needed by the interface to IFS. @@ -1856,4 +1855,3 @@ MODULE interinfo LOGICAL :: lparbcast = .FALSE. END MODULE interinfo -#endif diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 index b483bf962..6ef090079 100644 --- a/src/ifs_interface/ifs_notused.F90 +++ b/src/ifs_interface/ifs_notused.F90 @@ -1,4 +1,3 @@ -#if defined (__ifsinterface) ! Routines usually provided by the library that are currently ! not implemented for FESOM2. ! @@ -359,4 +358,3 @@ SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & CALL abort END SUBROUTINE nemogcmcoup_wam_update_stress -#endif From d98e7c73c5f9f8c08035363bb846f5edc1bc0594 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 16 Nov 2021 15:20:08 +0100 Subject: [PATCH 583/909] bug fixes in OMP. also a bug in KPP foung (treatment of SW penetration) --- src/oce_ale_mixing_kpp.F90 | 6 ++++-- src/oce_muscl_adv.F90 | 8 ++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 53c177384..1bc4c676e 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -600,7 +600,7 @@ SUBROUTINE bldepth(partit, mesh) end if !$OMP BARRIER -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, dzup) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, dzup, coeff_sw) DO node=1, myDim_nod2D nzmax = nlevels_nod2D(node) nzmin = ulevels_nod2D(node) @@ -616,10 +616,12 @@ SUBROUTINE bldepth(partit, mesh) EXIT END IF END DO + !----------------------------------------------------------------------- ! find stability and buoyancy forcing for final hbl values !----------------------------------------------------------------------- IF (use_sw_pene) THEN + coeff_sw = g * sw_alpha(nzmin,node) ! @ the surface @ Z (m/s2/K) ! Linear interpolation of sw_3d to depth of hbl bfsfc(node) = Bo(node) + & coeff_sw * & @@ -786,7 +788,7 @@ subroutine ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) !$OMP BARRIER !___________________________________________________________________________ ! compute viscA and diffK -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, Rigg, ratio, frit) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, Rigg, ratio, frit, Kv0_b) do node=1, myDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index 610b0c6e6..5e7cf5a24 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -69,15 +69,11 @@ subroutine muscl_adv_init(twork, partit, mesh) ! --> SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) gives maximum number of ! neighbouring nodes within a single row of the sparse matrix k=SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) -#if defined(_OPENMP) - call omp_set_lock(partit%plock(n)) -#endif +!$OMP CRITICAL if (k > nn_size) then nn_size=k ! nnum maximum number of neighbouring nodes end if -#if defined(_OPENMP) - call omp_unset_lock(partit%plock(n)) -#endif +!$OMP END CRITICAL end do !$OMP END DO !$OMP END PARALLEL From 15d5167f37e4623b9a0aab922b5e1a39f955a689 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Wed, 17 Nov 2021 10:50:46 +0100 Subject: [PATCH 584/909] fix tests --- setups/test_pi/setup.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/setups/test_pi/setup.yml b/setups/test_pi/setup.yml index e303fbc0f..e1222bc86 100644 --- a/setups/test_pi/setup.yml +++ b/setups/test_pi/setup.yml @@ -59,12 +59,12 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.2691276443855294 - salt: 23.944024712806094 - temp: 1.701768707848739 - sst: 8.531522995932146 - u: -0.001407225233294229 - v: 0.00014182969591235959 + a_ice: 0.2691276598479261 + salt: 23.944024679303666 + temp: 1.701768750033021 + sst: 8.531528640978305 + u: -0.0014072137861434184 + v: 0.00014184602459601167 From 3eb7d4985dbcb7f7f2e110ae43e902cb8af2988d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 17 Nov 2021 10:53:48 +0100 Subject: [PATCH 585/909] do not initialize MPI in FESOM main if this has been done already (e.g. from IFS) --- src/fvom.F90 | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/fvom.F90 b/src/fvom.F90 index 0fab18011..e53bfa084 100755 --- a/src/fvom.F90 +++ b/src/fvom.F90 @@ -66,6 +66,7 @@ module fesom_main_storage_module character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt integer mpi_version_len + logical fesom_did_mpi_init end type type(fesom_main_storage_type), save, target :: f @@ -87,16 +88,25 @@ subroutine fesom_init(fesom_total_nsteps) use fesom_main_storage_module integer, intent(out) :: fesom_total_nsteps ! EO parameters + logical mpi_is_initialized if(command_argument_count() > 0) then call command_line_options%parse() stop end if + + mpi_is_initialized = .false. + f%fesom_did_mpi_init = .false. #ifndef __oifs !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS - call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, f%provided, f%i) + call MPI_Initialized(mpi_is_initialized, f%i) + if(.not. mpi_is_initialized) then + ! do not initialize MPI here if it has been initialized already, e.g. via IFS when fesom is called as library (__ifsinterface is defined) + call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, f%provided, f%i) + f%fesom_did_mpi_init = .true. + end if #endif @@ -443,7 +453,7 @@ subroutine fesom_finalize() write(*,*) end if ! call clock_finish - call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) + if(f%fesom_did_mpi_init) call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) end subroutine end module From e4e222144f12ac4e1f0f14107f94c4f922d7fdd4 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 17 Nov 2021 10:53:49 +0100 Subject: [PATCH 586/909] --- src/oce_ale.F90 | 264 ++++++++++++++++++++++++++++++++------------- src/oce_fer_gm.F90 | 29 +++-- 2 files changed, 213 insertions(+), 80 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index c8ec718b7..a990afc05 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1563,12 +1563,11 @@ subroutine update_stiff_mat_ale(partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(inout), target :: mesh !___________________________________________________________________________ - integer :: n, i, j, row, ed,n2 + integer :: n, i, j, k, row, ed, n2 integer :: enodes(2), elnodes(3), el(2) integer :: elem, npos(3), offset, nini, nend real(kind=WP) :: factor real(kind=WP) :: fx(3), fy(3) - integer, allocatable :: n_num(:) !___________________________________________________________________________ ! pointer on necessary derived types #include "associate_part_def.h" @@ -1580,13 +1579,12 @@ subroutine update_stiff_mat_ale(partit, mesh) ! update secod term of lhs od equation (18) of "FESOM2 from finite element ! to finite volumes" --> stiff matrix part ! loop over lcal edges - allocate(n_num(myDim_nod2D+eDim_nod2D)) - n_num=0 factor=g*dt*alpha*theta + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, i, j, k, row, ed, n2, enodes, elnodes, el, elem, npos, offset, nini, nend, fx, fy) do ed=1,myDim_edge2D !! Attention ! enodes ... local node indices of nodes that edge ed - enodes=edges(:,ed) - + enodes=edges(:,ed) ! el ... local element indices of the two elments that contribute to edge ! el(1) or el(2) < 0 than edge is boundary edge el=edge_tri(:,ed) @@ -1598,27 +1596,26 @@ subroutine update_stiff_mat_ale(partit, mesh) !___________________________________________________________________ ! sparse indice offset for node with index row - offset=SSH_stiff%rowptr(row)-ssh_stiff%rowptr(1) - ! loop over number of neghbouring nodes of node-row - do n=1,SSH_stiff%rowptr(row+1)-SSH_stiff%rowptr(row) - ! nn_pos ... local indice position of neigbouring nodes - ! n2 ... local indice of n-th neighbouring node to node-row - n2=nn_pos(n,row) - ! n_num(n2) ... global sparse matrix indices of local mesh point n2 - n_num(n2)=offset+n - end do - + offset=SSH_stiff%rowptr(row)-ssh_stiff%rowptr(1) !___________________________________________________________________ - do i=1,2 ! Two elements related to the edge + do i=1, 2 ! Two elements related to the edge ! It should be just grad on elements ! elem ... local element index to calc grad on that element - elem=el(i) - - if(elem<1) cycle - + elem=el(i) + if(elem<1) cycle ! elnodes ... local node indices of nodes that form element elem elnodes=elem2D_nodes(:,elem) - + ! we have to put it here for OMP compatibility. The MPI version might become a bit slower :( + ! loop over number of neghbouring nodes of node-row + do k=1, 3 + do n=1, SSH_stiff%rowptr(row+1)-SSH_stiff%rowptr(row) + ! npos ... global sparse matrix indices of local mesh points elnodes + if (elnodes(k)==nn_pos(n, row)) then + npos(k)=offset+n !start with the next k + EXIT + end if + end do + end do ! here update of second term on lhs of eq. 18 in Danilov etal 2017 ! --> in the initialisation of the stiff matrix the integration went ! over the unperturbed ocean depth using -zbar_e_bot @@ -1635,14 +1632,17 @@ subroutine update_stiff_mat_ale(partit, mesh) ! In the computation above, I've used rules from ssh_rhs (where it is ! on the rhs. So the sign is changed in the expression below. ! npos... sparse matrix indices position of node points elnodes - npos=n_num(elnodes) - SSH_stiff%values(npos)=SSH_stiff%values(npos) + fy*factor - +#if defined(_OPENMP) +! call omp_set_lock(row) ! it shall be sufficient to block writing into the same row of SSH_stiff +#endif + SSH_stiff%values(npos)=SSH_stiff%values(npos) + fy*factor +#if defined(_OPENMP) +! call omp_unset_lock(row) +#endif end do ! --> do i=1,2 end do ! --> do j=1,2 end do ! --> do ed=1,myDim_edge2D - deallocate(n_num) - +!$OMP END PARALLEL DO !DS this check will work only on 0pe because SSH_stiff%rowptr contains global pointers !if (mype==0) then !do row=1, myDim_nod2D @@ -1699,7 +1699,15 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) !___________________________________________________________________________ ! loop over local edges - ssh_rhs=0.0_WP +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + ssh_rhs(n)=0.0_WP + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(ed, el, enodes, n, nz, nzmin, nzmax, c1, c2, deltaX1, deltaX2, deltaY1, deltaY2, & +!$OMP dumc1_1, dumc1_2, dumc2_1, dumc2_2) +!$OMP DO do ed=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,ed) @@ -1742,10 +1750,22 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) !_______________________________________________________________________ ! calc netto "flux" +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) +#endif ssh_rhs(enodes(1))=ssh_rhs(enodes(1))+(c1+c2) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock(partit%plock(enodes(2))) +#endif ssh_rhs(enodes(2))=ssh_rhs(enodes(2))-(c1+c2) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif + end do - +!$OMP END DO + !___________________________________________________________________________ ! take into account water flux ! at this point: ssh_rhs = -alpha * nabla*int(u^n + deltau dz) @@ -1756,6 +1776,7 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) ! ! shown in eq (11) rhs of "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (11) rhs if ( .not. trim(which_ALE)=='linfs') then +!$OMP DO do n=1,myDim_nod2D nzmin = ulevels_nod2D(n) if (ulevels_nod2D(n)>1) then @@ -1764,14 +1785,18 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*areasvol(nzmin,n)+(1.0_WP-alpha)*ssh_rhs_old(n) end if end do +!$OMP END DO else +!$OMP DO do n=1,myDim_nod2D if (ulevels_nod2D(n)>1) cycle ssh_rhs(n)=ssh_rhs(n)+(1.0_WP-alpha)*ssh_rhs_old(n) end do +!$OMP END DO end if +!$OMP END PARALLEL call exchange_nod(ssh_rhs, partit) - +!$OMP BARRIER end subroutine compute_ssh_rhs_ale ! ! @@ -1804,7 +1829,7 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh), intent(inout), target :: mesh !___________________________________________________________________________ - integer :: ed, el(2), enodes(2), nz,n, elnodes(3), elem, nzmin, nzmax + integer :: ed, el(2), enodes(2), elem, elnodes(3), n, nz, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 !___________________________________________________________________________ ! pointer on necessary derived types @@ -1820,7 +1845,16 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) !___________________________________________________________________________ ! compute the rhs - ssh_rhs_old=0.0_WP + +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + ssh_rhs_old(n)=0.0_WP + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(ed, el, enodes, elem, elnodes, n, nz, nzmin, nzmax, & +!$OMP c1, c2, deltaX1, deltaX2, deltaY1, deltaY2) +!$OMP DO do ed=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,ed) @@ -1856,38 +1890,60 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) end do end if !_______________________________________________________________________ +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) +#endif ssh_rhs_old(enodes(1))=ssh_rhs_old(enodes(1))+(c1+c2) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock(partit%plock(enodes(2))) +#endif ssh_rhs_old(enodes(2))=ssh_rhs_old(enodes(2))-(c1+c2) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif end do - +!$OMP END DO +!$OMP END PARALLEL + !___________________________________________________________________________ ! take into account water flux if (.not. trim(which_ALE)=='linfs') then +!$OMP PARALLEL DO do n=1,myDim_nod2D ssh_rhs_old(n)=ssh_rhs_old(n)-water_flux(n)*areasvol(ulevels_nod2D(n),n) end do +!$OMP END PARALLEL DO call exchange_nod(ssh_rhs_old, partit) +!$OMP BARRIER end if - !___________________________________________________________________________ ! update the thickness - hbar_old=hbar +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + hbar_old(n)=hbar(n) + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO do n=1,myDim_nod2D hbar(n)=hbar_old(n)+ssh_rhs_old(n)*dt/areasvol(ulevels_nod2D(n),n) end do +!$OMP END PARALLEL DO call exchange_nod(hbar, partit) - +!$OMP BARRIER !___________________________________________________________________________ ! fill the array for updating the stiffness matrix +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes) do elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) - if (ulevels(elem)>1) then + if (ulevels(elem) > 1) then dhe(elem) = 0.0_WP else dhe(elem) = sum(hbar(elnodes)-hbar_old(elnodes))/3.0_WP endif end do - +!$OMP END PARALLEL DO end subroutine compute_hbar_ale ! ! @@ -1923,9 +1979,10 @@ subroutine vert_vel_ale(dynamics, partit, mesh) !___________________________________________________________________________ integer :: el(2), enodes(2), n, nz, ed, nzmin, nzmax, uln1, uln2, nln1, nln2 real(kind=WP) :: c1, c2, deltaX1, deltaY1, deltaX2, deltaY2, dd, dd1, dddt, cflmax + real(kind=WP) :: lcflmax !for OMP realization ! --> zlevel with local zstar - real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int !PS - real(kind=WP), dimension(:), allocatable :: max_dhbar2distr,cumsum_maxdhbar,distrib_dhbar + real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int + real(kind=WP), dimension(:), allocatable :: max_dhbar2distr, cumsum_maxdhbar, distrib_dhbar integer , dimension(:), allocatable :: idx !___________________________________________________________________________ ! pointer on necessary derived types @@ -1949,15 +2006,19 @@ subroutine vert_vel_ale(dynamics, partit, mesh) if (Fer_GM) then fer_UV => dynamics%fer_uv(:,:,:) fer_Wvel=> dynamics%fer_w(:,:) - end if - + end if !___________________________________________________________________________ ! Contributions from levels in divergence - Wvel=0.0_WP - if (Fer_GM) then - fer_Wvel=0.0_WP - end if - +!$OMP PARALLEL DO + DO n=1, myDim_nod2D+eDim_nod2D + Wvel(:, n)=0.0_WP + if (Fer_GM) then + fer_Wvel(:, n)=0.0_WP + end if + END DO +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(ed, enodes, el, deltaX1, deltaY1, nz, nzmin, nzmax, c1, deltaX2, deltaY2, c2) do ed=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,ed) @@ -1987,9 +2048,19 @@ subroutine vert_vel_ale(dynamics, partit, mesh) Wvel(nz,enodes(2))=Wvel(nz,enodes(2))-c1 if (Fer_GM) then c1=(fer_UV(2,nz,el(1))*deltaX1- & - fer_UV(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + fer_UV(1,nz,el(1))*deltaY1)*helem(nz,el(1)) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) +#endif fer_Wvel(nz,enodes(1))=fer_Wvel(nz,enodes(1))+c1 +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) +#endif fer_Wvel(nz,enodes(2))=fer_Wvel(nz,enodes(2))-c1 +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif end if end do @@ -2009,20 +2080,32 @@ subroutine vert_vel_ale(dynamics, partit, mesh) if (Fer_GM) then c2=-(fer_UV(2,nz,el(2))*deltaX2- & fer_UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) +#endif fer_Wvel(nz,enodes(1))=fer_Wvel(nz,enodes(1))+c2 +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) +#endif fer_Wvel(nz,enodes(2))=fer_Wvel(nz,enodes(2))-c2 +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif end if end do end if end do ! --> do ed=1, myDim_edge2D +!$OMP END PARALLEL DO ! | ! | ! +--> until here Wvel contains the thickness divergence div(u) - !___________________________________________________________________________ ! cumulative summation of div(u_vec*h) vertically ! W_k = W_k+1 - div(h_k*u_k) ! W_k ... vertical flux trough + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2d(n)-1 @@ -2034,10 +2117,11 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end if end do end do - +!$OMP END PARALLEL DO !___________________________________________________________________________ ! divide with depth dependent cell area to convert from Vertical flux to ! physical vertical velocities in units m/s +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2d(n)-1 @@ -2050,6 +2134,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end do end do +!$OMP END PARALLEL DO ! | ! |--> (A) linear free surface: dh/dt=0 ; W_t-W_b = -div(hu) ! | @@ -2074,11 +2159,14 @@ subroutine vert_vel_ale(dynamics, partit, mesh) !_______________________________________________________________________ ! idx is only needed for local star case to estimate over how much ! depth layers change in ssh needs to be distributed - allocate(max_dhbar2distr(lzstar_lev),distrib_dhbar(lzstar_lev),idx(lzstar_lev),cumsum_maxdhbar(lzstar_lev)) - idx = (/(nz,nz=1,lzstar_lev,1)/) !!PS allocate(max_dhbar2distr(nl-1),distrib_dhbar(nl-1),idx(nl-1),cumsum_maxdhbar(nl-1)) !!PS idx = (/(nz,nz=1,nl-1,1)/) - + allocate(max_dhbar2distr(lzstar_lev), distrib_dhbar(lzstar_lev), idx(lzstar_lev), cumsum_maxdhbar(lzstar_lev)) + idx = (/(nz, nz=1, lzstar_lev, 1)/) + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, dhbar_total, max_dhbar2distr, cumsum_maxdhbar, & +!$OMP distrib_dhbar, dhbar_rest, distrib_dhbar_int, idx ) +!$OMP DO do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D_min(n)-1 @@ -2099,7 +2187,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) ! layerthickness becomes to small or even negativ and model ! blows up !!PS if (dhbar_total<0.0_WP .and. hnode(1,n)+dhbar_total<=(zbar(1)-zbar(2))*min_hnode ) then - if (dhbar_total<0.0_WP .and. hnode(nzmin,n)+dhbar_total<=(zbar(nzmin)-zbar(nzmin+1))*min_hnode ) then + if (dhbar_total < 0.0_WP .and. hnode(nzmin,n)+dhbar_total<=(zbar(nzmin)-zbar(nzmin+1))*min_hnode ) then ! --> do local zstar case !_______________________________________________________________ ! max_dhbar2distr ... how much negative ssh change can be maximal @@ -2257,13 +2345,15 @@ subroutine vert_vel_ale(dynamics, partit, mesh) Wvel(nzmin,n) = Wvel(nzmin,n)-water_flux(n) end do ! --> do n=1, myDim_nod2D - +!$OMP END DO +!$OMP END PARALLEL !_______________________________________________________________________ deallocate(max_dhbar2distr,distrib_dhbar,idx,cumsum_maxdhbar) !___________________________________________________________________________ elseif (trim(which_ALE)=='zstar') then ! distribute total change in ssh (hbar(n)-hbar_old(n)) over all layers +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, dd, dd1, dddt) do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) !!PS nzmin = ulevels_nod2D_max(n) @@ -2296,7 +2386,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) !___________________________________________________________________ !!PS do nz=1,nlevels_nod2D_min(n)-2 - do nz=nzmin,nzmax-1 + do nz=nzmin, nzmax-1 ! why *(zbar(nz)-dd1) ??? ! because here Wvel_k = SUM_k:kmax(div(h_k*v_k))/V_k ! but Wvel_k = Wvel_k+1 - div(h_k*v_k) - h⁰_k/H*dhbar/dt @@ -2321,17 +2411,17 @@ subroutine vert_vel_ale(dynamics, partit, mesh) Wvel(nzmin,n)=Wvel(nzmin,n)-water_flux(n) end do ! --> do n=1, myDim_nod2D +!$OMP END PARALLEL DO ! The implementation here is a bit strange, but this is to avoid ! unnecessary multiplications and divisions by area. We use the fact ! that we apply stretching only over the part of the column ! where area(nz,n)=area(1,n) - endif ! --> if(trim(which_ALE)=='....') then - - if (any(hnode_new<0.0_WP)) then - write(*,*) ' --> fatal problem <--: layerthickness of a layer became smaller zero' + +!$OMP PARALLEL DO do n=1, myDim_nod2D+eDim_nod2D - if (any( hnode_new(:,n)<0.0_WP)) then + if (any( hnode_new(:,n) < 0.0_WP)) then + write(*,*) ' --> fatal problem <--: layerthickness of a layer became smaller than zero' write(*,*) " mype = ", mype write(*,*) " mstep = ", mstep write(*,*) " node = ", n @@ -2365,17 +2455,21 @@ subroutine vert_vel_ale(dynamics, partit, mesh) write(*,*) end if end do -!!PS call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) - endif - +!$OMP END PARALLEL DO !___________________________________________________________________________ call exchange_nod(Wvel, partit) call exchange_nod(hnode_new, partit) ! Or extend cycles above if (Fer_GM) call exchange_nod(fer_Wvel, partit) - +!$OMP BARRIER !___________________________________________________________________________ ! calc vertical CFL criteria for debugging purpose and vertical Wvel splitting - CFL_z(1,:)=0._WP +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + CFL_z(1,n)=0._WP + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, c1, c2) do n=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n)-1 @@ -2389,13 +2483,26 @@ subroutine vert_vel_ale(dynamics, partit, mesh) CFL_z(nz+1,n)=c2 end do end do - cflmax=maxval(CFL_z(:, 1:myDim_nod2D)) !local CFL maximum is different on each mype - if (cflmax>1.0_WP .and. flag_warn_cflz) then +!$OMP END PARALLEL DO + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, lcflmax) + lcflmax=0. +!$OMP DO + do n=1, myDim_nod2D+eDim_nod2D + lcflmax=max(lcflmax, maxval(CFL_z(:, n))) + end do +!$OMP END DO +!$OMP CRITICAL + cflmax=max(lcflmax, cflmax) +!$OMP END CRITICAL +!$OMP END PARALLEL + + if (cflmax > 1.0_WP .and. flag_warn_cflz) then +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n)-1 do nz=nzmin,nzmax - !!PS if (abs(CFL_z(nz,n)-cflmax) < 1.e-12) then if (abs(CFL_z(nz,n)-cflmax) < 1.e-12 .and. CFL_z(nz,n) > 1.75_WP .and. CFL_z(nz,n)<=2.5_WP ) then print '(A, A, F4.2, A, I6, A, F7.2,A,F6.2, A, I3,I3)', achar(27)//'[33m'//' --> WARNING CFLz>1.75:'//achar(27)//'[0m',& 'CFLz_max=',cflmax,',mstep=',mstep,',glon/glat=',geo_coord_nod2D(1,n)/rad,'/',geo_coord_nod2D(2,n)/rad,& @@ -2414,6 +2521,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end if end do end do +!$OMP END PARALLEL DO end if !___________________________________________________________________________ @@ -2423,6 +2531,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) ! wsplit_maxcfl=0 means w_exp is zero (everything computed implicitly) ! wsplit_maxcfl=inf menas w_impl is zero (everything computed explicitly) ! a guess for optimal choice of wsplit_maxcfl would be 0.95 +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, c1, c2, dd) do n=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) @@ -2438,6 +2547,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) Wvel_i(nz,n)=c2*Wvel(nz,n) end do end do +!$OMP END PARALLEL DO end subroutine vert_vel_ale ! ! @@ -2562,7 +2672,7 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) !___________________________________________________________________________ real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) - integer :: nz, elem, nzmax, nzmin, elnodes(3) + integer :: nz, elem, nzmin, nzmax, elnodes(3) real(kind=WP) :: zinv, m, friction, wu, wd real(kind=WP) :: zbar_n(mesh%nl), Z_n(mesh%nl-1) !___________________________________________________________________________ @@ -2578,6 +2688,10 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) Wvel_i =>dynamics%w_i(:,:) !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(a, b, c, ur, vr, cp, up, vp, elem, nz, nzmin, nzmax, elnodes, & +!$OMP zinv, m, friction, wu, wd, zbar_n, Z_n) + +!$OMP DO DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) nzmin = ulevels(elem) @@ -2617,7 +2731,6 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) b(nz)=b(nz)-min(0._WP, wd)*zinv c(nz)=c(nz)-max(0._WP, wd)*zinv - end do ! The last row zinv=1.0_WP*dt/(zbar_n(nzmax-1)-zbar_n(nzmax)) @@ -2730,7 +2843,8 @@ subroutine impl_vert_visc_ale(dynamics, partit, mesh) UV_rhs(2,nz,elem)=vr(nz) end do end do !!! cycle over elements - +!$OMP END DO +!$OMP END PARALLEL end subroutine impl_vert_visc_ale ! ! @@ -2951,7 +3065,11 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) ! since there is no real elevation, but only surface pressure, there is ! no layer motion under the cavity. In this case the ice sheet acts as a ! rigid lid. - where(ulevels_nod2D==1) eta_n=alpha*hbar+(1.0_WP-alpha)*hbar_old +!$OMP PARALLEL DO + do node=1, myDim_nod2D+eDim_nod2D + if (ulevels_nod2D(node)==1) eta_n(node)=alpha*hbar(node)+(1.0_WP-alpha)*hbar_old(node) + end do +!$OMP END PARALLEL DO ! --> eta_(n) ! call zero_dynamics !DS, zeros several dynamical variables; to be used for testing new implementations! t5=MPI_Wtime() diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index db898cc26..e32d14cf0 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -61,6 +61,8 @@ subroutine fer_solve_Gamma(partit, mesh) #include "associate_part_ass.h" #include "associate_mesh_ass.h" +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, zinv1,zinv2, zinv, m, r, a, b, c, cp, tp, zbar_n, Z_n) +!$OMP DO DO n=1,myDim_nod2D tr=>fer_gamma(:,:,n) ! !_____________________________________________________________________ @@ -154,8 +156,10 @@ subroutine fer_solve_Gamma(partit, mesh) tr(:,nz) = tp(:,nz)-cp(nz)*tr(:,nz+1) end do END DO !!! cycle over nodes - +!$OMP END DO +!$OMP END PARALLEL call exchange_nod(fer_gamma, partit) +!$OMP BARRIER END subroutine fer_solve_Gamma ! ! @@ -186,7 +190,8 @@ subroutine fer_gamma2vel(dynamics, partit, mesh) #include "associate_mesh_ass.h" fer_UV =>dynamics%fer_uv(:,:,:) fer_Wvel =>dynamics%fer_w(:,:) - + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(el, elnod, nz, nzmin, nzmax, zinv) DO el=1, myDim_elem2D elnod=elem2D_nodes(:,el) ! max. number of levels at element el @@ -199,7 +204,9 @@ subroutine fer_gamma2vel(dynamics, partit, mesh) fer_uv(2,nz,el)=sum(fer_gamma(2,nz,elnod)-fer_gamma(2,nz+1,elnod))*zinv END DO END DO +!$OMP END PARALLEL DO call exchange_elem(fer_uv, partit) +!$OMP BARRIER end subroutine fer_gamma2vel ! ! @@ -230,6 +237,8 @@ subroutine init_Redi_GM(partit, mesh) !fer_compute_C_K_Redi ! fill arrays for 3D Redi and GM coefficients: F1(xy)*F2(z) !******************************* F1(x,y) *********************************** +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, reso, c1, rosb, scaling, rr_ratio, aux_zz, zscaling, bvref) +!$OMP DO do n=1, myDim_nod2D nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(n), n)), 1) nzmin=maxval(ulevels(nod_in_elem2D(1:nod_in_elem2D_num(n), n)), 1) @@ -297,18 +306,22 @@ subroutine init_Redi_GM(partit, mesh) !fer_compute_C_K_Redi Ki(nzmin,n)=K_hor*(reso/100000.0_WP)**2 end if end do - +!$OMP END DO + !Like in FESOM 1.4 we make Redi equal GM if (Redi .and. Fer_GM) then - !!PS Ki(1,:)=fer_k(1,:) - Ki(nzmin,:)=fer_k(nzmin,:) +!$OMP DO + do n=1, myDim_nod2D + Ki(nzmin, n)=fer_k(nzmin, n) + end do +!$OMP END DO end if !******************************* F2(z) (e.g. Ferreira et al., 2005) ********************************* !Ferreira, D., Marshall, J. and Heimbach, P.: Estimating Eddy Stresses by Fitting Dynamics to Observations Using a !Residual-Mean Ocean Circulation Model and Its Adjoint, Journal of Physical Oceanography, 35(10), 1891– !1910, doi:10.1175/jpo2785.1, 2005. - +!$OMP DO do n=1,myDim_nod2D nzmax=nlevels_nod2D(n) nzmin=ulevels_nod2D(n) @@ -387,9 +400,11 @@ subroutine init_Redi_GM(partit, mesh) !fer_compute_C_K_Redi Ki(nzmin,n)=Ki(nzmin,n)*0.5_WP*(zscaling(nzmin)+zscaling(nzmin+1)) end if end do - +!$OMP END DO +!$OMP END PARALLEL if (Fer_GM) call exchange_nod(fer_c, partit) if (Fer_GM) call exchange_nod(fer_k, partit) if (Redi) call exchange_nod(Ki, partit) +!$OMP BARRIER end subroutine init_Redi_GM !==================================================================== From 7962829ec6a47c51c3b9598d2a7ff5f7e5743b93 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 17 Nov 2021 11:16:18 +0100 Subject: [PATCH 587/909] OMP bug fix in vert_vel_ale --- src/oce_ale.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index a990afc05..011072996 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -979,7 +979,7 @@ subroutine update_thickness_ale(partit, mesh) allocate(idx(lzstar_lev)) ! if lzstar_lev=4 --> idx = /1,2,3,4/ - idx = (/(nz,nz=1,lzstar_lev,1)/) + idx = (/(nz, nz=1, lzstar_lev, 1)/) !_______________________________________________________________________ do elem=1,myDim_elem2D @@ -2165,7 +2165,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) idx = (/(nz, nz=1, lzstar_lev, 1)/) !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, dhbar_total, max_dhbar2distr, cumsum_maxdhbar, & -!$OMP distrib_dhbar, dhbar_rest, distrib_dhbar_int, idx ) +!$OMP distrib_dhbar, dhbar_rest, distrib_dhbar_int) !$OMP DO do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) From 9c5f2624539c89d3ca3c19dea58bf08d7507e587 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 17 Nov 2021 12:14:19 +0100 Subject: [PATCH 588/909] ocean part has been fully MPIzed. Just 30% loss primarily because of the solver. what is left: 1. Sea ice 2. Solver 3. Some diagnostics, accumulations regarding I/O etc. 4. Initialisation --- src/oce_ale.F90 | 61 ++++++++++--------------------------------------- 1 file changed, 12 insertions(+), 49 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 011072996..dbc941243 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -980,7 +980,8 @@ subroutine update_thickness_ale(partit, mesh) ! if lzstar_lev=4 --> idx = /1,2,3,4/ idx = (/(nz, nz=1, lzstar_lev, 1)/) - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, elem, elnodes) +!$OMP DO !_______________________________________________________________________ do elem=1,myDim_elem2D elnodes=elem2D_nodes(:, elem) @@ -994,21 +995,6 @@ subroutine update_thickness_ale(partit, mesh) !___________________________________________________________________ ! actualize elemental layer thinkness in first lzstar_lev layers -!!PS if (any(hnode_new(2:lzstar_lev,elnodes(1))-hnode(2:lzstar_lev,elnodes(1))/=0.0_WP) .or. & -!!PS any(hnode_new(2:lzstar_lev,elnodes(2))-hnode(2:lzstar_lev,elnodes(2))/=0.0_WP) .or. & -!!PS any(hnode_new(2:lzstar_lev,elnodes(3))-hnode(2:lzstar_lev,elnodes(3))/=0.0_WP) & -!!PS ) then -!!PS ! --> case local zstar -!!PS ! try to limitate over how much layers i realy need to distribute -!!PS ! the change in ssh, so that the next loops run only over the -!!PS ! nesseccary levels and not over all lzstar_lev levels -!!PS nz = max(1 ,maxval(pack(idx,hnode_new(1:lzstar_lev,elnodes(1))-hnode(1:lzstar_lev,elnodes(1))/=0.0_WP))) -!!PS nz = max(nz,maxval(pack(idx,hnode_new(1:lzstar_lev,elnodes(2))-hnode(1:lzstar_lev,elnodes(2))/=0.0_WP))) -!!PS nz = max(nz,maxval(pack(idx,hnode_new(1:lzstar_lev,elnodes(3))-hnode(1:lzstar_lev,elnodes(3))/=0.0_WP))) -!!PS nzmax = min(nz,nlevels(elem)-2) -!!PS do nz=1,nzmax -!!PS helem(nz,elem)=sum(hnode_new(nz,elnodes))/3.0_WP -!!PS end do if (any(hnode_new(nzmin+1:nzmin+lzstar_lev-1,elnodes(1)) - hnode(nzmin+1:nzmin+lzstar_lev-1,elnodes(1))/=0.0_WP) .or. & any(hnode_new(nzmin+1:nzmin+lzstar_lev-1,elnodes(2)) - hnode(nzmin+1:nzmin+lzstar_lev-1,elnodes(2))/=0.0_WP) .or. & any(hnode_new(nzmin+1:nzmin+lzstar_lev-1,elnodes(3)) - hnode(nzmin+1:nzmin+lzstar_lev-1,elnodes(3))/=0.0_WP) & @@ -1031,10 +1017,10 @@ subroutine update_thickness_ale(partit, mesh) helem(nzmin,elem)=sum(hnode_new(nzmin,elnodes))/3.0_WP end if end do - +!$OMP END DO !_______________________________________________________________________ +!$OMP DO do n=1,myDim_nod2D+eDim_nod2D - !!PS nzmin = ulevels_nod2D(n) nzmin = ulevels_nod2D_max(n) nzmax = nlevels_nod2D_min(n)-1 @@ -1045,24 +1031,6 @@ subroutine update_thickness_ale(partit, mesh) !___________________________________________________________________ ! actualize layer thinkness in first lzstar_lev layers -!!PS if ( (any(hnode_new(2:lzstar_lev,n)-hnode(2:lzstar_lev,n)/=0.0_WP)) ) then -!!PS ! --> case local zstar -!!PS ! try to limitate over how much layers i realy need to distribute -!!PS ! the change in ssh, so that the next loops run only over the -!!PS ! nesseccary levels and not over all lzstar_lev levels -!!PS nz = max(1,maxval(pack(idx,hnode_new(1:lzstar_lev,n)-hnode(1:lzstar_lev,n)/=0.0_WP))) -!!PS -!!PS ! nlevels_nod2D_min(n)-1 ...would be hnode of partial bottom cell but this -!!PS ! one is not allowed to change so go until nlevels_nod2D_min(n)-2 -!!PS nzmax = min(nz,nlevels_nod2D_min(n)-2) -!!PS ! do not touch zbars_3d_n that are involved in the bottom cell !!!! -!!PS ! this ones are set up during initialisation and are not touched afterwards -!!PS ! --> nlevels_nod2D_min(n),nlevels_nod2D_min(n)-1 -!!PS do nz=nzmax,1,-1 -!!PS hnode(nz,n) = hnode_new(nz,n) -!!PS zbar_3d_n(nz,n) = zbar_3d_n(nz+1,n)+hnode_new(nz,n) -!!PS Z_3d_n(nz,n) = zbar_3d_n(nz+1,n)+hnode_new(nz,n)/2.0_WP -!!PS end do if ( (any(hnode_new(nzmin+1:nzmin+lzstar_lev-1,n)-hnode(nzmin+1:nzmin+lzstar_lev-1,n)/=0.0_WP)) ) then ! --> case local zstar ! try to limitate over how much layers i realy need to distribute @@ -1085,17 +1053,14 @@ subroutine update_thickness_ale(partit, mesh) !___________________________________________________________________ ! only actualize layer thinkness in first layer else -!!PS ! --> case normal zlevel -!!PS hnode(1,n) = hnode_new(1,n) -!!PS zbar_3d_n(1,n)= zbar_3d_n(2,n)+hnode_new(1,n) -!!PS Z_3d_n(1,n) = zbar_3d_n(2,n)+hnode_new(1,n)/2.0_WP ! --> case normal zlevel hnode(nzmin,n) = hnode_new(nzmin,n) zbar_3d_n(nzmin,n)= zbar_3d_n(nzmin+1,n)+hnode_new(nzmin,n) Z_3d_n(nzmin,n) = zbar_3d_n(nzmin+1,n)+hnode_new(nzmin,n)/2.0_WP end if end do - +!$OMP END DO +!$OMP END PARALLEL !_______________________________________________________________________ deallocate(idx) @@ -1105,10 +1070,10 @@ subroutine update_thickness_ale(partit, mesh) elseif (trim(which_ale)=='zstar' ) then ! --> update layer thinkness, depth layer and mid-depth layer at node +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) do n=1, myDim_nod2D+eDim_nod2D ! actualize 3d depth levels and mid-depth levels from bottom to top nzmin = ulevels_nod2D(n) -!!PS nzmin = ulevels_nod2D_max(n) nzmax = nlevels_nod2D_min(n)-2 !___________________________________________________________________ @@ -1119,20 +1084,19 @@ subroutine update_thickness_ale(partit, mesh) !___________________________________________________________________ ! do not touch zbars_3d_n that are involved in the bottom cell !!!! ! --> nlevels_nod2D_min(n),nlevels_nod2D_min(n)-1 - !!PS do nz=nzmax,1,-1 - do nz=nzmax,nzmin,-1 + do nz=nzmax, nzmin,-1 hnode(nz,n) = hnode_new(nz,n) zbar_3d_n(nz,n) = zbar_3d_n(nz+1,n) + hnode_new(nz,n) Z_3d_n(nz,n) = zbar_3d_n(nz+1,n) + hnode_new(nz,n)/2.0_WP end do end do - +!$OMP END PARALLEL DO !_______________________________________________________________________ ! --> update mean layer thinkness at element +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nz, nzmin, nzmax) do elem=1, myDim_elem2D nzmin = ulevels(elem) nzmax = nlevels(elem)-1 - !___________________________________________________________________ ! if there is a cavity layer thickness is not updated, its ! kept fixed @@ -1140,13 +1104,12 @@ subroutine update_thickness_ale(partit, mesh) !___________________________________________________________________ elnodes=elem2D_nodes(:, elem) - !!PS do nz=1,nlevels(elem)-2 - do nz=nzmin,nzmax-1 + do nz=nzmin, nzmax-1 helem(nz,elem)=sum(hnode(nz,elnodes))/3.0_WP end do end do +!$OMP END PARALLEL DO endif - end subroutine update_thickness_ale ! ! From d63a78fc853f0e80f6214d1abed83c07be44e5bb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 17 Nov 2021 12:25:22 +0100 Subject: [PATCH 589/909] determine a free file unit at runtime to avoid conflicts with e.g. IFS --- src/gen_model_setup.F90 | 61 +++++++++++++++++---------------- src/gen_modules_cvmix_kpp.F90 | 7 ++-- src/gen_modules_cvmix_pp.F90 | 7 ++-- src/gen_modules_cvmix_tidal.F90 | 8 +++-- src/gen_modules_cvmix_tke.F90 | 7 ++-- 5 files changed, 48 insertions(+), 42 deletions(-) diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index fd4d3ebc5..df60f8d56 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -14,22 +14,23 @@ subroutine setup_model(partit) implicit none type(t_partit), intent(inout), target :: partit character(len=MAX_PATH) :: nmlfile + integer fileunit namelist /clockinit/ timenew, daynew, yearnew nmlfile ='namelist.config' ! name of general configuration namelist file - open (20,file=nmlfile) - read (20,NML=modelname) - read (20,NML=timestep) - read (20,NML=clockinit) - read (20,NML=paths) - read (20,NML=restart_log) - read (20,NML=ale_def) - read (20,NML=geometry) - read (20,NML=calendar) - read (20,NML=run_config) -!!$ read (20,NML=machine) - close (20) + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=modelname) + read (fileunit, NML=timestep) + read (fileunit, NML=clockinit) + read (fileunit, NML=paths) + read (fileunit, NML=restart_log) + read (fileunit, NML=ale_def) + read (fileunit, NML=geometry) + read (fileunit, NML=calendar) + read (fileunit, NML=run_config) +!!$ read (fileunit, NML=machine) + close (fileunit) ! ========== ! compute dt ! ========== @@ -46,34 +47,34 @@ subroutine setup_model(partit) ! ================================= nmlfile ='namelist.oce' ! name of ocean namelist file - open (20,file=nmlfile) - read (20,NML=oce_dyn) - close (20) + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=oce_dyn) + close (fileunit) nmlfile ='namelist.tra' ! name of ocean namelist file - open (20,file=nmlfile) - read (20,NML=tracer_phys) - close (20) + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=tracer_phys) + close (fileunit) nmlfile ='namelist.forcing' ! name of forcing namelist file - open (20,file=nmlfile) - read (20,NML=forcing_exchange_coeff) - read (20,NML=forcing_bulk) - read (20,NML=land_ice) - close (20) + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=forcing_exchange_coeff) + read (fileunit, NML=forcing_bulk) + read (fileunit, NML=land_ice) + close (fileunit) if(use_ice) then nmlfile ='namelist.ice' ! name of ice namelist file - open (20,file=nmlfile) - read (20,NML=ice_dyn) - read (20,NML=ice_therm) - close (20) + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=ice_dyn) + read (fileunit, NML=ice_therm) + close (fileunit) endif nmlfile ='namelist.io' ! name of forcing namelist file - open (20,file=nmlfile) - read (20,NML=diag_list) - close (20) + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=diag_list) + close (fileunit) if(partit%mype==0) write(*,*) 'Namelist files are read in' diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 33c587016..8a6039852 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -228,6 +228,7 @@ subroutine init_cvmix_kpp(partit, mesh) character(len=MAX_PATH) :: nmlfile logical :: nmlfile_exist=.False. integer :: node_size + integer fileunit #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -279,9 +280,9 @@ subroutine init_cvmix_kpp(partit, mesh) ! check if cvmix namelist file exists if not use default values inquire(file=trim(nmlfile),exist=nmlfile_exist) if (nmlfile_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_kpp) - close(20) + open(newunit=fileunit,file=trim(nmlfile)) + read(fileunit,nml=param_kpp) + close(fileunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if diff --git a/src/gen_modules_cvmix_pp.F90 b/src/gen_modules_cvmix_pp.F90 index 58e9f2104..27ba2ca05 100644 --- a/src/gen_modules_cvmix_pp.F90 +++ b/src/gen_modules_cvmix_pp.F90 @@ -73,6 +73,7 @@ subroutine init_cvmix_pp(partit, mesh) character(len=MAX_PATH) :: nmlfile logical :: nmlfile_exist=.False. integer :: node_size + integer fileunit #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -102,9 +103,9 @@ subroutine init_cvmix_pp(partit, mesh) ! check if cvmix namelist file exists if not use default values inquire(file=trim(nmlfile),exist=nmlfile_exist) if (nmlfile_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_pp) - close(20) + open(newunit=fileunit,file=trim(nmlfile)) + read(fileunit,nml=param_pp) + close(fileunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if diff --git a/src/gen_modules_cvmix_tidal.F90 b/src/gen_modules_cvmix_tidal.F90 index 162ea6ec4..61ae43646 100644 --- a/src/gen_modules_cvmix_tidal.F90 +++ b/src/gen_modules_cvmix_tidal.F90 @@ -84,6 +84,8 @@ subroutine init_cvmix_tidal(partit, mesh) integer :: node_size type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + integer fileunit + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -113,9 +115,9 @@ subroutine init_cvmix_tidal(partit, mesh) file_exist=.False. inquire(file=trim(nmlfile),exist=file_exist) if (file_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_tidal) - close(20) + open(newunit=fileunit,file=trim(nmlfile)) + read(fileunit,nml=param_tidal) + close(fileunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index aa1deae21..53a1fdcda 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -125,6 +125,7 @@ subroutine init_cvmix_tke(partit, mesh) character(len=cvmix_strlen) :: nmlfile logical :: nmlfile_exist=.False. integer :: node_size + integer fileunit #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -205,9 +206,9 @@ subroutine init_cvmix_tke(partit, mesh) ! check if cvmix namelist file exists if not use default values inquire(file=trim(nmlfile),exist=nmlfile_exist) if (nmlfile_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_tke) - close(20) + open(newunit=fileunit,file=trim(nmlfile)) + read(fileunit,nml=param_tke) + close(fileunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if From a81cc5342817f786e79c3e5038f4aa7aa5210889 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 17 Nov 2021 14:05:36 +0100 Subject: [PATCH 590/909] for IFS with love: 1.removed psolve.c (potential conflicts) 2. renamed qsplitC in PARMS to qsplitCF (qsplitC conflicts with that in IFS) --- lib/parms/src/DDPQ/ilutpC.c | 8 +- lib/parms/src/DDPQ/misc.c | 4 +- lib/parms/src/DDPQ/piluNEW.c | 8 +- lib/parms/src/DDPQ/protos.h | 2 +- lib/parms/src/parms_ilu_vcsr.c | 6 +- lib/parms/src/parms_qsplit.c | 2 +- src/psolve_feom.c | 264 --------------------------------- 7 files changed, 15 insertions(+), 279 deletions(-) delete mode 100644 src/psolve_feom.c diff --git a/lib/parms/src/DDPQ/ilutpC.c b/lib/parms/src/DDPQ/ilutpC.c index a384f6a89..d158168d8 100755 --- a/lib/parms/src/DDPQ/ilutpC.c +++ b/lib/parms/src/DDPQ/ilutpC.c @@ -298,7 +298,7 @@ msg_timer_start(&t27); len = lenl > fil5 ? fil5 : lenl; ilusch->L->nnzrow[ii] = len; if (lenl > len) - qsplitC(w, jw, lenl, len); + qsplitCF(w, jw, lenl, len); /* printf(" row %d length of L = %d",ii,len); */ @@ -328,7 +328,7 @@ msg_timer_start(&t28); len = lenu > fil6 ? fil6 : lenu; ilusch->U->nnzrow[ii] = len; if (lenu > len+1) - qsplitC(&w[ii+1], &jw[ii+1], lenu-1, len); + qsplitCF(&w[ii+1], &jw[ii+1], lenu-1, len); ilusch->U->pa[ii] = (FLOAT *) Malloc(len*sizeof(FLOAT), "ilutpC:7" ); ilusch->U->pj[ii] = (int *) Malloc(len*sizeof(int), "ilutpC:8" ); /*--------------------------------------------------------------------- @@ -686,7 +686,7 @@ int ilutD(csptr amat, double *droptol, int *lfil, ilutptr ilusch) lenl = len > fil5 ? fil5 : len; ilusch->L->nnzrow[ii] = lenl; if (len > lenl) - qsplitC(w, jw, len, lenl); + qsplitCF(w, jw, len, lenl); if (len > 0) { ilusch->L->pj[ii] = (int *) Malloc(lenl*sizeof(int), "ilutD:4" ); ilusch->L->pa[ii] = (FLOAT *) Malloc(lenl*sizeof(FLOAT), "ilutD:5"); @@ -711,7 +711,7 @@ int ilutD(csptr amat, double *droptol, int *lfil, ilutptr ilusch) ilusch->U->nnzrow[ii] = lenu; jpos = lenu-1; if (len > jpos) - qsplitC(w, jw, len, jpos); + qsplitCF(w, jw, len, jpos); ilusch->U->pa[ii] = (FLOAT *) Malloc(lenu*sizeof(FLOAT), "ilutD:6" ); ilusch->U->pj[ii] = (int *) Malloc(lenu*sizeof(int), "ilutD:7" ); if(ABS_VALUE(t) <= DBL_EPSILON) t= tnorm; diff --git a/lib/parms/src/DDPQ/misc.c b/lib/parms/src/DDPQ/misc.c index 1713c8e00..ba3c53204 100755 --- a/lib/parms/src/DDPQ/misc.c +++ b/lib/parms/src/DDPQ/misc.c @@ -10,7 +10,7 @@ #define DBL_EPSILON 2.2204460492503131e-16 // double epsilon -int qsplitC(FLOAT *a, int *ind, int n, int ncut) +int qsplitCF(FLOAT *a, int *ind, int n, int ncut) { /*---------------------------------------------------------------------- | does a quick-sort split of a complex real array. @@ -57,7 +57,7 @@ int qsplitC(FLOAT *a, int *ind, int n, int ncut) first = mid+1; goto label1; } -/*--------------- end of zqsplitC ----------------------------------------*/ +/*--------------- end of zqsplitCF ----------------------------------------*/ int SparTran(csptr amat, csptr bmat, int job, int flag) { diff --git a/lib/parms/src/DDPQ/piluNEW.c b/lib/parms/src/DDPQ/piluNEW.c index f104927fd..36f219a58 100755 --- a/lib/parms/src/DDPQ/piluNEW.c +++ b/lib/parms/src/DDPQ/piluNEW.c @@ -338,7 +338,7 @@ int pilu(p4ptr amat, csptr B, csptr C, double *droptol, lenl = len > fil0 ? fil0 : len; amat->L->nnzrow[ii] = lenl; if (lenl < len) - qsplitC(w, jw, len, lenl); + qsplitCF(w, jw, len, lenl); if (len > 0) { amat->L->pj[ii] = (int *) Malloc(lenl*sizeof(int), "pilu:10" ); amat->L->pa[ii] = (FLOAT *) Malloc(lenl*sizeof(FLOAT), "pilu:11" ); @@ -363,7 +363,7 @@ int pilu(p4ptr amat, csptr B, csptr C, double *droptol, amat->U->nnzrow[ii] = lenu; jpos = lenu-1; if (jpos < len) - qsplitC(w, jw, len, jpos); + qsplitCF(w, jw, len, jpos); amat->U->pa[ii] = (FLOAT *) Malloc(lenu*sizeof(FLOAT), "pilu:12" ); amat->U->pj[ii] = (int *) Malloc(lenu*sizeof(int), "pilu:13" ); if(ABS_VALUE(t) <= DBL_EPSILON) t = rnorm; //(0.0001+drop1); @@ -387,7 +387,7 @@ int pilu(p4ptr amat, csptr B, csptr C, double *droptol, } lenu = len > fil2 ? fil2 : len; if (lenu < len) - qsplitC(w, jw, len, lenu); + qsplitCF(w, jw, len, lenu); lflen[ii] = lenu; if (lenu > 0) { @@ -586,7 +586,7 @@ int pilu(p4ptr amat, csptr B, csptr C, double *droptol, schur->nnzrow[ii] = lenu; jpos = lenu; if (jpos < len) - qsplitC(w, jw, len, jpos); + qsplitCF(w, jw, len, jpos); schur->pa[ii] = (FLOAT *) Malloc(lenu*sizeof(FLOAT), "pilu:16" ); schur->pj[ii] = (int *) Malloc(lenu*sizeof(int), "pilu:17" ); /*--------------------------------------------------------------------- diff --git a/lib/parms/src/DDPQ/protos.h b/lib/parms/src/DDPQ/protos.h index 22434772a..507b240e7 100755 --- a/lib/parms/src/DDPQ/protos.h +++ b/lib/parms/src/DDPQ/protos.h @@ -94,7 +94,7 @@ extern void qsortC(int *ja, FLOAT *ma, int left, int right, int abval); extern void qsortR2I(double *wa, int *cor1, int *cor2, int left, int right); -extern int qsplitC(FLOAT *a, int *ind, int n, int ncut); +extern int qsplitCF(FLOAT *a, int *ind, int n, int ncut); extern int roscalC(csptr mata, double *diag, int nrm); extern void swapj(int v[], int i, int j); extern void swapm(FLOAT v[], int i, int j); diff --git a/lib/parms/src/parms_ilu_vcsr.c b/lib/parms/src/parms_ilu_vcsr.c index e9dd64c77..116aa51ba 100755 --- a/lib/parms/src/parms_ilu_vcsr.c +++ b/lib/parms/src/parms_ilu_vcsr.c @@ -16,7 +16,7 @@ typedef struct parms_ilu_data { } *parms_ilu_data; /* -int qsplitC(FLOAT *a, int *ind, int n, int ncut); +int qsplitCF(FLOAT *a, int *ind, int n, int ncut); void qqsort(int *ja, FLOAT *ma, int left, int right); */ @@ -1436,7 +1436,7 @@ int parms_ilut_vcsr(parms_Mat self, parms_FactParam param, void *mat, #endif /* quick sort */ if (len > lenl) { - qsplitC(w,jw,len,lenl); + qsplitCF(w,jw,len,lenl); } if (lenl > 0) { @@ -1471,7 +1471,7 @@ int parms_ilut_vcsr(parms_Mat self, parms_FactParam param, void *mat, lenu = len + 1 > fill ? fill: len + 1; jpos = lenu - 1; if (len > jpos) - qsplitC(&w[ii+start+1], &jw[ii+start+1], len, jpos); + qsplitCF(&w[ii+start+1], &jw[ii+start+1], len, jpos); data->U->nnzrow[start+ii] = lenu; diff --git a/lib/parms/src/parms_qsplit.c b/lib/parms/src/parms_qsplit.c index 472c042b0..55cbb8e4b 100755 --- a/lib/parms/src/parms_qsplit.c +++ b/lib/parms/src/parms_qsplit.c @@ -6,7 +6,7 @@ #include #endif -int qsplitC(FLOAT *a, int *ind, int n, int ncut) +int qsplitCF(FLOAT *a, int *ind, int n, int ncut) { /*---------------------------------------------------------------------- | does a quick-sort split of a real array. diff --git a/src/psolve_feom.c b/src/psolve_feom.c deleted file mode 100644 index 1beff1cf4..000000000 --- a/src/psolve_feom.c +++ /dev/null @@ -1,264 +0,0 @@ -#ifdef PARMS - -#include -#include -#include -#include -#include -#include "psolve.h" - -#define NSOL 1 -//#define NSOL 10 - -psolver solvers[NSOL]; -int solv_id[12] = {0}; -int nsolver = 0; - -void psolver_init_(int *id, SOLVERTYPE *stype, PCTYPE *pctype, PCILUTYPE *pcilutype, - int *ilulevel, int *fillin, double *droptol, int *maxits, int *restart, double *soltol, - int *part, int *rptr, int *cols, double *vals, int *reuse) -{ - - parms_Viewer v; - int i, j, k, nloc, pid, nproc; - int *ncnts, *idxn, *rp=NULL, *r=NULL, *c=NULL, nmb; - double tmp, *scale, *values=NULL; - - parms_Map map; - parms_Mat A; - parms_PC pc; - parms_Solver ksp; - - psolver solver; - - MPI_Comm_rank(MPI_COMM_WORLD,&pid); - MPI_Comm_size(MPI_COMM_WORLD,&nproc); - - solver = malloc(sizeof(*solver)); - nmb = part[nproc]-part[0]; - - nloc = 0; - if(nproc > 1) { - nloc = part[pid+1]-part[pid]; - parms_MapCreateFromPetsc(&map, nloc, nmb, MPI_COMM_WORLD); - idxn = malloc(nloc*sizeof(int)); - parms_MapGetGlobalIndices(map, idxn); - } - else { - parms_MapCreateFromLocal(&map,nmb,0); - nloc = nmb; - idxn = malloc(nloc*sizeof(int)); - for(i = 0; i < nloc; i++) - idxn[i] = i; - } - solver->reuse = *reuse; - - scale = malloc(nloc*sizeof(double)); - values = malloc(rptr[nloc]*sizeof(double)); - for(i = 0; i < nloc; i++){ - tmp = 0.; - for(j = rptr[i]; j < rptr[i+1]; j++) - tmp += fabs(vals[j]); - scale[i] = 1./tmp; - for(j = rptr[i]; j < rptr[i+1]; j++) - values[j] = vals[j]*scale[i]; - } - solver->scale = scale; - - // create Mat - parms_MatCreate(&A,map); - parms_MatSetValues(A, nloc, idxn, rptr, cols, values, INSERT); - parms_MatSetup(A); - - // create PC/Solver & set parameters - parms_PCCreate(&pc,A); - set_pc_params(pc, *pctype, *pcilutype, *ilulevel, *fillin, *droptol); - parms_PCSetup(pc); - parms_SolverCreate(&ksp,A,pc); - set_solver_params(ksp, *stype, *maxits, *restart, *soltol); - - solver->ksp = ksp; - solver->map = map; - - if(solver->reuse){ - r = malloc((nloc)*sizeof(int)); - for(i = 0; i < nloc; i++) - r[i] = idxn[i]; - solver->rows = r; - - rp = malloc((nloc+1)*sizeof(int)); - for(i = 0; i < nloc+1; i++) - rp[i] = rptr[i]; - solver->rptr = rp; - - c = malloc(rptr[nloc]*sizeof(int)); - for(i = 0; i < rptr[nloc]; i++) - c[i] = cols[i]; - solver->cols = c; - solver->vals = values; - } - else{ - if(!r) - free(r); - if(!rp) - free(rp); - if(!c) - free(c); - if(!values) - free(values); - } - - - solv_id[*id] = nsolver; - solvers[nsolver++] = solver; - -} - -void psolver_final_() -{ - - int pid, i; - parms_Solver ksp; - parms_PC pc; - parms_Mat A; - parms_Map map; - psolver solver; - - for(i = 0; i < nsolver; i++){ - solver = solvers[i]; - - ksp = solver->ksp; - parms_SolverGetPC(ksp, &pc); - parms_SolverGetMat(ksp,&A); - map = solver->map; - - parms_SolverFree(&ksp); - parms_PCFree(&pc); - parms_MatFree(&A); - parms_MapFree(&map); - - if(solver->reuse){ - free(solver->rptr); - free(solver->rows); - free(solver->cols); - free(solver->vals); - } - free(solver->scale); - - free(solver); - } -} - -void psolve_(int *id, double *rhs, double *vals, double *sol, int *new) -{ - - parms_Viewer v; - psolver solver; - parms_Map map; - parms_Mat A; - parms_PC pc; - parms_Solver ksp; - - double resnorm; - double *x,*y, *scale, *values, tmp; - double t0,t1,t2,toh; - int *rptr, *cols; - int its,err,i,j,k, cnt, nloc, pid, sid; - - sid = solv_id[*id]; - - solver = solvers[sid]; - ksp = solver->ksp; - map = solver->map; - nloc = parms_MapGetLocalSize(map); - - scale = solver->scale; - - if(*new) { - if(solver->reuse){ - parms_SolverGetPC(ksp, &pc); - parms_SolverGetMat(ksp, &A); - - rptr = solver->rptr; - cols = solver->cols; - - values = solver->vals; - for(i = 0; i < nloc; i++){ - tmp = 0.; - for(j = rptr[i]; j < rptr[i+1]; j++) - tmp += fabs(vals[j]); - scale[i] = 1./tmp; - for(j = rptr[i]; j < rptr[i+1]; j++) - values[j] = vals[j]*scale[i]; - } - - // create Mat & set values - parms_MatReset(A,SAME_NONZERO_STRUCTURE); - parms_MatSetValues(A, nloc, solver->rows, rptr, cols, values, INSERT); - parms_MatSetup(A); - - parms_PCSetup(pc); - } - else - printf("ERROR: matrix data is static\n"); - } - - x = sol; - y = malloc(nloc*sizeof(double)); - for(i = 0; i < nloc; i++) - y[i] = rhs[i]*scale[i]; - - // solve system of equations - parms_SolverApply(ksp,y,x); - -/* - // get trueresidual and number of iterations - parms_SolverGetResidualNorm2(ksp,y,x,&resnorm); - its = parms_SolverGetIts(ksp); - printf("%e %d\n", resnorm, its); -*/ - - free(y); -} - -int set_pc_params(parms_PC pc, PCTYPE pctype, PCILUTYPE pcilutype, - int ilulevel, int fillin, double droptol){ - - int i, lfil[7]; - double dtol[7]; - - for(i = 0; i < 7; i++){ - lfil[i] = fillin; - dtol[i] = droptol; - } - - parms_PCSetType(pc, pctype); - parms_PCSetILUType(pc, pcilutype); - parms_PCSetNlevels(pc, ilulevel); - parms_PCSetFill(pc, lfil); - parms_PCSetTol(pc, dtol); - - return 0; -} - - -int set_solver_params(parms_Solver solver, SOLVERTYPE solvertype, - int maxits, int restart, double soltol){ - char buf[100]; - - parms_SolverSetType(solver, solvertype); - - sprintf(buf, "%d", maxits); - parms_SolverSetParam(solver, MAXITS, buf); - - sprintf(buf, "%d", restart); - parms_SolverSetParam(solver, KSIZE, buf); - - sprintf(buf, "%g", soltol); - parms_SolverSetParam(solver, DTOL, buf); - - return 0; -} - - -#endif From edbc4ad9f53602d577f752368509721ce52e2a71 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 17 Nov 2021 14:38:00 +0100 Subject: [PATCH 591/909] do not dump the derived types afret initialization (was for testing purpose there) --- src/fesom_module.F90 | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 9ef455193..fdeec1c53 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -234,32 +234,32 @@ subroutine fesom_init(fesom_total_nsteps) write(*,*) '============================================' endif - f%dump_dir='DUMP/' - INQUIRE(file=trim(f%dump_dir), EXIST=f%L_EXISTS) - if (.not. f%L_EXISTS) call system('mkdir '//trim(f%dump_dir)) + ! f%dump_dir='DUMP/' + ! INQUIRE(file=trim(f%dump_dir), EXIST=f%L_EXISTS) + ! if (.not. f%L_EXISTS) call system('mkdir '//trim(f%dump_dir)) - write (f%dump_filename, "(A7,I7.7)") "t_mesh.", f%mype - open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") - write (f%mype+300) f%mesh - close (f%mype+300) + ! write (f%dump_filename, "(A7,I7.7)") "t_mesh.", f%mype + ! open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + ! write (f%mype+300) f%mesh + ! close (f%mype+300) ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") ! read (f%mype+300) f%mesh_copy ! close (f%mype+300) - write (f%dump_filename, "(A9,I7.7)") "t_tracer.", f%mype - open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") - write (f%mype+300) f%tracers - close (f%mype+300) + ! write (f%dump_filename, "(A9,I7.7)") "t_tracer.", f%mype + ! open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + ! write (f%mype+300) f%tracers + ! close (f%mype+300) ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") ! read (f%mype+300) f%dynamics_copy ! close (f%mype+300) - write (f%dump_filename, "(A9,I7.7)") "t_dynamics.", f%mype - open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") - write (f%mype+300) f%dynamics - close (f%mype+300) + ! write (f%dump_filename, "(A9,I7.7)") "t_dynamics.", f%mype + ! open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + ! write (f%mype+300) f%dynamics + ! close (f%mype+300) ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") ! read (f%mype+300) f%tracers_copy From 70be00f959caf47a7f4fbef47b611e8043ba700b Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Thu, 18 Nov 2021 09:17:25 +0000 Subject: [PATCH 592/909] Revert hack for sys_timer and implement a generic one. --- lib/parms/src/DDPQ/protos.h | 2 +- lib/parms/src/DDPQ/setblks.c | 12 ++++-------- lib/parms/src/DDPQ/systimer.c | 9 +++++++++ 3 files changed, 14 insertions(+), 9 deletions(-) create mode 100644 lib/parms/src/DDPQ/systimer.c diff --git a/lib/parms/src/DDPQ/protos.h b/lib/parms/src/DDPQ/protos.h index 9f9de8ce9..22434772a 100755 --- a/lib/parms/src/DDPQ/protos.h +++ b/lib/parms/src/DDPQ/protos.h @@ -128,6 +128,6 @@ extern int init_blocks( csptr csmat, int *pnBlock, int **pnB, int *t_angle ); /* systimer.c */ -//extern double sys_timer(); +extern double sys_timer(); #endif diff --git a/lib/parms/src/DDPQ/setblks.c b/lib/parms/src/DDPQ/setblks.c index 2a5352360..f9bae1db6 100755 --- a/lib/parms/src/DDPQ/setblks.c +++ b/lib/parms/src/DDPQ/setblks.c @@ -70,8 +70,7 @@ int init_blocks( csptr csmat, int *pnBlock, int **pnB, int **pperm, int nextBlockID, nextBlockPos, belongTo, grp; double eps_2 = eps * eps, t1, t2; - // t1 = sys_timer(); /* begin Hash method timer */ - t1 = 0.0; + t1 = sys_timer(); /* begin Hash method timer */ group = (KeyType *)Malloc( n*sizeof(KeyType), "init_blocks" ); compress = (CompressType *)Malloc( n*sizeof(CompressType), "init_blocks" ); perm = (int *)Malloc( n * sizeof(int), "init_blocks" ); @@ -131,12 +130,10 @@ int init_blocks( csptr csmat, int *pnBlock, int **pnB, int **pperm, } } } - //t2 = sys_timer(); /* end Hash method timer */ - t2 = 0.0; + t2 = sys_timer(); /* end Hash method timer */ *t_hash = t2 - t1; - //t1 = sys_timer(); /* begin angle method timer */ - t1 = 0.0; /* begin angle method timer */ + t1 = sys_timer(); /* begin angle method timer */ nB = (int *)Malloc( n * sizeof(int), "init_blocks" ); jbuf = (int *)Malloc( n * sizeof(int), "init_blocks" ); @@ -235,8 +232,7 @@ int init_blocks( csptr csmat, int *pnBlock, int **pnB, int **pperm, nB[belongTo]++; } } - //t2 = sys_timer(); /* end angle method timer */ - t2 = 0.0; + t2 = sys_timer(); /* end angle method timer */ *t_angle = t2 - t1; *pperm = perm; diff --git a/lib/parms/src/DDPQ/systimer.c b/lib/parms/src/DDPQ/systimer.c new file mode 100644 index 000000000..e10a154b4 --- /dev/null +++ b/lib/parms/src/DDPQ/systimer.c @@ -0,0 +1,9 @@ +#include +#include + +/* Missing sys_timer for shared libraries */ +double sys_timer() { + clock_t t; + t = clock(); + return ((double)t) / CLOCKS_PER_SEC; +} From 15311dc52b771ccc0b1bbeb46c2cfb108abe5b25 Mon Sep 17 00:00:00 2001 From: Natalja Rakowsky Date: Fri, 5 Jun 2020 15:39:47 +0200 Subject: [PATCH 593/909] better deal with NaN in netcdf: ieee_is_nan (cherry picked from commit b9f91419f47ffa79fea5c58b9d03393796e97110) --- src/gen_ic3d.F90 | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 405f0fa4c..ac866032c 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -308,6 +308,8 @@ SUBROUTINE getcoeffld(tracers, partit, mesh) !! ** Method : !! ** Action : !!---------------------------------------------------------------------- + + USE ieee_arithmetic IMPLICIT NONE type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -359,9 +361,20 @@ SUBROUTINE getcoeffld(tracers, partit, mesh) iost = nf_get_vara_double(ncid, id_data, nf_start, nf_edges, ncdata(2:nc_Nlon-1,:,:)) ncdata(1,:,:) =ncdata(nc_Nlon-1,:,:) ncdata(nc_Nlon,:,:)=ncdata(2,:,:) - where (ncdata < -0.99_WP*dummy ) ! dummy values are only positive - ncdata = dummy - end where + + ! replace nan by dummy value + do k=1,nc_Ndepth + do j=1,nc_Nlat + do i=1,nc_Nlon + if (ieee_is_nan(ncdata(i,j,k))) then + ncdata(i,j,k) = dummy + elseif (ncdata(i,j,k) < -0.99_WP*dummy .or. ncdata(i,j,k) > dummy) then + ! and in case the input data has other conventions on missing values: + ncdata(i,j,k) = dummy + endif + end do + end do + end do end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) call check_nferr(iost,filename,partit) From f5bd76bf9facc0865cb259a3b1f12185c83e69c8 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 18 Nov 2021 13:51:15 +0100 Subject: [PATCH 594/909] OMP for write_step_info etc. All operations involving minval/maxval/sum have been replaced with the self defined omp_min_max_sum1 & omp_min_max_sum2 which are placed in gen_support.F90 --- src/gen_support.F90 | 120 +++++- src/write_step_info.F90 | 934 +++++++++++++++++++--------------------- 2 files changed, 560 insertions(+), 494 deletions(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index 4322df76c..aaf46c8b3 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -11,7 +11,7 @@ module g_support implicit none private - public :: smooth_nod, smooth_elem, integrate_nod, extrap_nod + public :: smooth_nod, smooth_elem, integrate_nod, extrap_nod, omp_min_max_sum1, omp_min_max_sum2 real(kind=WP), dimension(:), allocatable :: work_array ! !-------------------------------------------------------------------------------------------- @@ -476,7 +476,125 @@ subroutine extrap_nod3D(arr, partit, mesh) end subroutine extrap_nod3D ! !-------------------------------------------------------------------------------------------- +! returns min/max/sum of a one dimentional array (same as minval) but with the support of OpenMP +FUNCTION omp_min_max_sum1(arr, pos1, pos2, what, partit, nan) + USE MOD_PARTIT + implicit none + real(kind=WP), intent(in) :: arr(:) + integer, intent(in) :: pos1, pos2 + character(3), intent(in) :: what + real(kind=WP), optional :: nan !to be implemented upon the need (for masked arrays) + real(kind=WP) :: omp_min_max_sum1 + real(kind=WP) :: loc, val + integer :: n + + type(t_partit),intent(in), & + target :: partit + + SELECT CASE (trim(what)) + CASE ('sum') + val=0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, loc) + loc=0.0_WP +!$OMP DO + do n=pos1, pos2 + loc=loc+arr(n) + end do +!$OMP END DO +!$OMP CRITICAL + val=val+loc +!$OMP END CRITICAL +!$OMP END PARALLEL + CASE ('min') + val=arr(1) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, loc) + loc=val +!$OMP DO + do n=pos1, pos2 + loc=min(loc, arr(n)) + end do +!$OMP END DO +!$OMP CRITICAL + val=min(val, loc) +!$OMP END CRITICAL +!$OMP END PARALLEL + CASE ('max') + val=arr(1) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, loc) + loc=val +!$OMP DO + do n=pos1, pos2 + loc=max(loc, arr(n)) + end do +!$OMP END DO +!$OMP CRITICAL + val=max(val, loc) +!$OMP END CRITICAL +!$OMP END PARALLEL + CASE DEFAULT + if (partit%mype==0) write(*,*) trim(what), ' is not implemented in omp_min_max_sum case!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) + STOP + END SELECT + omp_min_max_sum1=val +END FUNCTION ! +!-------------------------------------------------------------------------------------------- +! returns min/max/sum of a two dimentional array (same as minval) but with the support of OpenMP +FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) + implicit none + real(kind=WP), intent(in) :: arr(:,:) + integer, intent(in) :: pos11, pos12, pos21, pos22 + character(3), intent(in) :: what + real(kind=WP), optional :: nan !to be implemented upon the need (for masked arrays) + real(kind=WP) :: omp_min_max_sum2 + real(kind=WP) :: loc, val, vmasked + integer :: i, j + + + type(t_partit),intent(in), & + target :: partit + + SELECT CASE (trim(what)) + CASE ('min') + if (.not. present(nan)) vmasked=huge(vmasked) !just some crazy number + val=arr(1,1) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, loc) + loc=val + do i=pos11, pos12 +!$OMP DO + do j=pos21, pos22 + if (arr(i,j)/=vmasked) loc=min(loc, arr(i,j)) + end do +!$OMP END DO + end do +!$OMP CRITICAL + val=min(val, loc) +!$OMP END CRITICAL +!$OMP END PARALLEL + CASE ('max') + if (.not. present(nan)) vmasked=tiny(vmasked) !just some crazy number + val=arr(1,1) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, loc) + loc=val + do i=pos11, pos12 +!$OMP DO + do j=pos21, pos22 + if (arr(i,j)/=vmasked) loc=max(loc, arr(i,j)) + end do +!$OMP END DO + end do +!$OMP CRITICAL + val=max(val, loc) +!$OMP END CRITICAL +!$OMP END PARALLEL + CASE DEFAULT + if (partit%mype==0) write(*,*) trim(what), ' is not implemented in omp_min_max_sum case!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) + STOP + END SELECT +omp_min_max_sum2=val +END FUNCTION end module g_support diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index d1c2b4bfd..b97127caa 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -6,7 +6,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) USE MOD_PARSUP use MOD_TRACER use MOD_DYN - integer :: istep,outfreq + integer :: istep,outfreq type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers @@ -22,7 +22,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) USE MOD_PARSUP use MOD_TRACER use MOD_DYN - integer :: istep + integer :: istep type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers @@ -34,30 +34,31 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) ! !=============================================================================== subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) - use g_config, only: dt, use_ice - use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_TRACER - use MOD_DYN - use o_PARAM - use o_ARRAYS, only: water_flux, heat_flux, & - pgf_x, pgf_y, Av, Kv - use i_ARRAYS - use g_comm_auto - implicit none - - integer :: n, istep,outfreq - real(kind=WP) :: int_eta, int_hbar, int_wflux, int_hflux, int_temp, int_salt - real(kind=WP) :: min_eta, min_hbar, min_wflux, min_hflux, min_temp, min_salt, & - min_wvel,min_hnode,min_deta,min_wvel2,min_hnode2, & - min_vvel, min_vvel2, min_uvel, min_uvel2 - real(kind=WP) :: max_eta, max_hbar, max_wflux, max_hflux, max_temp, max_salt, & - max_wvel, max_hnode, max_deta, max_wvel2, max_hnode2, max_m_ice, & - max_vvel, max_vvel2, max_uvel, max_uvel2, & - max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av - real(kind=WP) :: int_deta , int_dhbar - real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt + use g_config, only: dt, use_ice + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_DYN + use o_PARAM + use o_ARRAYS, only: water_flux, heat_flux, & + pgf_x, pgf_y, Av, Kv + use i_ARRAYS + use g_comm_auto + use g_support + implicit none + + integer :: n, istep,outfreq + real(kind=WP) :: int_eta, int_hbar, int_wflux, int_hflux, int_temp, int_salt + real(kind=WP) :: min_eta, min_hbar, min_wflux, min_hflux, min_temp, min_salt, & + min_wvel,min_hnode,min_deta,min_wvel2,min_hnode2, & + min_vvel, min_vvel2, min_uvel, min_uvel2 + real(kind=WP) :: max_eta, max_hbar, max_wflux, max_hflux, max_temp, max_salt, & + max_wvel, max_hnode, max_deta, max_wvel2, max_hnode2, max_m_ice, & + max_vvel, max_vvel2, max_uvel, max_uvel2, & + max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av + real(kind=WP) :: int_deta , int_dhbar + real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers @@ -76,209 +77,190 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) eta_n => dynamics%eta_n(:) d_eta => dynamics%d_eta(:) - if (mod(istep,outfreq)==0) then - - !_______________________________________________________________________ - int_eta =0. - int_hbar =0. - int_deta =0. - int_dhbar =0. - int_wflux =0. - int_hflux =0. - int_temp =0. - int_salt =0. - loc_eta =0. - loc_hbar =0. - loc_deta =0. - loc_dhbar =0. - loc_wflux =0. -!!PS loc_hflux =0. -!!PS loc_temp =0. -!!PS loc_salt =0. - loc =0. - !_______________________________________________________________________ - do n=1, myDim_nod2D -!!PS if (ulevels_nod2D(n)>1) cycle - loc_eta = loc_eta + areasvol(ulevels_nod2D(n), n)*eta_n(n) - loc_hbar = loc_hbar + areasvol(ulevels_nod2D(n), n)*hbar(n) - loc_deta = loc_deta + areasvol(ulevels_nod2D(n), n)*d_eta(n) - loc_dhbar = loc_dhbar + areasvol(ulevels_nod2D(n), n)*(hbar(n)-hbar_old(n)) - loc_wflux = loc_wflux + areasvol(ulevels_nod2D(n), n)*water_flux(n) -!!PS loc_hflux = loc_hflux + area(1, n)*heat_flux(n) -!!PS loc_temp = loc_temp + area(1, n)*sum(tracers%data(1)%values(:, n))/(nlevels_nod2D(n)-1) -!!PS loc_salt = loc_salt + area(1, n)*sum(tracers%data(2)%values(:, n))/(nlevels_nod2D(n)-1) - end do - - !_______________________________________________________________________ - call MPI_AllREDUCE(loc_eta , int_eta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_hbar , int_hbar , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_deta , int_deta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_dhbar, int_dhbar, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_wflux, int_wflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) -!!PS call MPI_AllREDUCE(loc_hflux, int_hflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) -!!PS call MPI_AllREDUCE(loc_temp , int_temp , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) -!!PS call MPI_AllREDUCE(loc_salt , int_salt , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) -! -!!PS int_eta = int_eta /ocean_area -!!PS int_hbar = int_hbar /ocean_area -!!PS int_deta = int_deta /ocean_area -!!PS int_dhbar= int_dhbar/ocean_area -!!PS int_wflux= int_wflux/ocean_area - - int_eta = int_eta /ocean_areawithcav - int_hbar = int_hbar /ocean_areawithcav - int_deta = int_deta /ocean_areawithcav - int_dhbar= int_dhbar/ocean_areawithcav - int_wflux= int_wflux/ocean_areawithcav - -!!PS int_hflux= int_hflux/ocean_area -!!PS int_temp = int_temp /ocean_area -!!PS int_salt = int_salt /ocean_area - - !_______________________________________________________________________ - loc = minval(eta_n(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_eta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(hbar(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(water_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(heat_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(tracers%data(1)%values(:,1:myDim_nod2D),MASK=(tracers%data(2)%values(:,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , min_temp , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(tracers%data(2)%values(:,1:myDim_nod2D),MASK=(tracers%data(2)%values(:,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , min_salt , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Wvel(1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Wvel(2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(UVnode(1,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(UVnode(1,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(UVnode(2,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(UVnode(2,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(d_eta(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_deta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(hnode(1,1:myDim_nod2D),MASK=(hnode(1,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , min_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(hnode(2,1:myDim_nod2D),MASK=(hnode(2,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , min_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - - !_______________________________________________________________________ - loc = maxval(eta_n(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_eta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(hbar(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(water_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(heat_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(tracers%data(1)%values(:,1:myDim_nod2D),MASK=(tracers%data(2)%values(:,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , max_temp , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(tracers%data(2)%values(:,1:myDim_nod2D),MASK=(tracers%data(2)%values(:,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , max_salt , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Wvel(1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Wvel(2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(UVnode(1,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(UVnode(1,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(UVnode(2,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(UVnode(2,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(d_eta(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_deta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(hnode(1,1:myDim_nod2D),MASK=(hnode(1,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , max_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(hnode(2,1:myDim_nod2D),MASK=(hnode(2,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , max_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(CFL_z(:,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_cfl_z , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(abs(pgf_x(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_pgfx , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(abs(pgf_y(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_pgfy , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - if (use_ice) then - loc = maxval(m_ice(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_m_ice , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - end if - loc = maxval(abs(Av(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_av , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(abs(Kv(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_kv , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - !_______________________________________________________________________ - if (mype==0) then - write(*,*) '___CHECK GLOBAL OCEAN VARIABLES --> mstep=',mstep - write(*,*) ' ___global estimat of eta & hbar____________________' - write(*,*) ' int(eta), int(hbar) =', int_eta, int_hbar - write(*,*) ' --> error(eta-hbar) =', int_eta-int_hbar - write(*,*) ' min(eta) , max(eta) =', min_eta, max_eta - write(*,*) ' max(hbar), max(hbar) =', min_hbar, max_hbar - write(*,*) - write(*,*) ' int(deta), int(dhbar) =', int_deta, int_dhbar - write(*,*) ' --> error(deta-dhbar) =', int_deta-int_dhbar - write(*,*) ' --> error(deta-wflux) =', int_deta-int_wflux - write(*,*) ' --> error(dhbar-wflux) =', int_dhbar-int_wflux - write(*,*) - write(*,*) ' -int(wflux)*dt =', int_wflux*dt*(-1.0) - write(*,*) ' int(deta )-int(wflux)*dt =', int_deta-int_wflux*dt*(-1.0) - write(*,*) ' int(dhbar)-int(wflux)*dt =', int_dhbar-int_wflux*dt*(-1.0) - write(*,*) - write(*,*) ' ___global min/max/mean --> mstep=',mstep,'____________' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' eta= ', min_eta ,' | ',max_eta ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' deta= ', min_deta ,' | ',max_deta ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hbar= ', min_hbar ,' | ',max_hbar ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' wflux= ', min_wflux,' | ',max_wflux,' | ',int_wflux - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' hflux= ', min_hflux,' | ',max_hflux,' | ',int_hflux - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' temp= ', min_temp ,' | ',max_temp ,' | ',int_temp - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' salt= ', min_salt ,' | ',max_salt ,' | ',int_salt - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(1,:)= ', min_wvel ,' | ',max_wvel ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(2,:)= ', min_wvel2,' | ',max_wvel2,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(1,:)= ', min_uvel ,' | ',max_uvel ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(2,:)= ', min_uvel2,' | ',max_uvel2,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(1,:)= ', min_vvel ,' | ',max_vvel ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(2,:)= ', min_vvel2,' | ',max_vvel2,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(1,:)= ', min_hnode,' | ',max_hnode,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(2,:)= ', min_hnode2,' | ',max_hnode2,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' cfl_z= ',' N.A. ',' | ',max_cfl_z ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' pgf_x= ',' N.A. ',' | ',max_pgfx ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' pgf_y= ',' N.A. ',' | ',max_pgfy ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' Av= ',' N.A. ',' | ',max_av ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' Kv= ',' N.A. ',' | ',max_kv ,' | ','N.A.' - if (use_ice) write(*,"(A, A , A, ES10.3, A, A )") ' m_ice= ',' N.A. ',' | ',max_m_ice ,' | ','N.A.' - write(*,*) - endif - endif ! --> if (mod(istep,logfile_outfreq)==0) then + if (mod(istep,outfreq)==0) then + + !_______________________________________________________________________ + int_eta =0. + int_hbar =0. + int_deta =0. + int_dhbar =0. + int_wflux =0. + int_hflux =0. + int_temp =0. + int_salt =0. + loc_eta =0. + loc_hbar =0. + loc_deta =0. + loc_dhbar =0. + loc_wflux =0. + loc =0. + !_______________________________________________________________________ +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n) REDUCTION(+:loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux) + do n=1, myDim_nod2D + loc_eta = loc_eta + areasvol(ulevels_nod2D(n), n)*eta_n(n) + loc_hbar = loc_hbar + areasvol(ulevels_nod2D(n), n)*hbar(n) + loc_deta = loc_deta + areasvol(ulevels_nod2D(n), n)*d_eta(n) + loc_dhbar = loc_dhbar + areasvol(ulevels_nod2D(n), n)*(hbar(n)-hbar_old(n)) + loc_wflux = loc_wflux + areasvol(ulevels_nod2D(n), n)*water_flux(n) + end do +!$OMP END PARALLEL DO + !_______________________________________________________________________ + call MPI_AllREDUCE(loc_eta , int_eta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_hbar , int_hbar , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_deta , int_deta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_dhbar, int_dhbar, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_wflux, int_wflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + + int_eta = int_eta /ocean_areawithcav + int_hbar = int_hbar /ocean_areawithcav + int_deta = int_deta /ocean_areawithcav + int_dhbar= int_dhbar/ocean_areawithcav + int_wflux= int_wflux/ocean_areawithcav + !_______________________________________________________________________ + loc=omp_min_max_sum1(eta_n, 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_eta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hbar, 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(water_flux, 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(heat_flux, 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(tracers%data(1)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0) + call MPI_AllREDUCE(loc , min_temp , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(tracers%data(2)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0) + call MPI_AllREDUCE(loc , min_salt , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(Wvel(1,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(Wvel(2,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(1,1,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(1,2,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_uvel2, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(2,1,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(2,2,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(d_eta, 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_deta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hnode(1,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hnode(2,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + + !_______________________________________________________________________ + loc=omp_min_max_sum1(eta_n, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_eta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hbar, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(water_flux, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(heat_flux, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(tracers%data(1)%values, 1, nl-1, 1, myDim_nod2D, 'max', partit, 0.0) + call MPI_AllREDUCE(loc , max_temp , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(tracers%data(2)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0) + call MPI_AllREDUCE(loc , max_salt , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(Wvel(1,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(Wvel(2,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(1,1,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(1,2,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(2,1,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(2,2,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(d_eta, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_deta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hnode(1, :), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hnode(2, :), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(CFL_z, 1, nl-1, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_cfl_z , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(pgf_x, 1, nl-1, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_pgfx , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(pgf_y, 1, nl-1, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_pgfy , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + if (use_ice) then + loc=omp_min_max_sum1(m_ice, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_m_ice , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + end if + loc=omp_min_max_sum2(Av, 1, nl, 1, myDim_elem2D, 'max', partit) + call MPI_AllREDUCE(loc , max_av , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(Av, 1, nl, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_kv , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + !_______________________________________________________________________ + if (mype==0) then + write(*,*) '___CHECK GLOBAL OCEAN VARIABLES --> mstep=',mstep + write(*,*) ' ___global estimat of eta & hbar____________________' + write(*,*) ' int(eta), int(hbar) =', int_eta, int_hbar + write(*,*) ' --> error(eta-hbar) =', int_eta-int_hbar + write(*,*) ' min(eta) , max(eta) =', min_eta, max_eta + write(*,*) ' max(hbar), max(hbar) =', min_hbar, max_hbar + write(*,*) + write(*,*) ' int(deta), int(dhbar) =', int_deta, int_dhbar + write(*,*) ' --> error(deta-dhbar) =', int_deta-int_dhbar + write(*,*) ' --> error(deta-wflux) =', int_deta-int_wflux + write(*,*) ' --> error(dhbar-wflux) =', int_dhbar-int_wflux + write(*,*) + write(*,*) ' -int(wflux)*dt =', int_wflux*dt*(-1.0) + write(*,*) ' int(deta )-int(wflux)*dt =', int_deta-int_wflux*dt*(-1.0) + write(*,*) ' int(dhbar)-int(wflux)*dt =', int_dhbar-int_wflux*dt*(-1.0) + write(*,*) + write(*,*) ' ___global min/max/mean --> mstep=',mstep,'____________' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' eta= ', min_eta ,' | ',max_eta ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' deta= ', min_deta ,' | ',max_deta ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hbar= ', min_hbar ,' | ',max_hbar ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' wflux= ', min_wflux,' | ',max_wflux,' | ',int_wflux + write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' hflux= ', min_hflux,' | ',max_hflux,' | ',int_hflux + write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' temp= ', min_temp ,' | ',max_temp ,' | ',int_temp + write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' salt= ', min_salt ,' | ',max_salt ,' | ',int_salt + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(1,:)= ', min_wvel ,' | ',max_wvel ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(2,:)= ', min_wvel2,' | ',max_wvel2,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(1,:)= ', min_uvel ,' | ',max_uvel ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(2,:)= ', min_uvel2,' | ',max_uvel2,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(1,:)= ', min_vvel ,' | ',max_vvel ,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(2,:)= ', min_vvel2,' | ',max_vvel2,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(1,:)= ', min_hnode,' | ',max_hnode,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(2,:)= ', min_hnode2,' | ',max_hnode2,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' cfl_z= ',' N.A. ',' | ',max_cfl_z ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' pgf_x= ',' N.A. ',' | ',max_pgfx ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' pgf_y= ',' N.A. ',' | ',max_pgfy ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' Av= ',' N.A. ',' | ',max_av ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' Kv= ',' N.A. ',' | ',max_kv ,' | ','N.A.' + if (use_ice) then + write(*,"(A, A , A, ES10.3, A, A)") ' m_ice= ',' N.A. ',' | ',max_m_ice ,' | ','N.A.' + end if + end if + endif ! --> if (mod(istep,logfile_outfreq)==0) then end subroutine write_step_info ! ! !=============================================================================== subroutine check_blowup(istep, dynamics, tracers, partit, mesh) - use g_config, only: logfile_outfreq, which_ALE - use MOD_MESH + use g_config, only: logfile_outfreq, which_ALE + use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN - use o_PARAM - use o_ARRAYS, only: water_flux, stress_surf, & - heat_flux, Kv, Av - use i_ARRAYS - use g_comm_auto - use io_BLOWUP - use g_forcing_arrays - use diagnostics - use write_step_info_interface - implicit none - - integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 - integer :: el, elidx + use o_PARAM + use o_ARRAYS, only: water_flux, stress_surf, & + heat_flux, Kv, Av + use i_ARRAYS + use g_comm_auto + use io_BLOWUP + use g_forcing_arrays + use diagnostics + use write_step_info_interface + implicit none + + integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 + integer :: el, elidx type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers @@ -291,279 +273,245 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - Wvel => dynamics%w(:,:) - CFL_z => dynamics%cfl_z(:,:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) ssh_rhs => dynamics%ssh_rhs(:) ssh_rhs_old => dynamics%ssh_rhs_old(:) - eta_n => dynamics%eta_n(:) - d_eta => dynamics%d_eta(:) - - !___________________________________________________________________________ -! ! if (mod(istep,logfile_outfreq)==0) then -! ! if (mype==0) then -! ! write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep -! ! write(*,*) -! ! endif - do n=1, myDim_nod2d - - !___________________________________________________________________ - ! check ssh - if ( ((eta_n(n) /= eta_n(n)) .or. & - eta_n(n)<-50.0 .or. eta_n(n)>50.0 .or. & - (d_eta(n) /= d_eta(n)) ) ) then -!!PS eta_n(n)<-10.0 .or. eta_n(n)>10.0)) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found eta_n become NaN or <-10.0, >10.0' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - write(*,*) 'eta_n(n) = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) - write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) - write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) - write(*,*) - write(*,*) 'ssh_rhs = ',ssh_rhs(n),', ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'hbar = ',hbar(n),', hbar_old = ',hbar_old(n) - write(*,*) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) - write(*,*) 'u_wind = ',u_wind(n),', v_wind = ',v_wind(n) - write(*,*) - do nz=1,nod_in_elem2D_num(n) - write(*,*) 'stress_surf(1:2,',nz,') = ',stress_surf(:,nod_in_elem2D(nz,n)) - end do - write(*,*) - write(*,*) 'm_ice = ',m_ice(n),', m_ice_old = ',m_ice_old(n) - write(*,*) 'a_ice = ',a_ice(n),', a_ice_old = ',a_ice_old(n) -!!PS write(*,*) 'thdgr = ',thdgr(n),', thdgr_old = ',thdgr_old(n) -!!PS write(*,*) 'thdgrsn = ',thdgrsn(n) - write(*,*) -!!PS if (lcurt_stress_surf) then -!!PS write(*,*) 'curl_stress_surf = ',curl_stress_surf(n) -!!PS write(*,*) -!!PS endif -!!PS do el=1,nod_in_elem2d_num(n) -!!PS elidx = nod_in_elem2D(el,n) -!!PS write(*,*) ' elem#=',el,', elemidx=',elidx -!!PS write(*,*) ' pgf_x =',pgf_x(:,elidx) -!!PS write(*,*) ' pgf_y =',pgf_y(:,elidx) -!!PS ! write(*,*) ' U =',UV(1,:,elidx) -!!PS ! write(*,*) ' V =',UV(2,:,elidx) -!!PS write(*,*) -!!PS enddo -!!PS write(*,*) 'Wvel(1, n) = ',Wvel(,n) - write(*,*) 'Wvel(:, n) = ',Wvel(ulevels_nod2D(n):nlevels_nod2D(n),n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(ulevels_nod2D(n):nlevels_nod2D(n),n) - write(*,*) -!!PS write(*,*) 'hnode(1, n) = ',hnode(1,n) - write(*,*) 'hnode(:, n) = ',hnode(ulevels_nod2D(n):nlevels_nod2D(n),n) - write(*,*) - - endif - - !___________________________________________________________________ - ! check surface vertical velocity --> in case of zlevel and zstar - ! vertical coordinate its indicator if Volume is conserved for - ! Wvel(1,n)~maschine preccision -!!PS if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) .or. abs(Wvel(1,n))>1e-12 )) then - if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) )) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found surface layer vertical velocity becomes NaN or >1e-12' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - write(*,*) 'Wvel(1, n) = ',Wvel(1,n) - write(*,*) 'Wvel(:, n) = ',Wvel(:,n) - write(*,*) - write(*,*) 'hnode(1, n) = ',hnode(1,n) - write(*,*) 'hnode(:, n) = ',hnode(:,n) - write(*,*) 'hflux = ',heat_flux(n) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) - write(*,*) 'eta_n = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) 'hbar = ',hbar(n) - write(*,*) 'hbar_old = ',hbar_old(n) - write(*,*) 'ssh_rhs = ',ssh_rhs(n) - write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) - write(*,*) - - end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... - - !___________________________________________________________________ - ! check surface layer thinknesss - if ( .not. trim(which_ALE)=='linfs' .and. ( hnode(1, n) /= hnode(1, n) .or. hnode(1,n)< 0 )) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found surface layer thickness becomes NaN or <0' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) - write(*,*) 'hnode(1, n) = ',hnode(1,n) - write(*,*) 'hnode(:, n) = ',hnode(:,n) - write(*,*) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... - - - do nz=1,nlevels_nod2D(n)-1 - !_______________________________________________________________ - ! check temp - if ( (tracers%data(1)%values(nz, n) /= tracers%data(1)%values(nz, n)) .or. & - tracers%data(1)%values(nz, n) < -5.0 .or. tracers%data(1)%values(nz, n)>60) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found temperture becomes NaN or <-5.0, >60' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'lon,lat = ',geo_coord_nod2D(:,n)/rad - write(*,*) 'nz = ',nz - write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) - write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad - write(*,*) 'temp(nz, n) = ',tracers%data(1)%values(nz, n) - write(*,*) 'temp(: , n) = ',tracers%data(1)%values(:, n) - write(*,*) 'temp_old(nz,n)= ',tracers%data(1)%valuesAB(nz, n) - write(*,*) 'temp_old(: ,n)= ',tracers%data(1)%valuesAB(:, n) - write(*,*) - write(*,*) 'hflux = ',heat_flux(n) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) - write(*,*) 'eta_n = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) 'hbar = ',hbar(n) - write(*,*) 'hbar_old = ',hbar_old(n) - write(*,*) 'ssh_rhs = ',ssh_rhs(n) - write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'm_ice = ',m_ice(n) - write(*,*) 'm_ice_old = ',m_ice_old(n) - write(*,*) 'm_snow = ',m_snow(n) - write(*,*) 'm_snow_old = ',m_snow_old(n) - write(*,*) - write(*,*) 'hnode = ',hnode(:,n) - write(*,*) 'hnode_new = ',hnode_new(:,n) - write(*,*) - write(*,*) 'Kv = ',Kv(:,n) - write(*,*) - write(*,*) 'W = ',Wvel(:,n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) - write(*,*) -! do el=1,nod_in_elem2d_num(n) -! elidx = nod_in_elem2D(el,n) -! write(*,*) ' elem#=',el,', elemidx=',elidx -! write(*,*) ' helem =',helem(:,elidx) -! write(*,*) ' U =',UV(1,:,elidx) -! write(*,*) ' V =',UV(2,:,elidx) -! enddo - write(*,*) - - endif ! --> if ( (tracers%data(1)%values(nz, n) /= tracers%data(1)%values(nz, n)) .or. & ... - - !_______________________________________________________________ - ! check salt - if ( (tracers%data(2)%values(nz, n) /= tracers%data(2)%values(nz, n)) .or. & - tracers%data(2)%values(nz, n) < 0 .or. tracers%data(2)%values(nz, n)>50 ) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found salinity becomes NaN or <0, >50' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'nz = ',nz - write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) - write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad -! write(*,*) 'z=', Z_n(nz) - write(*,*) 'salt(nz, n) = ',tracers%data(2)%values(nz, n) - write(*,*) 'salt(: , n) = ',tracers%data(2)%values(:, n) - write(*,*) - write(*,*) 'temp(nz, n) = ',tracers%data(1)%values(nz, n) - write(*,*) 'temp(: , n) = ',tracers%data(1)%values(:, n) - write(*,*) - write(*,*) 'hflux = ',heat_flux(n) - write(*,*) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) 'eta_n = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) 'hbar = ',hbar(n) - write(*,*) 'hbar_old = ',hbar_old(n) - write(*,*) 'ssh_rhs = ',ssh_rhs(n) - write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'hnode = ',hnode(:,n) - write(*,*) 'hnode_new = ',hnode_new(:,n) - write(*,*) - write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) - write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) - write(*,*) - write(*,*) 'Kv = ',Kv(:,n) - write(*,*) - do el=1,nod_in_elem2d_num(n) - elidx = nod_in_elem2D(el,n) - write(*,*) ' elem#=',el,', elemidx=',elidx - write(*,*) ' Av =',Av(:,elidx) -! write(*,*) ' helem =',helem(:,elidx) -! write(*,*) ' U =',UV(1,:,elidx) -! write(*,*) ' V =',UV(2,:,elidx) - enddo - write(*,*) 'Wvel = ',Wvel(:,n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) - write(*,*) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - endif ! --> if ( (tracers%data(2)%values(nz, n) /= tracers%data(2)%values(nz, n)) .or. & ... - end do ! --> do nz=1,nlevels_nod2D(n)-1 - end do ! --> do n=1, myDim_nod2d -! ! end if - - !_______________________________________________________________________ - ! check globally if one of the cpus hat a blowup situation. if its the - ! case CPU mype==0 needs to write out the stuff. Write out occurs in - ! moment only over CPU mype==0 - call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) - if (found_blowup==1) then - call write_step_info(istep, 1, dynamics, tracers,partit,mesh) - if (mype==0) then - call sleep(1) - write(*,*) - write(*,*) ' MODEL BLOW UP !!!' - write(*,*) ' ____' - write(*,*) ' __,-~~/~ `---.' - write(*,*) ' _/_,---( , )' - write(*,*) ' __ / < / ) \___' - write(*,*) '- -- ----===;;;`====------------------===;;;===---- -- -' - write(*,*) ' \/ ~"~"~"~"~"~\~"~)~"/' - write(*,*) ' (_ ( \ ( > \)' - write(*,*) ' \_( _ < >_>`' - write(*,*) ' ~ `-i` ::>|--"' - write(*,*) ' I;|.|.|' - write(*,*) ' <|i::|i|`' - write(*,*) ' (` ^`"`- ")' - write(*,*) ' _____.,-#%&$@%#&#~,._____' - write(*,*) - end if - call blowup(istep, dynamics, tracers, partit, mesh) - if (mype==0) write(*,*) ' --> finished writing blow up file' - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - endif + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz) + do n=1, myDim_nod2d + !___________________________________________________________________ + ! check ssh + if ( ((eta_n(n) /= eta_n(n)) .or. eta_n(n)<-50.0 .or. eta_n(n)>50.0 .or. (d_eta(n) /= d_eta(n)) ) ) then +!$OMP CRITICAL + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found eta_n become NaN or <-10.0, >10.0' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) + write(*,*) 'eta_n(n) = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) + write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) + write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) + write(*,*) + write(*,*) 'ssh_rhs = ',ssh_rhs(n),', ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'hbar = ',hbar(n),', hbar_old = ',hbar_old(n) + write(*,*) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) + write(*,*) 'u_wind = ',u_wind(n),', v_wind = ',v_wind(n) + write(*,*) + do nz=1,nod_in_elem2D_num(n) + write(*,*) 'stress_surf(1:2,',nz,') = ',stress_surf(:,nod_in_elem2D(nz,n)) + end do + write(*,*) + write(*,*) 'm_ice = ',m_ice(n),', m_ice_old = ',m_ice_old(n) + write(*,*) 'a_ice = ',a_ice(n),', a_ice_old = ',a_ice_old(n) + write(*,*) + write(*,*) 'Wvel(:, n) = ',Wvel(ulevels_nod2D(n):nlevels_nod2D(n),n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(ulevels_nod2D(n):nlevels_nod2D(n),n) + write(*,*) + write(*,*) 'hnode(:, n) = ',hnode(ulevels_nod2D(n):nlevels_nod2D(n),n) + write(*,*) +!$OMP END CRITICAL + endif + + !___________________________________________________________________ + ! check surface vertical velocity --> in case of zlevel and zstar + ! vertical coordinate its indicator if Volume is conserved for + ! Wvel(1,n)~maschine preccision + if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) )) then +!$OMP CRITICAL + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found surface layer vertical velocity becomes NaN or >1e-12' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) + write(*,*) 'Wvel(1, n) = ',Wvel(1,n) + write(*,*) 'Wvel(:, n) = ',Wvel(:,n) + write(*,*) + write(*,*) 'hnode(1, n) = ',hnode(1,n) + write(*,*) 'hnode(:, n) = ',hnode(:,n) + write(*,*) 'hflux = ',heat_flux(n) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) + write(*,*) 'eta_n = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) 'hbar = ',hbar(n) + write(*,*) 'hbar_old = ',hbar_old(n) + write(*,*) 'ssh_rhs = ',ssh_rhs(n) + write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) + write(*,*) +!$OMP END CRITICAL + end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... + + !___________________________________________________________________ + ! check surface layer thinknesss + if ( .not. trim(which_ALE)=='linfs' .and. ( hnode(1, n) /= hnode(1, n) .or. hnode(1,n)< 0 )) then +!$OMP CRITICAL + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found surface layer thickness becomes NaN or <0' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) + write(*,*) 'hnode(1, n) = ',hnode(1,n) + write(*,*) 'hnode(:, n) = ',hnode(:,n) + write(*,*) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) +!$OMP END CRITICAL + end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... + + + do nz=1,nlevels_nod2D(n)-1 + !_______________________________________________________________ + ! check temp + if ( (tracers%data(1)%values(nz, n) /= tracers%data(1)%values(nz, n)) .or. & + tracers%data(1)%values(nz, n) < -5.0 .or. tracers%data(1)%values(nz, n)>60) then +!$OMP CRITICAL + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found temperture becomes NaN or <-5.0, >60' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'lon,lat = ',geo_coord_nod2D(:,n)/rad + write(*,*) 'nz = ',nz + write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) + write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad + write(*,*) 'temp(nz, n) = ',tracers%data(1)%values(nz, n) + write(*,*) 'temp(: , n) = ',tracers%data(1)%values(:, n) + write(*,*) 'temp_old(nz,n)= ',tracers%data(1)%valuesAB(nz, n) + write(*,*) 'temp_old(: ,n)= ',tracers%data(1)%valuesAB(:, n) + write(*,*) + write(*,*) 'hflux = ',heat_flux(n) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) + write(*,*) 'eta_n = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) 'hbar = ',hbar(n) + write(*,*) 'hbar_old = ',hbar_old(n) + write(*,*) 'ssh_rhs = ',ssh_rhs(n) + write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'm_ice = ',m_ice(n) + write(*,*) 'm_ice_old = ',m_ice_old(n) + write(*,*) 'm_snow = ',m_snow(n) + write(*,*) 'm_snow_old = ',m_snow_old(n) + write(*,*) + write(*,*) 'hnode = ',hnode(:,n) + write(*,*) 'hnode_new = ',hnode_new(:,n) + write(*,*) + write(*,*) 'Kv = ',Kv(:,n) + write(*,*) + write(*,*) 'W = ',Wvel(:,n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) + write(*,*) + write(*,*) +!$OMP END CRITICAL + endif ! --> if ( (tracers%data(1)%values(nz, n) /= tracers%data(1)%values(nz, n)) .or. & ... + + !_______________________________________________________________ + ! check salt + if ( (tracers%data(2)%values(nz, n) /= tracers%data(2)%values(nz, n)) .or. & + tracers%data(2)%values(nz, n) < 0 .or. tracers%data(2)%values(nz, n)>50 ) then +!$OMP CRITICAL + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found salinity becomes NaN or <0, >50' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'nz = ',nz + write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) + write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad + write(*,*) 'salt(nz, n) = ',tracers%data(2)%values(nz, n) + write(*,*) 'salt(: , n) = ',tracers%data(2)%values(:, n) + write(*,*) + write(*,*) 'temp(nz, n) = ',tracers%data(1)%values(nz, n) + write(*,*) 'temp(: , n) = ',tracers%data(1)%values(:, n) + write(*,*) + write(*,*) 'hflux = ',heat_flux(n) + write(*,*) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) 'eta_n = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) 'hbar = ',hbar(n) + write(*,*) 'hbar_old = ',hbar_old(n) + write(*,*) 'ssh_rhs = ',ssh_rhs(n) + write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'hnode = ',hnode(:,n) + write(*,*) 'hnode_new = ',hnode_new(:,n) + write(*,*) + write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) + write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) + write(*,*) + write(*,*) 'Kv = ',Kv(:,n) + write(*,*) + do el=1,nod_in_elem2d_num(n) + elidx = nod_in_elem2D(el,n) + write(*,*) ' elem#=',el,', elemidx=',elidx + write(*,*) ' Av =',Av(:,elidx) + enddo + write(*,*) 'Wvel = ',Wvel(:,n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) + write(*,*) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) +!$OMP END CRITICAL + endif ! --> if ( (tracers%data(2)%values(nz, n) /= tracers%data(2)%values(nz, n)) .or. & ... + end do ! --> do nz=1,nlevels_nod2D(n)-1 + end do ! --> do n=1, myDim_nod2d +!$OMP END PARALLEL DO + !_______________________________________________________________________ + ! check globally if one of the cpus hat a blowup situation. if its the + ! case CPU mype==0 needs to write out the stuff. Write out occurs in + ! moment only over CPU mype==0 + call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) + if (found_blowup==1) then + call write_step_info(istep, 1, dynamics, tracers,partit,mesh) + if (mype==0) then + call sleep(1) + write(*,*) + write(*,*) ' MODEL BLOW UP !!!' + write(*,*) ' ____' + write(*,*) ' __,-~~/~ `---.' + write(*,*) ' _/_,---( , )' + write(*,*) ' __ / < / ) \___' + write(*,*) '- -- ----===;;;`====------------------===;;;===---- -- -' + write(*,*) ' \/ ~"~"~"~"~"~\~"~)~"/' + write(*,*) ' (_ ( \ ( > \)' + write(*,*) ' \_( _ < >_>`' + write(*,*) ' ~ `-i` ::>|--"' + write(*,*) ' I;|.|.|' + write(*,*) ' <|i::|i|`' + write(*,*) ' (` ^`"`- ")' + write(*,*) ' _____.,-#%&$@%#&#~,._____' + write(*,*) + end if + call blowup(istep, dynamics, tracers, partit, mesh) + if (mype==0) write(*,*) ' --> finished writing blow up file' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + endif end subroutine - - +!=============================================================================== From 192ae26d5002d5e7f0ffcd203b6c0d2982be0c82 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 18 Nov 2021 15:23:03 +0100 Subject: [PATCH 595/909] MP bug fix in GM part --- src/oce_fer_gm.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index e32d14cf0..222912f21 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -61,7 +61,7 @@ subroutine fer_solve_Gamma(partit, mesh) #include "associate_part_ass.h" #include "associate_mesh_ass.h" -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, zinv1,zinv2, zinv, m, r, a, b, c, cp, tp, zbar_n, Z_n) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, zinv1,zinv2, zinv, m, r, a, b, c, cp, tp, tr, zbar_n, Z_n) !$OMP DO DO n=1,myDim_nod2D tr=>fer_gamma(:,:,n) From ac799b02a2fae0153f67e8defc07c13d96bcd815 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Sat, 18 Jul 2020 01:16:36 +0000 Subject: [PATCH 596/909] handle fill values differently --- src/gen_ic3d.F90 | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index ac866032c..6bd4da6e4 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -329,6 +329,8 @@ SUBROUTINE getcoeffld(tracers, partit, mesh) real(wp), allocatable, dimension(:) :: data1d integer :: elnodes(3) integer :: ierror ! return error code + integer :: NO_FILL ! 0=no fillval, 1=fillval + real(wp) :: FILL_VALUE #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -347,6 +349,12 @@ SUBROUTINE getcoeffld(tracers, partit, mesh) ! get variable id if (mype==0) then iost = nf_inq_varid(ncid, varname, id_data) + iost = nf_inq_var_fill(ncid, id_data, NO_FILL, FILL_VALUE) ! FillValue defined? + if (NO_FILL==1) then + print *, 'No _FillValue is set in ', filename, ', trying dummy =', dummy, FILL_VALUE + else + print *, 'The FillValue in ', filename, ' is set to ', FILL_VALUE ! should set dummy accordingly + end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) call check_nferr(iost,filename,partit) @@ -362,11 +370,11 @@ SUBROUTINE getcoeffld(tracers, partit, mesh) ncdata(1,:,:) =ncdata(nc_Nlon-1,:,:) ncdata(nc_Nlon,:,:)=ncdata(2,:,:) - ! replace nan by dummy value + ! replace nan (or fillvalue) by dummy value do k=1,nc_Ndepth do j=1,nc_Nlat do i=1,nc_Nlon - if (ieee_is_nan(ncdata(i,j,k))) then + if (ieee_is_nan(ncdata(i,j,k)) .or. (ncdata(i,j,k)==FILL_VALUE)) then ncdata(i,j,k) = dummy elseif (ncdata(i,j,k) < -0.99_WP*dummy .or. ncdata(i,j,k) > dummy) then ! and in case the input data has other conventions on missing values: From 48ab71bf8d592197d2eda4e9667156e3ff7a18f9 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 18 Nov 2021 15:55:24 +0100 Subject: [PATCH 597/909] OMP loop added in mo_convect --- src/oce_mo_conv.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_mo_conv.F90 b/src/oce_mo_conv.F90 index 221301801..8d6434a9f 100644 --- a/src/oce_mo_conv.F90 +++ b/src/oce_mo_conv.F90 @@ -26,9 +26,9 @@ subroutine mo_convect(partit, mesh) ! of vertical mixing in the Weddell Sea! ! Computes the mixing length derived from the Monin if (use_momix) then - mo = 0._WP !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax) do node=1, myDim_nod2D+eDim_nod2D + mo(:, node) = 0._WP nzmax = nlevels_nod2d(node) nzmin = ulevels_nod2d(node) !___________________________________________________________________ From e376fc05742980226d0494179752b03da4f00ebb Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 19 Nov 2021 10:24:37 +0000 Subject: [PATCH 598/909] add configure script --- configure_any.sh | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100755 configure_any.sh diff --git a/configure_any.sh b/configure_any.sh new file mode 100755 index 000000000..7c63dae66 --- /dev/null +++ b/configure_any.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# custom build script in use at ECMWF + +set -e + +LIB=no +while getopts "l" OPT +do + case "$OPT" in + l) LIB=yes;; + esac +done +shift $((OPTIND-1)) + +source env.sh # source this from your run script too + +if [[ ${LIB} = yes ]]; then + mkdir build.lib || true # build dir for library + cd build.lib + cmake -DBUILD_FESOM_AS_LIBRARY=ON .. # not required when re-compiling + sed -i -e 's/-lFALSE//g' src/CMakeFiles/fesom.dir/link.txt # workaround for the moment on cray +else + mkdir build || true # build dir for binary + cd build + cmake .. # not required when re-compiling +fi +make install -j`nproc --all` From 625cadbc8bd7a210366ade80184fd691dd1905f7 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 19 Nov 2021 10:28:57 +0000 Subject: [PATCH 599/909] add job script for ecmwf --- work/job_ecmwf | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100755 work/job_ecmwf diff --git a/work/job_ecmwf b/work/job_ecmwf new file mode 100755 index 000000000..287f8e442 --- /dev/null +++ b/work/job_ecmwf @@ -0,0 +1,47 @@ +#!/bin/bash +#PBS -S /usr/bin/ksh +#PBS -N fesom2-LandG +#PBS -q np +#PBS -l EC_total_tasks=288 + +# optionally, specifiy that no OpenMP is used +#PBS -l EC_threads_per_task=1 + +#PBS -l EC_hyperthreading=1 +#PBS -l EC_user_defined_priority=99 +#PBS -l walltime=00:57:00 + +##PBS -j oe #join out and err +#PBD -n +#PBS -o /scratch/rd/natr/run_core2_LandG/pbs.out +#PBS -e /scratch/rd/natr/run_core2_LandG/pbs.err + +#PBS -m abe +#PBS -M thomas.rackow@ecmwf.int + +#queue suitable for target processors min/max processors per node memory limit wall-clock +#np parallel MOM+CN 1/72 not shared 72 120 GB 48 hours + +path=`pwd` +echo Initial path: $path + +cd /scratch/rd/natr/run_core2_LandG/ + +# debug +set -x + +cp $HOME/fesom2/bin/fesom.x . # +# did manually +#cp -n $HOME/fesom2/config/namelist.config . # +#cp -n $HOME/fesom2/config/namelist.cvmix . # +#cp -n $HOME/fesom2/config/namelist.forcing . # +#cp -n $HOME/fesom2/config/namelist.oce . # +#cp -n $HOME/fesom2/config/namelist.io . # +#cp -n $HOME/fesom2/config/namelist.ice . # +#cp -n $HOME/fesom2/config/namelist.tra . # +#cp -n $HOME/fesom2/config/namelist.dyn . # + +date +echo tasks_per_node, total_tasks, HT: $EC_tasks_per_node $EC_total_tasks $EC_hyperthreads +aprun -N $EC_tasks_per_node -n $EC_total_tasks -j $EC_hyperthreads ./fesom.x > "fesom2.out" +date From f22ff2827b495877382da9aad1aecc0f9574fe38 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Fri, 19 Nov 2021 14:33:38 +0000 Subject: [PATCH 600/909] Remove qsplitCF. --- lib/parms/src/DDPQ/misc.c | 49 --------------------------------------- 1 file changed, 49 deletions(-) diff --git a/lib/parms/src/DDPQ/misc.c b/lib/parms/src/DDPQ/misc.c index ba3c53204..955af1f98 100755 --- a/lib/parms/src/DDPQ/misc.c +++ b/lib/parms/src/DDPQ/misc.c @@ -10,55 +10,6 @@ #define DBL_EPSILON 2.2204460492503131e-16 // double epsilon -int qsplitCF(FLOAT *a, int *ind, int n, int ncut) -{ -/*---------------------------------------------------------------------- -| does a quick-sort split of a complex real array. -| on input a[0 : (n-1)] is a real array -| on output is permuted such that its elements satisfy: -| -| abs(a[i]) >= abs(a[ncut-1]) for i < ncut-1 and -| abs(a[i]) <= abs(a[ncut-1]) for i > ncut-1 -| -| ind[0 : (n-1)] is an integer array permuted in the same way as a. -|---------------------------------------------------------------------*/ - FLOAT tmp; - double abskey; - int j, itmp, first, mid, last; - first = 0; - last = n-1; - if (ncutlast) return 0; -/* outer loop -- while mid != ncut */ -label1: - mid = first; - abskey = ABS_VALUE(a[mid]); - for (j=first+1; j<=last; j++) { - if (ABS_VALUE(a[j]) > abskey) { - tmp = a[++mid]; - itmp = ind[mid]; - a[mid] = a[j]; - ind[mid] = ind[j]; - a[j] = tmp; - ind[j] = itmp; - } - } -/*-------------------- interchange */ - tmp = a[mid]; - a[mid] = a[first]; - a[first] = tmp; - itmp = ind[mid]; - ind[mid] = ind[first]; - ind[first] = itmp; -/*-------------------- test for while loop */ - if (mid == ncut) return 0; - if (mid > ncut) - last = mid-1; - else - first = mid+1; - goto label1; -} -/*--------------- end of zqsplitCF ----------------------------------------*/ - int SparTran(csptr amat, csptr bmat, int job, int flag) { /*---------------------------------------------------------------------- From cfbb5ee02030f0fc4582370df2512893963b89fb Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Fri, 19 Nov 2021 14:54:30 +0000 Subject: [PATCH 601/909] Remove #endif leftover from merge. --- src/ifs_interface/ifs_notused.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 index 6c78483d9..7d8603248 100644 --- a/src/ifs_interface/ifs_notused.F90 +++ b/src/ifs_interface/ifs_notused.F90 @@ -372,4 +372,3 @@ SUBROUTINE nemogcmcoup_end_ioserver END SUBROUTINE nemogcmcoup_end_ioserver -#endif From 1f91b6ffc97c62c004cde53161bfb84db175038e Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Fri, 19 Nov 2021 16:53:22 +0000 Subject: [PATCH 602/909] Revert to non module code. --- src/ifs_interface/ifs_interface.F90 | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 3164164ef..78d758d86 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -19,7 +19,6 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & USE g_config, only: dt USE g_clock, only: timenew, daynew, yearnew, month, day_in_month USE nemogcmcoup_steps, ONLY : substeps - USE fvom_module, only: fesom_init IMPLICIT NONE @@ -1461,7 +1460,6 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) USE g_clock, only: yearnew, month, day_in_month USE fesom_main_storage_module, only: fesom => f ! mype USE nemogcmcoup_steps, ONLY : substeps - USE fvom_module, only: fesom_runloop IMPLICIT NONE ! Arguments @@ -1503,7 +1501,6 @@ END SUBROUTINE nemogcmcoup_step SUBROUTINE nemogcmcoup_final USE fesom_main_storage_module, only: fesom => f ! mype - USE fvom_module, only: fesom_finalize ! Finalize the FESOM model From bcb2d1f4640e5bd0b49451bc22b20b995192aa52 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 19 Nov 2021 19:27:53 +0100 Subject: [PATCH 603/909] exchange u_ice and v_ice against ice derived type variables ice%uvice(1:2,:) --- src/cavity_param.F90 | 59 +++-- src/fesom_module.F90 | 20 +- src/gen_forcing_couple.F90 | 41 ++-- src/ice_EVP.F90 | 471 ++++++++++++++++++++----------------- src/ice_fct.F90 | 238 ++++++++++++------- src/ice_maEVP.F90 | 39 ++- src/ice_modules.F90 | 3 +- src/ice_oce_coupling.F90 | 22 +- src/ice_setup_step.F90 | 97 ++++++-- src/ice_thermo_oce.F90 | 38 ++- src/io_blowup.F90 | 13 +- src/io_meandata.F90 | 15 +- src/io_restart.F90 | 15 +- src/oce_ale.F90 | 23 +- src/oce_mo_conv.F90 | 10 +- src/write_step_info.F90 | 24 +- 16 files changed, 716 insertions(+), 412 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index cb2ec329c..64f7ffcb5 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -1,18 +1,34 @@ module cavity_heat_water_fluxes_3eq_interface - interface - subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_DYN - use mod_tracer - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers - type(t_dyn), intent(in), target :: dynamics - end subroutine - end interface + interface + subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) + USE MOD_DYN + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_dyn), intent(in), target :: dynamics + type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface end module + +module cavity_ice_clean_vel_interface + interface + subroutine cavity_ice_clean_vel(ice, partit, mesh) + use MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface +end module + + ! ! !_______________________________________________________________________________ @@ -442,21 +458,28 @@ end subroutine cavity_momentum_fluxes ! ! !_______________________________________________________________________________ -subroutine cavity_ice_clean_vel(partit, mesh) - use MOD_MESH +subroutine cavity_ice_clean_vel(ice, partit, mesh) + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP - use i_ARRAYS, only: U_ice, V_ice + USE MOD_MESH implicit none + type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ integer :: node - + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + + !___________________________________________________________________________ do node=1,myDim_nod2d+eDim_nod2d if(ulevels_nod2D(node)>1) then U_ice(node)=0._WP diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index fdeec1c53..df98a9f08 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -2,6 +2,7 @@ ! so they can be reused after fesom_init module fesom_main_storage_module USE MOD_MESH + USE MOD_ICE USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP @@ -56,6 +57,7 @@ module fesom_main_storage_module type(t_tracer) tracers type(t_dyn) dynamics type(t_partit) partit + type(t_ice) ice character(LEN=256) :: dump_dir, dump_filename @@ -63,6 +65,7 @@ module fesom_main_storage_module type(t_mesh) mesh_copy type(t_tracer) tracers_copy type(t_dyn) dynamics_copy + type(t_ice) ice_copy character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt integer mpi_version_len @@ -171,7 +174,8 @@ subroutine fesom_init(fesom_total_nsteps) if (f%mype==0) f%t4=MPI_Wtime() if (use_ice) then - call ice_setup(f%tracers, f%partit, f%mesh) + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ice_setup'//achar(27)//'[0m' + call ice_setup(f%ice, f%tracers, f%partit, f%mesh) ice_steps_since_upd = ice_ave_steps-1 ice_update=.true. if (f%mype==0) write(*,*) 'EVP scheme option=', whichEVP @@ -202,7 +206,7 @@ subroutine fesom_init(fesom_total_nsteps) ! if l_write is TRUE the restart will be forced ! if l_read the restart will be read ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) - call restart(0, .false., r_restart, f%dynamics, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read + call restart(0, .false., r_restart, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read if (f%mype==0) f%t7=MPI_Wtime() ! store grid information into netcdf file if (.not. r_restart) call write_mesh_info(f%partit, f%mesh) @@ -337,7 +341,7 @@ subroutine fesom_runloop(current_nsteps) !___compute update of atmospheric forcing____________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' f%t0_frc = MPI_Wtime() - call update_atm_forcing(n, f%tracers, f%partit, f%mesh) + call update_atm_forcing(n, f%ice, f%tracers, f%partit, f%mesh) f%t1_frc = MPI_Wtime() !___compute ice step________________________________________________ if (ice_steps_since_upd>=ice_ave_steps-1) then @@ -348,10 +352,10 @@ subroutine fesom_runloop(current_nsteps) ice_steps_since_upd=ice_steps_since_upd+1 endif if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ice_timestep(n)'//achar(27)//'[0m' - if (ice_update) call ice_timestep(n, f%partit, f%mesh) + if (ice_update) call ice_timestep(n, f%ice, f%partit, f%mesh) !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(f%dynamics, f%partit, f%mesh) ! momentum only + call oce_fluxes_mom(f%ice, f%dynamics, f%partit, f%mesh) ! momentum only call oce_fluxes(f%dynamics, f%tracers, f%partit, f%mesh) end if call before_oce_step(f%dynamics, f%tracers, f%partit, f%mesh) ! prepare the things if required @@ -359,7 +363,7 @@ subroutine fesom_runloop(current_nsteps) !___model ocean step____________________________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - call oce_timestep_ale(n, f%dynamics, f%tracers, f%partit, f%mesh) + call oce_timestep_ale(n, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) f%t3 = MPI_Wtime() !___compute energy diagnostics..._______________________________________ @@ -369,10 +373,10 @@ subroutine fesom_runloop(current_nsteps) f%t4 = MPI_Wtime() !___prepare output______________________________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, f%dynamics, f%tracers, f%partit, f%mesh) + call output (n, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) f%t5 = MPI_Wtime() - call restart(n, .false., .false., f%dynamics, f%tracers, f%partit, f%mesh) + call restart(n, .false., .false., f%ice, f%dynamics, f%tracers, f%partit, f%mesh) f%t6 = MPI_Wtime() f%rtime_fullice = f%rtime_fullice + f%t2 - f%t1 diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 684b8a2cd..b26a83767 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -44,18 +44,20 @@ subroutine integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, end module module update_atm_forcing_interface - interface - subroutine update_atm_forcing(istep, tracers, partit,mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - integer, intent(in) :: istep - type(t_tracer), intent(in), target :: tracers - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - end subroutine - end interface + interface + subroutine update_atm_forcing(istep, ice, tracers, partit,mesh) + USE MOD_TRACER + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + integer, intent(in) :: istep + type(t_ice), intent(inout), target :: ice + type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface end module module net_rec_from_atm_interface @@ -70,12 +72,13 @@ subroutine net_rec_from_atm(action, partit) end module ! Routines for updating ocean surface forcing fields !------------------------------------------------------------------------- -subroutine update_atm_forcing(istep, tracers, partit, mesh) +subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_ICE use o_arrays use i_arrays use i_param @@ -98,9 +101,11 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) implicit none integer, intent(in) :: istep - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !_____________________________________________________________________________ integer :: i, itime,n2,n,nz,k,elem real(kind=WP) :: i_coef, aux real(kind=WP) :: dux, dvy,tx,ty,tvol @@ -121,10 +126,16 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) !integer, parameter :: nci=192, ncj=94 ! T62 grid !real(kind=WP), dimension(nci,ncj) :: array_nc, array_nc2,array_nc3,x !character(500) :: file + !_____________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + t1=MPI_Wtime() #ifdef __oasis if (firstcall) then diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 690118d7d..dbc7f2c8b 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -1,30 +1,44 @@ module ice_EVP_interfaces - interface - subroutine stress_tensor(ice_strength, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) - end subroutine - - subroutine stress2rhs(inv_areamass, ice_strength, partit, mesh) - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - REAL(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) - end subroutine - end interface + interface + subroutine stress_tensor(ice_strength, ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) + end subroutine + + subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) + end subroutine + + subroutine EVPdynamics(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface end module ! ! Contains routines of EVP dynamics ! !=================================================================== -subroutine stress_tensor(ice_strength, partit, mesh) +subroutine stress_tensor(ice_strength, ice, partit, mesh) ! EVP rheology. The routine computes stress tensor components based on ice ! velocity field. They are stored as elemental arrays (sigma11, sigma22 and ! sigma12). The ocean velocity is at nodal locations. @@ -35,26 +49,33 @@ subroutine stress_tensor(ice_strength, partit, mesh) USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP +USE MOD_ICE #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif implicit none -type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit +type(t_ice), intent(inout), target :: ice +type(t_mesh), intent(in), target :: mesh +!_______________________________________________________________________________ real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) real(kind=WP) :: eta, xi, delta, aa integer :: el, elnodes(3) real(kind=WP) :: asum, msum, vale, dx(3), dy(3) real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte real(kind=WP) :: zeta, delta_inv, d1, d2 - +!_______________________________________________________________________________ +real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - +u_ice => ice%uvice(1,:) +v_ice => ice%uvice(2,:) + +!_______________________________________________________________________________ vale = 1.0_WP/(ellipse**2) dte = ice_dt/(1.0_WP*evp_rheol_steps) @@ -138,197 +159,197 @@ subroutine stress_tensor(ice_strength, partit, mesh) end do end subroutine stress_tensor +! !=================================================================== +! subroutine stress_tensor_no1(ice_strength, partit, mesh) +! ! EVP rheology. The routine computes stress tensor components based on ice +! ! velocity field. They are stored as elemental arrays (sigma11, sigma22 and +! ! sigma12). The ocean velocity is at nodal locations. +! use o_param +! use i_param +! use i_arrays +! USE g_CONFIG +! USE MOD_MESH +! USE MOD_PARTIT +! USE MOD_PARSUP +! implicit none +! type(t_mesh), intent(in), target :: mesh +! type(t_partit), intent(inout), target :: partit +! real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) +! real(kind=WP) :: eta, xi, delta, aa +! integer :: el, elnodes(3) +! real(kind=WP) :: asum, msum, vale, dx(3), dy(3) +! real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte +! real(kind=WP) :: zeta, delta_inv, d1, d2 +! +! #include "associate_part_def.h" +! #include "associate_mesh_def.h" +! #include "associate_part_ass.h" +! #include "associate_mesh_ass.h" +! +! vale = 1.0_WP/(ellipse**2) +! +! dte = ice_dt/(1.0_WP*evp_rheol_steps) +! det1 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) +! det2 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) !*ellipse**2 +! +! +! do el=1,myDim_elem2D +! !__________________________________________________________________________ +! ! if element contains cavity node skip it +! if (ulevels(el) > 1) cycle +! ! ===== Check if there is ice on elem +! +! ! There is no ice in elem +! ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE +! if (ice_strength(el) > 0.) then +! ! ===== +! ! ===== Deformation rate tensor on element elem: +! !du/dx +! +! eps11(el) = sum(mesh%gradient_sca(1:3,el)*U_ice(mesh%elem2D_nodes(1:3,el))) & +! -mesh% metric_factor(el) * sum(V_ice(mesh%elem2D_nodes(1:3,el)))/3.0_WP +! +! eps22(el) = sum(mesh%gradient_sca(4:6, el)*V_ice(mesh%elem2D_nodes(1:3,el))) +! +! eps12(el) = 0.5_WP*(sum(mesh%gradient_sca(4:6,el)*U_ice(mesh%elem2D_nodes(1:3,el))) & +! + sum(mesh%gradient_sca(1:3,el)*V_ice(mesh%elem2D_nodes(1:3,el))) & +! + mesh%metric_factor(el) * sum(U_ice(mesh%elem2D_nodes(1:3,el)))/3.0_WP) +! ! ===== moduli: +! delta = sqrt((eps11(el)*eps11(el) + eps22(el)*eps22(el))*(1.0_WP+vale) + 4.0_WP*vale*eps12(el)*eps12(el) + & +! 2.0_WP*eps11(el)*eps22(el)*(1.0_WP-vale)) +! +! ! ======================================= +! ! ===== Here the EVP rheology piece starts +! ! ======================================= +! +! ! ===== viscosity zeta should exceed zeta_min +! ! (done via limiting delta from above) +! +! !if(delta>pressure/zeta_min) delta=pressure/zeta_min +! !It does not work properly by +! !creating response where ice_strength is small +! ! Uncomment and test if necessary +! +! ! ===== if delta is too small or zero, viscosity will too large (unlimited) +! ! (limit delta_inv) +! delta_inv = 1.0_WP/max(delta,delta_min) +! +! !!PS delta_inv = delta/(delta+delta_min) +! +! zeta = ice_strength(el)*delta_inv +! ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta +! ! is too large in some regions and CFL criterion is violated. +! ! The regularization below was introduced by Hunke, +! ! but seemingly is not used in the current CICE. +! ! Without it divergence and zeta can be noisy (but code +! ! remains stable), using it reduces viscosities too strongly. +! ! It is therefore commented +! +! !if (zeta>Clim_evp*voltriangle(el)) then +! !zeta=Clim_evp*voltriangle(el) +! !end if +! +! zeta = zeta*Tevp_inv +! +! r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*Tevp_inv +! r2 = zeta*(eps11(el)-eps22(el))*vale +! r3 = zeta*eps12(el)*vale +! +! si1 = det1*(sigma11(el) + sigma22(el) + dte*r1) +! si2 = det2*(sigma11(el) - sigma22(el) + dte*r2) +! +! sigma12(el) = det2*(sigma12(el)+dte*r3) +! sigma11(el) = 0.5_WP*(si1+si2) +! sigma22(el) = 0.5_WP*(si1-si2) +! endif +! end do +! end subroutine stress_tensor_no1 !=================================================================== -subroutine stress_tensor_no1(ice_strength, partit, mesh) -! EVP rheology. The routine computes stress tensor components based on ice -! velocity field. They are stored as elemental arrays (sigma11, sigma22 and -! sigma12). The ocean velocity is at nodal locations. -use o_param -use i_param -use i_arrays -USE g_CONFIG -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -implicit none -type(t_mesh), intent(in), target :: mesh -type(t_partit), intent(inout), target :: partit -real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) -real(kind=WP) :: eta, xi, delta, aa -integer :: el, elnodes(3) -real(kind=WP) :: asum, msum, vale, dx(3), dy(3) -real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte -real(kind=WP) :: zeta, delta_inv, d1, d2 - -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - - vale = 1.0_WP/(ellipse**2) - - dte = ice_dt/(1.0_WP*evp_rheol_steps) - det1 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) - det2 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) !*ellipse**2 - - - do el=1,myDim_elem2D - !__________________________________________________________________________ - ! if element contains cavity node skip it - if (ulevels(el) > 1) cycle - ! ===== Check if there is ice on elem - - ! There is no ice in elem - ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE - if (ice_strength(el) > 0.) then - ! ===== - ! ===== Deformation rate tensor on element elem: - !du/dx - - eps11(el) = sum(mesh%gradient_sca(1:3,el)*U_ice(mesh%elem2D_nodes(1:3,el))) & - -mesh% metric_factor(el) * sum(V_ice(mesh%elem2D_nodes(1:3,el)))/3.0_WP - - eps22(el) = sum(mesh%gradient_sca(4:6, el)*V_ice(mesh%elem2D_nodes(1:3,el))) - - eps12(el) = 0.5_WP*(sum(mesh%gradient_sca(4:6,el)*U_ice(mesh%elem2D_nodes(1:3,el))) & - + sum(mesh%gradient_sca(1:3,el)*V_ice(mesh%elem2D_nodes(1:3,el))) & - + mesh%metric_factor(el) * sum(U_ice(mesh%elem2D_nodes(1:3,el)))/3.0_WP) - ! ===== moduli: - delta = sqrt((eps11(el)*eps11(el) + eps22(el)*eps22(el))*(1.0_WP+vale) + 4.0_WP*vale*eps12(el)*eps12(el) + & - 2.0_WP*eps11(el)*eps22(el)*(1.0_WP-vale)) - - ! ======================================= - ! ===== Here the EVP rheology piece starts - ! ======================================= - - ! ===== viscosity zeta should exceed zeta_min - ! (done via limiting delta from above) - - !if(delta>pressure/zeta_min) delta=pressure/zeta_min - !It does not work properly by - !creating response where ice_strength is small - ! Uncomment and test if necessary - - ! ===== if delta is too small or zero, viscosity will too large (unlimited) - ! (limit delta_inv) - delta_inv = 1.0_WP/max(delta,delta_min) - -!!PS delta_inv = delta/(delta+delta_min) - - zeta = ice_strength(el)*delta_inv - ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta - ! is too large in some regions and CFL criterion is violated. - ! The regularization below was introduced by Hunke, - ! but seemingly is not used in the current CICE. - ! Without it divergence and zeta can be noisy (but code - ! remains stable), using it reduces viscosities too strongly. - ! It is therefore commented - - !if (zeta>Clim_evp*voltriangle(el)) then - !zeta=Clim_evp*voltriangle(el) - !end if - - zeta = zeta*Tevp_inv - - r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*Tevp_inv - r2 = zeta*(eps11(el)-eps22(el))*vale - r3 = zeta*eps12(el)*vale - - si1 = det1*(sigma11(el) + sigma22(el) + dte*r1) - si2 = det2*(sigma11(el) - sigma22(el) + dte*r2) - - sigma12(el) = det2*(sigma12(el)+dte*r3) - sigma11(el) = 0.5_WP*(si1+si2) - sigma22(el) = 0.5_WP*(si1-si2) - endif - end do -end subroutine stress_tensor_no1 -!=================================================================== -subroutine stress2rhs_e(partit, mesh) -! EVP implementation: -! Computes the divergence of stress tensor and puts the result into the -! rhs vectors. Velocity is at nodes. -! The divergence is computed in a cysly over edges. It is slower that the -! approach in stress2rhs_e inherited from FESOM -USE o_PARAM -USE i_PARAM -USE i_therm_param -USE i_arrays -use g_config, only: use_cavity -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP - -IMPLICIT NONE -type(t_mesh), intent(in), target :: mesh -type(t_partit), intent(inout), target :: partit -INTEGER :: n, elem, ed, elnodes(3), el(2), ednodes(2) -REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - - DO n=1, myDim_nod2D - U_rhs_ice(n)=0.0_WP - V_rhs_ice(n)=0.0_WP - END DO - - ! Stress divergence - DO ed=1,myDim_edge2D - ednodes=edges(:,ed) - el=edge_tri(:,ed) - if(myList_edge2D(ed)>edge2D_in) cycle - - ! stress boundary condition at ocean cavity boundary edge ==0 - if (use_cavity) then - if ( (ulevels(el(1))>1) .or. ( el(2)>0 .and. ulevels(el(2))>1) ) cycle - end if - - ! elements on both sides - uc = - sigma12(el(1))*edge_cross_dxdy(1,ed) + sigma11(el(1))*edge_cross_dxdy(2,ed) & - + sigma12(el(2))*edge_cross_dxdy(3,ed) - sigma11(el(2))*edge_cross_dxdy(4,ed) - - vc = - sigma22(el(1))*edge_cross_dxdy(1,ed) + sigma12(el(1))*edge_cross_dxdy(2,ed) & - + sigma22(el(2))*edge_cross_dxdy(3,ed) - sigma12(el(2))*edge_cross_dxdy(4,ed) - - U_rhs_ice(ednodes(1)) = U_rhs_ice(ednodes(1)) + uc - U_rhs_ice(ednodes(2)) = U_rhs_ice(ednodes(2)) - uc - V_rhs_ice(ednodes(1)) = V_rhs_ice(ednodes(1)) + vc - V_rhs_ice(ednodes(2)) = V_rhs_ice(ednodes(2)) - vc - END DO - - DO n=1, myDim_nod2D - !___________________________________________________________________________ - ! if cavity node skip it - if ( ulevels_nod2d(n) > 1 ) cycle - - !___________________________________________________________________________ - mass = area(1,n)*(rhoice*m_ice(n)+rhosno*m_snow(n)) - if(mass > 1.e-3_WP) then - U_rhs_ice(n) = U_rhs_ice(n) / mass - V_rhs_ice(n) = V_rhs_ice(n) / mass - else - U_rhs_ice(n)=0.0_WP - V_rhs_ice(n)=0.0_WP - end if - END DO - ! - ! elevation gradient contribution - ! - do elem=1,myDim_elem2D - !__________________________________________________________________________ - ! if element contains cavity node skip it - if (ulevels(elem) > 1) cycle - - !__________________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - uc=elem_area(elem)*g*sum(gradient_sca(1:3,elem)*elevation(elnodes))/3.0_WP - vc=elem_area(elem)*g*sum(gradient_sca(4:6,elem)*elevation(elnodes))/3.0_WP - U_rhs_ice(elnodes)=U_rhs_ice(elnodes) - uc/area(1,elnodes) - V_rhs_ice(elnodes)=V_rhs_ice(elnodes) - vc/area(1,elnodes) - END DO -end subroutine stress2rhs_e +! subroutine stress2rhs_e(partit, mesh) +! ! EVP implementation: +! ! Computes the divergence of stress tensor and puts the result into the +! ! rhs vectors. Velocity is at nodes. +! ! The divergence is computed in a cysly over edges. It is slower that the +! ! approach in stress2rhs_e inherited from FESOM +! USE o_PARAM +! USE i_PARAM +! USE i_therm_param +! USE i_arrays +! use g_config, only: use_cavity +! USE MOD_MESH +! USE MOD_PARTIT +! USE MOD_PARSUP +! +! IMPLICIT NONE +! type(t_mesh), intent(in), target :: mesh +! type(t_partit), intent(inout), target :: partit +! INTEGER :: n, elem, ed, elnodes(3), el(2), ednodes(2) +! REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 +! #include "associate_part_def.h" +! #include "associate_mesh_def.h" +! #include "associate_part_ass.h" +! #include "associate_mesh_ass.h" +! +! DO n=1, myDim_nod2D +! U_rhs_ice(n)=0.0_WP +! V_rhs_ice(n)=0.0_WP +! END DO +! +! ! Stress divergence +! DO ed=1,myDim_edge2D +! ednodes=edges(:,ed) +! el=edge_tri(:,ed) +! if(myList_edge2D(ed)>edge2D_in) cycle +! +! ! stress boundary condition at ocean cavity boundary edge ==0 +! if (use_cavity) then +! if ( (ulevels(el(1))>1) .or. ( el(2)>0 .and. ulevels(el(2))>1) ) cycle +! end if +! +! ! elements on both sides +! uc = - sigma12(el(1))*edge_cross_dxdy(1,ed) + sigma11(el(1))*edge_cross_dxdy(2,ed) & +! + sigma12(el(2))*edge_cross_dxdy(3,ed) - sigma11(el(2))*edge_cross_dxdy(4,ed) +! +! vc = - sigma22(el(1))*edge_cross_dxdy(1,ed) + sigma12(el(1))*edge_cross_dxdy(2,ed) & +! + sigma22(el(2))*edge_cross_dxdy(3,ed) - sigma12(el(2))*edge_cross_dxdy(4,ed) +! +! U_rhs_ice(ednodes(1)) = U_rhs_ice(ednodes(1)) + uc +! U_rhs_ice(ednodes(2)) = U_rhs_ice(ednodes(2)) - uc +! V_rhs_ice(ednodes(1)) = V_rhs_ice(ednodes(1)) + vc +! V_rhs_ice(ednodes(2)) = V_rhs_ice(ednodes(2)) - vc +! END DO +! +! DO n=1, myDim_nod2D +! !___________________________________________________________________________ +! ! if cavity node skip it +! if ( ulevels_nod2d(n) > 1 ) cycle +! +! !___________________________________________________________________________ +! mass = area(1,n)*(rhoice*m_ice(n)+rhosno*m_snow(n)) +! if(mass > 1.e-3_WP) then +! U_rhs_ice(n) = U_rhs_ice(n) / mass +! V_rhs_ice(n) = V_rhs_ice(n) / mass +! else +! U_rhs_ice(n)=0.0_WP +! V_rhs_ice(n)=0.0_WP +! end if +! END DO +! ! +! ! elevation gradient contribution +! ! +! do elem=1,myDim_elem2D +! !__________________________________________________________________________ +! ! if element contains cavity node skip it +! if (ulevels(elem) > 1) cycle +! +! !__________________________________________________________________________ +! elnodes=elem2D_nodes(:,elem) +! uc=elem_area(elem)*g*sum(gradient_sca(1:3,elem)*elevation(elnodes))/3.0_WP +! vc=elem_area(elem)*g*sum(gradient_sca(4:6,elem)*elevation(elnodes))/3.0_WP +! U_rhs_ice(elnodes)=U_rhs_ice(elnodes) - uc/area(1,elnodes) +! V_rhs_ice(elnodes)=V_rhs_ice(elnodes) - vc/area(1,elnodes) +! END DO +! end subroutine stress2rhs_e !=================================================================== subroutine stress2rhs(inv_areamass, ice_strength, partit, mesh) ! EVP implementation: @@ -406,9 +427,13 @@ end subroutine stress2rhs ! ! !=================================================================== -subroutine EVPdynamics(partit, mesh) +subroutine EVPdynamics(ice, partit, mesh) ! EVP implementation. Does subcycling and boundary conditions. ! Velocities at nodes +USE MOD_ICE +USE MOD_PARTIT +USE MOD_PARSUP +USE MOD_MESH USE o_PARAM USE i_ARRAYS USE i_PARAM @@ -417,9 +442,6 @@ subroutine EVPdynamics(partit, mesh) USE g_CONFIG USE g_comm_auto use ice_EVP_interfaces -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength @@ -427,8 +449,10 @@ subroutine EVPdynamics(partit, mesh) #endif IMPLICIT NONE -type(t_mesh), intent(in), target :: mesh +type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit +type(t_mesh), intent(in), target :: mesh +!_______________________________________________________________________________ integer :: steps, shortstep real(kind=WP) :: rdt, asum, msum, r_a, r_b real(kind=WP) :: drag, det, umod, rhsu, rhsv @@ -447,12 +471,17 @@ subroutine EVPdynamics(partit, mesh) INTEGER :: elem REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 - +!_______________________________________________________________________________ +! pointer on necessary derived types +real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +u_ice => ice%uvice(1,:) +v_ice => ice%uvice(2,:) +!_______________________________________________________________________________ ! If Icepack is used, always update the tracers #if defined (__icepack) @@ -614,8 +643,8 @@ subroutine EVPdynamics(partit, mesh) do shortstep=1, evp_rheol_steps - call stress_tensor(ice_strength, partit, mesh) - call stress2rhs(inv_areamass,ice_strength, partit, mesh) + call stress_tensor(ice_strength, ice, partit, mesh) + call stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) U_ice_old = U_ice !PS V_ice_old = V_ice !PS diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index e6b1acd60..2b69e52d5 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -1,38 +1,76 @@ module ice_fct_interfaces - interface - subroutine ice_mass_matrix_fill(partit, mesh) - use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - end subroutine - - subroutine ice_solve_high_order(partit, mesh) - use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - end subroutine - - subroutine ice_solve_low_order(partit, mesh) - use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - end subroutine - - subroutine ice_fem_fct(tr_array_id, partit, mesh) - use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - integer :: tr_array_id - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - end subroutine - end interface + interface + subroutine ice_mass_matrix_fill(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine ice_solve_high_order(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine ice_solve_low_order(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + integer :: tr_array_id + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine ice_TG_rhs_div(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine ice_TG_rhs(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine ice_update_for_div(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface end module ! @@ -48,25 +86,33 @@ subroutine ice_fem_fct(tr_array_id, partit, mesh) ! The code is adapted from FESOM ! ! ===================================================================== -subroutine ice_TG_rhs(partit, mesh) +subroutine ice_TG_rhs(ice, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_ICE use i_Arrays use i_PARAM use o_PARAM USE g_CONFIG implicit none - real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) - integer :: n, q, row, elem, elnodes(3) + type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - + !_____________________________________________________________________________ + real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) + integer :: n, q, row, elem, elnodes(3) + !_____________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + + !_____________________________________________________________________________ ! Taylor-Galerkin (Lax-Wendroff) rhs DO row=1, myDim_nod2D rhs_m(row)=0._WP @@ -118,23 +164,25 @@ end subroutine ice_TG_rhs ! !---------------------------------------------------------------------------- ! -subroutine ice_fct_init(partit, mesh) - use o_PARAM - use MOD_MESH +subroutine ice_fct_init(ice, partit, mesh) + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP + use MOD_MESH + use o_PARAM use i_ARRAYS use ice_fct_interfaces implicit none integer :: n_size + type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - + !_____________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - n_size=myDim_nod2D+eDim_nod2D @@ -164,38 +212,41 @@ subroutine ice_fct_init(partit, mesh) dm_snow = 0.0_WP ! Fill in the mass matrix - call ice_mass_matrix_fill(partit, mesh) + call ice_mass_matrix_fill(ice, partit, mesh) if (mype==0) write(*,*) 'Ice FCT is initialized' end subroutine ice_fct_init ! !---------------------------------------------------------------------------- ! -subroutine ice_fct_solve(partit, mesh) - use MOD_MESH +subroutine ice_fct_solve(ice, partit, mesh) + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP + USE MOD_MESH use ice_fct_interfaces implicit none + type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh + !_____________________________________________________________________________ ! Driving routine - call ice_solve_high_order(partit, mesh) ! uses arrays of low-order solutions as temp + call ice_solve_high_order(ice, partit, mesh) ! uses arrays of low-order solutions as temp ! storage. It should preceed the call of low ! order solution. - call ice_solve_low_order(partit, mesh) + call ice_solve_low_order(ice, partit, mesh) - call ice_fem_fct(1, partit, mesh) ! m_ice - call ice_fem_fct(2, partit, mesh) ! a_ice - call ice_fem_fct(3, partit, mesh) ! m_snow + call ice_fem_fct(1, ice, partit, mesh) ! m_ice + call ice_fem_fct(2, ice, partit, mesh) ! a_ice + call ice_fem_fct(3, ice, partit, mesh) ! m_snow #if defined (__oifs) - call ice_fem_fct(4, partit, mesh) ! ice_temp + call ice_fem_fct(4, ice, partit, mesh) ! ice_temp #endif /* (__oifs) */ end subroutine ice_fct_solve ! ! !_______________________________________________________________________________ -subroutine ice_solve_low_order(partit, mesh) +subroutine ice_solve_low_order(ice, partit, mesh) !============================ ! Low-order solution @@ -207,24 +258,29 @@ subroutine ice_solve_low_order(partit, mesh) ! is implemented as the difference between the consistent and lumped mass ! matrices acting on the field from the previous time step. The consistent ! mass matrix on the lhs is replaced with the lumped one. - use MOD_MESH + USE MOD_ICE + USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - use MOD_TRACER + USE MOD_MESH use i_ARRAYS use i_PARAM use g_comm_auto implicit none - integer :: row, clo, clo2, cn, location(100) - real(kind=WP) :: gamma + type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - + !___________________________________________________________________________ + integer :: row, clo, clo2, cn, location(100) + real(kind=WP) :: gamma + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + + !___________________________________________________________________________ gamma=ice_gamma_fct ! Added diffusivity parameter ! Adjust it to ensure posivity of solution do row=1,myDim_nod2D @@ -265,26 +321,31 @@ end subroutine ice_solve_low_order ! ! !_______________________________________________________________________________ -subroutine ice_solve_high_order(partit, mesh) - use MOD_MESH +subroutine ice_solve_high_order(ice, partit, mesh) + USE MOD_ICE + USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - use MOD_TRACER + USE MOD_MESH use i_ARRAYS use o_PARAM use g_comm_auto implicit none - ! + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !_____________________________________________________________________________ integer :: n,i,clo,clo2,cn,location(100),row real(kind=WP) :: rhs_new integer :: num_iter_solve=3 - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - + !_____________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + + !_____________________________________________________________________________ ! Does Taylor-Galerkin solution ! !the first approximation @@ -351,7 +412,7 @@ end subroutine ice_solve_high_order ! ! !_______________________________________________________________________________ -subroutine ice_fem_fct(tr_array_id, partit, mesh) +subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) ! Flux corrected transport algorithm for tracer advection ! ! It is based on Loehner et al. (Finite-element flux-corrected @@ -359,23 +420,25 @@ subroutine ice_fem_fct(tr_array_id, partit, mesh) ! Int. J. Numer. Meth. Fluids, 7 (1987), 1093--1109) as described by Kuzmin and ! Turek. (kuzmin@math.uni-dortmund.de) ! - use MOD_MESH + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP - use MOD_TRACER + USE MOD_MESH use i_arrays use i_param use o_PARAM use g_comm_auto implicit none - + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !_____________________________________________________________________________ integer :: tr_array_id integer :: icoef(3,3),n,q, elem,elnodes(3),row real(kind=WP), allocatable, dimension(:) :: tmax, tmin real(kind=WP) :: vol, flux, ae, gamma - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - + !_____________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -668,12 +731,13 @@ end subroutine ice_fem_fct ! ! !_______________________________________________________________________________ -SUBROUTINE ice_mass_matrix_fill(partit, mesh) +SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) ! Used in ice_fct inherited from FESOM use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_ICE use i_PARAM use i_ARRAYS ! @@ -684,9 +748,11 @@ SUBROUTINE ice_mass_matrix_fill(partit, mesh) integer, allocatable :: col_pos(:) real(kind=WP) :: aa integer :: flag=0,iflag=0 + type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - + !_____________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -752,10 +818,11 @@ END SUBROUTINE ice_mass_matrix_fill ! !========================================================= ! -subroutine ice_TG_rhs_div(partit, mesh) +subroutine ice_TG_rhs_div(ice, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_ICE use i_Arrays use i_PARAM use o_PARAM @@ -764,14 +831,20 @@ subroutine ice_TG_rhs_div(partit, mesh) real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) integer :: n, q, row, elem, elnodes(3) real(kind=WP) :: c1, c2, c3, c4, cx1, cx2, cx3, cx4, entries2(3) + type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - + !_____________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + + ! Computes the rhs in a Taylor-Galerkin way (with upwind type of ! correction for the advection operator) ! In this version I tr to split divergent term off, so that FCT works without it. @@ -848,11 +921,12 @@ end subroutine ice_TG_rhs_div ! ! !_______________________________________________________________________________ -subroutine ice_update_for_div(partit, mesh) +subroutine ice_update_for_div(ice, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_ICE use i_Arrays use i_PARAM use o_PARAM @@ -863,9 +937,11 @@ subroutine ice_update_for_div(partit, mesh) integer :: n,i,clo,clo2,cn,location(100),row real(kind=WP) :: rhs_new integer :: num_iter_solve=3 + type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - + !_____________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 749bb2b31..1a4e1702a 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -39,6 +39,26 @@ subroutine find_beta_field_a(partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine + + subroutine EVPdynamics_a(ice, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_ice + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice + end subroutine + + subroutine EVPdynamics_m(ice, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_ice + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice + end subroutine end interface end module @@ -301,7 +321,7 @@ end subroutine stress2rhs_m ! !=================================================================== ! -subroutine EVPdynamics_m(partit, mesh) +subroutine EVPdynamics_m(ice, partit, mesh) ! assemble rhs and solve for ice velocity ! New implementation based on Bouillion et al. Ocean Modelling 2013 ! SD 30.07.14 @@ -313,6 +333,7 @@ subroutine EVPdynamics_m(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_ICE use g_config use i_arrays use o_arrays @@ -326,6 +347,7 @@ subroutine EVPdynamics_m(partit, mesh) implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice integer :: steps, shortstep, i, ed,n real(kind=WP) :: rdt, drag, det real(kind=WP) :: inv_thickness(partit%myDim_nod2D), umod, rhsu, rhsv @@ -344,11 +366,14 @@ subroutine EVPdynamics_m(partit, mesh) real(kind=WP) :: mf,aa, bb,p_ice(3) real(kind=WP) :: mass(partit%myDim_nod2D) +real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - +u_ice => ice%uvice(1,:) +v_ice => ice%uvice(2,:) + val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) det2=1.0_WP/(1.0_WP+alpha_evp) @@ -825,7 +850,7 @@ end subroutine stress_tensor_a ! !=================================================================== ! -subroutine EVPdynamics_a(partit, mesh) +subroutine EVPdynamics_a(ice, partit, mesh) ! assemble rhs and solve for ice velocity ! New implementation based on Bouillion et al. Ocean Modelling 2013 ! and Kimmritz et al., Ocean Modelling 2016 @@ -836,6 +861,7 @@ subroutine EVPdynamics_a(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP +USE MOD_ICE use i_arrays USE o_arrays use i_param @@ -852,15 +878,20 @@ subroutine EVPdynamics_a(partit, mesh) implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice integer :: steps, shortstep, i, ed real(kind=WP) :: rdt, drag, det, fc real(kind=WP) :: thickness, inv_thickness, umod, rhsu, rhsv REAL(kind=WP) :: t0,t1, t2, t3, t4, t5, t00, txx + + real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + steps=evp_rheol_steps rdt=ice_dt u_ice_aux=u_ice ! Initialize solver variables diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 9b31e31e7..0f6d864c4 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -59,7 +59,8 @@ MODULE i_ARRAYS logical :: ice_update = .true. ! integer :: ice_steps_since_upd = 0 ! real(kind=WP),allocatable,dimension(:,:) :: ice_grad_vel - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice, V_ice, m_ice, a_ice +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice, V_ice + REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice, a_ice REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_old, V_ice_old, m_ice_old, a_ice_old, m_snow_old,thdgr_old !PS REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice, m_snow REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index ce4fd3eb9..7a3df4c00 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -28,13 +28,26 @@ subroutine oce_fluxes(dynamics, tracers, partit, mesh) type(t_tracer), intent(inout), target :: tracers type(t_dyn) , intent(in) , target :: dynamics end subroutine + + subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + use MOD_DYN + USE MOD_ICE + USE MOD_PARSUP + use mod_tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(inout), target :: ice + end subroutine end interface end module ! ! !_______________________________________________________________________________ -subroutine oce_fluxes_mom(dynamics, partit, mesh) +subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) ! transmits the relevant fields from the ice to the ocean model ! use o_PARAM @@ -43,6 +56,7 @@ subroutine oce_fluxes_mom(dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN + USE MOD_ICE use i_ARRAYS use i_PARAM USE g_CONFIG @@ -59,12 +73,16 @@ subroutine oce_fluxes_mom(dynamics, partit, mesh) type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh + type(t_ice), intent(inout), target :: ice + real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + ! ================== ! momentum flux: ! ================== diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index aa244b212..cc86aae54 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -1,3 +1,4 @@ + module ice_array_setup_interface interface subroutine ice_array_setup(partit, mesh) @@ -13,34 +14,56 @@ subroutine ice_array_setup(partit, mesh) module ice_initial_state_interface interface - subroutine ice_initial_state(tracers, partit, mesh) + subroutine ice_initial_state(ice, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + USE MOD_ICE type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(in), target :: tracers + type(t_ice) , intent(inout), target :: ice end subroutine end interface end module + module ice_setup_interface interface - subroutine ice_setup(tracers, partit, mesh) + subroutine ice_setup(ice, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + USE MOD_ICE type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh type(t_tracer), intent(in), target :: tracers + type(t_ice), intent(inout), target :: ice end subroutine end interface end module + +module ice_timestep_interface + interface + subroutine ice_timestep(istep, ice, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + USE MOD_ICE + integer, intent(in) :: istep + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_ice), intent(inout), target :: ice + end subroutine + end interface +end module + ! !_______________________________________________________________________________ ! ice initialization + array allocation + time stepping -subroutine ice_setup(tracers, partit, mesh) +subroutine ice_setup(ice, tracers, partit, mesh) use o_param use i_param use i_arrays @@ -49,13 +72,19 @@ subroutine ice_setup(tracers, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use mod_tracer + use MOD_ICE use ice_array_setup_interface use ice_initial_state_interface implicit none + type(t_ice), intent(inout), target :: ice type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers + !___________________________________________________________________________ + ! initialise ice derived type + call ice_init(ice, partit, mesh) + ! ================ DO not change ice_dt=real(ice_ave_steps,WP)*dt ! ice_dt=dt @@ -64,12 +93,12 @@ subroutine ice_setup(tracers, partit, mesh) ! it always enters ! ================ call ice_array_setup(partit, mesh) - call ice_fct_init(partit, mesh) + call ice_fct_init(ice, partit, mesh) ! ================ ! Initialization routine, user input is required ! ================ !call ice_init_fields_test - call ice_initial_state(tracers, partit, mesh) ! Use it unless running test example + call ice_initial_state(ice, tracers, partit, mesh) ! Use it unless running test example if(partit%mype==0) write(*,*) 'Ice is initialized' end subroutine ice_setup ! @@ -104,7 +133,7 @@ subroutine ice_array_setup(partit, mesh) e_size=myDim_elem2D+eDim_elem2D ! Allocate memory for variables of ice model - allocate(u_ice(n_size), v_ice(n_size)) +! allocate(u_ice(n_size), v_ice(n_size)) allocate(U_rhs_ice(n_size), V_rhs_ice(n_size)) allocate(sigma11(e_size), sigma12(e_size), sigma22(e_size)) allocate(eps11(e_size), eps12(e_size), eps22(e_size)) @@ -141,8 +170,8 @@ subroutine ice_array_setup(partit, mesh) m_snow=0.0_WP U_rhs_ice=0.0_WP V_rhs_ice=0.0_WP - U_ice=0.0_WP - V_ice=0.0_WP +! U_ice=0.0_WP +! V_ice=0.0_WP sigma11=0.0_WP sigma22=0.0_WP sigma12=0.0_WP @@ -195,34 +224,42 @@ end subroutine ice_array_setup ! !_______________________________________________________________________________ ! Sea ice model step -subroutine ice_timestep(step, partit, mesh) +subroutine ice_timestep(step, ice, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP +USE MOD_ICE use i_arrays use o_param use g_CONFIG use i_PARAM, only: whichEVP - +use ice_EVP_interfaces +use ice_maEVP_interfaces +use ice_fct_interfaces +use ice_thermodynamics_interfaces #if defined (__icepack) use icedrv_main, only: step_icepack #endif implicit none +integer, intent(in) :: step +type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh -integer :: step,i +integer :: i REAL(kind=WP) :: t0,t1, t2, t3 #if defined (__icepack) real(kind=WP) :: time_evp, time_advec, time_therm #endif +real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - +u_ice => ice%uvice(1,:) +v_ice => ice%uvice(2,:) t0=MPI_Wtime() @@ -232,21 +269,24 @@ subroutine ice_timestep(step, partit, mesh) !___________________________________________________________________________ ! ===== Dynamics - if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics...'//achar(27)//'[0m' + SELECT CASE (whichEVP) CASE (0) - call EVPdynamics (partit, mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics...'//achar(27)//'[0m' + call EVPdynamics (ice, partit, mesh) CASE (1) - call EVPdynamics_m(partit, mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics_m...'//achar(27)//'[0m' + call EVPdynamics_m(ice, partit, mesh) CASE (2) - call EVPdynamics_a(partit, mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics_a...'//achar(27)//'[0m' + call EVPdynamics_a(ice, partit, mesh) CASE DEFAULT if (mype==0) write(*,*) 'a non existing EVP scheme specified!' call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop END SELECT - if (use_cavity) call cavity_ice_clean_vel(partit, mesh) + if (use_cavity) call cavity_ice_clean_vel(ice, partit, mesh) t1=MPI_Wtime() !___________________________________________________________________________ @@ -262,26 +302,26 @@ subroutine ice_timestep(step, partit, mesh) end do #endif /* (__oifs) */ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_TG_rhs_div...'//achar(27)//'[0m' - call ice_TG_rhs_div (partit, mesh) + call ice_TG_rhs_div (ice, partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_fct_solve...'//achar(27)//'[0m' - call ice_fct_solve (partit, mesh) + call ice_fct_solve (ice, partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_update_for_div...'//achar(27)//'[0m' - call ice_update_for_div(partit, mesh) + call ice_update_for_div(ice, partit, mesh) #if defined (__oifs) do i=1,myDim_nod2D+eDim_nod2D if (a_ice(i)>0.0_WP) ice_temp(i) = ice_temp(i)/a_ice(i) end do #endif /* (__oifs) */ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call cut_off...'//achar(27)//'[0m' - call cut_off(partit, mesh) + call cut_off(ice, partit, mesh) - if (use_cavity) call cavity_ice_clean_ma(partit, mesh) + if (use_cavity) call cavity_ice_clean_ma(ice, partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ ! ===== Thermodynamic part if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call thermodynamics...'//achar(27)//'[0m' - call thermodynamics(partit, mesh) + call thermodynamics(ice, partit, mesh) #endif /* (__icepack) */ @@ -318,29 +358,34 @@ end subroutine ice_timestep ! !_______________________________________________________________________________ ! sets inital values or reads restart file for ice model -subroutine ice_initial_state(tracers, partit, mesh) +subroutine ice_initial_state(ice, tracers, partit, mesh) use i_ARRAYs use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_ICE use o_PARAM use o_arrays use g_CONFIG implicit none ! + type(t_ice), intent(inout), target :: ice type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers integer :: i character(MAX_PATH) :: filename real(kind=WP), external :: TFrez ! Sea water freeze temperature. - + + real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + m_ice =0._WP a_ice =0._WP u_ice =0._WP diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 3eb97d14c..5e0793ed0 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -1,14 +1,41 @@ +module ice_thermodynamics_interfaces + interface + subroutine thermodynamics(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine cut_off(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module + + !=================================================================== -subroutine cut_off(partit, mesh) +subroutine cut_off(ice, partit, mesh) use o_param use i_arrays use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_ICE use g_config, only: use_cavity implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -78,7 +105,7 @@ end subroutine cut_off ! by Qiang Wang, 13.01.2009 !---------------------------------------------------------------------------- -subroutine thermodynamics(partit, mesh) +subroutine thermodynamics(ice, partit, mesh) ! ! For every surface node, this subroutine extracts the information ! needed for computation of thermodydnamics, calls the relevant @@ -90,6 +117,7 @@ subroutine thermodynamics(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_ICE use i_therm_param use i_param use i_arrays @@ -101,6 +129,7 @@ subroutine thermodynamics(partit, mesh) implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice real(kind=WP) :: h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,rsf,evap_in real(kind=WP) :: ug,ustar,T_oc,S_oc,h_ml,t,ch,ce,ch_i,ce_i,fw,ehf,evap @@ -113,11 +142,16 @@ subroutine thermodynamics(partit, mesh) integer, pointer :: myDim_nod2D, eDim_nod2D integer, dimension(:), pointer :: ulevels_nod2D real(kind=WP), dimension(:,:),pointer :: geo_coord_nod2D + real(kind=WP), dimension(:), pointer :: u_ice, v_ice myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + + rsss=ref_sss ! u_ice and v_ice are at nodes diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 52b83d251..a1a87d29e 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -7,6 +7,7 @@ MODULE io_BLOWUP USE MOD_PARSUP USE MOD_TRACER USE MOD_DYN + USE MOD_ICE use o_arrays use i_arrays implicit none @@ -65,13 +66,14 @@ MODULE io_BLOWUP !_______________________________________________________________________________ ! ini_ocean_io initializes bid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! - subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) + subroutine ini_blowup_io(year, ice, dynamics, tracers, partit, mesh) implicit none integer, intent(in) :: year type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice integer :: ncid, j integer :: varid character(500) :: longname @@ -153,8 +155,8 @@ subroutine ini_blowup_io(year, dynamics, tracers, partit, mesh) call def_variable(bid, 'a_ice' , (/nod2D/) , 'ice concentration [0 to 1]', '%', a_ice); call def_variable(bid, 'm_ice' , (/nod2D/) , 'effective ice thickness', 'm', m_ice); call def_variable(bid, 'm_snow' , (/nod2D/) , 'effective snow thickness', 'm', m_snow); - call def_variable(bid, 'u_ice' , (/nod2D/) , 'zonal velocity', 'm/s', u_ice); - call def_variable(bid, 'v_ice' , (/nod2D/) , 'meridional velocity', 'm', v_ice); + call def_variable(bid, 'u_ice' , (/nod2D/) , 'zonal velocity', 'm/s', ice%uvice(1,:)); + call def_variable(bid, 'v_ice' , (/nod2D/) , 'meridional velocity', 'm', ice%uvice(2,:)); !!PS call def_variable(bid, 'a_ice_old' , (/nod2D/) , 'ice concentration [0 to 1]', '%', a_ice_old); !PS !!PS call def_variable(bid, 'm_ice_old' , (/nod2D/) , 'effective ice thickness', 'm', m_ice_old); !PS !!PS call def_variable(bid, 'm_snow_old' , (/nod2D/) , 'effective snow thickness', 'm', m_snow_old); !PS @@ -175,16 +177,17 @@ end subroutine ini_blowup_io ! ! !_______________________________________________________________________________ - subroutine blowup(istep, dynamics, tracers, partit, mesh) + subroutine blowup(istep, ice, dynamics, tracers, partit, mesh) implicit none type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice integer :: istep ctime=timeold+(dayold-1.)*86400 - call ini_blowup_io(yearnew, dynamics, tracers, partit, mesh) + call ini_blowup_io(yearnew, ice, dynamics, tracers, partit, mesh) if(partit%mype==0) write(*,*)'Do output (netCDF, blowup) ...' if(partit%mype==0) write(*,*)' --> call assoc_ids(bid)' call assoc_ids(bid, partit) ; call was_error(bid, partit) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index f40ef0f78..89f6483e8 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -93,12 +93,13 @@ subroutine destructor(this) end subroutine -subroutine ini_mean_io(dynamics, tracers, partit, mesh) +subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN + USE MOD_ICE use g_cvmix_tke use g_cvmix_idemix use g_cvmix_kpp @@ -116,6 +117,7 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice namelist /nml_listsize/ io_listsize namelist /nml_list / io_list @@ -168,11 +170,11 @@ subroutine ini_mean_io(dynamics, tracers, partit, mesh) ! output sea ice CASE ('uice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'uice', 'ice velocity x', 'm/s', u_ice, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'uice', 'ice velocity x', 'm/s', ice%uvice(1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('vice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'vice', 'ice velocity y', 'm/s', v_ice, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'vice', 'ice velocity y', 'm/s', ice%uvice(2,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('a_ice ') if (use_ice) then @@ -808,12 +810,13 @@ subroutine update_means ! !-------------------------------------------------------------------------------------------- ! -subroutine output(istep, dynamics, tracers, partit, mesh) +subroutine output(istep, ice, dynamics, tracers, partit, mesh) use g_clock use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN + use MOD_ICE use mod_tracer use io_gather_module #if defined (__icepack) @@ -829,12 +832,14 @@ subroutine output(istep, dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(inout), target :: ice + character(:), allocatable :: filepath real(real64) :: rtime !timestamp of the record ctime=timeold+(dayold-1.)*86400 if (lfirst) then - call ini_mean_io(dynamics, tracers, partit, mesh) + call ini_mean_io(ice, dynamics, tracers, partit, mesh) #if defined (__icepack) call init_io_icepack(mesh) !icapack has its copy of p_partit => partit #endif diff --git a/src/io_restart.F90 b/src/io_restart.F90 index c5112b7f2..a7f28673a 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -7,6 +7,7 @@ MODULE io_RESTART USE MOD_PARSUP use mod_tracer use MOD_DYN + use MOD_ICE use o_arrays use i_arrays use g_cvmix_tke @@ -166,7 +167,7 @@ end subroutine ini_ocean_io !-------------------------------------------------------------------------------------------- ! ini_ice_io initializes iid datatype which contains information of all variables need to be written into ! the ice restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ice_io(year, partit, mesh) +subroutine ini_ice_io(year, ice, partit, mesh) implicit none integer, intent(in) :: year @@ -178,6 +179,7 @@ subroutine ini_ice_io(year, partit, mesh) character(4) :: cyear type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(in) , target :: ice #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -198,8 +200,8 @@ subroutine ini_ice_io(year, partit, mesh) call def_variable(iid, 'area', (/nod2D/), 'ice concentration [0 to 1]', '%', a_ice); call def_variable(iid, 'hice', (/nod2D/), 'effective ice thickness', 'm', m_ice); call def_variable(iid, 'hsnow', (/nod2D/), 'effective snow thickness', 'm', m_snow); - call def_variable(iid, 'uice', (/nod2D/), 'zonal velocity', 'm/s', u_ice); - call def_variable(iid, 'vice', (/nod2D/), 'meridional velocity', 'm', v_ice); + call def_variable(iid, 'uice', (/nod2D/), 'zonal velocity', 'm/s', ice%uvice(1,:)); + call def_variable(iid, 'vice', (/nod2D/), 'meridional velocity', 'm', ice%uvice(2,:)); #if defined (__oifs) call def_variable(iid, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice_alb); call def_variable(iid, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice_temp); @@ -209,7 +211,7 @@ end subroutine ini_ice_io ! !-------------------------------------------------------------------------------------------- ! -subroutine restart(istep, l_write, l_read, dynamics, tracers, partit, mesh) +subroutine restart(istep, l_write, l_read, ice, dynamics, tracers, partit, mesh) #if defined(__icepack) use icedrv_main, only: init_restart_icepack @@ -228,16 +230,17 @@ subroutine restart(istep, l_write, l_read, dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then call ini_ocean_io(yearnew, dynamics, tracers, partit, mesh) - if (use_ice) call ini_ice_io (yearnew, partit, mesh) + if (use_ice) call ini_ice_io (yearnew, ice, partit, mesh) #if defined(__icepack) if (use_ice) call init_restart_icepack(yearnew, mesh) !icapack has its copy of p_partit => partit #endif else call ini_ocean_io(yearold, dynamics, tracers, partit, mesh) - if (use_ice) call ini_ice_io (yearold, partit, mesh) + if (use_ice) call ini_ice_io (yearold, ice, partit, mesh) #if defined(__icepack) if (use_ice) call init_restart_icepack(yearold, mesh) !icapack has its copy of p_partit => partit #endif diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index dbc941243..693cf08ae 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -130,14 +130,16 @@ subroutine init_thickness_ale(dynamics, partit, mesh) module oce_timestep_ale_interface interface - subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) + subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer use MOD_DYN + use MOD_ICE integer , intent(in) :: n type(t_dyn) , intent(inout), target :: dynamics + type(t_ice), intent(inout), target :: ice type(t_tracer), intent(inout), target :: tracers type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(inout), target :: mesh @@ -2450,6 +2452,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, lcflmax) lcflmax=0. + cflmax = 0. !$OMP DO do n=1, myDim_nod2D+eDim_nod2D lcflmax=max(lcflmax, maxval(CFL_z(:, n))) @@ -2812,11 +2815,12 @@ end subroutine impl_vert_visc_ale ! ! !=============================================================================== -subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) +subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) use g_config use MOD_MESH use MOD_TRACER use MOD_DYN + USE MOD_ICE use o_ARRAYS use o_PARAM USE MOD_PARTIT @@ -2846,6 +2850,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) type(t_tracer), intent(inout), target :: tracers type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(inout), target :: mesh + type(t_ice) , intent(inout), target :: ice !___________________________________________________________________________ real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: node @@ -2927,20 +2932,20 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) END DO !$OMP END PARALLEL DO - call mo_convect(partit, mesh) + call mo_convect(ice, partit, mesh) ! use FESOM2.0 tuned pacanowski & philander parameterization for vertical ! mixing else if(mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_PP'//achar(27)//'[0m' call oce_mixing_PP(dynamics, partit, mesh) - call mo_convect(partit, mesh) + call mo_convect(ice, partit, mesh) ! use CVMIX KPP (Large at al. 1994) else if(mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_kpp'//achar(27)//'[0m' call calc_cvmix_kpp(dynamics, tracers, partit, mesh) - call mo_convect(partit, mesh) + call mo_convect(ice, partit, mesh) ! use CVMIX PP (Pacanowski and Philander 1981) parameterisation for mixing ! based on Richardson number Ri = N^2/(du/dz)^2, using Brunt Väisälä frequency @@ -2948,7 +2953,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) else if(mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_pp'//achar(27)//'[0m' call calc_cvmix_pp(dynamics, partit, mesh) - call mo_convect(partit, mesh) + call mo_convect(ice, partit, mesh) ! use CVMIX TKE (turbulent kinetic energy closure) parameterisation for ! vertical mixing with or without the IDEMIX (dissipation of energy by @@ -2957,7 +2962,7 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) else if(mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_tke'//achar(27)//'[0m' call calc_cvmix_tke(dynamics, partit, mesh) - call mo_convect(partit, mesh) + call mo_convect(ice, partit, mesh) end if @@ -3071,12 +3076,12 @@ subroutine oce_timestep_ale(n, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! write out global fields for debugging if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call write_step_info'//achar(27)//'[0m' - call write_step_info(n,logfile_outfreq, dynamics, tracers, partit, mesh) + call write_step_info(n,logfile_outfreq, ice, dynamics, tracers, partit, mesh) ! check model for blowup --> ! write_step_info and check_blowup require ! togeather around 2.5% of model runtime if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call check_blowup'//achar(27)//'[0m' - call check_blowup(n, dynamics, tracers, partit, mesh) + call check_blowup(n, ice, dynamics, tracers, partit, mesh) t10=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_mo_conv.F90 b/src/oce_mo_conv.F90 index 8d6434a9f..d67000b19 100644 --- a/src/oce_mo_conv.F90 +++ b/src/oce_mo_conv.F90 @@ -1,11 +1,12 @@ ! ! !_______________________________________________________________________________ -subroutine mo_convect(partit, mesh) +subroutine mo_convect(ice, partit, mesh) USE o_PARAM USE MOD_MESH USE MOD_PARTIT USE MOD_PARSUP + USE MOD_ICE USE o_ARRAYS USE g_config use i_arrays @@ -15,12 +16,15 @@ subroutine mo_convect(partit, mesh) integer :: node, elem, nz, elnodes(3), nzmin, nzmax type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - + type(t_ice), intent(in), target :: ice + real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + !___________________________________________________________________________ ! add vertical mixing scheme of Timmermann and Beckmann, 2004,"Parameterization ! of vertical mixing in the Weddell Sea! diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index b97127caa..a12012fe3 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,45 +1,50 @@ module write_step_info_interface interface - subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) + subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER use MOD_DYN + use MOD_ICE integer :: istep,outfreq type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice end subroutine end interface end module module check_blowup_interface interface - subroutine check_blowup(istep, dynamics, tracers, partit, mesh) + subroutine check_blowup(istep, ice, dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER use MOD_DYN + use MOD_ICE integer :: istep type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice end subroutine end interface end module ! ! !=============================================================================== -subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) +subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) use g_config, only: dt, use_ice use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER use MOD_DYN + use MOD_ICE use o_PARAM use o_ARRAYS, only: water_flux, heat_flux, & pgf_x, pgf_y, Av, Kv @@ -63,6 +68,7 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z real(kind=WP), dimension(:) , pointer :: eta_n, d_eta @@ -241,13 +247,14 @@ end subroutine write_step_info ! ! !=============================================================================== -subroutine check_blowup(istep, dynamics, tracers, partit, mesh) +subroutine check_blowup(istep, ice, dynamics, tracers, partit, mesh) use g_config, only: logfile_outfreq, which_ALE use MOD_MESH use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP use MOD_DYN + use MOD_ICE use o_PARAM use o_ARRAYS, only: water_flux, stress_surf, & heat_flux, Kv, Av @@ -265,10 +272,12 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice real(kind=WP), dimension(:,:,:), pointer :: UV real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old real(kind=WP), dimension(:) , pointer :: eta_n, d_eta + real(kind=WP), dimension(:) , pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -280,6 +289,9 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) ssh_rhs_old => dynamics%ssh_rhs_old(:) eta_n => dynamics%eta_n(:) d_eta => dynamics%d_eta(:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz) do n=1, myDim_nod2d !___________________________________________________________________ @@ -489,7 +501,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) ! moment only over CPU mype==0 call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) if (found_blowup==1) then - call write_step_info(istep, 1, dynamics, tracers,partit,mesh) + call write_step_info(istep, 1, ice, dynamics, tracers, partit, mesh) if (mype==0) then call sleep(1) write(*,*) @@ -509,7 +521,7 @@ subroutine check_blowup(istep, dynamics, tracers, partit, mesh) write(*,*) ' _____.,-#%&$@%#&#~,._____' write(*,*) end if - call blowup(istep, dynamics, tracers, partit, mesh) + call blowup(istep, ice, dynamics, tracers, partit, mesh) if (mype==0) write(*,*) ' --> finished writing blow up file' call par_ex(partit%MPI_COMM_FESOM, partit%mype) endif From 6081e92705eebe2988d2ad996cfe27fc8670f24f Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 19 Nov 2021 23:19:06 +0100 Subject: [PATCH 604/909] exchange a_ice, m_ice and m_snow against ice derived type variables ice%data(1:3)%values --- src/cavity_param.F90 | 44 +++- src/fesom_module.F90 | 2 +- src/gen_modules_cvmix_kpp.F90 | 21 +- src/ice_EVP.F90 | 4 + src/ice_fct.F90 | 132 ++++++---- src/ice_maEVP.F90 | 468 +++++++++++++++++++--------------- src/ice_modules.F90 | 4 +- src/ice_oce_coupling.F90 | 149 +++++------ src/ice_setup_step.F90 | 31 ++- src/ice_thermo_oce.F90 | 54 ++-- src/io_blowup.F90 | 6 +- src/io_meandata.F90 | 6 +- src/io_restart.F90 | 6 +- src/oce_ale.F90 | 4 +- src/oce_ale_vel_rhs.F90 | 61 +++-- src/oce_mo_conv.F90 | 17 +- src/oce_shortwave_pene.F90 | 51 ++-- src/write_step_info.F90 | 52 ++-- 18 files changed, 622 insertions(+), 490 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 64f7ffcb5..65dbf5152 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -1,4 +1,4 @@ -module cavity_heat_water_fluxes_3eq_interface +module cavity_interfaces interface subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) USE MOD_DYN @@ -11,11 +11,7 @@ subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh end subroutine - end interface -end module -module cavity_ice_clean_vel_interface - interface subroutine cavity_ice_clean_vel(ice, partit, mesh) use MOD_ICE USE MOD_PARTIT @@ -25,6 +21,26 @@ subroutine cavity_ice_clean_vel(ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh end subroutine + + subroutine cavity_ice_clean_ma(ice, partit, mesh) + use MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine cavity_momentum_fluxes(dynamics, partit, mesh) + use MOD_DYN + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_dyn), intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine end interface end module @@ -490,21 +506,29 @@ end subroutine cavity_ice_clean_vel ! ! !_______________________________________________________________________________ -subroutine cavity_ice_clean_ma(partit, mesh) - use MOD_MESH +subroutine cavity_ice_clean_ma(ice, partit, mesh) + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP - use i_ARRAYS, only: m_ice, m_snow, a_ice + USE MOD_MESH implicit none + type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ integer :: node - + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + + !___________________________________________________________________________ do node=1,myDim_nod2d+eDim_nod2d if(ulevels_nod2D(node)>1) then m_ice(node) =0.0_WP diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index df98a9f08..eea1148dd 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -356,7 +356,7 @@ subroutine fesom_runloop(current_nsteps) !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' call oce_fluxes_mom(f%ice, f%dynamics, f%partit, f%mesh) ! momentum only - call oce_fluxes(f%dynamics, f%tracers, f%partit, f%mesh) + call oce_fluxes(f%ice, f%dynamics, f%tracers, f%partit, f%mesh) end if call before_oce_step(f%dynamics, f%tracers, f%partit, f%mesh) ! prepare the things if required f%t2 = MPI_Wtime() diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 8a6039852..c6f63dd43 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -22,11 +22,12 @@ module g_cvmix_kpp ! module calls from FESOM use g_config use o_param - use mod_mesh + USE MOD_ICE + USE MOD_DYN + USE mod_tracer USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer - use MOD_DYN + USE MOD_MESH use o_arrays use g_comm_auto use i_arrays @@ -349,11 +350,13 @@ end subroutine init_cvmix_kpp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers + subroutine calc_cvmix_kpp(ice, dynamics, tracers, partit, mesh) + type(t_ice) , intent(in), target :: ice type(t_dyn) , intent(in), target :: dynamics + type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !_______________________________________________________________________ integer :: node, elem, nz, nln, nun, elnodes(3), aux_nz real(kind=WP) :: vshear2, dz2, aux, aux_wm(mesh%nl), aux_ws(mesh%nl) real(kind=WP) :: aux_coeff, sigma, stable @@ -363,6 +366,9 @@ subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) real(kind=WP) :: sldepth, sfc_temp, sfc_salt, sfc_u, sfc_v, htot, delh, rho_sfc, rho_nz real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 real(kind=WP) :: sfc_rhopot, sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2 + !_______________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice real(kind=WP), dimension(:,:), pointer :: temp, salt real(kind=WP), dimension(:,:,:), pointer :: UVnode #include "associate_part_def.h" @@ -372,6 +378,7 @@ subroutine calc_cvmix_kpp(dynamics, tracers, partit, mesh) temp=>tracers%data(1)%values(:,:) salt=>tracers%data(2)%values(:,:) UVnode=>dynamics%uvnode(:,:,:) + a_ice => ice%data(1)%values(:) !_______________________________________________________________________ kpp_Av = 0.0_WP diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index dbc7f2c8b..78db3eb31 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -474,12 +474,16 @@ subroutine EVPdynamics(ice, partit, mesh) !_______________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice +real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" u_ice => ice%uvice(1,:) v_ice => ice%uvice(2,:) +a_ice => ice%data(1)%values(:) +m_ice => ice%data(2)%values(:) +m_snow => ice%data(3)%values(:) !_______________________________________________________________________________ ! If Icepack is used, always update the tracers diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 2b69e52d5..5b5715e18 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -87,32 +87,36 @@ subroutine ice_update_for_div(ice, partit, mesh) ! ! ===================================================================== subroutine ice_TG_rhs(ice, partit, mesh) - use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_ICE - use i_Arrays - use i_PARAM - use o_PARAM - USE g_CONFIG - implicit none - type(t_ice), intent(inout), target :: ice - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - !_____________________________________________________________________________ - real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) - integer :: n, q, row, elem, elnodes(3) - !_____________________________________________________________________________ - ! pointer on necessary derived types - real(kind=WP), dimension(:), pointer :: u_ice, v_ice + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_ICE + use i_Arrays + use i_PARAM + use o_PARAM + USE g_CONFIG + implicit none + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) + integer :: n, q, row, elem, elnodes(3) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) - - !_____________________________________________________________________________ + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + + !___________________________________________________________________________ ! Taylor-Galerkin (Lax-Wendroff) rhs DO row=1, myDim_nod2D rhs_m(row)=0._WP @@ -259,7 +263,6 @@ subroutine ice_solve_low_order(ice, partit, mesh) ! matrices acting on the field from the previous time step. The consistent ! mass matrix on the lhs is replaced with the lumped one. USE MOD_ICE - USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH @@ -275,10 +278,14 @@ subroutine ice_solve_low_order(ice, partit, mesh) real(kind=WP) :: gamma !___________________________________________________________________________ ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) !___________________________________________________________________________ gamma=ice_gamma_fct ! Added diffusivity parameter @@ -432,18 +439,23 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - !_____________________________________________________________________________ + !___________________________________________________________________________ integer :: tr_array_id integer :: icoef(3,3),n,q, elem,elnodes(3),row real(kind=WP), allocatable, dimension(:) :: tmax, tmin real(kind=WP) :: vol, flux, ae, gamma - !_____________________________________________________________________________ - ! pointer on necessary derived types + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + + !___________________________________________________________________________ gamma=ice_gamma_fct ! It should coinside with gamma in ! ts_solve_low_order @@ -820,31 +832,36 @@ END SUBROUTINE ice_mass_matrix_fill ! subroutine ice_TG_rhs_div(ice, partit, mesh) use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_ICE - use i_Arrays - use i_PARAM - use o_PARAM - USE g_CONFIG - implicit none - real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) - integer :: n, q, row, elem, elnodes(3) - real(kind=WP) :: c1, c2, c3, c4, cx1, cx2, cx3, cx4, entries2(3) - type(t_ice), intent(inout), target :: ice - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - !_____________________________________________________________________________ - ! pointer on necessary derived types - real(kind=WP), dimension(:), pointer :: u_ice, v_ice + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_ICE + use i_Arrays + use i_PARAM + use o_PARAM + USE g_CONFIG + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) + integer :: n, q, row, elem, elnodes(3) + real(kind=WP) :: c1, c2, c3, c4, cx1, cx2, cx3, cx4, entries2(3) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) - - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + + !___________________________________________________________________________ ! Computes the rhs in a Taylor-Galerkin way (with upwind type of ! correction for the advection operator) ! In this version I tr to split divergent term off, so that FCT works without it. @@ -933,20 +950,25 @@ subroutine ice_update_for_div(ice, partit, mesh) USE g_CONFIG use g_comm_auto implicit none - ! + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ integer :: n,i,clo,clo2,cn,location(100),row real(kind=WP) :: rhs_new integer :: num_iter_solve=3 - type(t_ice), intent(inout), target :: ice - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - !_____________________________________________________________________________ - ! pointer on necessary derived types + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + + !___________________________________________________________________________ ! Does Taylor-Galerkin solution ! !the first approximation diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 1a4e1702a..3c3f40335 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -1,43 +1,51 @@ module ice_maEVP_interfaces interface - subroutine ssh2rhs(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit + subroutine ssh2rhs(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh end subroutine - subroutine stress_tensor_a(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit + subroutine stress_tensor_a(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh end subroutine - subroutine stress2rhs_m(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit + subroutine stress2rhs_m(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh end subroutine - subroutine find_alpha_field_a(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit + subroutine find_alpha_field_a(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh end subroutine subroutine find_beta_field_a(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine subroutine EVPdynamics_a(ice, partit, mesh) @@ -66,38 +74,45 @@ subroutine EVPdynamics_m(ice, partit, mesh) ! New evp implementation following Bouillion et al. 2013 ! and Kimmritz et al. 2015 (mEVP) and Kimmritz et al. 2016 (aEVP) ! ==================================================================== -subroutine stress_tensor_m(partit, mesh) - ! Internal stress tensor - ! New implementation following Boullion et al, Ocean Modelling 2013. - ! SD, 30.07.2014 - !=================================================================== - use o_param - use i_param - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use g_config - use i_arrays - +subroutine stress_tensor_m(ice, partit, mesh) + ! Internal stress tensor + ! New implementation following Boullion et al, Ocean Modelling 2013. + ! SD, 30.07.2014 + !=================================================================== + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use i_param + use mod_mesh + use g_config + use i_arrays #if defined (__icepack) -use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif - - implicit none - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - - integer :: elem, elnodes(3) - real(kind=WP) :: dx(3), dy(3), msum, asum - real(kind=WP) :: eps1, eps2, pressure, delta - real(kind=WP) :: val3, meancos, usum, vsum, vale - real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 - + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: elem, elnodes(3) + real(kind=WP) :: dx(3), dy(3), msum, asum + real(kind=WP) :: eps1, eps2, pressure, delta + real(kind=WP) :: val3, meancos, usum, vsum, vale + real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + + !___________________________________________________________________________ + val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) det2=1.0_WP/(1.0_WP+alpha_evp) @@ -167,30 +182,37 @@ end subroutine stress_tensor_m ! ! ================================================================== ! -subroutine ssh2rhs(partit, mesh) - ! Compute the contribution from the elevation to the rhs - ! S.D. 30.07.2014 - use o_param - use i_param - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use g_config - use i_arrays - use i_therm_param - implicit none - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - - integer :: row, elem, elnodes(3), n - real(kind=WP) :: dx(3), dy(3), vol - real(kind=WP) :: val3, meancos, aa, bb, p_ice(3) - +subroutine ssh2rhs(ice, partit, mesh) + ! Compute the contribution from the elevation to the rhs + ! S.D. 30.07.2014 + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + use o_param + use i_param + use mod_mesh + use g_config + use i_arrays + use i_therm_param + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: row, elem, elnodes(3), n + real(kind=WP) :: dx(3), dy(3), vol + real(kind=WP) :: val3, meancos, aa, bb, p_ice(3) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + + !___________________________________________________________________________ val3=1.0_WP/3.0_WP ! use rhs_m and rhs_a for storing the contribution from elevation: @@ -248,34 +270,41 @@ end subroutine ssh2rhs ! !=================================================================== ! -subroutine stress2rhs_m(partit, mesh) +subroutine stress2rhs_m(ice, partit, mesh) ! add internal stress to the rhs ! SD, 30.07.2014 !----------------------------------------------------------------- - use o_param - use i_param - use i_therm_param - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use g_config - use i_arrays - implicit none - - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - - integer :: k, row, elem, elnodes(3) - real(kind=WP) :: dx(3), dy(3), vol - real(kind=WP) :: val3, mf, aa, bb - real(kind=WP) :: mass, cluster_area, elevation_elem(3) - + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + use o_param + use i_param + use i_therm_param + use mod_mesh + use g_config + use i_arrays + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: k, row, elem, elnodes(3) + real(kind=WP) :: dx(3), dy(3), vol + real(kind=WP) :: val3, mf, aa, bb + real(kind=WP) :: mass, cluster_area, elevation_elem(3) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + + !___________________________________________________________________________ val3=1.0_WP/3.0_WP do row=1, myDim_nod2d @@ -326,54 +355,56 @@ subroutine EVPdynamics_m(ice, partit, mesh) ! New implementation based on Bouillion et al. Ocean Modelling 2013 ! SD 30.07.14 !--------------------------------------------------------- - - use o_param - use i_param - use i_therm_param - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_ICE - use g_config - use i_arrays - use o_arrays - use g_comm_auto - + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use i_param + use i_therm_param + use g_config + use i_arrays + use o_arrays + use g_comm_auto #if defined (__icepack) - use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength - use icedrv_main, only: icepack_to_fesom + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength + use icedrv_main, only: icepack_to_fesom #endif - - implicit none - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_ice), intent(inout), target :: ice - integer :: steps, shortstep, i, ed,n - real(kind=WP) :: rdt, drag, det - real(kind=WP) :: inv_thickness(partit%myDim_nod2D), umod, rhsu, rhsv - logical :: ice_el(partit%myDim_elem2D), ice_nod(partit%myDim_nod2D) - -!NR for stress_tensor_m - integer :: el, elnodes(3) - real(kind=WP) :: dx(3), dy(3), msum, asum - real(kind=WP) :: eps1, eps2, pressure, pressure_fac(partit%myDim_elem2D), delta - real(kind=WP) :: val3, meancos, vale - real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 - -!NR for stress2rhs_m - integer :: k, row - real(kind=WP) :: vol - real(kind=WP) :: mf,aa, bb,p_ice(3) - real(kind=WP) :: mass(partit%myDim_nod2D) - -real(kind=WP), dimension(:), pointer :: u_ice, v_ice + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: steps, shortstep, i, ed,n + real(kind=WP) :: rdt, drag, det + real(kind=WP) :: inv_thickness(partit%myDim_nod2D), umod, rhsu, rhsv + logical :: ice_el(partit%myDim_elem2D), ice_nod(partit%myDim_nod2D) + !NR for stress_tensor_m + integer :: el, elnodes(3) + real(kind=WP) :: dx(3), dy(3), msum, asum + real(kind=WP) :: eps1, eps2, pressure, pressure_fac(partit%myDim_elem2D), delta + real(kind=WP) :: val3, meancos, vale + real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 + !NR for stress2rhs_m + integer :: k, row + real(kind=WP) :: vol + real(kind=WP) :: mf,aa, bb,p_ice(3) + real(kind=WP) :: mass(partit%myDim_nod2D) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -u_ice => ice%uvice(1,:) -v_ice => ice%uvice(2,:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) det2=1.0_WP/(1.0_WP+alpha_evp) @@ -669,37 +700,43 @@ end subroutine EVPdynamics_m ! The subroutines involved are with _a. ! ==================================================================== ! -subroutine find_alpha_field_a(partit, mesh) +subroutine find_alpha_field_a(ice, partit, mesh) ! EVP stability parameter alpha is computed at each element ! aEVP implementation ! SD, 13.02.2017 ! ================================================================== - use o_param - use i_param - use i_therm_param - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use g_config - use i_arrays - + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use i_param + use i_therm_param + use g_config + use i_arrays #if defined (__icepack) - use icedrv_main, only: strength + use icedrv_main, only: strength #endif - - implicit none - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - integer :: elem, elnodes(3) - real(kind=WP) :: dx(3), dy(3), msum, asum - real(kind=WP) :: eps1, eps2, pressure, delta - real(kind=WP) :: val3, meancos, usum, vsum, vale - + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: elem, elnodes(3) + real(kind=WP) :: dx(3), dy(3), msum, asum + real(kind=WP) :: eps1, eps2, pressure, delta + real(kind=WP) :: val3, meancos, usum, vsum, vale + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + + !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) do elem=1,myDim_elem2D @@ -748,37 +785,44 @@ subroutine find_alpha_field_a(partit, mesh) end subroutine find_alpha_field_a ! ==================================================================== -subroutine stress_tensor_a(partit, mesh) +subroutine stress_tensor_a(ice, partit, mesh) ! Internal stress tensor ! New implementation following Boullion et al, Ocean Modelling 2013. ! and Kimmritz et al., Ocean Modelling 2016 ! SD, 14.02.2017 !=================================================================== - use o_param - use i_param - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use g_config - use i_arrays - + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + use o_param + use i_param + use mod_mesh + use g_config + use i_arrays #if defined (__icepack) - use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif - - implicit none - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - integer :: elem, elnodes(3) - real(kind=WP) :: dx(3), dy(3), msum, asum - real(kind=WP) :: eps1, eps2, pressure, delta - real(kind=WP) :: val3, meancos, usum, vsum, vale - real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: elem, elnodes(3) + real(kind=WP) :: dx(3), dy(3), msum, asum + real(kind=WP) :: eps1, eps2, pressure, delta + real(kind=WP) :: val3, meancos, usum, vsum, vale + real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + + !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) do elem=1,myDim_elem2D @@ -856,47 +900,51 @@ subroutine EVPdynamics_a(ice, partit, mesh) ! and Kimmritz et al., Ocean Modelling 2016 ! SD 14.02.17 !--------------------------------------------------------- - -use o_param -use mod_mesh -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_ICE -use i_arrays -USE o_arrays -use i_param -use o_PARAM -use i_therm_param -use g_config, only: use_cavity -use g_comm_auto -use ice_maEVP_interfaces - + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use i_arrays + USE o_arrays + use i_param + use o_PARAM + use i_therm_param + use g_config, only: use_cavity + use g_comm_auto + use ice_maEVP_interfaces #if defined (__icepack) - use icedrv_main, only: rdg_conv_elem, rdg_shear_elem + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem #endif - - implicit none - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_ice), intent(inout), target :: ice - integer :: steps, shortstep, i, ed - real(kind=WP) :: rdt, drag, det, fc - real(kind=WP) :: thickness, inv_thickness, umod, rhsu, rhsv - REAL(kind=WP) :: t0,t1, t2, t3, t4, t5, t00, txx - - real(kind=WP), dimension(:), pointer :: u_ice, v_ice + implicit none + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: steps, shortstep, i, ed + real(kind=WP) :: rdt, drag, det, fc + real(kind=WP) :: thickness, inv_thickness, umod, rhsu, rhsv + REAL(kind=WP) :: t0,t1, t2, t3, t4, t5, t00, txx + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + !___________________________________________________________________________ steps=evp_rheol_steps rdt=ice_dt u_ice_aux=u_ice ! Initialize solver variables v_ice_aux=v_ice - call ssh2rhs(partit, mesh) + call ssh2rhs(ice, partit, mesh) #if defined (__icepack) rdg_conv_elem(:) = 0.0_WP @@ -904,8 +952,8 @@ subroutine EVPdynamics_a(ice, partit, mesh) #endif do shortstep=1, steps - call stress_tensor_a(partit, mesh) - call stress2rhs_m(partit, mesh) ! _m=_a, so no _m version is the only one! + call stress_tensor_a(ice, partit, mesh) + call stress2rhs_m(ice, partit, mesh) ! _m=_a, so no _m version is the only one! do i=1,myDim_nod2D !_______________________________________________________________________ @@ -960,7 +1008,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) u_ice=u_ice_aux v_ice=v_ice_aux - call find_alpha_field_a(partit, mesh) ! alpha_evp_array is initialized with alpha_evp; + call find_alpha_field_a(ice, partit, mesh) ! alpha_evp_array is initialized with alpha_evp; ! At this stage we already have non-trivial velocities. call find_beta_field_a(partit, mesh) end subroutine EVPdynamics_a diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 0f6d864c4..f70c6ca4c 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -60,9 +60,9 @@ MODULE i_ARRAYS integer :: ice_steps_since_upd = 0 ! real(kind=WP),allocatable,dimension(:,:) :: ice_grad_vel ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice, V_ice - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice, a_ice +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice, a_ice, m_snow REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_old, V_ice_old, m_ice_old, a_ice_old, m_snow_old,thdgr_old !PS - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice, m_snow + REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 7a3df4c00..dcda19763 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -16,32 +16,33 @@ subroutine ocean2ice(dynamics, tracers, partit, mesh) end module module oce_fluxes_interface - interface - subroutine oce_fluxes(dynamics, tracers, partit, mesh) - use mod_mesh - USE MOD_PARTIT - use MOD_DYN - USE MOD_PARSUP - use mod_tracer - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - type(t_tracer), intent(inout), target :: tracers - type(t_dyn) , intent(in) , target :: dynamics - end subroutine - - subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - use MOD_DYN - USE MOD_ICE - USE MOD_PARSUP - use mod_tracer - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - type(t_dyn) , intent(in) , target :: dynamics - type(t_ice) , intent(inout), target :: ice - end subroutine - end interface + interface + subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module ! @@ -50,38 +51,38 @@ subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) ! transmits the relevant fields from the ice to the ocean model ! - use o_PARAM - use o_ARRAYS - use MOD_MESH + USE MOD_ICE + USE MOD_DYN USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN - USE MOD_ICE + USE MOD_MESH + use o_PARAM + use o_ARRAYS use i_ARRAYS use i_PARAM USE g_CONFIG use g_comm_auto - #if defined (__icepack) use icedrv_main, only: icepack_to_fesom #endif - implicit none - - integer :: n, elem, elnodes(3),n1 - real(kind=WP) :: aux, aux1 + type(t_ice) , intent(inout), target :: ice type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - type(t_ice), intent(inout), target :: ice - - real(kind=WP), dimension(:), pointer :: u_ice, v_ice + !___________________________________________________________________________ + integer :: n, elem, elnodes(3),n1 + real(kind=WP) :: aux, aux1 + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice, a_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) ! ================== ! momentum flux: @@ -221,43 +222,47 @@ end subroutine ocean2ice ! ! !_______________________________________________________________________________ -subroutine oce_fluxes(dynamics, tracers, partit, mesh) - - use MOD_MESH - use MOD_DYN - use MOD_TRACER - USE MOD_PARTIT - USE MOD_PARSUP - USE g_CONFIG - use o_ARRAYS - use i_ARRAYS - use g_comm_auto - use g_forcing_param, only: use_virt_salt - use g_forcing_arrays - use g_support - use i_therm_param - +subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) + USE MOD_ICE + use MOD_DYN + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_MESH + USE g_CONFIG + use o_ARRAYS + use i_ARRAYS + use g_comm_auto + use g_forcing_param, only: use_virt_salt + use g_forcing_arrays + use g_support + use i_therm_param + use cavity_interfaces #if defined (__icepack) - use icedrv_main, only: icepack_to_fesom, & - init_flux_atm_ocn + use icedrv_main, only: icepack_to_fesom, & + init_flux_atm_ocn #endif - use cavity_heat_water_fluxes_3eq_interface - implicit none - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(inout), target :: tracers - type(t_dyn), intent(in), target :: dynamics - integer :: n, elem, elnodes(3),n1 - real(kind=WP) :: rsss, net - real(kind=WP), allocatable :: flux(:) - real(kind=WP), dimension(:,:), pointer :: temp, salt + use cavity_interfaces + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, elem, elnodes(3),n1 + real(kind=WP) :: rsss, net + real(kind=WP), allocatable :: flux(:) + !___________________________________________________________________________ + real(kind=WP), dimension(:,:), pointer :: temp, salt #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - temp=>tracers%data(1)%values(:,:) - salt=>tracers%data(2)%values(:,:) + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) + !___________________________________________________________________________ allocate(flux(myDim_nod2D+eDim_nod2D)) flux = 0.0_WP @@ -414,7 +419,7 @@ subroutine oce_fluxes(dynamics, tracers, partit, mesh) end if !___________________________________________________________________________ - if (use_sw_pene) call cal_shortwave_rad(partit, mesh) + if (use_sw_pene) call cal_shortwave_rad(ice, partit, mesh) !___________________________________________________________________________ deallocate(flux) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index cc86aae54..68d37473a 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -137,7 +137,7 @@ subroutine ice_array_setup(partit, mesh) allocate(U_rhs_ice(n_size), V_rhs_ice(n_size)) allocate(sigma11(e_size), sigma12(e_size), sigma22(e_size)) allocate(eps11(e_size), eps12(e_size), eps22(e_size)) - allocate(m_ice(n_size), a_ice(n_size), m_snow(n_size)) +! allocate(m_ice(n_size), a_ice(n_size), m_snow(n_size)) allocate(rhs_m(n_size), rhs_a(n_size), rhs_ms(n_size)) allocate(t_skin(n_size)) allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS @@ -165,9 +165,9 @@ subroutine ice_array_setup(partit, mesh) rhs_m=0.0_WP rhs_ms=0.0_WP rhs_a=0.0_WP - m_ice=0.0_WP - a_ice=0.0_WP - m_snow=0.0_WP +! m_ice=0.0_WP +! a_ice=0.0_WP +! m_snow=0.0_WP U_rhs_ice=0.0_WP V_rhs_ice=0.0_WP ! U_ice=0.0_WP @@ -237,6 +237,7 @@ subroutine ice_timestep(step, ice, partit, mesh) use ice_maEVP_interfaces use ice_fct_interfaces use ice_thermodynamics_interfaces +use cavity_interfaces #if defined (__icepack) use icedrv_main, only: step_icepack #endif @@ -359,25 +360,27 @@ end subroutine ice_timestep !_______________________________________________________________________________ ! sets inital values or reads restart file for ice model subroutine ice_initial_state(ice, tracers, partit, mesh) - use i_ARRAYs - use MOD_MESH + USE MOD_ICE + USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - use MOD_TRACER - use MOD_ICE + USE MOD_MESH + use i_ARRAYs use o_PARAM use o_arrays use g_CONFIG implicit none - ! type(t_ice), intent(inout), target :: ice - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ integer :: i character(MAX_PATH) :: filename real(kind=WP), external :: TFrez ! Sea water freeze temperature. - + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -385,7 +388,11 @@ subroutine ice_initial_state(ice, tracers, partit, mesh) #include "associate_mesh_ass.h" u_ice => ice%uvice(1,:) v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + !___________________________________________________________________________ m_ice =0._WP a_ice =0._WP u_ice =0._WP diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 5e0793ed0..95d77c68e 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -36,12 +36,17 @@ subroutine cut_off(ice, partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit type(t_ice), intent(inout), target :: ice - + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + !___________________________________________________________________________ ! lower cutoff: a_ice where(a_ice>1.0_WP) a_ice=1.0_WP @@ -66,23 +71,7 @@ subroutine cut_off(ice, partit, mesh) ice_temp=273.15_WP #endif /* (__oifs) */ end where - - -!!PS if (use_cavity) then -!!PS ! upper cutoff SH: m_ice -!!PS where(m_ice>5.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)<0.0_WP) m_ice=5.0_WP -!!PS -!!PS ! upper cutoff NH: m_ice -!!PS where(m_ice>10.0_WP .and. ulevels_nod2d==1 .and. geo_coord_nod2D(2,:)>0.0_WP) m_ice=10.0_WP -!!PS -!!PS ! upper cutoff: m_snow -!!PS where(m_snow>2.5_WP .and. ulevels_nod2d==1) m_snow=2.5_WP -!!PS -!!PS !___________________________________________________________________________ -!!PS ! lower cutoff: m_snow -!!PS !!PS where(m_snow<0.1e-8_WP) m_snow=0.0_WP -!!PS end if - + !___________________________________________________________________________ #if defined (__oifs) where(ice_temp>273.15_WP) ice_temp=273.15_WP @@ -113,11 +102,11 @@ subroutine thermodynamics(ice, partit, mesh) ! variables. !------------------------------------------------------------------------ - use o_param - use mod_mesh + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP - USE MOD_ICE + USE MOD_MESH + use o_param use i_therm_param use i_param use i_arrays @@ -127,10 +116,10 @@ subroutine thermodynamics(ice, partit, mesh) use g_comm_auto use g_sbf, only: l_snow implicit none + type(t_ice), intent(inout), target :: ice type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_ice), intent(inout), target :: ice - + !_____________________________________________________________________________ real(kind=WP) :: h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,rsf,evap_in real(kind=WP) :: ug,ustar,T_oc,S_oc,h_ml,t,ch,ce,ch_i,ce_i,fw,ehf,evap real(kind=WP) :: ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout, subli @@ -138,20 +127,23 @@ subroutine thermodynamics(ice, partit, mesh) integer :: i, j, elem real(kind=WP), allocatable :: ustar_aux(:) real(kind=WP) lid_clo - + !_____________________________________________________________________________ + ! pointer on necessary derived types integer, pointer :: myDim_nod2D, eDim_nod2D integer, dimension(:), pointer :: ulevels_nod2D real(kind=WP), dimension(:,:),pointer :: geo_coord_nod2D - real(kind=WP), dimension(:), pointer :: u_ice, v_ice - + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) - - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + !_____________________________________________________________________________ rsss=ref_sss ! u_ice and v_ice are at nodes diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index a1a87d29e..282831aa5 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -152,9 +152,9 @@ subroutine ini_blowup_io(year, ice, dynamics, tracers, partit, mesh) !_____________________________________________________________________________ ! write snapshot ice variables to blowup file - call def_variable(bid, 'a_ice' , (/nod2D/) , 'ice concentration [0 to 1]', '%', a_ice); - call def_variable(bid, 'm_ice' , (/nod2D/) , 'effective ice thickness', 'm', m_ice); - call def_variable(bid, 'm_snow' , (/nod2D/) , 'effective snow thickness', 'm', m_snow); + call def_variable(bid, 'a_ice' , (/nod2D/) , 'ice concentration [0 to 1]', '%', ice%data(1)%values); + call def_variable(bid, 'm_ice' , (/nod2D/) , 'effective ice thickness', 'm', ice%data(2)%values); + call def_variable(bid, 'm_snow' , (/nod2D/) , 'effective snow thickness', 'm', ice%data(3)%values); call def_variable(bid, 'u_ice' , (/nod2D/) , 'zonal velocity', 'm/s', ice%uvice(1,:)); call def_variable(bid, 'v_ice' , (/nod2D/) , 'meridional velocity', 'm', ice%uvice(2,:)); !!PS call def_variable(bid, 'a_ice_old' , (/nod2D/) , 'ice concentration [0 to 1]', '%', a_ice_old); !PS diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 89f6483e8..9b75d7870 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -178,11 +178,11 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) end if CASE ('a_ice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'a_ice', 'ice concentration', '%', a_ice(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'a_ice', 'ice concentration', '%', ice%data(1)%values(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('m_ice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'm_ice', 'ice height', 'm', m_ice(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'm_ice', 'ice height', 'm', ice%data(2)%values(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('thdgr ') if (use_ice) then @@ -198,7 +198,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) end if CASE ('m_snow ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'm_snow', 'snow height', 'm', m_snow(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'm_snow', 'snow height', 'm', ice%data(3)%values(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ diff --git a/src/io_restart.F90 b/src/io_restart.F90 index a7f28673a..b97e08813 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -197,9 +197,9 @@ subroutine ini_ice_io(year, ice, partit, mesh) !===================== Definition part ===================================== !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ - call def_variable(iid, 'area', (/nod2D/), 'ice concentration [0 to 1]', '%', a_ice); - call def_variable(iid, 'hice', (/nod2D/), 'effective ice thickness', 'm', m_ice); - call def_variable(iid, 'hsnow', (/nod2D/), 'effective snow thickness', 'm', m_snow); + call def_variable(iid, 'area', (/nod2D/), 'ice concentration [0 to 1]', '%', ice%data(1)%values(:)); + call def_variable(iid, 'hice', (/nod2D/), 'effective ice thickness', 'm', ice%data(2)%values(:)); + call def_variable(iid, 'hsnow', (/nod2D/), 'effective snow thickness', 'm', ice%data(3)%values(:)); call def_variable(iid, 'uice', (/nod2D/), 'zonal velocity', 'm/s', ice%uvice(1,:)); call def_variable(iid, 'vice', (/nod2D/), 'meridional velocity', 'm', ice%uvice(2,:)); #if defined (__oifs) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 693cf08ae..4a0776a75 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2944,7 +2944,7 @@ subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) ! use CVMIX KPP (Large at al. 1994) else if(mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_kpp'//achar(27)//'[0m' - call calc_cvmix_kpp(dynamics, tracers, partit, mesh) + call calc_cvmix_kpp(ice, dynamics, tracers, partit, mesh) call mo_convect(ice, partit, mesh) ! use CVMIX PP (Pacanowski and Philander 1981) parameterisation for mixing @@ -2981,7 +2981,7 @@ subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_vel_rhs'//achar(27)//'[0m' - call compute_vel_rhs(dynamics, partit, mesh) + call compute_vel_rhs(ice, dynamics, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call viscosity_filter'//achar(27)//'[0m' diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 87679efc5..10da4721a 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -1,42 +1,43 @@ module compute_vel_rhs_interface - interface - subroutine compute_vel_rhs(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface + interface + subroutine compute_vel_rhs(ice, dynamics, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module module momentum_adv_scalar_interface - interface - subroutine momentum_adv_scalar(dynamics, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - type(t_dyn) , intent(inout), target :: dynamics - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface + interface + subroutine momentum_adv_scalar(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module ! ! !_______________________________________________________________________________ -subroutine compute_vel_rhs(dynamics, partit, mesh) - use MOD_MESH +subroutine compute_vel_rhs(ice, dynamics, partit, mesh) + USE MOD_ICE + USE MOD_DYN USE MOD_PARTIT USE MOD_PARSUP - USE MOD_DYN + USE MOD_MESH use o_ARRAYS, only: coriolis, ssh_gp, pgf_x, pgf_y use i_ARRAYS use i_therm_param @@ -48,6 +49,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) use g_sbf, only: l_mslp use momentum_adv_scalar_interface implicit none + type(t_ice) , intent(inout), target :: ice type(t_dyn) , intent(inout), target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh @@ -62,6 +64,7 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UV_rhs real(kind=WP), dimension(:) , pointer :: eta_n + real(kind=WP), dimension(:) , pointer :: m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -70,7 +73,9 @@ subroutine compute_vel_rhs(dynamics, partit, mesh) UV_rhs =>dynamics%uv_rhs(:,:,:) UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) eta_n =>dynamics%eta_n(:) - + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + !___________________________________________________________________________ use_pice=0 if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 diff --git a/src/oce_mo_conv.F90 b/src/oce_mo_conv.F90 index d67000b19..f46e210a5 100644 --- a/src/oce_mo_conv.F90 +++ b/src/oce_mo_conv.F90 @@ -12,18 +12,21 @@ subroutine mo_convect(ice, partit, mesh) use i_arrays use g_comm_auto IMPLICIT NONE - - integer :: node, elem, nz, elnodes(3), nzmin, nzmax - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit type(t_ice), intent(in), target :: ice - real(kind=WP), dimension(:), pointer :: u_ice, v_ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: node, elem, nz, elnodes(3), nzmin, nzmax + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice, a_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) !___________________________________________________________________________ ! add vertical mixing scheme of Timmermann and Beckmann, 2004,"Parameterization diff --git a/src/oce_shortwave_pene.F90 b/src/oce_shortwave_pene.F90 index d548d8156..19961ddf5 100644 --- a/src/oce_shortwave_pene.F90 +++ b/src/oce_shortwave_pene.F90 @@ -1,34 +1,41 @@ -subroutine cal_shortwave_rad(partit, mesh) +subroutine cal_shortwave_rad(ice, partit, mesh) ! This routine is inherited from FESOM 1.4 and adopted appropreately. It calculates ! shortwave penetration into the ocean assuming the constant chlorophyll concentration. ! No penetration under the ice is applied. A decent way for ice region is to be discussed. ! This routine should be called after ice2oce coupling done if ice model is used. ! Ref.: Morel and Antoine 1994, Sweeney et al. 2005 - USE MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - USE o_PARAM - USE o_ARRAYS - USE g_CONFIG - use g_forcing_arrays - use g_comm_auto - use i_param - use i_arrays - use i_therm_param - IMPLICIT NONE - - integer :: m, n2, n3, k, nzmax, nzmin - real(kind=WP):: swsurf, aux - real(kind=WP):: c, c2, c3, c4, c5 - real(kind=WP):: v1, v2, sc1, sc2 - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE o_PARAM + USE o_ARRAYS + USE g_CONFIG + use g_forcing_arrays + use g_comm_auto + use i_param + use i_arrays + use i_therm_param + IMPLICIT NONE + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: m, n2, n3, k, nzmax, nzmin + real(kind=WP):: swsurf, aux + real(kind=WP):: c, c2, c3, c4, c5 + real(kind=WP):: v1, v2, sc1, sc2 + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + a_ice => ice%data(1)%values(:) + + !___________________________________________________________________________ + sw_3d=0.0_WP !_____________________________________________________________________________ do n2=1, myDim_nod2D+eDim_nod2D diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index a12012fe3..1773fc44a 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -71,7 +71,7 @@ subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) type(t_ice) , intent(in) , target :: ice real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z - real(kind=WP), dimension(:) , pointer :: eta_n, d_eta + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta, m_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -82,7 +82,7 @@ subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) CFL_z => dynamics%cfl_z(:,:) eta_n => dynamics%eta_n(:) d_eta => dynamics%d_eta(:) - + m_ice => ice%data(2)%values(:) if (mod(istep,outfreq)==0) then !_______________________________________________________________________ @@ -248,36 +248,40 @@ end subroutine write_step_info ! !=============================================================================== subroutine check_blowup(istep, ice, dynamics, tracers, partit, mesh) - use g_config, only: logfile_outfreq, which_ALE - use MOD_MESH - use MOD_TRACER + USE MOD_ICE + USE MOD_DYN + USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - use MOD_DYN - use MOD_ICE - use o_PARAM - use o_ARRAYS, only: water_flux, stress_surf, & - heat_flux, Kv, Av - use i_ARRAYS - use g_comm_auto - use io_BLOWUP - use g_forcing_arrays - use diagnostics - use write_step_info_interface - implicit none + USE MOD_MESH + use g_config, only: logfile_outfreq, which_ALE + use o_PARAM + use o_ARRAYS, only: water_flux, stress_surf, & + heat_flux, Kv, Av + use i_ARRAYS + use g_comm_auto + use io_BLOWUP + use g_forcing_arrays + use diagnostics + use write_step_info_interface + implicit none - integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 - integer :: el, elidx - type(t_mesh) , intent(in) , target :: mesh + type(t_ice) , intent(in) , target :: ice + type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics - type(t_ice) , intent(in) , target :: ice + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 + integer :: el, elidx + !___________________________________________________________________________ + ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old real(kind=WP), dimension(:) , pointer :: eta_n, d_eta real(kind=WP), dimension(:) , pointer :: u_ice, v_ice + real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -291,7 +295,11 @@ subroutine check_blowup(istep, ice, dynamics, tracers, partit, mesh) d_eta => dynamics%d_eta(:) u_ice => ice%uvice(1,:) v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + !___________________________________________________________________________ !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz) do n=1, myDim_nod2d !___________________________________________________________________ From 45f94ddd878f3675d1e1d24700862ea3b1f5de02 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 00:13:57 +0100 Subject: [PATCH 605/909] exchange sigma11, sigma12, sigma22, eps11, eps12, eps22 against ice derived type variables ice%work%sigmaXX and ice%work%epsXX. An clean up some routines a bit --- src/ice_EVP.F90 | 846 +++++++++++++------------- src/ice_maEVP.F90 | 1282 ++++++++++++++++++++-------------------- src/ice_modules.F90 | 4 +- src/ice_setup_step.F90 | 199 +++---- 4 files changed, 1167 insertions(+), 1164 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 78db3eb31..dd9b59478 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -37,127 +37,128 @@ subroutine EVPdynamics(ice, partit, mesh) ! ! Contains routines of EVP dynamics ! -!=================================================================== -subroutine stress_tensor(ice_strength, ice, partit, mesh) +!_______________________________________________________________________________ ! EVP rheology. The routine computes stress tensor components based on ice ! velocity field. They are stored as elemental arrays (sigma11, sigma22 and ! sigma12). The ocean velocity is at nodal locations. -use o_param -use i_param -use i_arrays -USE g_CONFIG -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_ICE - +subroutine stress_tensor(ice_strength, ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use i_param + use i_arrays + use g_CONFIG #if defined (__icepack) -use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif - -implicit none -type(t_partit), intent(inout), target :: partit -type(t_ice), intent(inout), target :: ice -type(t_mesh), intent(in), target :: mesh -!_______________________________________________________________________________ -real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) -real(kind=WP) :: eta, xi, delta, aa -integer :: el, elnodes(3) -real(kind=WP) :: asum, msum, vale, dx(3), dy(3) -real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte -real(kind=WP) :: zeta, delta_inv, d1, d2 -!_______________________________________________________________________________ -real(kind=WP), dimension(:), pointer :: u_ice, v_ice + implicit none + type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) + real(kind=WP) :: eta, xi, delta, aa + integer :: el, elnodes(3) + real(kind=WP) :: asum, msum, vale, dx(3), dy(3) + real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte + real(kind=WP) :: zeta, delta_inv, d1, d2 + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -u_ice => ice%uvice(1,:) -v_ice => ice%uvice(2,:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) -!_______________________________________________________________________________ - vale = 1.0_WP/(ellipse**2) - - dte = ice_dt/(1.0_WP*evp_rheol_steps) - det1 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) - det2 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) !*ellipse**2 - - - do el=1,myDim_elem2D - !__________________________________________________________________________ - ! if element contains cavity node skip it - if (ulevels(el) > 1) cycle - - ! ===== Check if there is ice on elem - - ! There is no ice in elem - ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE - if (ice_strength(el) > 0.) then - ! ===== - ! ===== Deformation rate tensor on element elem: - !du/dx - - eps11(el) = sum(gradient_sca(1:3,el)*U_ice(elem2D_nodes(1:3,el))) & - - metric_factor(el) * sum(V_ice(elem2D_nodes(1:3,el)))/3.0_WP - - eps22(el) = sum(gradient_sca(4:6, el)*V_ice(elem2D_nodes(1:3,el))) - - eps12(el) = 0.5_WP*(sum(gradient_sca(4:6,el)*U_ice(elem2D_nodes(1:3,el))) & - + sum(gradient_sca(1:3,el)*V_ice(elem2D_nodes(1:3,el))) & - + metric_factor(el) * sum(U_ice(elem2D_nodes(1:3,el)))/3.0_WP) - ! ===== moduli: - delta = sqrt((eps11(el)*eps11(el) + eps22(el)*eps22(el))*(1.0_WP+vale) + 4.0_WP*vale*eps12(el)*eps12(el) + & - 2.0_WP*eps11(el)*eps22(el)*(1.0_WP-vale)) - - ! ======================================= - ! ===== Here the EVP rheology piece starts - ! ======================================= - - ! ===== viscosity zeta should exceed zeta_min - ! (done via limiting delta from above) - - !if(delta>pressure/zeta_min) delta=pressure/zeta_min - !It does not work properly by - !creating response where ice_strength is small - ! Uncomment and test if necessary - - ! ===== if delta is too small or zero, viscosity will too large (unlimited) - ! (limit delta_inv) - delta_inv = 1.0_WP/max(delta,delta_min) - zeta = ice_strength(el)*delta_inv - ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta - ! is too large in some regions and CFL criterion is violated. - ! The regularization below was introduced by Hunke, - ! but seemingly is not used in the current CICE. - ! Without it divergence and zeta can be noisy (but code - ! remains stable), using it reduces viscosities too strongly. - ! It is therefore commented - - !if (zeta>Clim_evp*voltriangle(el)) then - !zeta=Clim_evp*voltriangle(el) - !end if - - zeta = zeta*Tevp_inv - - r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*Tevp_inv - r2 = zeta*(eps11(el)-eps22(el))*vale - r3 = zeta*eps12(el)*vale - - si1 = det1*(sigma11(el) + sigma22(el) + dte*r1) - si2 = det2*(sigma11(el) - sigma22(el) + dte*r2) + !___________________________________________________________________________ + vale = 1.0_WP/(ellipse**2) + dte = ice_dt/(1.0_WP*evp_rheol_steps) + det1 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) + det2 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) !*ellipse**2 + + do el=1,myDim_elem2D + !_______________________________________________________________________ + ! if element contains cavity node skip it + if (ulevels(el) > 1) cycle - sigma12(el) = det2*(sigma12(el)+dte*r3) - sigma11(el) = 0.5_WP*(si1+si2) - sigma22(el) = 0.5_WP*(si1-si2) - + ! ===== Check if there is ice on elem + ! There is no ice in elem + ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE + if (ice_strength(el) > 0.) then + ! ===== + ! ===== Deformation rate tensor on element elem: + !du/dx + eps11(el) = sum(gradient_sca(1:3,el)*U_ice(elem2D_nodes(1:3,el))) & + - metric_factor(el) * sum(V_ice(elem2D_nodes(1:3,el)))/3.0_WP + + eps22(el) = sum(gradient_sca(4:6, el)*V_ice(elem2D_nodes(1:3,el))) + + eps12(el) = 0.5_WP*(sum(gradient_sca(4:6,el)*U_ice(elem2D_nodes(1:3,el))) & + + sum(gradient_sca(1:3,el)*V_ice(elem2D_nodes(1:3,el))) & + + metric_factor(el) * sum(U_ice(elem2D_nodes(1:3,el)))/3.0_WP) + ! ===== moduli: + delta = sqrt((eps11(el)*eps11(el) + eps22(el)*eps22(el))*(1.0_WP+vale) + 4.0_WP*vale*eps12(el)*eps12(el) + & + 2.0_WP*eps11(el)*eps22(el)*(1.0_WP-vale)) + + ! ======================================= + ! ===== Here the EVP rheology piece starts + ! ======================================= + + ! ===== viscosity zeta should exceed zeta_min + ! (done via limiting delta from above) + + !if(delta>pressure/zeta_min) delta=pressure/zeta_min + !It does not work properly by + !creating response where ice_strength is small + ! Uncomment and test if necessary + + ! ===== if delta is too small or zero, viscosity will too large (unlimited) + ! (limit delta_inv) + delta_inv = 1.0_WP/max(delta,delta_min) + zeta = ice_strength(el)*delta_inv + ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta + ! is too large in some regions and CFL criterion is violated. + ! The regularization below was introduced by Hunke, + ! but seemingly is not used in the current CICE. + ! Without it divergence and zeta can be noisy (but code + ! remains stable), using it reduces viscosities too strongly. + ! It is therefore commented + + !if (zeta>Clim_evp*voltriangle(el)) then + !zeta=Clim_evp*voltriangle(el) + !end if + + zeta = zeta*Tevp_inv + + r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*Tevp_inv + r2 = zeta*(eps11(el)-eps22(el))*vale + r3 = zeta*eps12(el)*vale + + si1 = det1*(sigma11(el) + sigma22(el) + dte*r1) + si2 = det2*(sigma11(el) - sigma22(el) + dte*r2) + + sigma12(el) = det2*(sigma12(el)+dte*r3) + sigma11(el) = 0.5_WP*(si1+si2) + sigma22(el) = 0.5_WP*(si1-si2) + #if defined (__icepack) - rdg_conv_elem(el) = -min((eps11(el)+eps22(el)),0.0_WP) - rdg_shear_elem(el) = 0.5_WP*(delta - abs(eps11(el)+eps22(el))) + rdg_conv_elem(el) = -min((eps11(el)+eps22(el)),0.0_WP) + rdg_shear_elem(el) = 0.5_WP*(delta - abs(eps11(el)+eps22(el))) #endif - - endif - end do - + endif + end do end subroutine stress_tensor ! !=================================================================== ! subroutine stress_tensor_no1(ice_strength, partit, mesh) @@ -350,367 +351,354 @@ end subroutine stress_tensor ! V_rhs_ice(elnodes)=V_rhs_ice(elnodes) - vc/area(1,elnodes) ! END DO ! end subroutine stress2rhs_e -!=================================================================== -subroutine stress2rhs(inv_areamass, ice_strength, partit, mesh) +! +! +!_______________________________________________________________________________ ! EVP implementation: ! Computes the divergence of stress tensor and puts the result into the ! rhs vectors -USE o_PARAM -USE i_PARAM -USE i_THERM_PARAM -USE i_arrays -USE MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP - -IMPLICIT NONE -type(t_mesh), intent(in), target :: mesh -type(t_partit), intent(inout), target :: partit -REAL(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) -INTEGER :: n, el, k -REAL(kind=WP) :: val3 +subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE o_PARAM + USE i_PARAM + USE i_THERM_PARAM + USE i_arrays + IMPLICIT NONE + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + REAL(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) + INTEGER :: n, el, k + REAL(kind=WP) :: val3 + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -val3=1/3.0_WP - -DO n=1, myDim_nod2D - U_rhs_ice(n)=0.0_WP - V_rhs_ice(n)=0.0_WP -END DO - -do el=1,myDim_elem2D - ! ===== Skip if ice is absent - -! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE - !____________________________________________________________________________ - ! if element contains cavity node skip it - if (ulevels(el) > 1) cycle - - !____________________________________________________________________________ - if (ice_strength(el) > 0._WP) then - -!$IVDEP - DO k=1,3 - - U_rhs_ice(elem2D_nodes(k,el)) = U_rhs_ice(elem2D_nodes(k,el)) & - - elem_area(el) * & - (sigma11(el)*gradient_sca(k,el) + sigma12(el)*gradient_sca(k+3,el) & - +sigma12(el)*val3*metric_factor(el)) !metrics - - V_rhs_ice(elem2D_nodes(k,el)) = V_rhs_ice(elem2D_nodes(k,el)) & - - elem_area(el) * & - (sigma12(el)*gradient_sca(k,el) + sigma22(el)*gradient_sca(k+3,el) & - -sigma11(el)*val3*metric_factor(el)) - END DO - - - endif - end do + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + !___________________________________________________________________________ + val3=1/3.0_WP + + DO n=1, myDim_nod2D + U_rhs_ice(n)=0.0_WP + V_rhs_ice(n)=0.0_WP + END DO + + do el=1,myDim_elem2D + ! ===== Skip if ice is absent + ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE + !_______________________________________________________________________ + ! if element contains cavity node skip it + if (ulevels(el) > 1) cycle + + !_______________________________________________________________________ + if (ice_strength(el) > 0._WP) then + !$IVDEP + DO k=1,3 + U_rhs_ice(elem2D_nodes(k,el)) = U_rhs_ice(elem2D_nodes(k,el)) & + - elem_area(el) * & + (sigma11(el)*gradient_sca(k,el) + sigma12(el)*gradient_sca(k+3,el) & + +sigma12(el)*val3*metric_factor(el)) !metrics + + V_rhs_ice(elem2D_nodes(k,el)) = V_rhs_ice(elem2D_nodes(k,el)) & + - elem_area(el) * & + (sigma12(el)*gradient_sca(k,el) + sigma22(el)*gradient_sca(k+3,el) & + -sigma11(el)*val3*metric_factor(el)) + END DO + endif + end do - DO n=1, myDim_nod2D - !__________________________________________________________________________ - ! if cavity node skip it - if (ulevels_nod2d(n)>1) cycle - - !__________________________________________________________________________ - if (inv_areamass(n) > 0._WP) then - U_rhs_ice(n) = U_rhs_ice(n)*inv_areamass(n) + rhs_a(n) - V_rhs_ice(n) = V_rhs_ice(n)*inv_areamass(n) + rhs_m(n) - else - U_rhs_ice(n) = 0._WP - V_rhs_ice(n) = 0._WP - endif - END DO + DO n=1, myDim_nod2D + !_______________________________________________________________________ + ! if cavity node skip it + if (ulevels_nod2d(n)>1) cycle + + !_______________________________________________________________________ + if (inv_areamass(n) > 0._WP) then + U_rhs_ice(n) = U_rhs_ice(n)*inv_areamass(n) + rhs_a(n) + V_rhs_ice(n) = V_rhs_ice(n)*inv_areamass(n) + rhs_m(n) + else + U_rhs_ice(n) = 0._WP + V_rhs_ice(n) = 0._WP + endif + END DO end subroutine stress2rhs ! ! -!=================================================================== -subroutine EVPdynamics(ice, partit, mesh) +!_______________________________________________________________________________ ! EVP implementation. Does subcycling and boundary conditions. ! Velocities at nodes -USE MOD_ICE -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_MESH -USE o_PARAM -USE i_ARRAYS -USE i_PARAM -USE i_therm_param -USE o_ARRAYS -USE g_CONFIG -USE g_comm_auto -use ice_EVP_interfaces - +subroutine EVPdynamics(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE o_PARAM + USE i_ARRAYS + USE i_PARAM + USE i_therm_param + USE o_ARRAYS + USE g_CONFIG + USE g_comm_auto + use ice_EVP_interfaces #if defined (__icepack) - use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength - use icedrv_main, only: icepack_to_fesom + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength + use icedrv_main, only: icepack_to_fesom #endif - -IMPLICIT NONE -type(t_ice), intent(inout), target :: ice -type(t_partit), intent(inout), target :: partit -type(t_mesh), intent(in), target :: mesh -!_______________________________________________________________________________ -integer :: steps, shortstep -real(kind=WP) :: rdt, asum, msum, r_a, r_b -real(kind=WP) :: drag, det, umod, rhsu, rhsv -integer :: n, ed, ednodes(2), el, elnodes(3) -real(kind=WP) :: ax, ay, aa, elevation_dx, elevation_dy - -real(kind=WP) :: inv_areamass(partit%myDim_nod2D), inv_mass(partit%myDim_nod2D) -real(kind=WP) :: ice_strength(partit%myDim_elem2D), elevation_elem(3), p_ice(3) -integer :: use_pice - -real(kind=WP) :: eta, xi, delta -integer :: k -real(kind=WP) :: vale, dx(3), dy(3), val3 -real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte -real(kind=WP) :: zeta, delta_inv, d1, d2 - -INTEGER :: elem -REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 -!_______________________________________________________________________________ -! pointer on necessary derived types -real(kind=WP), dimension(:), pointer :: u_ice, v_ice -real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + IMPLICIT NONE + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: steps, shortstep + real(kind=WP) :: rdt, asum, msum, r_a, r_b + real(kind=WP) :: drag, det, umod, rhsu, rhsv + integer :: n, ed, ednodes(2), el, elnodes(3) + real(kind=WP) :: ax, ay, aa, elevation_dx, elevation_dy + + real(kind=WP) :: inv_areamass(partit%myDim_nod2D), inv_mass(partit%myDim_nod2D) + real(kind=WP) :: ice_strength(partit%myDim_elem2D), elevation_elem(3), p_ice(3) + integer :: use_pice + + real(kind=WP) :: eta, xi, delta + integer :: k + real(kind=WP) :: vale, dx(3), dy(3), val3 + real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte + real(kind=WP) :: zeta, delta_inv, d1, d2 + INTEGER :: elem + REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 + !_______________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -u_ice => ice%uvice(1,:) -v_ice => ice%uvice(2,:) -a_ice => ice%data(1)%values(:) -m_ice => ice%data(2)%values(:) -m_snow => ice%data(3)%values(:) - -!_______________________________________________________________________________ -! If Icepack is used, always update the tracers - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + + !_______________________________________________________________________________ + ! If Icepack is used, always update the tracers #if defined (__icepack) - a_ice_old(:) = a_ice(:) - m_ice_old(:) = a_ice(:) - m_snow_old(:) = m_snow(:) - - call icepack_to_fesom (nx_in=(myDim_nod2D+eDim_nod2D), & - aice_out=a_ice, & - vice_out=m_ice, & - vsno_out=m_snow) + a_ice_old(:) = a_ice(:) + m_ice_old(:) = a_ice(:) + m_snow_old(:) = m_snow(:) + call icepack_to_fesom (nx_in=(myDim_nod2D+eDim_nod2D), & + aice_out=a_ice, & + vice_out=m_ice, & + vsno_out=m_snow) #endif -rdt=ice_dt/(1.0*evp_rheol_steps) -ax=cos(theta_io) -ay=sin(theta_io) + rdt=ice_dt/(1.0*evp_rheol_steps) + ax=cos(theta_io) + ay=sin(theta_io) -! Precompute values that are never changed during the iteration - inv_areamass =0.0_WP - inv_mass =0.0_WP - rhs_a =0.0_WP - rhs_m =0.0_WP - do n=1,myDim_nod2D !___________________________________________________________________________ - ! if cavity node skip it - if (ulevels_nod2d(n)>1) cycle - - !___________________________________________________________________________ - if ((rhoice*m_ice(n)+rhosno*m_snow(n)) > 1.e-3_WP) then - inv_areamass(n) = 1._WP/(area(1,n)*(rhoice*m_ice(n)+rhosno*m_snow(n))) - else - inv_areamass(n) = 0._WP - endif + ! Precompute values that are never changed during the iteration + inv_areamass =0.0_WP + inv_mass =0.0_WP + rhs_a =0.0_WP + rhs_m =0.0_WP + do n=1,myDim_nod2D + !_______________________________________________________________________ + ! if cavity node skip it + if (ulevels_nod2d(n)>1) cycle - if (a_ice(n) < 0.01_WP) then - ! Skip if ice is absent - inv_mass(n) = 0._WP - else - inv_mass(n) = (rhoice*m_ice(n)+rhosno*m_snow(n))/a_ice(n) - inv_mass(n) = 1.0_WP/max(inv_mass(n), 9.0_WP) ! Limit the mass - ! if it is too small - endif + !_______________________________________________________________________ + if ((rhoice*m_ice(n)+rhosno*m_snow(n)) > 1.e-3_WP) then + inv_areamass(n) = 1._WP/(area(1,n)*(rhoice*m_ice(n)+rhosno*m_snow(n))) + else + inv_areamass(n) = 0._WP + endif - rhs_a(n)=0.0_WP ! these are used as temporal storage here - rhs_m(n)=0.0_WP ! for the contribution due to ssh - enddo + if (a_ice(n) < 0.01_WP) then + ! Skip if ice is absent + inv_mass(n) = 0._WP + else + inv_mass(n) = (rhoice*m_ice(n)+rhosno*m_snow(n))/a_ice(n) + inv_mass(n) = 1.0_WP/max(inv_mass(n), 9.0_WP) ! Limit the mass + ! if it is too small + endif + rhs_a(n)=0.0_WP ! these are used as temporal storage here + rhs_m(n)=0.0_WP ! for the contribution due to ssh + enddo -!_______________________________________________________________________________ !!PS -use_pice=0 -if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 -if ( .not. trim(which_ALE)=='linfs') then - ! for full free surface include pressure from ice mass - ice_strength=0.0_WP - do el = 1,myDim_elem2D - - elnodes = elem2D_nodes(:,el) - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels(el) > 1) cycle - - !_______________________________________________________________________ - if (any(m_ice(elnodes)<=0._WP) .or. & - any(a_ice(elnodes)<=0._WP)) then - - ! There is no ice in elem - ice_strength(el) = 0._WP - - !_______________________________________________________________________ - else - msum = sum(m_ice(elnodes))/3.0_WP - asum = sum(a_ice(elnodes))/3.0_WP - - !___________________________________________________________________ - ! Hunke and Dukowicz c*h*p* + !___________________________________________________________________________ + use_pice=0 + if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 + if ( .not. trim(which_ALE)=='linfs') then + ! for full free surface include pressure from ice mass + ice_strength=0.0_WP + do el = 1,myDim_elem2D + + elnodes = elem2D_nodes(:,el) + !___________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(el) > 1) cycle + + !___________________________________________________________________ + if (any(m_ice(elnodes)<=0._WP) .or. & + any(a_ice(elnodes)<=0._WP)) then + + ! There is no ice in elem + ice_strength(el) = 0._WP + + !___________________________________________________________________ + else + msum = sum(m_ice(elnodes))/3.0_WP + asum = sum(a_ice(elnodes))/3.0_WP + + !_______________________________________________________________ + ! Hunke and Dukowicz c*h*p* #if defined (__icepack) - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) #else - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) #endif - ice_strength(el) = 0.5_WP*ice_strength(el) - - !___________________________________________________________________ - ! use rhs_m and rhs_a for storing the contribution from elevation: - aa = 9.81_WP*elem_area(el)/3.0_WP - - !___________________________________________________________________ - ! add and limit pressure from ice weight in case of floating ice - ! like in FESOM 1.4 - p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat - do n=1,3 - p_ice(n)=min(p_ice(n),max_ice_loading) - end do -!!PS p_ice= 0.0_WP - - !___________________________________________________________________ - elevation_elem = elevation(elnodes) - elevation_dx = sum(gradient_sca(1:3,el)*(elevation_elem+p_ice*use_pice)) - elevation_dy = sum(gradient_sca(4:6,el)*(elevation_elem+p_ice*use_pice)) - - !___________________________________________________________________ - rhs_a(elnodes) = rhs_a(elnodes)-aa*elevation_dx - rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy - end if - enddo -else - ! for linear free surface - ice_strength=0.0_WP - do el = 1,myDim_elem2D - elnodes = elem2D_nodes(:,el) - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels(el) > 1) cycle - - !_______________________________________________________________________ - if (any(m_ice(elnodes) <= 0._WP) .or. & - any(a_ice(elnodes) <=0._WP)) then - - ! There is no ice in elem - ice_strength(el) = 0._WP - else - msum = sum(m_ice(elnodes))/3.0_WP - asum = sum(a_ice(elnodes))/3.0_WP - - ! ===== Hunke and Dukowicz c*h*p* + ice_strength(el) = 0.5_WP*ice_strength(el) + + !_______________________________________________________________ + ! use rhs_m and rhs_a for storing the contribution from elevation: + aa = 9.81_WP*elem_area(el)/3.0_WP + + !_______________________________________________________________ + ! add and limit pressure from ice weight in case of floating ice + ! like in FESOM 1.4 + p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat + do n=1,3 + p_ice(n)=min(p_ice(n),max_ice_loading) + end do + !!PS p_ice= 0.0_WP + + !_______________________________________________________________ + elevation_elem = elevation(elnodes) + elevation_dx = sum(gradient_sca(1:3,el)*(elevation_elem+p_ice*use_pice)) + elevation_dy = sum(gradient_sca(4:6,el)*(elevation_elem+p_ice*use_pice)) + + !_______________________________________________________________ + rhs_a(elnodes) = rhs_a(elnodes)-aa*elevation_dx + rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy + end if + enddo + else + ! for linear free surface + ice_strength=0.0_WP + do el = 1,myDim_elem2D + elnodes = elem2D_nodes(:,el) + !___________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(el) > 1) cycle + + !___________________________________________________________________ + if (any(m_ice(elnodes) <= 0._WP) .or. & + any(a_ice(elnodes) <=0._WP)) then + + ! There is no ice in elem + ice_strength(el) = 0._WP + else + msum = sum(m_ice(elnodes))/3.0_WP + asum = sum(a_ice(elnodes))/3.0_WP + + ! ===== Hunke and Dukowicz c*h*p* #if defined (__icepack) - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) #else - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) #endif - ice_strength(el) = 0.5_WP*ice_strength(el) - - ! use rhs_m and rhs_a for storing the contribution from elevation: - aa = 9.81_WP*elem_area(el)/3.0_WP - - elevation_dx = sum(gradient_sca(1:3,el)*elevation(elnodes)) - elevation_dy = sum(gradient_sca(4:6,el)*elevation(elnodes)) - - rhs_a(elnodes) = rhs_a(elnodes)-aa*elevation_dx - rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy - end if - enddo -endif ! --> if ( .not. trim(which_ALE)=='linfs') then - -do n=1,myDim_nod2D - if (ulevels_nod2d(n)>1) cycle + ice_strength(el) = 0.5_WP*ice_strength(el) + + ! use rhs_m and rhs_a for storing the contribution from elevation: + aa = 9.81_WP*elem_area(el)/3.0_WP + + elevation_dx = sum(gradient_sca(1:3,el)*elevation(elnodes)) + elevation_dy = sum(gradient_sca(4:6,el)*elevation(elnodes)) + + rhs_a(elnodes) = rhs_a(elnodes)-aa*elevation_dx + rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy + end if + enddo + endif ! --> if ( .not. trim(which_ALE)=='linfs') then + !___________________________________________________________________________ - rhs_a(n) = rhs_a(n)/area(1,n) - rhs_m(n) = rhs_m(n)/area(1,n) - enddo -! End of Precomputing - -!============================================================== -! And the ice stepping starts + do n=1,myDim_nod2D + if (ulevels_nod2d(n)>1) cycle + rhs_a(n) = rhs_a(n)/area(1,n) + rhs_m(n) = rhs_m(n)/area(1,n) + enddo + ! + !___________________________________________________________________________ + ! End of Precomputing --> And the ice stepping starts #if defined (__icepack) - rdg_conv_elem(:) = 0.0_WP - rdg_shear_elem(:) = 0.0_WP + rdg_conv_elem(:) = 0.0_WP + rdg_shear_elem(:) = 0.0_WP #endif - -do shortstep=1, evp_rheol_steps - - call stress_tensor(ice_strength, ice, partit, mesh) - call stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) - - U_ice_old = U_ice !PS - V_ice_old = V_ice !PS - do n=1,myDim_nod2D - - !_________________________________________________________________________ - ! if cavity node skip it - if ( ulevels_nod2d(n)>1 ) cycle - - !_________________________________________________________________________ - if (a_ice(n) >= 0.01_WP) then ! Skip if ice is absent - - - umod = sqrt((U_ice(n)-U_w(n))**2+(V_ice(n)-V_w(n))**2) - - drag = Cd_oce_ice*umod*density_0*inv_mass(n) - - rhsu = U_ice(n) +rdt*(drag*(ax*U_w(n) - ay*V_w(n))+ & - inv_mass(n)*stress_atmice_x(n) + U_rhs_ice(n)) - - rhsv = V_ice(n) +rdt*(drag*(ax*V_w(n) + ay*U_w(n))+ & - inv_mass(n)*stress_atmice_y(n) + V_rhs_ice(n)) - - r_a = 1._WP + ax*drag*rdt - r_b = rdt*(coriolis_node(n) + ay*drag) - - det = 1.0_WP/(r_a*r_a + r_b*r_b) - - U_ice(n) = det*(r_a*rhsu +r_b*rhsv) - V_ice(n) = det*(r_a*rhsv -r_b*rhsu) - else ! Set velocities to 0 if ice is absent - U_ice(n) = 0.0_WP - V_ice(n) = 0.0_WP - end if - - end do - - !___________________________________________________________________________ - ! apply sea ice velocity boundary condition - DO ed=1,myDim_edge2D + do shortstep=1, evp_rheol_steps !_______________________________________________________________________ - ! apply coastal sea ice velocity boundary conditions - if(myList_edge2D(ed) > edge2D_in) then - U_ice(edges(1:2,ed))=0.0_WP - V_ice(edges(1:2,ed))=0.0_WP - endif - + call stress_tensor(ice_strength, ice, partit, mesh) + call stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) + !_______________________________________________________________________ + U_ice_old = U_ice !PS + V_ice_old = V_ice !PS + do n=1,myDim_nod2D + !___________________________________________________________________ + ! if cavity node skip it + if ( ulevels_nod2d(n)>1 ) cycle + + !___________________________________________________________________ + if (a_ice(n) >= 0.01_WP) then ! Skip if ice is absent + umod = sqrt((U_ice(n)-U_w(n))**2+(V_ice(n)-V_w(n))**2) + drag = Cd_oce_ice*umod*density_0*inv_mass(n) + + rhsu = U_ice(n) +rdt*(drag*(ax*U_w(n) - ay*V_w(n))+ & + inv_mass(n)*stress_atmice_x(n) + U_rhs_ice(n)) + rhsv = V_ice(n) +rdt*(drag*(ax*V_w(n) + ay*U_w(n))+ & + inv_mass(n)*stress_atmice_y(n) + V_rhs_ice(n)) + + r_a = 1._WP + ax*drag*rdt + r_b = rdt*(coriolis_node(n) + ay*drag) + det = 1.0_WP/(r_a*r_a + r_b*r_b) + U_ice(n) = det*(r_a*rhsu +r_b*rhsv) + V_ice(n) = det*(r_a*rhsv -r_b*rhsu) + else ! Set velocities to 0 if ice is absent + U_ice(n) = 0.0_WP + V_ice(n) = 0.0_WP + end if + end do + !_______________________________________________________________________ - ! apply sea ice velocity boundary conditions at cavity-ocean edge - if (use_cavity) then - if ( (ulevels(edge_tri(1,ed))>1) .or. & - ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then + ! apply sea ice velocity boundary condition + DO ed=1,myDim_edge2D + !___________________________________________________________________ + ! apply coastal sea ice velocity boundary conditions + if(myList_edge2D(ed) > edge2D_in) then U_ice(edges(1:2,ed))=0.0_WP V_ice(edges(1:2,ed))=0.0_WP + endif + + !___________________________________________________________________ + ! apply sea ice velocity boundary conditions at cavity-ocean edge + if (use_cavity) then + if ( (ulevels(edge_tri(1,ed))>1) .or. & + ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then + U_ice(edges(1:2,ed))=0.0_WP + V_ice(edges(1:2,ed))=0.0_WP + end if end if - end if - - end do - - !___________________________________________________________________________ - call exchange_nod(U_ice,V_ice,partit) -END DO - - + end do + !_______________________________________________________________________ + call exchange_nod(U_ice,V_ice,partit) + END DO !--> do shortstep=1, evp_rheol_steps end subroutine EVPdynamics diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 3c3f40335..37654c055 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -1,6 +1,6 @@ module ice_maEVP_interfaces - interface - subroutine ssh2rhs(ice, partit, mesh) + interface + subroutine ssh2rhs(ice, partit, mesh) USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -8,9 +8,9 @@ subroutine ssh2rhs(ice, partit, mesh) type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - end subroutine - - subroutine stress_tensor_a(ice, partit, mesh) + end subroutine + + subroutine stress_tensor_a(ice, partit, mesh) USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -18,9 +18,9 @@ subroutine stress_tensor_a(ice, partit, mesh) type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - end subroutine - - subroutine stress2rhs_m(ice, partit, mesh) + end subroutine + + subroutine stress2rhs_m(ice, partit, mesh) USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -28,9 +28,9 @@ subroutine stress2rhs_m(ice, partit, mesh) type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - end subroutine - - subroutine find_alpha_field_a(ice, partit, mesh) + end subroutine + + subroutine find_alpha_field_a(ice, partit, mesh) USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -38,47 +38,46 @@ subroutine find_alpha_field_a(ice, partit, mesh) type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - end subroutine - - subroutine find_beta_field_a(partit, mesh) - use mod_mesh + end subroutine + + subroutine find_beta_field_a(partit, mesh) USE MOD_PARTIT USE MOD_PARSUP + USE MOD_MESH type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - end subroutine - - subroutine EVPdynamics_a(ice, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_ice - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_ice), intent(inout), target :: ice - end subroutine - - subroutine EVPdynamics_m(ice, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_ice - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_ice), intent(inout), target :: ice - end subroutine - end interface + end subroutine + + subroutine EVPdynamics_a(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice + end subroutine + + subroutine EVPdynamics_m(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice + end subroutine + end interface end module - -! ==================================================================== +! +! +!_______________________________________________________________________________ ! New evp implementation following Bouillion et al. 2013 ! and Kimmritz et al. 2015 (mEVP) and Kimmritz et al. 2016 (aEVP) -! ==================================================================== +! Internal stress tensor +! New implementation following Boullion et al, Ocean Modelling 2013. +! SD, 30.07.2014 subroutine stress_tensor_m(ice, partit, mesh) - ! Internal stress tensor - ! New implementation following Boullion et al, Ocean Modelling 2013. - ! SD, 30.07.2014 - !=================================================================== USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -104,63 +103,69 @@ subroutine stress_tensor_m(ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice + real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) !___________________________________________________________________________ - - val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) - det2=1.0_WP/(1.0_WP+alpha_evp) - det1=alpha_evp*det2 - do elem=1,myDim_elem2D - elnodes=elem2D_nodes(:,elem) - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels(elem) > 1) cycle - - msum=sum(m_ice(elnodes))*val3 - if(msum<=0.01_WP) cycle !DS - asum=sum(a_ice(elnodes))*val3 - - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - ! METRICS: - vsum=sum(v_ice_aux(elnodes)) - usum=sum(u_ice_aux(elnodes)) - meancos=metric_factor(elem) - ! - ! ====== Deformation rate tensor on element elem: - eps11(elem)=sum(dx*u_ice_aux(elnodes)) - eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics - eps22(elem)=sum(dy*v_ice_aux(elnodes)) - eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) - eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics - - ! ======= Switch to eps1,eps2 - eps1=eps11(elem)+eps22(elem) - eps2=eps11(elem)-eps22(elem) - - ! ====== moduli: - delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) - delta=sqrt(delta) - + val3=1.0_WP/3.0_WP + vale=1.0_WP/(ellipse**2) + det2=1.0_WP/(1.0_WP+alpha_evp) + det1=alpha_evp*det2 + do elem=1,myDim_elem2D + elnodes=elem2D_nodes(:,elem) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle + + msum=sum(m_ice(elnodes))*val3 + if(msum<=0.01_WP) cycle !DS + asum=sum(a_ice(elnodes))*val3 + + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + ! METRICS: + vsum=sum(v_ice_aux(elnodes)) + usum=sum(u_ice_aux(elnodes)) + meancos=metric_factor(elem) + ! + ! ====== Deformation rate tensor on element elem: + eps11(elem)=sum(dx*u_ice_aux(elnodes)) + eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics + eps22(elem)=sum(dy*v_ice_aux(elnodes)) + eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) + eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics + + ! ======= Switch to eps1,eps2 + eps1=eps11(elem)+eps22(elem) + eps2=eps11(elem)-eps22(elem) + + ! ====== moduli: + delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) + delta=sqrt(delta) + #if defined (__icepack) - pressure = sum(strength(elnodes))*val3/max(delta,delta_min) + pressure = sum(strength(elnodes))*val3/max(delta,delta_min) #else - pressure=pstar*msum*exp(-c_pressure*(1.0_WP-asum))/max(delta,delta_min) + pressure=pstar*msum*exp(-c_pressure*(1.0_WP-asum))/max(delta,delta_min) #endif - r1=pressure*(eps1-max(delta,delta_min)) r2=pressure*eps2*vale r3=pressure*eps12(elem)*vale si1=sigma11(elem)+sigma22(elem) si2=sigma11(elem)-sigma22(elem) - + si1=det1*si1+det2*r1 si2=det1*si2+det2*r2 sigma12(elem)=det1*sigma12(elem)+det2*r3 @@ -171,20 +176,19 @@ subroutine stress_tensor_m(ice, partit, mesh) rdg_conv_elem(elem) = -min((eps11(elem)+eps22(elem)),0.0_WP) rdg_shear_elem(elem) = 0.5_WP*(delta - abs(eps11(elem)+eps22(elem))) #endif - - end do - ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of - ! Boullion et al Ocean Modelling 2013, but in an implicit mode: - ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), - ! and similarly for si2 and sigma12 + end do + ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of + ! Boullion et al Ocean Modelling 2013, but in an implicit mode: + ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), + ! and similarly for si2 and sigma12 end subroutine stress_tensor_m ! -! ================================================================== -! +! +!_______________________________________________________________________________ +! Compute the contribution from the elevation to the rhs +! S.D. 30.07.2014 subroutine ssh2rhs(ice, partit, mesh) - ! Compute the contribution from the elevation to the rhs - ! S.D. 30.07.2014 USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -213,68 +217,66 @@ subroutine ssh2rhs(ice, partit, mesh) m_snow => ice%data(3)%values(:) !___________________________________________________________________________ - val3=1.0_WP/3.0_WP - - ! use rhs_m and rhs_a for storing the contribution from elevation: - do row=1, myDim_nod2d - rhs_a(row)=0.0_WP - rhs_m(row)=0.0_WP - end do + val3=1.0_WP/3.0_WP + + ! use rhs_m and rhs_a for storing the contribution from elevation: + do row=1, myDim_nod2d + rhs_a(row)=0.0_WP + rhs_m(row)=0.0_WP + end do - !_____________________________________________________________________________ - ! use floating sea ice for zlevel and zstar - if (use_floatice .and. .not. trim(which_ale)=='linfs') then - do elem=1,myDim_elem2d - elnodes=elem2D_nodes(:,elem) - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels(elem) > 1) cycle - - !_______________________________________________________________________ - vol=elem_area(elem) - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - - !_______________________________________________________________________ - ! add pressure gradient from sea ice --> in case of floating sea ice - p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat - do n=1,3 - p_ice(n)=min(p_ice(n),max_ice_loading) + !_____________________________________________________________________________ + ! use floating sea ice for zlevel and zstar + if (use_floatice .and. .not. trim(which_ale)=='linfs') then + do elem=1,myDim_elem2d + elnodes=elem2D_nodes(:,elem) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle + + !_______________________________________________________________________ + vol=elem_area(elem) + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + + !_______________________________________________________________________ + ! add pressure gradient from sea ice --> in case of floating sea ice + p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat + do n=1,3 + p_ice(n)=min(p_ice(n),max_ice_loading) + end do + + !_______________________________________________________________________ + bb=g*val3*vol + aa=bb*sum(dx*(elevation(elnodes)+p_ice)) + bb=bb*sum(dy*(elevation(elnodes)+p_ice)) + rhs_a(elnodes)=rhs_a(elnodes)-aa + rhs_m(elnodes)=rhs_m(elnodes)-bb end do - - !_______________________________________________________________________ - bb=g*val3*vol - aa=bb*sum(dx*(elevation(elnodes)+p_ice)) - bb=bb*sum(dy*(elevation(elnodes)+p_ice)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb - end do - else - do elem=1,myDim_elem2d - elnodes=elem2D_nodes(:,elem) - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels(elem) > 1) cycle - - vol=elem_area(elem) - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - bb=g*val3*vol - aa=bb*sum(dx*elevation(elnodes)) - bb=bb*sum(dy*elevation(elnodes)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb - end do - end if + else + do elem=1,myDim_elem2d + elnodes=elem2D_nodes(:,elem) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle + + vol=elem_area(elem) + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + bb=g*val3*vol + aa=bb*sum(dx*elevation(elnodes)) + bb=bb*sum(dy*elevation(elnodes)) + rhs_a(elnodes)=rhs_a(elnodes)-aa + rhs_m(elnodes)=rhs_m(elnodes)-bb + end do + end if end subroutine ssh2rhs ! -!=================================================================== ! +!_______________________________________________________________________________ +! add internal stress to the rhs +! SD, 30.07.2014 subroutine stress2rhs_m(ice, partit, mesh) - - ! add internal stress to the rhs - ! SD, 30.07.2014 - !----------------------------------------------------------------- USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -296,6 +298,7 @@ subroutine stress2rhs_m(ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -303,58 +306,60 @@ subroutine stress2rhs_m(ice, partit, mesh) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) !___________________________________________________________________________ - val3=1.0_WP/3.0_WP - - do row=1, myDim_nod2d - u_rhs_ice(row)=0.0_WP - v_rhs_ice(row)=0.0_WP - end do + val3=1.0_WP/3.0_WP + + do row=1, myDim_nod2d + u_rhs_ice(row)=0.0_WP + v_rhs_ice(row)=0.0_WP + end do - do elem=1,myDim_elem2d - elnodes=elem2D_nodes(:,elem) - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels(elem) > 1) cycle + do elem=1,myDim_elem2d + elnodes=elem2D_nodes(:,elem) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle - if(sum(a_ice(elnodes)) < 0.01_WP) cycle !DS - - vol=elem_area(elem) - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - mf=metric_factor(elem) !metrics + if(sum(a_ice(elnodes)) < 0.01_WP) cycle !DS + + vol=elem_area(elem) + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + mf=metric_factor(elem) !metrics - do k=1,3 - row=elnodes(k) - u_rhs_ice(row)=u_rhs_ice(row) - vol* & - (sigma11(elem)*dx(k)+sigma12(elem)*dy(k)) & - -vol*sigma12(elem)*val3*mf !metrics - v_rhs_ice(row)=v_rhs_ice(row) - vol* & - (sigma12(elem)*dx(k)+sigma22(elem)*dy(k)) & - +vol*sigma11(elem)*val3*mf ! metrics - end do - end do + do k=1,3 + row=elnodes(k) + u_rhs_ice(row)=u_rhs_ice(row) - vol* & + (sigma11(elem)*dx(k)+sigma12(elem)*dy(k)) & + -vol*sigma12(elem)*val3*mf !metrics + v_rhs_ice(row)=v_rhs_ice(row) - vol* & + (sigma12(elem)*dx(k)+sigma22(elem)*dy(k)) & + +vol*sigma11(elem)*val3*mf ! metrics + end do + end do - do row=1, myDim_nod2d - !_________________________________________________________________________ - ! if cavity node skip it - if ( ulevels_nod2d(row)>1 ) cycle - - mass=(m_ice(row)*rhoice+m_snow(row)*rhosno) - mass=mass/(1.0_WP+mass*mass) - u_rhs_ice(row)=(u_rhs_ice(row)*mass + rhs_a(row))/area(1,row) - v_rhs_ice(row)=(v_rhs_ice(row)*mass + rhs_m(row))/area(1,row) - end do + do row=1, myDim_nod2d + !_______________________________________________________________________ + ! if cavity node skip it + if ( ulevels_nod2d(row)>1 ) cycle + + mass=(m_ice(row)*rhoice+m_snow(row)*rhosno) + mass=mass/(1.0_WP+mass*mass) + u_rhs_ice(row)=(u_rhs_ice(row)*mass + rhs_a(row))/area(1,row) + v_rhs_ice(row)=(v_rhs_ice(row)*mass + rhs_m(row))/area(1,row) + end do end subroutine stress2rhs_m ! -!=================================================================== ! +!_______________________________________________________________________________ +! assemble rhs and solve for ice velocity +! New implementation based on Bouillion et al. Ocean Modelling 2013 +! SD 30.07.14 subroutine EVPdynamics_m(ice, partit, mesh) - ! assemble rhs and solve for ice velocity - ! New implementation based on Bouillion et al. Ocean Modelling 2013 - ! SD 30.07.14 - !--------------------------------------------------------- USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -394,317 +399,311 @@ subroutine EVPdynamics_m(ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) !___________________________________________________________________________ - val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) - det2=1.0_WP/(1.0_WP+alpha_evp) - det1=alpha_evp*det2 - rdt=ice_dt - steps=evp_rheol_steps - - u_ice_aux=u_ice ! Initialize solver variables - v_ice_aux=v_ice + val3=1.0_WP/3.0_WP + vale=1.0_WP/(ellipse**2) + det2=1.0_WP/(1.0_WP+alpha_evp) + det1=alpha_evp*det2 + rdt=ice_dt + steps=evp_rheol_steps + + u_ice_aux=u_ice ! Initialize solver variables + v_ice_aux=v_ice #if defined (__icepack) - a_ice_old(:) = a_ice(:) - m_ice_old(:) = a_ice(:) - m_snow_old(:) = m_snow(:) + a_ice_old(:) = a_ice(:) + m_ice_old(:) = a_ice(:) + m_snow_old(:) = m_snow(:) - call icepack_to_fesom (nx_in=(myDim_nod2D+eDim_nod2D), & - aice_out=a_ice, & - vice_out=m_ice, & - vsno_out=m_snow) + call icepack_to_fesom (nx_in=(myDim_nod2D+eDim_nod2D), & + aice_out=a_ice, & + vice_out=m_ice, & + vsno_out=m_snow) #endif -!NR inlined, to have all initialization in one place. -! call ssh2rhs + !NR inlined, to have all initialization in one place. + ! call ssh2rhs - ! use rhs_m and rhs_a for storing the contribution from elevation: - do row=1, myDim_nod2d - rhs_a(row)=0.0_WP - rhs_m(row)=0.0_WP - end do + ! use rhs_m and rhs_a for storing the contribution from elevation: + do row=1, myDim_nod2d + rhs_a(row)=0.0_WP + rhs_m(row)=0.0_WP + end do - !_____________________________________________________________________________ - ! use floating sea ice for zlevel and zstar - if (use_floatice .and. .not. trim(which_ale)=='linfs') then - do el=1,myDim_elem2d - elnodes=elem2D_nodes(:,el) - - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels(el) > 1) cycle - - !_______________________________________________________________________ - vol=elem_area(el) - dx=gradient_sca(1:3,el) - dy=gradient_sca(4:6,el) - - !_______________________________________________________________________ - ! add pressure gradient from sea ice --> in case of floating sea ice - p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat - do n=1,3 - p_ice(n)=min(p_ice(n),max_ice_loading) + !_____________________________________________________________________________ + ! use floating sea ice for zlevel and zstar + if (use_floatice .and. .not. trim(which_ale)=='linfs') then + do el=1,myDim_elem2d + elnodes=elem2D_nodes(:,el) + + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(el) > 1) cycle + + !_______________________________________________________________________ + vol=elem_area(el) + dx=gradient_sca(1:3,el) + dy=gradient_sca(4:6,el) + + !_______________________________________________________________________ + ! add pressure gradient from sea ice --> in case of floating sea ice + p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat + do n=1,3 + p_ice(n)=min(p_ice(n),max_ice_loading) + end do + + !_______________________________________________________________________ + bb=g*val3*vol + aa=bb*sum(dx*(elevation(elnodes)+p_ice)) + bb=bb*sum(dy*(elevation(elnodes)+p_ice)) + rhs_a(elnodes)=rhs_a(elnodes)-aa + rhs_m(elnodes)=rhs_m(elnodes)-bb end do - + !_____________________________________________________________________________ + ! use levitating sea ice for linfs, zlevel and zstar + else + do el=1,myDim_elem2d + elnodes=elem2D_nodes(:,el) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(el) > 1) cycle + + vol=elem_area(el) + dx=gradient_sca(1:3,el) + dy=gradient_sca(4:6,el) + bb=g*val3*vol + aa=bb*sum(dx*elevation(elnodes)) + bb=bb*sum(dy*elevation(elnodes)) + rhs_a(elnodes)=rhs_a(elnodes)-aa + rhs_m(elnodes)=rhs_m(elnodes)-bb + end do + end if + + !___________________________________________________________________________ + ! precompute thickness (the inverse is needed) and mass (scaled by area) + do i=1,myDim_nod2D + inv_thickness(i) = 0._WP + mass(i) = 0._WP + ice_nod(i) = .false. !_______________________________________________________________________ - bb=g*val3*vol - aa=bb*sum(dx*(elevation(elnodes)+p_ice)) - bb=bb*sum(dy*(elevation(elnodes)+p_ice)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb - end do - !_____________________________________________________________________________ - ! use levitating sea ice for linfs, zlevel and zstar - else - do el=1,myDim_elem2d + ! if cavity ndoe skip it + if ( ulevels_nod2d(i)>1 ) cycle + + if (a_ice(i) >= 0.01_WP) then + inv_thickness(i) = (rhoice*m_ice(i)+rhosno*m_snow(i))/a_ice(i) + inv_thickness(i) = 1.0_WP/max(inv_thickness(i), 9.0_WP) ! Limit the mass + + mass(i) = (m_ice(i)*rhoice+m_snow(i)*rhosno) + mass(i) = mass(i)/((1.0_WP+mass(i)*mass(i))*area(1,i)) + + ! scale rhs_a, rhs_m, too. + rhs_a(i) = rhs_a(i)/area(1,i) + rhs_m(i) = rhs_m(i)/area(1,i) + + ice_nod(i) = .true. + endif + enddo + + !___________________________________________________________________________ + ! precompute pressure factor + do el=1,myDim_elem2D elnodes=elem2D_nodes(:,el) + pressure_fac(el) = 0._WP + ice_el(el) = .false. + !_______________________________________________________________________ ! if element has any cavity node skip it - if (ulevels(el) > 1) cycle + if (ulevels(el) > 1) cycle - vol=elem_area(el) - dx=gradient_sca(1:3,el) - dy=gradient_sca(4:6,el) - bb=g*val3*vol - aa=bb*sum(dx*elevation(elnodes)) - bb=bb*sum(dy*elevation(elnodes)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb + msum=sum(m_ice(elnodes))*val3 + if(msum > 0.01) then + ice_el(el) = .true. + asum=sum(a_ice(elnodes))*val3 + pressure_fac(el) = det2*pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + endif end do - end if - -! precompute thickness (the inverse is needed) and mass (scaled by area) - do i=1,myDim_nod2D - inv_thickness(i) = 0._WP - mass(i) = 0._WP - ice_nod(i) = .false. - !_________________________________________________________________________ - ! if cavity ndoe skip it - if ( ulevels_nod2d(i)>1 ) cycle - - if (a_ice(i) >= 0.01_WP) then - inv_thickness(i) = (rhoice*m_ice(i)+rhosno*m_snow(i))/a_ice(i) - inv_thickness(i) = 1.0_WP/max(inv_thickness(i), 9.0_WP) ! Limit the mass - - mass(i) = (m_ice(i)*rhoice+m_snow(i)*rhosno) - mass(i) = mass(i)/((1.0_WP+mass(i)*mass(i))*area(1,i)) - ! scale rhs_a, rhs_m, too. - rhs_a(i) = rhs_a(i)/area(1,i) - rhs_m(i) = rhs_m(i)/area(1,i) - - ice_nod(i) = .true. - endif - enddo - -! precompute pressure factor - do el=1,myDim_elem2D - elnodes=elem2D_nodes(:,el) - - pressure_fac(el) = 0._WP - ice_el(el) = .false. - - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels(el) > 1) cycle - - msum=sum(m_ice(elnodes))*val3 - if(msum > 0.01) then - ice_el(el) = .true. - asum=sum(a_ice(elnodes))*val3 - pressure_fac(el) = det2*pstar*msum*exp(-c_pressure*(1.0_WP-asum)) - endif - end do - - do row=1, myDim_nod2d - u_rhs_ice(row)=0.0_WP - v_rhs_ice(row)=0.0_WP - end do - -!======================================= -! Ice EVPdynamics Iteration main loop: -!======================================= + do row=1, myDim_nod2d + u_rhs_ice(row)=0.0_WP + v_rhs_ice(row)=0.0_WP + end do + !___________________________________________________________________________ + ! Ice EVPdynamics Iteration main loop: #if defined (__icepack) - rdg_conv_elem(:) = 0.0_WP - rdg_shear_elem(:) = 0.0_WP + rdg_conv_elem(:) = 0.0_WP + rdg_shear_elem(:) = 0.0_WP #endif - - do shortstep=1, steps - -!NR inlining, to make it easier to have local arrays and fuse loops -!NR call stress_tensor_m - ! Internal stress tensor - ! New implementation following Boullion et al, Ocean Modelling 2013. - ! SD, 30.07.2014 - !=================================================================== - - do el=1,myDim_elem2D - !__________________________________________________________________________ - if (ulevels(el)>1) cycle - - !__________________________________________________________________________ - if(ice_el(el)) then - - elnodes=elem2D_nodes(:,el) - dx=gradient_sca(1:3,el) - dy=gradient_sca(4:6,el) - ! METRICS: - meancos = val3*metric_factor(el) - ! - ! ====== Deformation rate tensor on element elem: - eps11(el) = sum(dx(:)*u_ice_aux(elnodes)) - sum(v_ice_aux(elnodes))*meancos !metrics - eps22(el) = sum(dy(:)*v_ice_aux(elnodes)) - eps12(el) = 0.5_WP*(sum(dy(:)*u_ice_aux(elnodes) + dx(:)*v_ice_aux(elnodes)) & - +sum(u_ice_aux(elnodes))*meancos ) !metrics - - ! ======= Switch to eps1,eps2 - eps1 = eps11(el) + eps22(el) - eps2 = eps11(el) - eps22(el) - - ! ====== moduli: - delta = sqrt(eps1**2+vale*(eps2**2+4.0_WP*eps12(el)**2)) - - pressure = pressure_fac(el)/(delta+delta_min) - -! si1 = det1*(sigma11(el)+sigma22(el)) + pressure*(eps1-delta) -! si2 = det1*(sigma11(el)-sigma22(el)) + pressure*eps2*vale -! sigma11(el) = 0.5_WP*(si1+si2) -! sigma22(el) = 0.5_WP*(si1-si2) -!NR directly insert si1, si2 cancels some operations and should increase accuracy - sigma12(el) = det1*sigma12(el) + pressure*eps12(el)*vale - sigma11(el) = det1*sigma11(el) + 0.5_WP*pressure*(eps1 - delta + eps2*vale) - sigma22(el) = det1*sigma22(el) + 0.5_WP*pressure*(eps1 - delta - eps2*vale) + do shortstep=1, steps + !NR inlining, to make it easier to have local arrays and fuse loops + !NR call stress_tensor_m + ! Internal stress tensor + ! New implementation following Boullion et al, Ocean Modelling 2013. + ! SD, 30.07.2014 + !_______________________________________________________________________ + do el=1,myDim_elem2D + if (ulevels(el)>1) cycle + + !___________________________________________________________________ + if(ice_el(el)) then + + elnodes=elem2D_nodes(:,el) + dx=gradient_sca(1:3,el) + dy=gradient_sca(4:6,el) + ! METRICS: + meancos = val3*metric_factor(el) + ! + ! ====== Deformation rate tensor on element elem: + eps11(el) = sum(dx(:)*u_ice_aux(elnodes)) - sum(v_ice_aux(elnodes))*meancos !metrics + eps22(el) = sum(dy(:)*v_ice_aux(elnodes)) + eps12(el) = 0.5_WP*(sum(dy(:)*u_ice_aux(elnodes) + dx(:)*v_ice_aux(elnodes)) & + +sum(u_ice_aux(elnodes))*meancos ) !metrics + + ! ======= Switch to eps1,eps2 + eps1 = eps11(el) + eps22(el) + eps2 = eps11(el) - eps22(el) + + ! ====== moduli: + delta = sqrt(eps1**2+vale*(eps2**2+4.0_WP*eps12(el)**2)) + + pressure = pressure_fac(el)/(delta+delta_min) + + ! si1 = det1*(sigma11(el)+sigma22(el)) + pressure*(eps1-delta) + ! si2 = det1*(sigma11(el)-sigma22(el)) + pressure*eps2*vale + ! sigma11(el) = 0.5_WP*(si1+si2) + ! sigma22(el) = 0.5_WP*(si1-si2) + !NR directly insert si1, si2 cancels some operations and should increase accuracy + sigma12(el) = det1*sigma12(el) + pressure*eps12(el)*vale + sigma11(el) = det1*sigma11(el) + 0.5_WP*pressure*(eps1 - delta + eps2*vale) + sigma22(el) = det1*sigma22(el) + 0.5_WP*pressure*(eps1 - delta - eps2*vale) #if defined (__icepack) - rdg_conv_elem(el) = -min((eps11(el)+eps22(el)),0.0_WP) - rdg_shear_elem(el) = 0.5_WP*(delta - abs(eps11(el)+eps22(el))) + rdg_conv_elem(el) = -min((eps11(el)+eps22(el)),0.0_WP) + rdg_shear_elem(el) = 0.5_WP*(delta - abs(eps11(el)+eps22(el))) #endif - ! end do ! fuse loops - ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of - ! Boullion et al Ocean Modelling 2013, but in an implicit mode: - ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), - ! and similarly for si2 and sigma12 - - !NR inlining call stress2rhs_m - ! add internal stress to the rhs - ! SD, 30.07.2014 - !----------------------------------------------------------------- - if (elnodes(1) <= myDim_nod2D) then - u_rhs_ice(elnodes(1)) = u_rhs_ice(elnodes(1)) - elem_area(el)* & - (sigma11(el)*dx(1)+sigma12(el)*(dy(1) + meancos)) !metrics - v_rhs_ice(elnodes(1)) = v_rhs_ice(elnodes(1)) - elem_area(el)* & - (sigma12(el)*dx(1)+sigma22(el)*dy(1) - sigma11(el)*meancos) ! metrics - end if - - if (elnodes(2) <= myDim_nod2D) then - u_rhs_ice(elnodes(2)) = u_rhs_ice(elnodes(2)) - elem_area(el)* & - (sigma11(el)*dx(2)+sigma12(el)*(dy(2) + meancos)) !metrics - v_rhs_ice(elnodes(2)) = v_rhs_ice(elnodes(2)) - elem_area(el)* & - (sigma12(el)*dx(2)+sigma22(el)*dy(2) - sigma11(el)*meancos) ! metrics - end if - - if (elnodes(3) <= myDim_nod2D) then - u_rhs_ice(elnodes(3)) = u_rhs_ice(elnodes(3)) - elem_area(el)* & - (sigma11(el)*dx(3)+sigma12(el)*(dy(3) + meancos)) !metrics - v_rhs_ice(elnodes(3)) = v_rhs_ice(elnodes(3)) - elem_area(el)* & - (sigma12(el)*dx(3)+sigma22(el)*dy(3) - sigma11(el)*meancos) ! metrics - end if - end if - end do ! --> do el=1,myDim_elem2D - - do i=1, myDim_nod2d - !__________________________________________________________________________ - if (ulevels_nod2D(i)>1) cycle - - !__________________________________________________________________________ - if (ice_nod(i)) then ! Skip if ice is absent + ! end do ! fuse loops + ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of + ! Boullion et al Ocean Modelling 2013, but in an implicit mode: + ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), + ! and similarly for si2 and sigma12 - u_rhs_ice(i) = u_rhs_ice(i)*mass(i) + rhs_a(i) - v_rhs_ice(i) = v_rhs_ice(i)*mass(i) + rhs_m(i) + !NR inlining call stress2rhs_m + ! add internal stress to the rhs + ! SD, 30.07.2014 + !----------------------------------------------------------------- + if (elnodes(1) <= myDim_nod2D) then + u_rhs_ice(elnodes(1)) = u_rhs_ice(elnodes(1)) - elem_area(el)* & + (sigma11(el)*dx(1)+sigma12(el)*(dy(1) + meancos)) !metrics + v_rhs_ice(elnodes(1)) = v_rhs_ice(elnodes(1)) - elem_area(el)* & + (sigma12(el)*dx(1)+sigma22(el)*dy(1) - sigma11(el)*meancos) ! metrics + end if - ! end do !NR fuse loops - !============= stress2rhs_m ends ====================== + if (elnodes(2) <= myDim_nod2D) then + u_rhs_ice(elnodes(2)) = u_rhs_ice(elnodes(2)) - elem_area(el)* & + (sigma11(el)*dx(2)+sigma12(el)*(dy(2) + meancos)) !metrics + v_rhs_ice(elnodes(2)) = v_rhs_ice(elnodes(2)) - elem_area(el)* & + (sigma12(el)*dx(2)+sigma22(el)*dy(2) - sigma11(el)*meancos) ! metrics + end if - ! do i=1,myDim_nod2D - - umod = sqrt((u_ice_aux(i)-u_w(i))**2+(v_ice_aux(i)-v_w(i))**2) - drag = rdt*Cd_oce_ice*umod*density_0*inv_thickness(i) - - !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) - rhsu = u_ice(i)+drag*u_w(i)+rdt*(inv_thickness(i)*stress_atmice_x(i)+u_rhs_ice(i)) + beta_evp*u_ice_aux(i) - rhsv = v_ice(i)+drag*v_w(i)+rdt*(inv_thickness(i)*stress_atmice_y(i)+v_rhs_ice(i)) + beta_evp*v_ice_aux(i) - - !solve (Coriolis and water stress are treated implicitly) - det = bc_index_nod2D(i) / ((1.0_WP+beta_evp+drag)**2 + (rdt*coriolis_node(i))**2) - - u_ice_aux(i) = det*((1.0_WP+beta_evp+drag)*rhsu +rdt*coriolis_node(i)*rhsv) - v_ice_aux(i) = det*((1.0_WP+beta_evp+drag)*rhsv -rdt*coriolis_node(i)*rhsu) - end if - end do ! --> do i=1, myDim_nod2d + if (elnodes(3) <= myDim_nod2D) then + u_rhs_ice(elnodes(3)) = u_rhs_ice(elnodes(3)) - elem_area(el)* & + (sigma11(el)*dx(3)+sigma12(el)*(dy(3) + meancos)) !metrics + v_rhs_ice(elnodes(3)) = v_rhs_ice(elnodes(3)) - elem_area(el)* & + (sigma12(el)*dx(3)+sigma22(el)*dy(3) - sigma11(el)*meancos) ! metrics + end if + end if + end do ! --> do el=1,myDim_elem2D + + do i=1, myDim_nod2d + !___________________________________________________________________ + if (ulevels_nod2D(i)>1) cycle + + !___________________________________________________________________ + if (ice_nod(i)) then ! Skip if ice is absent + u_rhs_ice(i) = u_rhs_ice(i)*mass(i) + rhs_a(i) + v_rhs_ice(i) = v_rhs_ice(i)*mass(i) + rhs_m(i) + ! end do !NR fuse loops + !============= stress2rhs_m ends ====================== + ! do i=1,myDim_nod2D + umod = sqrt((u_ice_aux(i)-u_w(i))**2+(v_ice_aux(i)-v_w(i))**2) + drag = rdt*Cd_oce_ice*umod*density_0*inv_thickness(i) + + !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) + rhsu = u_ice(i)+drag*u_w(i)+rdt*(inv_thickness(i)*stress_atmice_x(i)+u_rhs_ice(i)) + beta_evp*u_ice_aux(i) + rhsv = v_ice(i)+drag*v_w(i)+rdt*(inv_thickness(i)*stress_atmice_y(i)+v_rhs_ice(i)) + beta_evp*v_ice_aux(i) + + !solve (Coriolis and water stress are treated implicitly) + det = bc_index_nod2D(i) / ((1.0_WP+beta_evp+drag)**2 + (rdt*coriolis_node(i))**2) + + u_ice_aux(i) = det*((1.0_WP+beta_evp+drag)*rhsu +rdt*coriolis_node(i)*rhsv) + v_ice_aux(i) = det*((1.0_WP+beta_evp+drag)*rhsv -rdt*coriolis_node(i)*rhsu) + end if + end do ! --> do i=1, myDim_nod2d - !___________________________________________________________________________ - ! apply sea ice velocity boundary condition - do ed=1,myDim_edge2D !_______________________________________________________________________ - ! apply coastal sea ice velocity boundary conditions - if(myList_edge2D(ed) > edge2D_in) then - u_ice_aux(edges(:,ed))=0.0_WP - v_ice_aux(edges(:,ed))=0.0_WP - end if + ! apply sea ice velocity boundary condition + do ed=1,myDim_edge2D + !___________________________________________________________________ + ! apply coastal sea ice velocity boundary conditions + if(myList_edge2D(ed) > edge2D_in) then + u_ice_aux(edges(:,ed))=0.0_WP + v_ice_aux(edges(:,ed))=0.0_WP + end if + + !___________________________________________________________________ + ! apply sea ice velocity boundary conditions at cavity-ocean edge + if (use_cavity) then + if ( (ulevels(edge_tri(1,ed))>1) .or. & + ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then + u_ice_aux(edges(1:2,ed))=0.0_WP + v_ice_aux(edges(1:2,ed))=0.0_WP + end if + end if + end do ! --> do ed=1,myDim_edge2D !_______________________________________________________________________ - ! apply sea ice velocity boundary conditions at cavity-ocean edge - if (use_cavity) then - if ( (ulevels(edge_tri(1,ed))>1) .or. & - ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then - u_ice_aux(edges(1:2,ed))=0.0_WP - v_ice_aux(edges(1:2,ed))=0.0_WP - end if - end if - end do ! --> do ed=1,myDim_edge2D - - !___________________________________________________________________________ - call exchange_nod_begin(u_ice_aux, v_ice_aux, partit) - - do row=1, myDim_nod2d - u_rhs_ice(row)=0.0_WP - v_rhs_ice(row)=0.0_WP - end do + call exchange_nod_begin(u_ice_aux, v_ice_aux, partit) - call exchange_nod_end(partit) - - end do ! --> do shortstep=1, steps + do row=1, myDim_nod2d + u_rhs_ice(row)=0.0_WP + v_rhs_ice(row)=0.0_WP + end do - u_ice=u_ice_aux - v_ice=v_ice_aux + call exchange_nod_end(partit) + + end do ! --> do shortstep=1, steps + u_ice=u_ice_aux + v_ice=v_ice_aux end subroutine EVPdynamics_m ! ! -! -! ==================================================================== +!_______________________________________________________________________________ ! aEVP implementation: Similar to mEVP, but alpha is variable. ! The subroutines involved are with _a. -! ==================================================================== -! +! EVP stability parameter alpha is computed at each element +! aEVP implementation +! SD, 13.02.2017 subroutine find_alpha_field_a(ice, partit, mesh) - ! EVP stability parameter alpha is computed at each element - ! aEVP implementation - ! SD, 13.02.2017 - ! ================================================================== USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -729,68 +728,76 @@ subroutine find_alpha_field_a(ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice + real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) !___________________________________________________________________________ - val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) - do elem=1,myDim_elem2D - elnodes=elem2D_nodes(:,elem) - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels(elem) > 1) cycle + val3=1.0_WP/3.0_WP + vale=1.0_WP/(ellipse**2) + do elem=1,myDim_elem2D + elnodes=elem2D_nodes(:,elem) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle + + msum=sum(m_ice(elnodes))*val3 + if(msum<=0.01_WP) cycle !DS + asum=sum(a_ice(elnodes))*val3 + + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + ! METRICS: + vsum=sum(v_ice_aux(elnodes)) + usum=sum(u_ice_aux(elnodes)) + meancos=metric_factor(elem) + ! + ! ====== Deformation rate tensor on element elem: + eps11(elem)=sum(dx*u_ice_aux(elnodes)) + eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics + eps22(elem)=sum(dy*v_ice_aux(elnodes)) + eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) + eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics + + ! ======= Switch to eps1,eps2 + eps1=eps11(elem)+eps22(elem) + eps2=eps11(elem)-eps22(elem) - msum=sum(m_ice(elnodes))*val3 - if(msum<=0.01_WP) cycle !DS - asum=sum(a_ice(elnodes))*val3 - - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - ! METRICS: - vsum=sum(v_ice_aux(elnodes)) - usum=sum(u_ice_aux(elnodes)) - meancos=metric_factor(elem) - ! - ! ====== Deformation rate tensor on element elem: - eps11(elem)=sum(dx*u_ice_aux(elnodes)) - eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics - eps22(elem)=sum(dy*v_ice_aux(elnodes)) - eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) - eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics - - ! ======= Switch to eps1,eps2 - eps1=eps11(elem)+eps22(elem) - eps2=eps11(elem)-eps22(elem) - - ! ====== moduli: - delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) - delta=sqrt(delta) + ! ====== moduli: + delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) + delta=sqrt(delta) #if defined (__icepack) - pressure = sum(strength(elnodes))*val3/(delta+delta_min)/msum + pressure = sum(strength(elnodes))*val3/(delta+delta_min)/msum #else - pressure = pstar*exp(-c_pressure*(1.0_WP-asum))/(delta+delta_min) ! no multiplication + pressure = pstar*exp(-c_pressure*(1.0_WP-asum))/(delta+delta_min) ! no multiplication ! with thickness (msum) #endif - !adjust c_aevp such, that alpha_evp_array and beta_evp_array become in acceptable range - alpha_evp_array(elem)=max(50.0_WP,sqrt(ice_dt*c_aevp*pressure/rhoice/elem_area(elem))) - ! /voltriangle(elem) for FESOM1.4 - ! We do not allow alpha to be too small! - end do - end subroutine find_alpha_field_a -! ==================================================================== - + !adjust c_aevp such, that alpha_evp_array and beta_evp_array become in acceptable range + alpha_evp_array(elem)=max(50.0_WP,sqrt(ice_dt*c_aevp*pressure/rhoice/elem_area(elem))) + ! /voltriangle(elem) for FESOM1.4 + ! We do not allow alpha to be too small! + end do !--> do elem=1,myDim_elem2D +end subroutine find_alpha_field_a +! +! +!_______________________________________________________________________________ +! Internal stress tensor +! New implementation following Boullion et al, Ocean Modelling 2013. +! and Kimmritz et al., Ocean Modelling 2016 +! SD, 14.02.2017 subroutine stress_tensor_a(ice, partit, mesh) - ! Internal stress tensor - ! New implementation following Boullion et al, Ocean Modelling 2013. - ! and Kimmritz et al., Ocean Modelling 2016 - ! SD, 14.02.2017 - !=================================================================== USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -815,57 +822,65 @@ subroutine stress_tensor_a(ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice + real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) !___________________________________________________________________________ - val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) - do elem=1,myDim_elem2D - !__________________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels(elem) > 1) cycle - - !__________________________________________________________________________ - det2=1.0_WP/(1.0_WP+alpha_evp_array(elem)) ! Take alpha from array - det1=alpha_evp_array(elem)*det2 - - elnodes=elem2D_nodes(:,elem) - - msum=sum(m_ice(elnodes))*val3 - if(msum<=0.01_WP) cycle !DS - asum=sum(a_ice(elnodes))*val3 - - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - ! METRICS: - vsum=sum(v_ice_aux(elnodes)) - usum=sum(u_ice_aux(elnodes)) - meancos=metric_factor(elem) - ! - ! ====== Deformation rate tensor on element elem: - eps11(elem)=sum(dx*u_ice_aux(elnodes)) - eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics - eps22(elem)=sum(dy*v_ice_aux(elnodes)) - eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) - eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics - - ! ======= Switch to eps1,eps2 - eps1=eps11(elem)+eps22(elem) - eps2=eps11(elem)-eps22(elem) - - ! ====== moduli: - delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) - delta=sqrt(delta) + val3=1.0_WP/3.0_WP + vale=1.0_WP/(ellipse**2) + do elem=1,myDim_elem2D + !__________________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle + + !__________________________________________________________________________ + det2=1.0_WP/(1.0_WP+alpha_evp_array(elem)) ! Take alpha from array + det1=alpha_evp_array(elem)*det2 + + elnodes=elem2D_nodes(:,elem) + + msum=sum(m_ice(elnodes))*val3 + if(msum<=0.01_WP) cycle !DS + asum=sum(a_ice(elnodes))*val3 + + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + ! METRICS: + vsum=sum(v_ice_aux(elnodes)) + usum=sum(u_ice_aux(elnodes)) + meancos=metric_factor(elem) + ! + ! ====== Deformation rate tensor on element elem: + eps11(elem)=sum(dx*u_ice_aux(elnodes)) + eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics + eps22(elem)=sum(dy*v_ice_aux(elnodes)) + eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) + eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics + + ! ======= Switch to eps1,eps2 + eps1=eps11(elem)+eps22(elem) + eps2=eps11(elem)-eps22(elem) + + ! ====== moduli: + delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) + delta=sqrt(delta) #if defined (__icepack) - pressure = sum(strength(elnodes))*val3/(delta+delta_min) + pressure = sum(strength(elnodes))*val3/(delta+delta_min) #else - pressure=pstar*msum*exp(-c_pressure*(1.0_WP-asum))/(delta+delta_min) + pressure=pstar*msum*exp(-c_pressure*(1.0_WP-asum))/(delta+delta_min) #endif r1=pressure*(eps1-delta) @@ -884,22 +899,20 @@ subroutine stress_tensor_a(ice, partit, mesh) rdg_conv_elem(elem) = -min((eps11(elem)+eps22(elem)),0.0_WP) rdg_shear_elem(elem) = 0.5_WP*(delta - abs(eps11(elem)+eps22(elem))) #endif - - end do - ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of - ! Boullion et al Ocean Modelling 2013, but in an implicit mode: - ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), - ! and similarly for si2 and sigma12 + end do ! --> do elem=1,myDim_elem2D + ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of + ! Boullion et al Ocean Modelling 2013, but in an implicit mode: + ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), + ! and similarly for si2 and sigma12 end subroutine stress_tensor_a ! -!=================================================================== ! +!_______________________________________________________________________________ +! assemble rhs and solve for ice velocity +! New implementation based on Bouillion et al. Ocean Modelling 2013 +! and Kimmritz et al., Ocean Modelling 2016 +! SD 14.02.17 subroutine EVPdynamics_a(ice, partit, mesh) - ! assemble rhs and solve for ice velocity - ! New implementation based on Bouillion et al. Ocean Modelling 2013 - ! and Kimmritz et al., Ocean Modelling 2016 - ! SD 14.02.17 - !--------------------------------------------------------- USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -940,106 +953,107 @@ subroutine EVPdynamics_a(ice, partit, mesh) m_snow => ice%data(3)%values(:) !___________________________________________________________________________ - steps=evp_rheol_steps - rdt=ice_dt - u_ice_aux=u_ice ! Initialize solver variables - v_ice_aux=v_ice - call ssh2rhs(ice, partit, mesh) + steps=evp_rheol_steps + rdt=ice_dt + u_ice_aux=u_ice ! Initialize solver variables + v_ice_aux=v_ice + call ssh2rhs(ice, partit, mesh) #if defined (__icepack) - rdg_conv_elem(:) = 0.0_WP - rdg_shear_elem(:) = 0.0_WP + rdg_conv_elem(:) = 0.0_WP + rdg_shear_elem(:) = 0.0_WP #endif - do shortstep=1, steps - call stress_tensor_a(ice, partit, mesh) - call stress2rhs_m(ice, partit, mesh) ! _m=_a, so no _m version is the only one! - do i=1,myDim_nod2D - - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels_nod2d(i)>1) cycle + do shortstep=1, steps + call stress_tensor_a(ice, partit, mesh) + call stress2rhs_m(ice, partit, mesh) ! _m=_a, so no _m version is the only one! + do i=1,myDim_nod2D - thickness=(rhoice*m_ice(i)+rhosno*m_snow(i))/max(a_ice(i),0.01_WP) - thickness=max(thickness, 9.0_WP) ! Limit if it is too small (0.01 m) - inv_thickness=1.0_WP/thickness - - umod=sqrt((u_ice_aux(i)-u_w(i))**2+(v_ice_aux(i)-v_w(i))**2) - drag=rdt*Cd_oce_ice*umod*density_0*inv_thickness - - !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) - rhsu=u_ice(i)+drag*u_w(i)+rdt*(inv_thickness*stress_atmice_x(i)+u_rhs_ice(i)) - rhsv=v_ice(i)+drag*v_w(i)+rdt*(inv_thickness*stress_atmice_y(i)+v_rhs_ice(i)) - - rhsu=beta_evp_array(i)*u_ice_aux(i)+rhsu - rhsv=beta_evp_array(i)*v_ice_aux(i)+rhsv - !solve (Coriolis and water stress are treated implicitly) - fc=rdt*coriolis_node(i) - det=(1.0_WP+beta_evp_array(i)+drag)**2+fc**2 - det=bc_index_nod2D(i)/det - u_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsu+fc*rhsv) - v_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsv-fc*rhsu) - end do - - !___________________________________________________________________________ - ! apply sea ice velocity boundary condition - do ed=1,myDim_edge2D - !_______________________________________________________________________ - ! apply coastal sea ice velocity boundary conditions - if(myList_edge2D(ed) > edge2D_in) then - u_ice_aux(edges(:,ed))=0.0_WP - v_ice_aux(edges(:,ed))=0.0_WP - end if + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels_nod2d(i)>1) cycle + + thickness=(rhoice*m_ice(i)+rhosno*m_snow(i))/max(a_ice(i),0.01_WP) + thickness=max(thickness, 9.0_WP) ! Limit if it is too small (0.01 m) + inv_thickness=1.0_WP/thickness + + umod=sqrt((u_ice_aux(i)-u_w(i))**2+(v_ice_aux(i)-v_w(i))**2) + drag=rdt*Cd_oce_ice*umod*density_0*inv_thickness + + !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) + rhsu=u_ice(i)+drag*u_w(i)+rdt*(inv_thickness*stress_atmice_x(i)+u_rhs_ice(i)) + rhsv=v_ice(i)+drag*v_w(i)+rdt*(inv_thickness*stress_atmice_y(i)+v_rhs_ice(i)) + + rhsu=beta_evp_array(i)*u_ice_aux(i)+rhsu + rhsv=beta_evp_array(i)*v_ice_aux(i)+rhsv + !solve (Coriolis and water stress are treated implicitly) + fc=rdt*coriolis_node(i) + det=(1.0_WP+beta_evp_array(i)+drag)**2+fc**2 + det=bc_index_nod2D(i)/det + u_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsu+fc*rhsv) + v_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsv-fc*rhsu) + end do - !_______________________________________________________________________ - ! apply sea ice velocity boundary conditions at cavity-ocean edge - if (use_cavity) then - if ( (ulevels(edge_tri(1,ed))>1) .or. & - ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then - u_ice_aux(edges(1:2,ed))=0.0_WP - v_ice_aux(edges(1:2,ed))=0.0_WP + !___________________________________________________________________________ + ! apply sea ice velocity boundary condition + do ed=1,myDim_edge2D + !_______________________________________________________________________ + ! apply coastal sea ice velocity boundary conditions + if(myList_edge2D(ed) > edge2D_in) then + u_ice_aux(edges(:,ed))=0.0_WP + v_ice_aux(edges(:,ed))=0.0_WP + end if + + !_______________________________________________________________________ + ! apply sea ice velocity boundary conditions at cavity-ocean edge + if (use_cavity) then + if ( (ulevels(edge_tri(1,ed))>1) .or. & + ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then + u_ice_aux(edges(1:2,ed))=0.0_WP + v_ice_aux(edges(1:2,ed))=0.0_WP + end if end if - end if - end do ! --> do ed=1,myDim_edge2D - - call exchange_nod(u_ice_aux, v_ice_aux, partit) - end do - + end do ! --> do ed=1,myDim_edge2D + + call exchange_nod(u_ice_aux, v_ice_aux, partit) + end do + u_ice=u_ice_aux v_ice=v_ice_aux - - call find_alpha_field_a(ice, partit, mesh) ! alpha_evp_array is initialized with alpha_evp; - ! At this stage we already have non-trivial velocities. - call find_beta_field_a(partit, mesh) + + call find_alpha_field_a(ice, partit, mesh) ! alpha_evp_array is initialized with alpha_evp; + ! At this stage we already have non-trivial velocities. + call find_beta_field_a(partit, mesh) end subroutine EVPdynamics_a ! -! ================================================================= ! -subroutine find_beta_field_a(partit, mesh) +!_______________________________________________________________________________ ! beta_evp_array is defined at nodes, and this is the only ! reason we need it in addition to alpha_evp_array (we work with ! alpha=beta, and keep different names for generality; mEVP can work with ! alpha \ne beta, but not aEVP). - -use mod_mesh -USE MOD_PARTIT -USE MOD_PARSUP -use o_param -USE i_param -use i_arrays -Implicit none -integer :: n - -type(t_mesh), intent(in), target :: mesh -type(t_partit), intent(inout), target :: partit - +subroutine find_beta_field_a(partit, mesh) + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + USE i_param + use i_arrays + Implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + !___________________________________________________________________________ + integer :: n + !___________________________________________________________________________ + ! pointer on necessary derived types #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + !___________________________________________________________________________ DO n=1, myDim_nod2D - !_______________________________________________________________________ + !________________________________________________________________________ ! if element has any cavity node skip it if (ulevels_nod2d(n)>1) cycle diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index f70c6ca4c..f8c3ecd5e 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -68,8 +68,8 @@ MODULE i_ARRAYS REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux #if defined (__oasis) || defined (__ifsinterface) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 68d37473a..72de53c91 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -1,63 +1,63 @@ module ice_array_setup_interface - interface - subroutine ice_array_setup(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - end subroutine - end interface + interface + subroutine ice_array_setup(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface end module module ice_initial_state_interface - interface - subroutine ice_initial_state(ice, tracers, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - USE MOD_ICE - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers - type(t_ice) , intent(inout), target :: ice - end subroutine - end interface + interface + subroutine ice_initial_state(ice, tracers, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + USE MOD_ICE + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers + type(t_ice) , intent(inout), target :: ice + end subroutine + end interface end module module ice_setup_interface - interface - subroutine ice_setup(ice, tracers, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - USE MOD_ICE - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers - type(t_ice), intent(inout), target :: ice - end subroutine - end interface + interface + subroutine ice_setup(ice, tracers, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + USE MOD_ICE + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(in), target :: tracers + type(t_ice), intent(inout), target :: ice + end subroutine + end interface end module module ice_timestep_interface - interface - subroutine ice_timestep(istep, ice, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - USE MOD_ICE - integer, intent(in) :: istep - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_ice), intent(inout), target :: ice - end subroutine - end interface + interface + subroutine ice_timestep(istep, ice, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + USE MOD_ICE + integer, intent(in) :: istep + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_ice), intent(inout), target :: ice + end subroutine + end interface end module ! @@ -135,8 +135,8 @@ subroutine ice_array_setup(partit, mesh) ! Allocate memory for variables of ice model ! allocate(u_ice(n_size), v_ice(n_size)) allocate(U_rhs_ice(n_size), V_rhs_ice(n_size)) - allocate(sigma11(e_size), sigma12(e_size), sigma22(e_size)) - allocate(eps11(e_size), eps12(e_size), eps22(e_size)) +! allocate(sigma11(e_size), sigma12(e_size), sigma22(e_size)) +! allocate(eps11(e_size), eps12(e_size), eps22(e_size)) ! allocate(m_ice(n_size), a_ice(n_size), m_snow(n_size)) allocate(rhs_m(n_size), rhs_a(n_size), rhs_ms(n_size)) allocate(t_skin(n_size)) @@ -172,12 +172,12 @@ subroutine ice_array_setup(partit, mesh) V_rhs_ice=0.0_WP ! U_ice=0.0_WP ! V_ice=0.0_WP - sigma11=0.0_WP - sigma22=0.0_WP - sigma12=0.0_WP - eps11=0.0_WP - eps12=0.0_WP - eps22=0.0_WP +! sigma11=0.0_WP +! sigma22=0.0_WP +! sigma12=0.0_WP +! eps11=0.0_WP +! eps12=0.0_WP +! eps22=0.0_WP t_skin=0.0_WP rhs_mdiv=0.0_WP rhs_adiv=0.0_WP @@ -225,52 +225,50 @@ end subroutine ice_array_setup !_______________________________________________________________________________ ! Sea ice model step subroutine ice_timestep(step, ice, partit, mesh) -use mod_mesh -USE MOD_PARTIT -USE MOD_PARSUP -USE MOD_ICE -use i_arrays -use o_param -use g_CONFIG -use i_PARAM, only: whichEVP -use ice_EVP_interfaces -use ice_maEVP_interfaces -use ice_fct_interfaces -use ice_thermodynamics_interfaces -use cavity_interfaces + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_ICE + use i_arrays + use o_param + use g_CONFIG + use i_PARAM, only: whichEVP + use ice_EVP_interfaces + use ice_maEVP_interfaces + use ice_fct_interfaces + use ice_thermodynamics_interfaces + use cavity_interfaces #if defined (__icepack) use icedrv_main, only: step_icepack #endif - -implicit none -integer, intent(in) :: step -type(t_ice), intent(inout), target :: ice -type(t_partit), intent(inout), target :: partit -type(t_mesh), intent(in), target :: mesh -integer :: i -REAL(kind=WP) :: t0,t1, t2, t3 - + implicit none + integer, intent(in) :: step + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: i + REAL(kind=WP) :: t0,t1, t2, t3 #if defined (__icepack) -real(kind=WP) :: time_evp, time_advec, time_therm + real(kind=WP) :: time_evp, time_advec, time_therm #endif - -real(kind=WP), dimension(:), pointer :: u_ice, v_ice + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -u_ice => ice%uvice(1,:) -v_ice => ice%uvice(2,:) - -t0=MPI_Wtime() - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + !___________________________________________________________________________ + t0=MPI_Wtime() #if defined (__icepack) call step_icepack(mesh, time_evp, time_advec, time_therm) ! EVP, advection and thermodynamic parts #else !___________________________________________________________________________ ! ===== Dynamics - SELECT CASE (whichEVP) CASE (0) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics...'//achar(27)//'[0m' @@ -303,16 +301,20 @@ subroutine ice_timestep(step, ice, partit, mesh) end do #endif /* (__oifs) */ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_TG_rhs_div...'//achar(27)//'[0m' - call ice_TG_rhs_div (ice, partit, mesh) + call ice_TG_rhs_div (ice, partit, mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_fct_solve...'//achar(27)//'[0m' call ice_fct_solve (ice, partit, mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_update_for_div...'//achar(27)//'[0m' call ice_update_for_div(ice, partit, mesh) + #if defined (__oifs) do i=1,myDim_nod2D+eDim_nod2D if (a_ice(i)>0.0_WP) ice_temp(i) = ice_temp(i)/a_ice(i) end do #endif /* (__oifs) */ + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call cut_off...'//achar(27)//'[0m' call cut_off(ice, partit, mesh) @@ -325,7 +327,7 @@ subroutine ice_timestep(step, ice, partit, mesh) call thermodynamics(ice, partit, mesh) #endif /* (__icepack) */ - + !___________________________________________________________________________ do i=1,myDim_nod2D+eDim_nod2D if ( ( U_ice(i)/=0.0_WP .and. mesh%ulevels_nod2d(i)>1) .or. (V_ice(i)/=0.0_WP .and. mesh%ulevels_nod2d(i)>1) ) then write(*,*) " --> found cavity velocity /= 0.0_WP , ", mype @@ -339,21 +341,20 @@ subroutine ice_timestep(step, ice, partit, mesh) rtime_ice = rtime_ice + (t3-t0) rtime_tot = rtime_tot + (t3-t0) if(mod(step,logfile_outfreq)==0 .and. mype==0) then - write(*,*) '___ICE STEP EXECUTION TIMES____________________________' + write(*,*) '___ICE STEP EXECUTION TIMES____________________________' #if defined (__icepack) - write(*,"(A, ES10.3)") ' Ice Dyn. :', time_evp + write(*,"(A, ES10.3)") ' Ice Dyn. :', time_evp write(*,"(A, ES10.3)") ' Ice Advect. :', time_advec write(*,"(A, ES10.3)") ' Ice Thermodyn. :', time_therm #else - write(*,"(A, ES10.3)") ' Ice Dyn. :', t1-t0 - write(*,"(A, ES10.3)") ' Ice Advect. :', t2-t1 - write(*,"(A, ES10.3)") ' Ice Thermodyn. :', t3-t2 + write(*,"(A, ES10.3)") ' Ice Dyn. :', t1-t0 + write(*,"(A, ES10.3)") ' Ice Advect. :', t2-t1 + write(*,"(A, ES10.3)") ' Ice Thermodyn. :', t3-t2 #endif /* (__icepack) */ - write(*,*) ' _______________________________' - write(*,"(A, ES10.3)") ' Ice TOTAL :', t3-t0 - write(*,*) + write(*,*) ' _______________________________' + write(*,"(A, ES10.3)") ' Ice TOTAL :', t3-t0 + write(*,*) endif - end subroutine ice_timestep ! ! From c0da3fcd5bc35a6d70020dfb76bc26c9a48c1de8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 00:21:39 +0100 Subject: [PATCH 606/909] what the fuck --- src/MOD_ICE.F90 | 659 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 659 insertions(+) create mode 100644 src/MOD_ICE.F90 diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 new file mode 100644 index 000000000..32c1652f1 --- /dev/null +++ b/src/MOD_ICE.F90 @@ -0,0 +1,659 @@ +MODULE MOD_ICE +USE o_PARAM, only: WP +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE + +! +! +!_______________________________________________________________________________ +! set data array derived type for ice-tracers (area, mice, msnow) more tracer +! are theretical possible +TYPE T_ICE_DATA + !___________________________________________________________________________ + real(kind=WP), allocatable, dimension(:) :: values, values_old, values_rhs, & + values_div_rhs, dvalues, valuesl + integer :: ID + !___________________________________________________________________________ + contains + procedure WRITE_T_ICE_DATA + procedure READ_T_ICE_DATA + generic :: write(unformatted) => WRITE_T_ICE_DATA + generic :: read(unformatted) => READ_T_ICE_DATA +END TYPE T_ICE_DATA +! +! +!_______________________________________________________________________________ +! set work array derived type for ice +TYPE T_ICE_WORK + !___________________________________________________________________________ + real(kind=WP), allocatable, dimension(:) :: fct_tmax, fct_tmin + real(kind=WP), allocatable, dimension(:) :: fct_plus, fct_minus + real(kind=WP), allocatable, dimension(:,:) :: fct_fluxes + real(kind=WP), allocatable, dimension(:) :: fct_massmatrix + real(kind=WP), allocatable, dimension(:) :: sigma11, sigma12, sigma22 + real(kind=WP), allocatable, dimension(:) :: eps11, eps12, eps22 + real(kind=WP), allocatable, dimension(:) :: ice_strength, inv_areamass, inv_mass + real(kind=WP), allocatable, dimension(:) :: t_skin, thdgr, thdgrsn, thdgr_old + !___________________________________________________________________________ + contains + procedure WRITE_T_ICE_WORK + procedure READ_T_ICE_WORK + generic :: write(unformatted) => WRITE_T_ICE_WORK + generic :: read(unformatted) => READ_T_ICE_WORK +END TYPE T_ICE_WORK +! +! +!_______________________________________________________________________________ +! set work array derived type for ice +TYPE T_ICE_THERMO + !___________________________________________________________________________ + real(kind=WP), allocatable, dimension(:) :: t_skin, thdgr, thdgrsn, thdgr_old, ustar + !___________________________________________________________________________ + contains + procedure WRITE_T_ICE_THERMO + procedure READ_T_ICE_THERMO + generic :: write(unformatted) => WRITE_T_ICE_THERMO + generic :: read(unformatted) => READ_T_ICE_THERMO +END TYPE T_ICE_THERMO +! +! +!_______________________________________________________________________________ +! set work array derived type for ice +TYPE T_ICE_ATMCOUPL +#if defined (__oasis) || defined (__ifsinterface) + !___________________________________________________________________________ + real(kind=WP), allocatable, dimension(:) :: oce_flx_h, ice_flx_h, tmpoce_flx_h, tmpice_flx_h +#if defined (__oifs) || defined (__ifsinterface) + !___________________________________________________________________________ + real(kind=WP), allocatable, dimension(:) :: ice_alb, enthalpyoffuse + ! !!! DONT FORGET ice_temp rhs_tempdiv rhs_temp is advected for oifs !!! --> becomes additional ice + ! tracer in ice%data(4)%values +#endif /* (__oifs) */ +#endif /* (__oasis) */ + !___________________________________________________________________________ + contains + procedure WRITE_T_ICE_ATMCOUPL + procedure READ_T_ICE_ATMCOUPL + generic :: write(unformatted) => WRITE_T_ICE_ATMCOUPL + generic :: read(unformatted) => READ_T_ICE_ATMCOUPL +END TYPE T_ICE_ATMCOUPL + +! +! +!_______________________________________________________________________________ +! set main ice derived type contains parameters, data array, work array, u_ice, vice +TYPE T_ICE + + !___________________________________________________________________________ + ! zonal & merdional ice velocity + real(kind=WP), allocatable, dimension(:,:) :: uvice, uvice_rhs, uvice_old, uvice_aux + + ! surface stess atm<-->ice, oce<-->ice + real(kind=WP), allocatable, dimension(:,:) :: stress_atmice_xy, stress_iceoce_xy + + ! oce temp, salt, ssh, and uv at surface + real(kind=WP), allocatable, dimension(:) :: srfoce_temp, srfoce_salt, srfoce_ssh + real(kind=WP), allocatable, dimension(:,:) :: srfoce_uv + + ! freshwater & heatflux + real(kind=WP), allocatable, dimension(:) :: flx_fw, flx_h + + !___________________________________________________________________________ + ! total number of ice tracers (default=3, 1=area, 2=mice, 3=msnow, (4=ice_temp) +#if defined (__oifs) || defined (__ifsinterface) + integer :: num_itracers=4 +#else + integer :: num_itracers=3 +#endif + + ! put ice tracers data arrays + type(t_ice_data), allocatable, dimension(:) :: data + + !___________________________________________________________________________ + ! put ice working arrays + type(t_ice_work) :: work + + ! put thermodynamics arrays + type(t_ice_thermo) :: thermo + +#if defined (__oasis) || defined (__ifsinterface) + !___________________________________________________________________________ + ! put ice arrays for coupled model + type(t_ice_atmcoupl) :: atmcoupl +#endif /* (__oasis) */ + !___________________________________________________________________________ + ! set ice model parameters: + ! --- RHEOLOGY --- + real(kind=WP) :: pstar = 30000.0_WP ![N/m^2] + real(kind=WP) :: ellipse = 2.0_WP ! + real(kind=WP) :: c_pressure = 20.0_WP ! + real(kind=WP) :: delta_min = 1.0e-11 ! [s^(-1)] + real(kind=WP) :: Clim_evp = 615 ! kg/m^2 + real(kind=WP) :: zeta_min = 4.0e+8 ! kg/s + integer :: evp_rheol_steps=120 ! EVP rheology cybcycling steps + real(kind=WP) :: ice_gamma_fct=0.25_WP ! smoothing parameter in ice fct advection + real(kind=WP) :: ice_diff = 10.0_WP ! diffusion to stabilize ice advection + real(kind=WP) :: theta_io =0.0_WP ! rotation angle (ice-ocean), available + ! --- in EVP --- + real(kind=WP) :: alpha_evp=250, beta_evp=250 + real(kind=WP) :: c_aevp=0.15 ! 0.1--0.2, but should be adjusted experimentally + ! --- Ice forcing averaging --- + integer :: ice_ave_steps=1 !ice step=ice_ave_steps*oce_step + real(kind=WP) :: cd_oce_ice = 5.5e-3 ! drag coef. oce - ice + logical :: ice_free_slip=.false. + integer :: whichEVP=0 ! 0=standart; 1=mEVP; 2=aEVP + + real(kind=WP) :: ice_dt ! ice step=ice_ave_steps*oce_step + real(kind=WP) :: Tevp_inv + integer :: ice_steps_since_upd=0 + + logical :: ice_update = .true. + !___________________________________________________________________________ + contains + procedure WRITE_T_ICE + procedure READ_T_ICE + generic :: write(unformatted) => WRITE_T_ICE + generic :: read(unformatted) => READ_T_ICE +END TYPE T_ICE + +contains +! +! +!_______________________________________________________________________________ +! Unformatted writing for T_ICE_DATA +subroutine WRITE_T_ICE_DATA(tdata, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_DATA), intent(in) :: tdata + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call write_bin_array(tdata%values, unit, iostat, iomsg) + call write_bin_array(tdata%values_old, unit, iostat, iomsg) + call write_bin_array(tdata%values_rhs, unit, iostat, iomsg) + call write_bin_array(tdata%values_div_rhs, unit, iostat, iomsg) + call write_bin_array(tdata%dvalues, unit, iostat, iomsg) + call write_bin_array(tdata%valuesl, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tdata%ID +end subroutine WRITE_T_ICE_DATA + +! Unformatted reading for T_ICE_DATA +subroutine READ_T_ICE_DATA(tdata, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_DATA), intent(inout) :: tdata + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call read_bin_array(tdata%values, unit, iostat, iomsg) + call read_bin_array(tdata%values_old, unit, iostat, iomsg) + call read_bin_array(tdata%values_rhs, unit, iostat, iomsg) + call read_bin_array(tdata%values_div_rhs, unit, iostat, iomsg) + call read_bin_array(tdata%dvalues, unit, iostat, iomsg) + call read_bin_array(tdata%valuesl, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) tdata%ID +end subroutine READ_T_ICE_DATA +! +! +!_______________________________________________________________________________ +! Unformatted writing for T_ICE_WORK +subroutine WRITE_T_ICE_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_WORK), intent(in) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call write_bin_array(twork%fct_tmax, unit, iostat, iomsg) + call write_bin_array(twork%fct_tmin, unit, iostat, iomsg) + call write_bin_array(twork%fct_plus, unit, iostat, iomsg) + call write_bin_array(twork%fct_minus, unit, iostat, iomsg) + call write_bin_array(twork%fct_fluxes, unit, iostat, iomsg) + call write_bin_array(twork%fct_massmatrix,unit, iostat, iomsg) + call write_bin_array(twork%sigma11, unit, iostat, iomsg) + call write_bin_array(twork%sigma12, unit, iostat, iomsg) + call write_bin_array(twork%sigma22, unit, iostat, iomsg) + call write_bin_array(twork%eps11, unit, iostat, iomsg) + call write_bin_array(twork%eps12, unit, iostat, iomsg) + call write_bin_array(twork%eps22, unit, iostat, iomsg) + call write_bin_array(twork%ice_strength, unit, iostat, iomsg) + call write_bin_array(twork%inv_areamass, unit, iostat, iomsg) + call write_bin_array(twork%inv_mass, unit, iostat, iomsg) +end subroutine WRITE_T_ICE_WORK + +! Unformatted reading for T_ICE_WORK +subroutine READ_T_ICE_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_WORK), intent(inout) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call read_bin_array(twork%fct_tmax, unit, iostat, iomsg) + call read_bin_array(twork%fct_tmin, unit, iostat, iomsg) + call read_bin_array(twork%fct_plus, unit, iostat, iomsg) + call read_bin_array(twork%fct_minus, unit, iostat, iomsg) + call read_bin_array(twork%fct_fluxes, unit, iostat, iomsg) + call read_bin_array(twork%fct_massmatrix,unit, iostat, iomsg) + call read_bin_array(twork%sigma11, unit, iostat, iomsg) + call read_bin_array(twork%sigma12, unit, iostat, iomsg) + call read_bin_array(twork%sigma22, unit, iostat, iomsg) + call read_bin_array(twork%eps11, unit, iostat, iomsg) + call read_bin_array(twork%eps12, unit, iostat, iomsg) + call read_bin_array(twork%eps22, unit, iostat, iomsg) + call read_bin_array(twork%ice_strength, unit, iostat, iomsg) + call read_bin_array(twork%inv_areamass, unit, iostat, iomsg) + call read_bin_array(twork%inv_mass, unit, iostat, iomsg) +end subroutine READ_T_ICE_WORK +! +! +!_______________________________________________________________________________ +! Unformatted writing for T_ICE_WORK +subroutine WRITE_T_ICE_THERMO(ttherm, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_THERMO), intent(in) :: ttherm + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call write_bin_array(ttherm%t_skin, unit, iostat, iomsg) + call write_bin_array(ttherm%thdgr, unit, iostat, iomsg) + call write_bin_array(ttherm%thdgrsn, unit, iostat, iomsg) + call write_bin_array(ttherm%thdgr_old, unit, iostat, iomsg) + call write_bin_array(ttherm%ustar, unit, iostat, iomsg) +end subroutine WRITE_T_ICE_THERMO + +! Unformatted reading for T_ICE_WORK +subroutine READ_T_ICE_THERMO(ttherm, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_THERMO), intent(inout) :: ttherm + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call read_bin_array(ttherm%t_skin, unit, iostat, iomsg) + call read_bin_array(ttherm%thdgr, unit, iostat, iomsg) + call read_bin_array(ttherm%thdgrsn, unit, iostat, iomsg) + call read_bin_array(ttherm%thdgr_old, unit, iostat, iomsg) + call read_bin_array(ttherm%ustar, unit, iostat, iomsg) +end subroutine READ_T_ICE_THERMO +! +! +!_______________________________________________________________________________ +! Unformatted writing for T_ICE_ATMCOUPL +subroutine WRITE_T_ICE_ATMCOUPL(tcoupl, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_ATMCOUPL), intent(in) :: tcoupl + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg +#if defined (__oasis) || defined (__ifsinterface) + call write_bin_array(tcoupl%oce_flx_h, unit, iostat, iomsg) + call write_bin_array(tcoupl%ice_flx_h, unit, iostat, iomsg) + call write_bin_array(tcoupl%tmpoce_flx_h, unit, iostat, iomsg) + call write_bin_array(tcoupl%tmpice_flx_h, unit, iostat, iomsg) +#if defined (__oifs) || defined (__ifsinterface) + call write_bin_array(tcoupl%ice_alb, unit, iostat, iomsg) + call write_bin_array(tcoupl%enthalpyoffuse, unit, iostat, iomsg) +#endif /* (__oifs) */ +#endif /* (__oasis) */ +end subroutine WRITE_T_ICE_ATMCOUPL + +! Unformatted reading for T_ICE_ATMCOUPL +subroutine READ_T_ICE_ATMCOUPL(tcoupl, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_ATMCOUPL), intent(inout) :: tcoupl + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg +#if defined (__oasis) || defined (__ifsinterface) + call read_bin_array(tcoupl%oce_flx_h, unit, iostat, iomsg) + call read_bin_array(tcoupl%ice_flx_h, unit, iostat, iomsg) + call read_bin_array(tcoupl%tmpoce_flx_h, unit, iostat, iomsg) + call read_bin_array(tcoupl%tmpice_flx_h, unit, iostat, iomsg) +#if defined (__oifs) || defined (__ifsinterface) + call read_bin_array(tcoupl%ice_alb, unit, iostat, iomsg) + call read_bin_array(tcoupl%enthalpyoffuse, unit, iostat, iomsg) +#endif /* (__oifs) */ +#endif /* (__oasis) */ +end subroutine READ_T_ICE_ATMCOUPL +! +! +!_______________________________________________________________________________ +! Unformatted writing for T_ICE_ATMCOUPL +subroutine WRITE_T_ICE(ice, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE), intent(in) :: ice + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + !___________________________________________________________________________ + call write_bin_array(ice%uvice, unit, iostat, iomsg) + call write_bin_array(ice%uvice_rhs, unit, iostat, iomsg) + call write_bin_array(ice%uvice_old, unit, iostat, iomsg) + if (ice%whichEVP /= 0) call write_bin_array(ice%uvice_aux, unit, iostat, iomsg) + call write_bin_array(ice%stress_atmice_xy, unit, iostat, iomsg) + call write_bin_array(ice%stress_iceoce_xy, unit, iostat, iomsg) + call write_bin_array(ice%srfoce_temp, unit, iostat, iomsg) + call write_bin_array(ice%srfoce_salt, unit, iostat, iomsg) + call write_bin_array(ice%srfoce_ssh, unit, iostat, iomsg) + call write_bin_array(ice%flx_fw, unit, iostat, iomsg) + call write_bin_array(ice%flx_h, unit, iostat, iomsg) + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) ice%num_itracers + do i=1, ice%num_itracers + write(unit, iostat=iostat, iomsg=iomsg) ice%data(i) + end do + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) ice%thermo + write(unit, iostat=iostat, iomsg=iomsg) ice%work +#if defined (__oasis) || defined (__ifsinterface) + write(unit, iostat=iostat, iomsg=iomsg) ice%atmcoupl +#endif /* (__oasis) */ + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) ice%pstar + write(unit, iostat=iostat, iomsg=iomsg) ice%ellipse + write(unit, iostat=iostat, iomsg=iomsg) ice%c_pressure + write(unit, iostat=iostat, iomsg=iomsg) ice%delta_min + write(unit, iostat=iostat, iomsg=iomsg) ice%Clim_evp + write(unit, iostat=iostat, iomsg=iomsg) ice%zeta_min + write(unit, iostat=iostat, iomsg=iomsg) ice%evp_rheol_steps + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_gamma_fct + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_diff + write(unit, iostat=iostat, iomsg=iomsg) ice%Tevp_inv + write(unit, iostat=iostat, iomsg=iomsg) ice%theta_io + write(unit, iostat=iostat, iomsg=iomsg) ice%alpha_evp + write(unit, iostat=iostat, iomsg=iomsg) ice%beta_evp + write(unit, iostat=iostat, iomsg=iomsg) ice%c_aevp + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_ave_steps + write(unit, iostat=iostat, iomsg=iomsg) ice%cd_oce_ice + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_free_slip + write(unit, iostat=iostat, iomsg=iomsg) ice%whichEVP + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_dt + write(unit, iostat=iostat, iomsg=iomsg) ice%Tevp_inv + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_steps_since_upd + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_update +end subroutine WRITE_T_ICE + +! Unformatted reading for T_ICE +subroutine READ_T_ICE(ice, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE), intent(inout) :: ice + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + !___________________________________________________________________________ + call read_bin_array(ice%uvice, unit, iostat, iomsg) + call read_bin_array(ice%uvice_rhs, unit, iostat, iomsg) + call read_bin_array(ice%uvice_old, unit, iostat, iomsg) + if (ice%whichEVP /= 0) call read_bin_array(ice%uvice_aux, unit, iostat, iomsg) + call read_bin_array(ice%stress_atmice_xy, unit, iostat, iomsg) + call read_bin_array(ice%stress_iceoce_xy, unit, iostat, iomsg) + call read_bin_array(ice%srfoce_temp, unit, iostat, iomsg) + call read_bin_array(ice%srfoce_salt, unit, iostat, iomsg) + call read_bin_array(ice%srfoce_ssh, unit, iostat, iomsg) + call read_bin_array(ice%flx_fw, unit, iostat, iomsg) + call read_bin_array(ice%flx_h, unit, iostat, iomsg) + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) ice%num_itracers + do i=1, ice%num_itracers + read(unit, iostat=iostat, iomsg=iomsg) ice%data(i) + end do + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) ice%thermo + read(unit, iostat=iostat, iomsg=iomsg) ice%work +#if defined (__oasis) || defined (__ifsinterface) + read(unit, iostat=iostat, iomsg=iomsg) ice%atmcoupl +#endif /* (__oasis) */ + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) ice%pstar + read(unit, iostat=iostat, iomsg=iomsg) ice%ellipse + read(unit, iostat=iostat, iomsg=iomsg) ice%c_pressure + read(unit, iostat=iostat, iomsg=iomsg) ice%delta_min + read(unit, iostat=iostat, iomsg=iomsg) ice%Clim_evp + read(unit, iostat=iostat, iomsg=iomsg) ice%zeta_min + read(unit, iostat=iostat, iomsg=iomsg) ice%evp_rheol_steps + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_gamma_fct + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_diff + read(unit, iostat=iostat, iomsg=iomsg) ice%Tevp_inv + read(unit, iostat=iostat, iomsg=iomsg) ice%theta_io + read(unit, iostat=iostat, iomsg=iomsg) ice%alpha_evp + read(unit, iostat=iostat, iomsg=iomsg) ice%beta_evp + read(unit, iostat=iostat, iomsg=iomsg) ice%c_aevp + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_ave_steps + read(unit, iostat=iostat, iomsg=iomsg) ice%cd_oce_ice + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_free_slip + read(unit, iostat=iostat, iomsg=iomsg) ice%whichEVP + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_dt + read(unit, iostat=iostat, iomsg=iomsg) ice%Tevp_inv + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_steps_since_upd + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_update +end subroutine READ_T_ICE +END MODULE MOD_ICE +! +! +!_______________________________________________________________________________ +! interface to initialise derived type for sea ice +module ice_init_interface + interface + subroutine ice_init(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARSUP + USE MOD_PARTIT + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module +! +! +!_______________________________________________________________________________ +! initialise derived type for sea ice +subroutine ice_init(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE o_param, only: WP + IMPLICIT NONE + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: elem_size, node_size, n + integer, save :: nm_unit = 105 ! unit to open namelist file, skip 100-102 for cray + integer :: iost + !___________________________________________________________________________ + ! define ice namelist parameter + integer :: whichEVP, evp_rheol_steps, ice_ave_steps + real(kind=WP) :: Pstar, ellipse, c_pressure, delta_min, ice_gamma_fct, & + ice_diff, theta_io, alpha_evp, beta_evp, c_aevp, Cd_oce_ice + namelist /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, & + Cd_oce_ice, ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, & + alpha_evp, beta_evp, c_aevp + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !___________________________________________________________________________ + ! open and read namelist.ice for I/O + open(unit=nm_unit, file='namelist.ice', form='formatted', access='sequential', status='old', iostat=iost ) + if (iost == 0) then + if (mype==0) write(*,*) ' file : ', 'namelist.ice',' open ok' + else + if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.ice',' ; iostat=',iost + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + read(nm_unit, nml=ice_dyn, iostat=iost) + close(nm_unit) + + !___________________________________________________________________________ + ! set parameters in ice derived type from namelist.ice + ice%whichEVP = whichEVP + ice%pstar = Pstar + ice%ellipse = ellipse + ice%c_pressure = c_pressure + ice%delta_min = delta_min + ice%evp_rheol_steps = evp_rheol_steps + ice%cd_oce_ice = Cd_oce_ice + ice%ice_gamma_fct = ice_gamma_fct + ice%ice_diff = ice_diff + ice%theta_io = theta_io + ice%ice_ave_steps = ice_ave_steps + ice%alpha_evp = alpha_evp + ice%beta_evp = beta_evp + ice%c_aevp = c_aevp + + !!PS no namelist paramter in moment + !!PS ice%zeta_min = zeta_min + !!PS ice%Tevp_inv = Tevp_inv + !!PS ice%ice_free_slip = ice_free_slip + !!PS ice%ice_dt = ice_dt + !!PS ice%Tevp_inv = Tevp_inv + + !___________________________________________________________________________ + ! define local vertice & elem array size + elem_size=myDim_elem2D+eDim_elem2D + node_size=myDim_nod2D+eDim_nod2D + + !___________________________________________________________________________ + ! allocate/initialise arrays in ice derived type + ! initialise velocity and stress related arrays in ice derived type + allocate(ice%uvice( 2, node_size)) + allocate(ice%uvice_rhs( 2, node_size)) + allocate(ice%uvice_old( 2, node_size)) + allocate(ice%stress_atmice_xy( 2, node_size)) + allocate(ice%stress_iceoce_xy( 2, node_size)) + ice%uvice = 0.0_WP + ice%uvice_rhs = 0.0_WP + ice%uvice_old = 0.0_WP + ice%stress_atmice_xy = 0.0_WP + ice%stress_iceoce_xy = 0.0_WP + if (ice%whichEVP /= 0) then + allocate(ice%uvice_aux( 2, node_size)) + ice%uvice_aux = 0.0_WP + end if + + !___________________________________________________________________________ + ! initialise surface ocean arrays in ice derived type + allocate(ice%srfoce_uv( 2, node_size)) + allocate(ice%srfoce_temp( node_size)) + allocate(ice%srfoce_salt( node_size)) + allocate(ice%srfoce_ssh( node_size)) + ice%srfoce_uv = 0.0_WP + ice%srfoce_temp = 0.0_WP + ice%srfoce_salt = 0.0_WP + ice%srfoce_ssh = 0.0_WP + + allocate(ice%flx_fw(node_size)) + allocate(ice%flx_h( node_size)) + ice%flx_fw = 0.0_WP + ice%flx_h = 0.0_WP + + !___________________________________________________________________________ + ! initialse data array of ice derived type containing "ice tracer" that have + ! to be advected: a_ice (index=1), m_ice (index=2), m_snow (index=3), + ! ice_temp (index=4, only when coupled) + allocate(ice%data(ice%num_itracers)) + do n = 1, ice%num_itracers + allocate(ice%data(n)%values( node_size)) + allocate(ice%data(n)%values_old(node_size)) + allocate(ice%data(n)%values_rhs(node_size)) + allocate(ice%data(n)%values_div_rhs(node_size)) + allocate(ice%data(n)%dvalues( node_size)) + allocate(ice%data(n)%valuesl( node_size)) + ice%data(n)%ID = n + ice%data(n)%values = 0.0_WP + ice%data(n)%values_old = 0.0_WP + ice%data(n)%values_rhs = 0.0_WP + ice%data(n)%values_div_rhs = 0.0_WP + ice%data(n)%dvalues = 0.0_WP + ice%data(n)%valuesl = 0.0_WP + end do + + !___________________________________________________________________________ + ! initialse work array of ice derived type + allocate(ice%work%fct_tmax( node_size)) + allocate(ice%work%fct_tmin( node_size)) + allocate(ice%work%fct_plus( node_size)) + allocate(ice%work%fct_minus( node_size)) + allocate(ice%work%fct_fluxes( elem_size, 3)) + ice%work%fct_tmax = 0.0_WP + ice%work%fct_tmin = 0.0_WP + ice%work%fct_plus = 0.0_WP + ice%work%fct_minus = 0.0_WP + ice%work%fct_fluxes = 0.0_WP + + allocate(ice%work%fct_massmatrix(sum(nn_num(1:myDim_nod2D)))) + ice%work%fct_massmatrix = 0.0_WP + + allocate(ice%work%sigma11( elem_size)) + allocate(ice%work%sigma12( elem_size)) + allocate(ice%work%sigma22( elem_size)) + allocate(ice%work%eps11( elem_size)) + allocate(ice%work%eps12( elem_size)) + allocate(ice%work%eps22( elem_size)) + ice%work%sigma11 = 0.0_WP + ice%work%sigma12 = 0.0_WP + ice%work%sigma22 = 0.0_WP + ice%work%eps11 = 0.0_WP + ice%work%eps12 = 0.0_WP + ice%work%eps22 = 0.0_WP + + allocate(ice%work%ice_strength( elem_size)) + allocate(ice%work%inv_areamass( node_size)) + allocate(ice%work%inv_mass( node_size)) + ice%work%ice_strength= 0.0_WP + ice%work%inv_areamass= 0.0_WP + ice%work%inv_mass = 0.0_WP + + allocate(ice%work%t_skin( node_size)) + allocate(ice%work%thdgr( node_size)) + allocate(ice%work%thdgrsn( node_size)) + allocate(ice%work%thdgr_old( node_size)) + ice%work%t_skin = 0.0_WP + ice%work%thdgr = 0.0_WP + ice%work%thdgrsn = 0.0_WP + ice%work%thdgr_old = 0.0_WP + + !___________________________________________________________________________ + ! initialse thermo array of ice derived type + allocate(ice%thermo%ustar( node_size)) + allocate(ice%thermo%t_skin( node_size)) + allocate(ice%thermo%thdgr( node_size)) + allocate(ice%thermo%thdgrsn( node_size)) + allocate(ice%thermo%thdgr_old( node_size)) + ice%thermo%ustar = 0.0_WP + ice%thermo%t_skin = 0.0_WP + ice%thermo%thdgr = 0.0_WP + ice%thermo%thdgrsn = 0.0_WP + ice%thermo%thdgr_old = 0.0_WP + + !___________________________________________________________________________ + ! initialse coupling array of ice derived type +#if defined (__oasis) || defined (__ifsinterface) + allocate(ice%atmcoupl%oce_flx_h( node_size)) + allocate(ice%atmcoupl%ice_flx_h( node_size)) + allocate(ice%atmcoupl%tmpoce_flx_h( node_size)) + allocate(ice%atmcoupl%tmpice_flx_h( node_size)) + ice%atmcoupl%oce_flx_h = 0.0_WP + ice%atmcoupl%ice_flx_h = 0.0_WP + ice%atmcoupl%tmpoce_flx_h = 0.0_WP + ice%atmcoupl%tmpice_flx_h = 0.0_WP +#if defined (__oifs) || defined (__ifsinterface) + allocate(ice%atmcoupl%ice_alb( node_size)) + allocate(ice%atmcoupl%enthalpyoffuse(node_size)) + ice%atmcoupl%ice_alb = 0.0_WP + ice%atmcoupl%enthalpyoffuse= 0.0_WP +#endif /* (__oifs) */ +#endif /* (__oasis) */ +end subroutine ice_init \ No newline at end of file From 2f57d0993b9f03d2fe678cdd89be1c082bfb83b3 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 16:03:46 +0100 Subject: [PATCH 607/909] fix EVP interfaces --- src/ice_EVP.F90 | 14 ++++++++++++++ src/ice_maEVP.F90 | 10 +++++++--- src/ice_setup_step.F90 | 4 ++-- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index dd9b59478..7ad2903f3 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -34,6 +34,20 @@ subroutine EVPdynamics(ice, partit, mesh) end interface end module +module ice_EVPdynamics_interface + interface + subroutine EVPdynamics(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface +end module + ! ! Contains routines of EVP dynamics ! diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 37654c055..ac0fe0b84 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -47,7 +47,11 @@ subroutine find_beta_field_a(partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit end subroutine - + end interface +end module + +module ice_maEVPdynamics_interface + interface subroutine EVPdynamics_a(ice, partit, mesh) USE MOD_ICE USE MOD_PARTIT @@ -67,8 +71,8 @@ subroutine EVPdynamics_m(ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_ice), intent(inout), target :: ice end subroutine - end interface -end module + end interface +end module ! ! !_______________________________________________________________________________ diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 72de53c91..59c704de8 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -233,8 +233,8 @@ subroutine ice_timestep(step, ice, partit, mesh) use o_param use g_CONFIG use i_PARAM, only: whichEVP - use ice_EVP_interfaces - use ice_maEVP_interfaces + use ice_EVPdynamics_interface + use ice_maEVPdynamics_interface use ice_fct_interfaces use ice_thermodynamics_interfaces use cavity_interfaces From a3cf08e0ce02f34236b4110fe00d1552c462afbb Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 16:10:18 +0100 Subject: [PATCH 608/909] fix EVP interfaces --- src/ice_EVP.F90 | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 7ad2903f3..926be8775 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -21,16 +21,6 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) type(t_mesh), intent(in), target :: mesh real(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) end subroutine - - subroutine EVPdynamics(ice, partit, mesh) - USE MOD_ICE - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_MESH - type(t_ice), intent(inout), target :: ice - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - end subroutine end interface end module From c590edab60961e08ba717b0833572dc032b117db Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 16:21:10 +0100 Subject: [PATCH 609/909] add ice-derived type to src/ice_thermo_cpl.F90 --- src/ice_thermo_cpl.F90 | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 7709c0ccf..5bc02472c 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -1,5 +1,5 @@ #if defined (__oasis) || defined (__ifsinterface) -subroutine thermodynamics(partit, mesh) +subroutine thermodynamics(ice, partit, mesh) !=================================================================== ! @@ -16,9 +16,10 @@ subroutine thermodynamics(partit, mesh) !=================================================================== use o_param - USE MOD_MESH + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP + USE MOD_MESH use i_therm_param use i_param use i_arrays @@ -28,7 +29,10 @@ subroutine thermodynamics(partit, mesh) use g_comm_auto use g_rotate_grid implicit none - + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !_____________________________________________________________________________ integer :: inod !---- prognostic variables (updated in `ice_growth') real(kind=WP) :: A, h, hsn, alb, t @@ -52,18 +56,21 @@ subroutine thermodynamics(partit, mesh) real(kind=WP), parameter :: Aimin = 0.001, himin = 0.005 - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - + !_____________________________________________________________________________ + ! pointer on necessary derived types integer, pointer :: myDim_nod2D, eDim_nod2D integer, dimension(:), pointer :: ulevels_nod2D real(kind=WP), dimension(:,:),pointer :: geo_coord_nod2D - + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D - + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + + !_____________________________________________________________________________ rsss = ref_sss !---- total evaporation (needed in oce_salt_balance.F90) From 59454b2596fae52cd77debb0bb9d41461209e729 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 16:27:56 +0100 Subject: [PATCH 610/909] add uice, vice src/ice_thermo_cpl.F90 --- src/ice_thermo_cpl.F90 | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 5bc02472c..de34a78fa 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -58,14 +58,17 @@ subroutine thermodynamics(ice, partit, mesh) !_____________________________________________________________________________ ! pointer on necessary derived types - integer, pointer :: myDim_nod2D, eDim_nod2D - integer, dimension(:), pointer :: ulevels_nod2D - real(kind=WP), dimension(:,:),pointer :: geo_coord_nod2D - real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + integer , pointer :: myDim_nod2D, eDim_nod2D + integer , dimension(:) , pointer :: ulevels_nod2D + real(kind=WP), dimension(:,:), pointer :: geo_coord_nod2D + real(kind=WP), dimension(:) , pointer :: u_ice, v_ice + real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) From a4e225a45d925572bf67d60841fa85d68880519f Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 16:44:00 +0100 Subject: [PATCH 611/909] add a_ice, m_ice, msnow ice derived types to src/ifs_interface/ifs_interface.F90 --- src/ifs_interface/ifs_interface.F90 | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 78d758d86..3dd257c72 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -348,7 +348,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & USE par_kind ! in ifs_modules.F90 USE fesom_main_storage_module, only: fesom => f !USE o_ARRAYS, ONLY : UV ! tr_arr is now tracers, UV in dynamics derived type - USE i_arrays, ONLY : m_ice, a_ice, m_snow + !USE i_arrays, ONLY : m_ice, a_ice, m_snow USE i_therm_param, ONLY : tmelt USE g_rotate_grid, only: vector_r2g USE parinter @@ -367,7 +367,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & integer, dimension(:,:) , pointer :: elem2D_nodes integer, pointer :: myDim_nod2D, eDim_nod2D integer, pointer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D - + real(kind=wpIFS), dimension(:), pointer :: a_ice, m_ice, m_snow ! Message passing information INTEGER, INTENT(IN) :: mype, npes, icomm @@ -394,7 +394,10 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => fesom%mesh%elem2D_nodes - + a_ice => fesom%ice%data(1)%values(:) + m_ice => fesom%ice%data(2)%values(:) + m_snow => fesom%ice%data(3)%values(:) + ! =================================================================== ! From 9b58f0cf31d714c2655aaf8f18c07415dcb1920c Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 16:53:52 +0100 Subject: [PATCH 612/909] switch on debug flag as default->for gfortran test --- config/namelist.config | 1 + 1 file changed, 1 insertion(+) diff --git a/config/namelist.config b/config/namelist.config index b283fdd8d..5a908cb16 100644 --- a/config/namelist.config +++ b/config/namelist.config @@ -54,6 +54,7 @@ use_cavity=.false. ! use_cavity_partial_cell=.false. use_floatice = .false. use_sw_pene=.true. +flag_debug=.true. / &machine From f5fd93b6e7a2b68343e822a8a778da22b7e8565a Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 18:20:40 +0100 Subject: [PATCH 613/909] change flag_debug to true in config/namelist.config.toy_soufflet --- config/namelist.config.toy_soufflet | 1 + 1 file changed, 1 insertion(+) diff --git a/config/namelist.config.toy_soufflet b/config/namelist.config.toy_soufflet index b074865ba..6f63269d9 100644 --- a/config/namelist.config.toy_soufflet +++ b/config/namelist.config.toy_soufflet @@ -54,6 +54,7 @@ use_floatice = .false. use_sw_pene=.false. toy_ocean=.true. which_toy="soufflet" +flag_debug=.true. / &machine From 92bfd8a68cdbb4b9b665ca94e99fef1e03031acf Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 18:43:31 +0100 Subject: [PATCH 614/909] test something --- src/oce_ale.F90 | 5 ++++- src/oce_ale_vel_rhs.F90 | 4 +++- src/oce_setup_step.F90 | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 4a0776a75..8dd74a2d4 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -3007,7 +3007,10 @@ subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) t30=MPI_Wtime() call solve_ssh_ale(dynamics, partit, mesh) - if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(dynamics, partit, mesh) + if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call relax_zonal_vel'//achar(27)//'[0m' + call relax_zonal_vel(dynamics, partit, mesh) + end if t3=MPI_Wtime() ! estimate new horizontal velocity u^(n+1) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 10da4721a..f02d6da25 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -75,7 +75,7 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) eta_n =>dynamics%eta_n(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) - + write(*,*) ">-))))°> something is fishy 1" !___________________________________________________________________________ use_pice=0 if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 @@ -147,6 +147,7 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) UV_rhsAB(2,nz,elem) =-UV(1,nz,elem)*ff! - mm*UV(1,nz,elem)*UV(2,nz,elem) end do end do + write(*,*) ">-))))°> something is fishy 2" !$OMP END PARALLEL DO !___________________________________________________________________________ ! advection @@ -172,6 +173,7 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) UV_rhs(2,nz,elem)=dt*(UV_rhs(2,nz,elem)+UV_rhsAB(2,nz,elem)*ff)/elem_area(elem) end do end do + write(*,*) ">-))))°> something is fishy 3" !$OMP END PARALLEL DO ! ======================= ! U_rhs contains all contributions to velocity from old time steps diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 36f18b0eb..e892fec38 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -227,6 +227,7 @@ subroutine ocean_setup(dynamics, tracers, partit, mesh) end if END SELECT else + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call oce_initial_state'//achar(27)//'[0m' call oce_initial_state(tracers, partit, mesh) ! Use it if not running tests end if From 1829068d6419b1cf43163183664a2aece08a1706 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 18:47:01 +0100 Subject: [PATCH 615/909] fix small issue for testing channel --- src/oce_ale.F90 | 2 +- src/oce_ale_vel_rhs.F90 | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 8dd74a2d4..13accd4cf 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -3007,7 +3007,7 @@ subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) t30=MPI_Wtime() call solve_ssh_ale(dynamics, partit, mesh) - if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) + if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call relax_zonal_vel'//achar(27)//'[0m' call relax_zonal_vel(dynamics, partit, mesh) end if diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index f02d6da25..e2f873c24 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -147,8 +147,8 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) UV_rhsAB(2,nz,elem) =-UV(1,nz,elem)*ff! - mm*UV(1,nz,elem)*UV(2,nz,elem) end do end do - write(*,*) ">-))))°> something is fishy 2" !$OMP END PARALLEL DO + write(*,*) ">-))))°> something is fishy 2" !___________________________________________________________________________ ! advection if (dynamics%momadv_opt==1) then @@ -173,8 +173,8 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) UV_rhs(2,nz,elem)=dt*(UV_rhs(2,nz,elem)+UV_rhsAB(2,nz,elem)*ff)/elem_area(elem) end do end do - write(*,*) ">-))))°> something is fishy 3" !$OMP END PARALLEL DO + write(*,*) ">-))))°> something is fishy 3" ! ======================= ! U_rhs contains all contributions to velocity from old time steps ! ======================= From ac392dfa21c86c91a31f08b20e1ac0f1f5f41e6a Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 19:27:05 +0100 Subject: [PATCH 616/909] try somethinbg out --- src/oce_ale_vel_rhs.F90 | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index e2f873c24..9de0e0cd7 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -69,16 +69,19 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV =>dynamics%uv(:,:,:) - UV_rhs =>dynamics%uv_rhs(:,:,:) - UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) - eta_n =>dynamics%eta_n(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) + write(*,*) ">-))))°> something is fishy 0" + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + UV_rhsAB => dynamics%uv_rhsAB(:,:,:) + eta_n => dynamics%eta_n(:) + ! because of toy channel cannot acces here the pointer since its not initialised +! m_ice => ice%data(2)%values(:) +! m_snow => ice%data(3)%values(:) write(*,*) ">-))))°> something is fishy 1" !___________________________________________________________________________ use_pice=0 if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 + if ((toy_ocean) .and. (trim(which_toy)=="soufflet")) use_pice=0 !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, nz, nzmin, nzmax, elnodes, ff, mm, Fx, Fy, pre, p_ice, p_air, p_eta) do elem=1, myDim_elem2D @@ -116,7 +119,8 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) ! to account for floating ice if (use_pice > 0) then p_ice = 0.0_WP - p_ice = (m_ice(elnodes)*rhoice+m_snow(elnodes)*rhosno)*inv_rhowat + !!PS p_ice = (m_ice(elnodes)*rhoice+m_snow(elnodes)*rhosno)*inv_rhowat + p_ice = (ice%data(2)%values(elnodes)*rhoice+ice%data(3)%values(elnodes)*rhosno)*inv_rhowat ! limit maximum ice loading like in FESOM1.4 p_ice = g*min(p_ice,max_ice_loading) else From d50ceae133a87b200eac84023f5e801505b481d3 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 19:42:31 +0100 Subject: [PATCH 617/909] try somethinbg out next --- src/oce_ale_vel_rhs.F90 | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 9de0e0cd7..e3e6b140e 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -74,9 +74,8 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) UV_rhs => dynamics%uv_rhs(:,:,:) UV_rhsAB => dynamics%uv_rhsAB(:,:,:) eta_n => dynamics%eta_n(:) - ! because of toy channel cannot acces here the pointer since its not initialised -! m_ice => ice%data(2)%values(:) -! m_snow => ice%data(3)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) write(*,*) ">-))))°> something is fishy 1" !___________________________________________________________________________ use_pice=0 @@ -119,8 +118,8 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) ! to account for floating ice if (use_pice > 0) then p_ice = 0.0_WP - !!PS p_ice = (m_ice(elnodes)*rhoice+m_snow(elnodes)*rhosno)*inv_rhowat - p_ice = (ice%data(2)%values(elnodes)*rhoice+ice%data(3)%values(elnodes)*rhosno)*inv_rhowat + p_ice = (m_ice(elnodes)*rhoice+m_snow(elnodes)*rhosno)*inv_rhowat + !!PS p_ice = (ice%data(2)%values(elnodes)*rhoice+ice%data(3)%values(elnodes)*rhosno)*inv_rhowat ! limit maximum ice loading like in FESOM1.4 p_ice = g*min(p_ice,max_ice_loading) else From 204aa1d79283787836f2736f3b9090c289a8f8a0 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 19:55:07 +0100 Subject: [PATCH 618/909] try somethinbg out next 2 --- src/oce_ale_vel_rhs.F90 | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index e3e6b140e..67d490957 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -64,7 +64,7 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UV_rhs real(kind=WP), dimension(:) , pointer :: eta_n - real(kind=WP), dimension(:) , pointer :: m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: m_ice, m_snow, a_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -74,7 +74,11 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) UV_rhs => dynamics%uv_rhs(:,:,:) UV_rhsAB => dynamics%uv_rhsAB(:,:,:) eta_n => dynamics%eta_n(:) + write(*,*) ">-))))°> something is fishy 0.1" + a_ice => ice%data(1)%values(:) + write(*,*) ">-))))°> something is fishy 0.2" m_ice => ice%data(2)%values(:) + write(*,*) ">-))))°> something is fishy 0.3" m_snow => ice%data(3)%values(:) write(*,*) ">-))))°> something is fishy 1" !___________________________________________________________________________ @@ -119,7 +123,6 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) if (use_pice > 0) then p_ice = 0.0_WP p_ice = (m_ice(elnodes)*rhoice+m_snow(elnodes)*rhosno)*inv_rhowat - !!PS p_ice = (ice%data(2)%values(elnodes)*rhoice+ice%data(3)%values(elnodes)*rhosno)*inv_rhowat ! limit maximum ice loading like in FESOM1.4 p_ice = g*min(p_ice,max_ice_loading) else From cebe98bb685eb0d6036ec4f7db0ca7384082f011 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 20:11:15 +0100 Subject: [PATCH 619/909] try somethinbg out next 3 --- src/MOD_ICE.F90 | 44 ++++++++++++++++++++++++++++++++++++++++- src/fesom_module.F90 | 3 +++ src/oce_ale_vel_rhs.F90 | 4 ---- 3 files changed, 46 insertions(+), 5 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 32c1652f1..1b401fe8b 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -656,4 +656,46 @@ subroutine ice_init(ice, partit, mesh) ice%atmcoupl%enthalpyoffuse= 0.0_WP #endif /* (__oifs) */ #endif /* (__oasis) */ -end subroutine ice_init \ No newline at end of file +end subroutine ice_init +! +! +! +! +! +!_______________________________________________________________________________ +! initialise derived type for sea ice +subroutine ice_init_toyocean_dummy(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE o_param, only: WP + IMPLICIT NONE + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: node_size, n + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !___________________________________________________________________________ + ! define local vertice & elem array size + node_size=myDim_nod2D+eDim_nod2D + + !___________________________________________________________________________ + ! allocate/initialise arrays in ice derived type + ! initialise velocity and stress related arrays in ice derived type + allocate(ice%uvice( 2, node_size)) + ice%uvice = 0.0_WP + allocate(ice%data(ice%num_itracers)) + do n = 1, ice%num_itracers + allocate(ice%data(n)%values( node_size)) + ice%data(n)%ID = n + ice%data(n)%values = 0.0_WP + end do +end subroutine ice_init_toyocean_dummy diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index eea1148dd..bcd321dbb 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -179,7 +179,10 @@ subroutine fesom_init(fesom_total_nsteps) ice_steps_since_upd = ice_ave_steps-1 ice_update=.true. if (f%mype==0) write(*,*) 'EVP scheme option=', whichEVP + else + call ice_init_toyocean_dummy(f%ice, f%partit, f%mesh) endif + if (f%mype==0) f%t5=MPI_Wtime() call compute_diagnostics(0, f%dynamics, f%tracers, f%partit, f%mesh) ! allocate arrays for diagnostic #if defined (__oasis) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 67d490957..0d3352a1a 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -74,11 +74,7 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) UV_rhs => dynamics%uv_rhs(:,:,:) UV_rhsAB => dynamics%uv_rhsAB(:,:,:) eta_n => dynamics%eta_n(:) - write(*,*) ">-))))°> something is fishy 0.1" - a_ice => ice%data(1)%values(:) - write(*,*) ">-))))°> something is fishy 0.2" m_ice => ice%data(2)%values(:) - write(*,*) ">-))))°> something is fishy 0.3" m_snow => ice%data(3)%values(:) write(*,*) ">-))))°> something is fishy 1" !___________________________________________________________________________ From 46e91d439f9efd8541e335ba0563200e8da9975f Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 21:36:51 +0100 Subject: [PATCH 620/909] add ice derived type to icepack routines --- src/fesom_module.F90 | 7 ++++++- src/ice_setup_step.F90 | 2 +- src/icepack_drivers/icedrv_init.F90 | 6 ++++-- src/icepack_drivers/icedrv_step.F90 | 14 ++++++++------ src/icepack_drivers/icedrv_transfer.F90 | 12 ++++++++---- 5 files changed, 27 insertions(+), 14 deletions(-) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index bcd321dbb..06b1a9d6d 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -180,6 +180,11 @@ subroutine fesom_init(fesom_total_nsteps) ice_update=.true. if (f%mype==0) write(*,*) 'EVP scheme option=', whichEVP else + ! create a dummy ice derived type with only a_ice, m_ice, m_snow and + ! uvice since oce_timesteps still needs in moment + ! ice as an input for mo_convect(ice, partit, mesh), call + ! compute_vel_rhs(ice, dynamics, partit, mesh), + ! call write_step_info(...) and call check_blowup(...) call ice_init_toyocean_dummy(f%ice, f%partit, f%mesh) endif @@ -197,7 +202,7 @@ subroutine fesom_init(fesom_total_nsteps) if (f%mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' call set_icepack(f%partit) call alloc_icepack - call init_icepack(f%tracers%data(1), f%mesh) + call init_icepack(f%ice, f%tracers%data(1), f%mesh) if (f%mype==0) write(*,*) 'Icepack: setup complete' #endif call clock_newyear ! check if it is a new year diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 59c704de8..4a6ebee15 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -264,7 +264,7 @@ subroutine ice_timestep(step, ice, partit, mesh) !___________________________________________________________________________ t0=MPI_Wtime() #if defined (__icepack) - call step_icepack(mesh, time_evp, time_advec, time_therm) ! EVP, advection and thermodynamic parts + call step_icepack(ice, mesh, time_evp, time_advec, time_therm) ! EVP, advection and thermodynamic parts #else !___________________________________________________________________________ diff --git a/src/icepack_drivers/icedrv_init.F90 b/src/icepack_drivers/icedrv_init.F90 index 5c3e77fc7..63bd5b24e 100644 --- a/src/icepack_drivers/icedrv_init.F90 +++ b/src/icepack_drivers/icedrv_init.F90 @@ -910,7 +910,7 @@ end subroutine init_faero !======================================================================= - module subroutine init_icepack(tracer, mesh) + module subroutine init_icepack(ice, tracer, mesh) use icepack_intfc, only: icepack_init_itd use icepack_intfc, only: icepack_init_itd_hist @@ -918,6 +918,7 @@ module subroutine init_icepack(tracer, mesh) use icepack_intfc, only: icepack_init_fsd_bounds use icepack_intfc, only: icepack_warnings_flush use mod_mesh + use mod_ice use mod_tracer implicit none @@ -930,6 +931,7 @@ module subroutine init_icepack(tracer, mesh) character(len=*), parameter :: subname='(icedrv_initialize)' type(t_mesh), intent(in), target :: mesh type(t_tracer_data), intent(in), target :: tracer + type(t_tracer_ice), intent(in), target :: ice call icepack_query_parameters(wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_aero_out=tr_aero) call icepack_query_tracer_flags(tr_zaero_out=tr_zaero) @@ -980,7 +982,7 @@ module subroutine init_icepack(tracer, mesh) endif call init_fsd - call fesom_to_icepack(mesh) + call fesom_to_icepack(ice, mesh) call init_state(tracer) ! initialize the ice state call init_history_therm ! initialize thermo history variables diff --git a/src/icepack_drivers/icedrv_step.F90 b/src/icepack_drivers/icedrv_step.F90 index 38937781e..dd3f6e543 100644 --- a/src/icepack_drivers/icedrv_step.F90 +++ b/src/icepack_drivers/icedrv_step.F90 @@ -1116,12 +1116,13 @@ end subroutine coupling_prep !======================================================================= - module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) + module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) use icepack_intfc, only: icepack_ice_strength use g_config, only: dt use i_PARAM, only: whichEVP use mod_mesh + use mod_ice implicit none @@ -1142,6 +1143,7 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) time_evp type(t_mesh), target, intent(in) :: mesh + type(t_ice), target, intent(inout) :: ice character(len=*), parameter :: subname='(ice_step)' @@ -1171,7 +1173,7 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) ! copy variables from fesom2 (also ice velocities) !----------------------------------------------------------------- - call fesom_to_icepack(mesh) + call fesom_to_icepack(ice, mesh) !----------------------------------------------------------------- ! tendencies needed by fesom @@ -1239,11 +1241,11 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) select case (whichEVP) case (0) - call EVPdynamics (p_partit, mesh) + call EVPdynamics (ice, p_partit, mesh) case (1) - call EVPdynamics_m(p_partit, mesh) + call EVPdynamics_m(ice, p_partit, mesh) case (2) - call EVPdynamics_a(p_partit, mesh) + call EVPdynamics_a(ice, p_partit, mesh) case default if (mype==0) write(*,*) 'A non existing EVP scheme specified!' call par_ex(p_partit%MPI_COMM_FESOM, p_partit%mype) @@ -1257,7 +1259,7 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) ! update ice velocities !----------------------------------------------------------------- - call fesom_to_icepack(mesh) + call fesom_to_icepack(ice, mesh) !----------------------------------------------------------------- ! advect tracers diff --git a/src/icepack_drivers/icedrv_transfer.F90 b/src/icepack_drivers/icedrv_transfer.F90 index 32e916823..fd6fca29f 100644 --- a/src/icepack_drivers/icedrv_transfer.F90 +++ b/src/icepack_drivers/icedrv_transfer.F90 @@ -10,7 +10,7 @@ contains - module subroutine fesom_to_icepack(mesh) + module subroutine fesom_to_icepack(ice, mesh) use g_forcing_arrays, only: Tair, shum, u_wind, v_wind, & ! Atmospheric forcing fields shortwave, longwave, prec_rain, & @@ -20,8 +20,9 @@ module subroutine fesom_to_icepack(mesh) use g_sbf, only: l_mslp use i_arrays, only: S_oc_array, T_oc_array, & ! Ocean and sea ice fields u_w, v_w, & - u_ice, v_ice, & stress_atmice_x, stress_atmice_y +! u_ice, v_ice, & + use i_param, only: cd_oce_ice ! Sea ice parameters use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters @@ -32,6 +33,7 @@ module subroutine fesom_to_icepack(mesh) use o_param, only: mstep use mod_mesh use mod_tracer + use mod_ice use g_clock implicit none @@ -60,9 +62,11 @@ module subroutine fesom_to_icepack(mesh) cprho type(t_mesh), target, intent(in) :: mesh - + type(t_ice), target, intent(inout) :: ice + real(kind=WP), dimension(:), pointer :: u_ice, v_ice #include "associate_mesh.h" - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) ! Ice uvel(:) = u_ice(:) From b1f65682298fa50bda78c9a1e9e375767d765cf9 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 21:47:20 +0100 Subject: [PATCH 621/909] add ice derived type to icepack routines, fix issue --- src/icepack_drivers/icedrv_main.F90 | 8 ++++++-- src/icepack_drivers/icedrv_step.F90 | 6 +++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/icepack_drivers/icedrv_main.F90 b/src/icepack_drivers/icedrv_main.F90 index e6fc705f6..48b20db93 100644 --- a/src/icepack_drivers/icedrv_main.F90 +++ b/src/icepack_drivers/icedrv_main.F90 @@ -801,10 +801,12 @@ module subroutine init_icepack(tracer, mesh) end subroutine init_icepack ! Copy variables from fesom to icepack - module subroutine fesom_to_icepack(mesh) + module subroutine fesom_to_icepack(ice, mesh) use mod_mesh + use mod_ice implicit none type(t_mesh), intent(in), target :: mesh + type(t_ice), intent(in), target :: ice end subroutine fesom_to_icepack ! Copy variables from icepack to fesom @@ -860,8 +862,9 @@ module subroutine init_advection_icepack(mesh) end subroutine init_advection_icepack ! Driving subroutine for column physics - module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) + module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) use mod_mesh + use mod_ice use g_config, only: dt use i_PARAM, only: whichEVP use icepack_intfc, only: icepack_ice_strength @@ -871,6 +874,7 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) time_advec, & time_evp type(t_mesh), intent(in), target :: mesh + type(t_ice), intent(in), target :: ice end subroutine step_icepack ! Initialize output diff --git a/src/icepack_drivers/icedrv_step.F90 b/src/icepack_drivers/icedrv_step.F90 index dd3f6e543..f267db5cc 100644 --- a/src/icepack_drivers/icedrv_step.F90 +++ b/src/icepack_drivers/icedrv_step.F90 @@ -1141,10 +1141,10 @@ module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) time_therm, & time_advec, & time_evp - - type(t_mesh), target, intent(in) :: mesh + type(t_ice), target, intent(inout) :: ice - + type(t_mesh), target, intent(in) :: mesh + character(len=*), parameter :: subname='(ice_step)' From 75f8ef6b3bd85204c5b9597378c03b948284160b Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 21:50:20 +0100 Subject: [PATCH 622/909] add ice derived type to icepack routines, fix issue 2 --- src/icepack_drivers/icedrv_main.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/icepack_drivers/icedrv_main.F90 b/src/icepack_drivers/icedrv_main.F90 index 48b20db93..79ce73f0e 100644 --- a/src/icepack_drivers/icedrv_main.F90 +++ b/src/icepack_drivers/icedrv_main.F90 @@ -806,7 +806,7 @@ module subroutine fesom_to_icepack(ice, mesh) use mod_ice implicit none type(t_mesh), intent(in), target :: mesh - type(t_ice), intent(in), target :: ice + type(t_ice), intent(inout), target :: ice end subroutine fesom_to_icepack ! Copy variables from icepack to fesom @@ -874,7 +874,7 @@ module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) time_advec, & time_evp type(t_mesh), intent(in), target :: mesh - type(t_ice), intent(in), target :: ice + type(t_ice), intent(inout), target :: ice end subroutine step_icepack ! Initialize output From dd2cd1cbf8fd5482116689d222040f3aa54c677a Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 21:58:20 +0100 Subject: [PATCH 623/909] add ice derived type to icepack routines, fix issue 3 --- src/ice_oce_coupling.F90 | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index dcda19763..0b11827f0 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -255,12 +255,16 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) real(kind=WP), allocatable :: flux(:) !___________________________________________________________________________ real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - temp=>tracers%data(1)%values(:,:) - salt=>tracers%data(2)%values(:,:) + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) !___________________________________________________________________________ allocate(flux(myDim_nod2D+eDim_nod2D)) From 1d82b471491236fadc95c99b0147751d1ad7c38f Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 22:05:14 +0100 Subject: [PATCH 624/909] add ice derived type to icepack routines, fix issue 4 --- src/icepack_drivers/icedrv_init.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/icepack_drivers/icedrv_init.F90 b/src/icepack_drivers/icedrv_init.F90 index 63bd5b24e..26821ade3 100644 --- a/src/icepack_drivers/icedrv_init.F90 +++ b/src/icepack_drivers/icedrv_init.F90 @@ -931,7 +931,7 @@ module subroutine init_icepack(ice, tracer, mesh) character(len=*), parameter :: subname='(icedrv_initialize)' type(t_mesh), intent(in), target :: mesh type(t_tracer_data), intent(in), target :: tracer - type(t_tracer_ice), intent(in), target :: ice + type(t_ice), intent(in), target :: ice call icepack_query_parameters(wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_aero_out=tr_aero) call icepack_query_tracer_flags(tr_zaero_out=tr_zaero) From 6cd796f220fddf733fcdd7963348b300422155a4 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 22:17:53 +0100 Subject: [PATCH 625/909] add ice derived type to icepack routines, fix issue 5 --- src/icepack_drivers/icedrv_init.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/icepack_drivers/icedrv_init.F90 b/src/icepack_drivers/icedrv_init.F90 index 26821ade3..9294e424c 100644 --- a/src/icepack_drivers/icedrv_init.F90 +++ b/src/icepack_drivers/icedrv_init.F90 @@ -931,7 +931,7 @@ module subroutine init_icepack(ice, tracer, mesh) character(len=*), parameter :: subname='(icedrv_initialize)' type(t_mesh), intent(in), target :: mesh type(t_tracer_data), intent(in), target :: tracer - type(t_ice), intent(in), target :: ice + type(t_ice), intent(inout), target :: ice call icepack_query_parameters(wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_aero_out=tr_aero) call icepack_query_tracer_flags(tr_zaero_out=tr_zaero) From fd73fb740ba60d894fe3e853e4d687228795e4a2 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 22:23:11 +0100 Subject: [PATCH 626/909] add ice derived type to icepack routines, fix issue 6 --- src/icepack_drivers/icedrv_main.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/icepack_drivers/icedrv_main.F90 b/src/icepack_drivers/icedrv_main.F90 index 79ce73f0e..399165851 100644 --- a/src/icepack_drivers/icedrv_main.F90 +++ b/src/icepack_drivers/icedrv_main.F90 @@ -792,12 +792,14 @@ module subroutine init_history_bgc() end subroutine init_history_bgc ! Initialize all - module subroutine init_icepack(tracer, mesh) + module subroutine init_icepack(ice, tracer, mesh) use mod_mesh use mod_tracer + use mod_ice implicit none type(t_mesh), intent(in), target :: mesh type(t_tracer_data), intent(in), target :: tracer + type(t_ice) , intent(in), target :: ice end subroutine init_icepack ! Copy variables from fesom to icepack From 64d00cd7e09aacd939d8c04eb1e41666ada88bbb Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 22:26:04 +0100 Subject: [PATCH 627/909] add ice derived type to icepack routines, fix issue 7 --- src/icepack_drivers/icedrv_main.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/icepack_drivers/icedrv_main.F90 b/src/icepack_drivers/icedrv_main.F90 index 399165851..03c2e0625 100644 --- a/src/icepack_drivers/icedrv_main.F90 +++ b/src/icepack_drivers/icedrv_main.F90 @@ -799,7 +799,7 @@ module subroutine init_icepack(ice, tracer, mesh) implicit none type(t_mesh), intent(in), target :: mesh type(t_tracer_data), intent(in), target :: tracer - type(t_ice) , intent(in), target :: ice + type(t_ice) , intent(inout), target :: ice end subroutine init_icepack ! Copy variables from fesom to icepack From 411fc261e2c3727cf7971f5394329740fbf9b615 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 22:52:29 +0100 Subject: [PATCH 628/909] add ice derived type to icepack routines, fix issue 8 --- src/icepack_drivers/icedrv_step.F90 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/icepack_drivers/icedrv_step.F90 b/src/icepack_drivers/icedrv_step.F90 index f267db5cc..1997b9b45 100644 --- a/src/icepack_drivers/icedrv_step.F90 +++ b/src/icepack_drivers/icedrv_step.F90 @@ -1123,7 +1123,8 @@ module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) use i_PARAM, only: whichEVP use mod_mesh use mod_ice - + use ice_EVPdynamics_interface + use ice_maEVPdynamics_interface implicit none integer (kind=int_kind) :: & From 9f0469795bf29f5c74091aef457cbdc66b683cd0 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 23:08:59 +0100 Subject: [PATCH 629/909] exchange varaibles a_ice_old, m_ice_old, m_snow_old, u_ice_old, v_ice_old with corresponding ice derived type varaibles --- src/ice_EVP.F90 | 4 +++- src/ice_modules.F90 | 3 ++- src/ice_oce_coupling.F90 | 12 +++++++----- src/ice_setup_step.F90 | 14 +++++++------- src/ice_thermo_oce.F90 | 4 ++++ src/write_step_info.F90 | 25 +++++++++++++++---------- 6 files changed, 38 insertions(+), 24 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 926be8775..80a5be58e 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -483,6 +483,7 @@ subroutine EVPdynamics(ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: u_ice_old, v_ice_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -492,7 +493,8 @@ subroutine EVPdynamics(ice, partit, mesh) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) - + u_ice_old => ice%uvice_old(1,:) + v_ice_old => ice%uvice_old(2,:) !_______________________________________________________________________________ ! If Icepack is used, always update the tracers #if defined (__icepack) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index f8c3ecd5e..3b6e61f86 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -61,7 +61,8 @@ MODULE i_ARRAYS real(kind=WP),allocatable,dimension(:,:) :: ice_grad_vel ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice, V_ice ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice, a_ice, m_snow - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_old, V_ice_old, m_ice_old, a_ice_old, m_snow_old,thdgr_old !PS +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_old, V_ice_old, m_ice_old, a_ice_old, m_snow_old,thdgr_old !PS + REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: thdgr_old REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 0b11827f0..61a739840 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -256,15 +256,17 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) !___________________________________________________________________________ real(kind=WP), dimension(:,:), pointer :: temp, salt real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: a_ice_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - temp => tracers%data(1)%values(:,:) - salt => tracers%data(2)%values(:,:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + a_ice_old=> ice%data(1)%values_old(:) !___________________________________________________________________________ allocate(flux(myDim_nod2D+eDim_nod2D)) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 4a6ebee15..4baf8f4b0 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -140,8 +140,8 @@ subroutine ice_array_setup(partit, mesh) ! allocate(m_ice(n_size), a_ice(n_size), m_snow(n_size)) allocate(rhs_m(n_size), rhs_a(n_size), rhs_ms(n_size)) allocate(t_skin(n_size)) - allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS - allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS +! allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS +! allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS if (whichEVP > 0) then allocate(u_ice_aux(n_size), v_ice_aux(n_size)) allocate(alpha_evp_array(myDim_elem2D)) @@ -155,12 +155,12 @@ subroutine ice_array_setup(partit, mesh) allocate(rhs_mdiv(n_size), rhs_adiv(n_size), rhs_msdiv(n_size)) - m_ice_old=0.0_WP !PS - a_ice_old=0.0_WP !PS - m_snow_old=0.0_WP !PS +! m_ice_old=0.0_WP !PS +! a_ice_old=0.0_WP !PS +! m_snow_old=0.0_WP !PS thdgr_old=0.0_WP !PS - U_ice_old=0.0_WP !PS - V_ice_old=0.0_WP !PS +! U_ice_old=0.0_WP !PS +! V_ice_old=0.0_WP !PS rhs_m=0.0_WP rhs_ms=0.0_WP diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 95d77c68e..7b658c316 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -134,6 +134,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:,:),pointer :: geo_coord_nod2D real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -143,6 +144,9 @@ subroutine thermodynamics(ice, partit, mesh) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) !_____________________________________________________________________________ rsss=ref_sss diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 1773fc44a..94a141361 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,3 +1,4 @@ + module write_step_info_interface interface subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) @@ -282,22 +283,26 @@ subroutine check_blowup(istep, ice, dynamics, tracers, partit, mesh) real(kind=WP), dimension(:) , pointer :: eta_n, d_eta real(kind=WP), dimension(:) , pointer :: u_ice, v_ice real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: a_ice_old, m_ice_old, m_snow_old #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - Wvel => dynamics%w(:,:) - CFL_z => dynamics%cfl_z(:,:) + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) ssh_rhs => dynamics%ssh_rhs(:) ssh_rhs_old => dynamics%ssh_rhs_old(:) - eta_n => dynamics%eta_n(:) - d_eta => dynamics%d_eta(:) - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) !___________________________________________________________________________ !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz) From eba87dfc47428dd8de9ab7458eb987d8751d125e Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 23:36:22 +0100 Subject: [PATCH 630/909] fix issue --- src/ice_setup_step.F90 | 6 ++++++ src/ice_thermo_oce.F90 | 15 +++++++++------ src/oce_ale_vel_rhs.F90 | 7 +++---- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 4baf8f4b0..fe8727bb2 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -83,6 +83,7 @@ subroutine ice_setup(ice, tracers, partit, mesh) !___________________________________________________________________________ ! initialise ice derived type + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_init'//achar(27)//'[0m' call ice_init(ice, partit, mesh) ! ================ DO not change @@ -92,12 +93,16 @@ subroutine ice_setup(ice, tracers, partit, mesh) Clim_evp=Clim_evp*(evp_rheol_steps/ice_dt)**2/Tevp_inv ! This is combination ! it always enters ! ================ + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_array_setup'//achar(27)//'[0m' call ice_array_setup(partit, mesh) + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_fct_init'//achar(27)//'[0m' call ice_fct_init(ice, partit, mesh) ! ================ ! Initialization routine, user input is required ! ================ !call ice_init_fields_test + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_initial_state'//achar(27)//'[0m' call ice_initial_state(ice, tracers, partit, mesh) ! Use it unless running test example if(partit%mype==0) write(*,*) 'Ice is initialized' end subroutine ice_setup @@ -142,6 +147,7 @@ subroutine ice_array_setup(partit, mesh) allocate(t_skin(n_size)) ! allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS ! allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS + allocate(thdgr_old(n_size)) !PS if (whichEVP > 0) then allocate(u_ice_aux(n_size), v_ice_aux(n_size)) allocate(alpha_evp_array(myDim_elem2D)) diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 7b658c316..5518e55dd 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -133,12 +133,13 @@ subroutine thermodynamics(ice, partit, mesh) integer, dimension(:), pointer :: ulevels_nod2D real(kind=WP), dimension(:,:),pointer :: geo_coord_nod2D real(kind=WP), dimension(:), pointer :: u_ice, v_ice - real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D + u_ice => ice%uvice(1,:) v_ice => ice%uvice(2,:) a_ice => ice%data(1)%values(:) @@ -149,7 +150,7 @@ subroutine thermodynamics(ice, partit, mesh) m_snow_old => ice%data(3)%values_old(:) !_____________________________________________________________________________ rsss=ref_sss - + ! u_ice and v_ice are at nodes ! u_w, v_w are at nodes (interpolated from elements) ! u_wind and v_wind are always at nodes @@ -169,7 +170,7 @@ subroutine thermodynamics(ice, partit, mesh) ! ================ ! end: friction velocity ! ================ - + do i=1, myDim_nod2d+eDim_nod2D !__________________________________________________________________________ ! if there is a cavity no sea ice thermodynamics is apllied @@ -221,19 +222,20 @@ subroutine thermodynamics(ice, partit, mesh) else lid_clo=0.5_WP endif - + call therm_ice(h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & ug,ustar,T_oc,S_oc,h_ml,t,ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & rsf, ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) - + m_ice_old(i) = m_ice(i) !PS m_snow_old(i) = m_snow(i) !PS a_ice_old(i) = a_ice(i) !PS thdgr_old(i) = thdgr(i) !PS - + m_ice(i) = h m_snow(i) = hsn a_ice(i) = A + t_skin(i) = t fresh_wa_flux(i) = fw !positive down net_heat_flux(i) = ehf !positive down @@ -258,6 +260,7 @@ subroutine thermodynamics(ice, partit, mesh) end if end do + deallocate(ustar_aux) end subroutine thermodynamics ! diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 0d3352a1a..a6df4c9a4 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -69,14 +69,13 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - write(*,*) ">-))))°> something is fishy 0" UV => dynamics%uv(:,:,:) UV_rhs => dynamics%uv_rhs(:,:,:) UV_rhsAB => dynamics%uv_rhsAB(:,:,:) eta_n => dynamics%eta_n(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) - write(*,*) ">-))))°> something is fishy 1" + !___________________________________________________________________________ use_pice=0 if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 @@ -150,7 +149,7 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) end do end do !$OMP END PARALLEL DO - write(*,*) ">-))))°> something is fishy 2" + !___________________________________________________________________________ ! advection if (dynamics%momadv_opt==1) then @@ -176,7 +175,7 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) end do end do !$OMP END PARALLEL DO - write(*,*) ">-))))°> something is fishy 3" + ! ======================= ! U_rhs contains all contributions to velocity from old time steps ! ======================= From ce4fd5315f4e3a826cb73429cac61d06efccd5ac Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 20 Nov 2021 23:59:46 +0100 Subject: [PATCH 631/909] exchange varaibles rhs_a, rhs_m, rhs_ms, u_rhs_ice, v_rhs_ice with corresponding ice derived type varaibles --- src/ice_EVP.F90 | 12 ++++++++++++ src/ice_fct.F90 | 22 +++++++++++++++++++--- src/ice_maEVP.F90 | 22 +++++++++++++++++++--- src/ice_modules.F90 | 5 +++-- src/ice_setup_step.F90 | 14 +++++++------- 5 files changed, 60 insertions(+), 15 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 80a5be58e..b24cf5050 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -381,6 +381,7 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -388,6 +389,11 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) sigma11 => ice%work%sigma11(:) sigma12 => ice%work%sigma12(:) sigma22 => ice%work%sigma22(:) + u_rhs_ice => ice%uvice_rhs(1,:) + v_rhs_ice => ice%uvice_rhs(2,:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + !___________________________________________________________________________ val3=1/3.0_WP @@ -484,6 +490,7 @@ subroutine EVPdynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: u_ice_old, v_ice_old + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -495,6 +502,11 @@ subroutine EVPdynamics(ice, partit, mesh) m_snow => ice%data(3)%values(:) u_ice_old => ice%uvice_old(1,:) v_ice_old => ice%uvice_old(2,:) + u_rhs_ice => ice%uvice_rhs(1,:) + v_rhs_ice => ice%uvice_rhs(2,:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + !_______________________________________________________________________________ ! If Icepack is used, always update the tracers #if defined (__icepack) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 5b5715e18..8af4cd29c 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -106,15 +106,19 @@ subroutine ice_TG_rhs(ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) !___________________________________________________________________________ ! Taylor-Galerkin (Lax-Wendroff) rhs @@ -279,6 +283,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -286,6 +291,9 @@ subroutine ice_solve_low_order(ice, partit, mesh) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) !___________________________________________________________________________ gamma=ice_gamma_fct ! Added diffusivity parameter @@ -347,11 +355,15 @@ subroutine ice_solve_high_order(ice, partit, mesh) integer :: num_iter_solve=3 !_____________________________________________________________________________ ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) + !_____________________________________________________________________________ ! Does Taylor-Galerkin solution ! @@ -851,6 +863,7 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -860,6 +873,9 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) !___________________________________________________________________________ ! Computes the rhs in a Taylor-Galerkin way (with upwind type of diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index ac0fe0b84..9c72f825b 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -213,12 +213,15 @@ subroutine ssh2rhs(ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: m_ice, m_snow + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -303,6 +306,7 @@ subroutine stress2rhs_m(ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -313,6 +317,10 @@ subroutine stress2rhs_m(ice, partit, mesh) sigma11 => ice%work%sigma11(:) sigma12 => ice%work%sigma12(:) sigma22 => ice%work%sigma22(:) + u_rhs_ice => ice%uvice_rhs(1,:) + v_rhs_ice => ice%uvice_rhs(2,:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -405,6 +413,7 @@ subroutine EVPdynamics_m(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -420,6 +429,10 @@ subroutine EVPdynamics_m(ice, partit, mesh) sigma11 => ice%work%sigma11(:) sigma12 => ice%work%sigma12(:) sigma22 => ice%work%sigma22(:) + u_rhs_ice => ice%uvice_rhs(1,:) + v_rhs_ice => ice%uvice_rhs(2,:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -946,16 +959,19 @@ subroutine EVPdynamics_a(ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) - + u_rhs_ice => ice%uvice_rhs(1,:) + v_rhs_ice => ice%uvice_rhs(2,:) + !___________________________________________________________________________ steps=evp_rheol_steps rdt=ice_dt diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 3b6e61f86..16ae7f079 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -63,8 +63,9 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice, a_ice, m_snow ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_old, V_ice_old, m_ice_old, a_ice_old, m_snow_old,thdgr_old !PS REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: thdgr_old - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp + REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: ths_temp REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index fe8727bb2..37918b4a6 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -139,11 +139,11 @@ subroutine ice_array_setup(partit, mesh) ! Allocate memory for variables of ice model ! allocate(u_ice(n_size), v_ice(n_size)) - allocate(U_rhs_ice(n_size), V_rhs_ice(n_size)) +! allocate(U_rhs_ice(n_size), V_rhs_ice(n_size)) ! allocate(sigma11(e_size), sigma12(e_size), sigma22(e_size)) ! allocate(eps11(e_size), eps12(e_size), eps22(e_size)) ! allocate(m_ice(n_size), a_ice(n_size), m_snow(n_size)) - allocate(rhs_m(n_size), rhs_a(n_size), rhs_ms(n_size)) +! allocate(rhs_m(n_size), rhs_a(n_size), rhs_ms(n_size)) allocate(t_skin(n_size)) ! allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS ! allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS @@ -168,14 +168,14 @@ subroutine ice_array_setup(partit, mesh) ! U_ice_old=0.0_WP !PS ! V_ice_old=0.0_WP !PS - rhs_m=0.0_WP - rhs_ms=0.0_WP - rhs_a=0.0_WP +! rhs_m=0.0_WP +! rhs_ms=0.0_WP +! rhs_a=0.0_WP ! m_ice=0.0_WP ! a_ice=0.0_WP ! m_snow=0.0_WP - U_rhs_ice=0.0_WP - V_rhs_ice=0.0_WP +! U_rhs_ice=0.0_WP +! V_rhs_ice=0.0_WP ! U_ice=0.0_WP ! V_ice=0.0_WP ! sigma11=0.0_WP From fbeefa4186e900ebf61ef097b581c130690f3605 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sun, 21 Nov 2021 22:43:25 +0100 Subject: [PATCH 632/909] exchange varaibles rhs_adiv, rhs_mdiv, rhs_msdiv with corresponding ice derived type varaibles --- src/ice_fct.F90 | 8 ++++++++ src/ice_modules.F90 | 2 +- src/ice_setup_step.F90 | 8 ++++---- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 8af4cd29c..ab1a8fcfb 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -864,6 +864,7 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms + real(kind=WP), dimension(:), pointer :: rhs_adiv, rhs_mdiv, rhs_msdiv #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -876,6 +877,9 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) rhs_ms => ice%data(3)%values_rhs(:) + rhs_adiv => ice%data(1)%values_div_rhs(:) + rhs_mdiv => ice%data(2)%values_div_rhs(:) + rhs_msdiv => ice%data(3)%values_div_rhs(:) !___________________________________________________________________________ ! Computes the rhs in a Taylor-Galerkin way (with upwind type of @@ -976,6 +980,7 @@ subroutine ice_update_for_div(ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: rhs_adiv, rhs_mdiv, rhs_msdiv #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -983,6 +988,9 @@ subroutine ice_update_for_div(ice, partit, mesh) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) + rhs_adiv => ice%data(1)%values_div_rhs(:) + rhs_mdiv => ice%data(2)%values_div_rhs(:) + rhs_msdiv => ice%data(3)%values_div_rhs(:) !___________________________________________________________________________ ! Does Taylor-Galerkin solution diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 16ae7f079..d840e8bf8 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -68,7 +68,7 @@ MODULE i_ARRAYS REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: ths_temp REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 37918b4a6..9233bf916 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -159,7 +159,7 @@ subroutine ice_array_setup(partit, mesh) v_ice_aux=0.0_WP end if - allocate(rhs_mdiv(n_size), rhs_adiv(n_size), rhs_msdiv(n_size)) +! allocate(rhs_mdiv(n_size), rhs_adiv(n_size), rhs_msdiv(n_size)) ! m_ice_old=0.0_WP !PS ! a_ice_old=0.0_WP !PS @@ -185,9 +185,9 @@ subroutine ice_array_setup(partit, mesh) ! eps12=0.0_WP ! eps22=0.0_WP t_skin=0.0_WP - rhs_mdiv=0.0_WP - rhs_adiv=0.0_WP - rhs_msdiv=0.0_WP +! rhs_mdiv=0.0_WP +! rhs_adiv=0.0_WP +! rhs_msdiv=0.0_WP ! Allocate memory for arrays used in coupling From ebc94f72bca5a193ab09f4233ce3124c0bfb9225 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sun, 21 Nov 2021 23:15:15 +0100 Subject: [PATCH 633/909] exchange varaibles thdgr, thdgrsn, thdrg_old with corresponding ice derived type varaibles --- src/gen_forcing_init.F90 | 7 ++++--- src/gen_modules_forcing.F90 | 3 ++- src/ice_oce_coupling.F90 | 8 +++++--- src/ice_setup_step.F90 | 2 +- src/ice_thermo_oce.F90 | 3 +++ src/io_meandata.F90 | 4 ++-- src/oce_ale.F90 | 2 +- src/oce_ale_tracer.F90 | 10 +++++++--- src/oce_spp.F90 | 12 +++++++----- 9 files changed, 32 insertions(+), 19 deletions(-) diff --git a/src/gen_forcing_init.F90 b/src/gen_forcing_init.F90 index 4c7d14263..393a32303 100755 --- a/src/gen_forcing_init.F90 +++ b/src/gen_forcing_init.F90 @@ -145,10 +145,11 @@ subroutine forcing_array_setup(partit, mesh) !for ice diagnose if(use_ice) then - allocate(thdgr(n2), thdgrsn(n2), flice(n2)) +! allocate(thdgr(n2), thdgrsn(n2)) + allocate(flice(n2)) allocate(olat_heat(n2), osen_heat(n2), olwout(n2)) - thdgr=0.0_WP - thdgrsn=0.0_WP +! thdgr=0.0_WP +! thdgrsn=0.0_WP flice=0.0_WP olat_heat=0.0_WP osen_heat=0.0_WP diff --git a/src/gen_modules_forcing.F90 b/src/gen_modules_forcing.F90 index 25c99ee99..d71060223 100755 --- a/src/gen_modules_forcing.F90 +++ b/src/gen_modules_forcing.F90 @@ -77,7 +77,8 @@ module g_forcing_arrays real(kind=WP), allocatable, dimension(:) :: chl real(kind=WP), allocatable, dimension(:,:) :: sw_3d - real(kind=WP), allocatable, dimension(:) :: thdgr, thdgrsn, flice +! real(kind=WP), allocatable, dimension(:) :: thdgr, thdgrsn + real(kind=WP), allocatable, dimension(:) :: flice real(kind=WP), allocatable, dimension(:) :: olat_heat, osen_heat, olwout real(kind=WP), allocatable, dimension(:) :: real_salt_flux !PS diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 61a739840..5b0f913ab 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -255,8 +255,9 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) real(kind=WP), allocatable :: flux(:) !___________________________________________________________________________ real(kind=WP), dimension(:,:), pointer :: temp, salt - real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow - real(kind=WP), dimension(:), pointer :: a_ice_old + real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: a_ice_old + real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -267,7 +268,8 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) a_ice_old=> ice%data(1)%values_old(:) - + thdgr => ice%thermo%thdgr + thdgrsn => ice%thermo%thdgrsn !___________________________________________________________________________ allocate(flux(myDim_nod2D+eDim_nod2D)) flux = 0.0_WP diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 9233bf916..3da6ed3f0 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -164,7 +164,7 @@ subroutine ice_array_setup(partit, mesh) ! m_ice_old=0.0_WP !PS ! a_ice_old=0.0_WP !PS ! m_snow_old=0.0_WP !PS - thdgr_old=0.0_WP !PS +! thdgr_old=0.0_WP !PS ! U_ice_old=0.0_WP !PS ! V_ice_old=0.0_WP !PS diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 5518e55dd..5620a6bd8 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -135,6 +135,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old + real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -148,6 +149,8 @@ subroutine thermodynamics(ice, partit, mesh) a_ice_old => ice%data(1)%values_old(:) m_ice_old => ice%data(2)%values_old(:) m_snow_old => ice%data(3)%values_old(:) + thdgr => ice%thermo%thdgr + thdgrsn => ice%thermo%thdgrsn !_____________________________________________________________________________ rsss=ref_sss diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 9b75d7870..f0e9e755d 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -186,11 +186,11 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) end if CASE ('thdgr ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'thdgr', 'thermodynamic growth rate ice', 'm/s', thdgr(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'thdgr', 'thermodynamic growth rate ice', 'm/s', ice%thermo%thdgr(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('thdgrsn ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'thdgrsn', 'thermodynamic growth rate snow', 'm/s', thdgrsn(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'thdgrsn', 'thermodynamic growth rate snow', 'm/s', ice%thermo%thdgrsn(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('flice ') if (use_ice) then diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 13accd4cf..1e1e082a7 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -3068,7 +3068,7 @@ subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! solve tracer equation if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call solve_tracers_ale'//achar(27)//'[0m' - call solve_tracers_ale(dynamics, tracers, partit, mesh) + call solve_tracers_ale(ice, dynamics, tracers, partit, mesh) t8=MPI_Wtime() !___________________________________________________________________________ diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 83c57d42e..8b77301d5 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -108,12 +108,14 @@ subroutine diff_part_bh(tr_num, dynamics, tracer, partit, mesh) module solve_tracers_ale_interface interface - subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) + subroutine solve_tracers_ale(ice, dynamics, tracers, partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use mod_tracer use MOD_DYN + USE MOD_ICE + type(t_ice) , intent(inout), target :: ice type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers type(t_partit), intent(inout), target :: partit @@ -125,13 +127,14 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) ! !=============================================================================== ! Driving routine Here with ALE changes!!! -subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) +subroutine solve_tracers_ale(ice, dynamics, tracers, partit, mesh) use g_config use o_PARAM, only: SPP, Fer_GM use mod_mesh USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN + USE MOD_ICE use mod_tracer use g_comm_auto use o_tracers @@ -139,6 +142,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) use diff_tracers_ale_interface use oce_adv_tra_driver_interfaces implicit none + type(t_ice) , intent(in) , target :: ice type(t_dyn) , intent(inout), target :: dynamics type(t_tracer), intent(inout), target :: tracers type(t_partit), intent(inout), target :: partit @@ -165,7 +169,7 @@ subroutine solve_tracers_ale(dynamics, tracers, partit, mesh) del_ttf => tracers%work%del_ttf !___________________________________________________________________________ - if (SPP) call cal_rejected_salt(partit, mesh) + if (SPP) call cal_rejected_salt(ice, partit, mesh) if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) !___________________________________________________________________________ diff --git a/src/oce_spp.F90 b/src/oce_spp.F90 index f59d09b9c..2183170f8 100644 --- a/src/oce_spp.F90 +++ b/src/oce_spp.F90 @@ -8,14 +8,14 @@ ! Ref: Duffy1997, Duffy1999, Nguyen2009 ! Originaly coded by Qiang Wang in FESOM 1.4 !-------------------------------------------------------- -subroutine cal_rejected_salt(partit, mesh) +subroutine cal_rejected_salt(ice, partit, mesh) use o_arrays +USE MOD_ICE use mod_mesh USE MOD_PARTIT USE MOD_PARSUP use g_comm_auto use o_tracers -use g_forcing_arrays, only: thdgr use i_ARRAYS, only: S_oc_array use i_therm_param, only: rhoice, rhowat, Sice use g_config, only: dt @@ -23,13 +23,15 @@ subroutine cal_rejected_salt(partit, mesh) integer :: row real(kind=WP) :: aux -type(t_mesh), intent(in), target :: mesh -type(t_partit), intent(in), target :: partit - +type(t_ice) , intent(in), target :: ice +type(t_mesh) , intent(in), target :: mesh +type(t_partit), intent(in), target :: partit +real(kind=WP), dimension(:) , pointer :: thdgr #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" +thdgr => ice%thermo%thdgr aux=rhoice/rhowat*dt do row=1, myDim_nod2d +eDim_nod2D! myDim is sufficient From 327c1dc09dde7a87aaac7a529c841a6fa521d2d7 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sun, 21 Nov 2021 23:18:57 +0100 Subject: [PATCH 634/909] exchange varaibles thdgr, thdgrsn, thdrg_old with corresponding ice derived type varaibles in src/ice_thermo_cpl.F90 --- src/ice_thermo_cpl.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index de34a78fa..7a3be4c49 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -63,6 +63,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:,:), pointer :: geo_coord_nod2D real(kind=WP), dimension(:) , pointer :: u_ice, v_ice real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -72,7 +73,8 @@ subroutine thermodynamics(ice, partit, mesh) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) - + thdgr => ice%thermo%thdgr + thdgrsn => ice%thermo%thdgrsn !_____________________________________________________________________________ rsss = ref_sss From c9ec918e078c8ea91b72139edb5524c3becc1e35 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sun, 21 Nov 2021 23:37:31 +0100 Subject: [PATCH 635/909] exchange varaibles t_skin, ustar with corresponding ice derived type varaibles --- src/MOD_ICE.F90 | 10 ---------- src/ice_modules.F90 | 4 ++-- src/ice_setup_step.F90 | 6 +++--- src/ice_thermo_oce.F90 | 14 ++++++++------ 4 files changed, 13 insertions(+), 21 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 1b401fe8b..50beb4053 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -36,7 +36,6 @@ MODULE MOD_ICE real(kind=WP), allocatable, dimension(:) :: sigma11, sigma12, sigma22 real(kind=WP), allocatable, dimension(:) :: eps11, eps12, eps22 real(kind=WP), allocatable, dimension(:) :: ice_strength, inv_areamass, inv_mass - real(kind=WP), allocatable, dimension(:) :: t_skin, thdgr, thdgrsn, thdgr_old !___________________________________________________________________________ contains procedure WRITE_T_ICE_WORK @@ -616,15 +615,6 @@ subroutine ice_init(ice, partit, mesh) ice%work%inv_areamass= 0.0_WP ice%work%inv_mass = 0.0_WP - allocate(ice%work%t_skin( node_size)) - allocate(ice%work%thdgr( node_size)) - allocate(ice%work%thdgrsn( node_size)) - allocate(ice%work%thdgr_old( node_size)) - ice%work%t_skin = 0.0_WP - ice%work%thdgr = 0.0_WP - ice%work%thdgrsn = 0.0_WP - ice%work%thdgr_old = 0.0_WP - !___________________________________________________________________________ ! initialse thermo array of ice derived type allocate(ice%thermo%ustar( node_size)) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index d840e8bf8..c7633db07 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -62,7 +62,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice, V_ice ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice, a_ice, m_snow ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_old, V_ice_old, m_ice_old, a_ice_old, m_snow_old,thdgr_old !PS - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: thdgr_old +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: thdgr_old ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: ths_temp @@ -91,7 +91,7 @@ MODULE i_ARRAYS REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_y REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_x REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_y - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: t_skin +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: t_skin ! FCT implementation REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_icel, a_icel, m_snowl REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 3da6ed3f0..2e69b2309 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -144,10 +144,10 @@ subroutine ice_array_setup(partit, mesh) ! allocate(eps11(e_size), eps12(e_size), eps22(e_size)) ! allocate(m_ice(n_size), a_ice(n_size), m_snow(n_size)) ! allocate(rhs_m(n_size), rhs_a(n_size), rhs_ms(n_size)) - allocate(t_skin(n_size)) +! allocate(t_skin(n_size)) ! allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS ! allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS - allocate(thdgr_old(n_size)) !PS +! allocate(thdgr_old(n_size)) !PS if (whichEVP > 0) then allocate(u_ice_aux(n_size), v_ice_aux(n_size)) allocate(alpha_evp_array(myDim_elem2D)) @@ -184,7 +184,7 @@ subroutine ice_array_setup(partit, mesh) ! eps11=0.0_WP ! eps12=0.0_WP ! eps22=0.0_WP - t_skin=0.0_WP +! t_skin=0.0_WP ! rhs_mdiv=0.0_WP ! rhs_adiv=0.0_WP ! rhs_msdiv=0.0_WP diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 5620a6bd8..66c887773 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -125,7 +125,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP) :: ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout, subli real(kind=WP) :: lat integer :: i, j, elem - real(kind=WP), allocatable :: ustar_aux(:) + !!PS real(kind=WP), allocatable :: ustar_aux(:) real(kind=WP) lid_clo !_____________________________________________________________________________ ! pointer on necessary derived types @@ -135,7 +135,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old - real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn + real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn, thdgr_old, t_skin, ustar_aux myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -151,6 +151,10 @@ subroutine thermodynamics(ice, partit, mesh) m_snow_old => ice%data(3)%values_old(:) thdgr => ice%thermo%thdgr thdgrsn => ice%thermo%thdgrsn + thdgr_old => ice%thermo%thdgr_old + t_skin => ice%thermo%t_skin + ustar_aux => ice%thermo%ustar + !_____________________________________________________________________________ rsss=ref_sss @@ -160,7 +164,7 @@ subroutine thermodynamics(ice, partit, mesh) ! ================ ! Friction velocity ! ================ - allocate(ustar_aux(myDim_nod2D+eDim_nod2D)) +! allocate(ustar_aux(myDim_nod2D+eDim_nod2D)) ustar_aux=0.0_WP DO i=1, myDim_nod2D ustar=0.0_WP @@ -262,9 +266,7 @@ subroutine thermodynamics(ice, partit, mesh) prec_snow(i) = snow end if - end do - - deallocate(ustar_aux) + end do end subroutine thermodynamics ! !=================================================================== From 64f5e7c214139c112a466648a73c837640a9871f Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 22 Nov 2021 11:29:37 +0100 Subject: [PATCH 636/909] exchange varaibles T_oce_arrayy, S_oce_array, elevation, u_w, v_w with corresponding ice derived type varaibles --- src/cavity_param.F90 | 1 - src/fesom_module.F90 | 2 +- src/gen_bulk_formulae.F90 | 34 ++++++++++++++++------ src/gen_forcing_couple.F90 | 12 ++++---- src/ice_EVP.F90 | 26 +++++++++-------- src/ice_maEVP.F90 | 13 +++++++-- src/ice_modules.F90 | 6 ++-- src/ice_oce_coupling.F90 | 58 ++++++++++++++++++++++---------------- src/ice_setup_step.F90 | 12 ++++---- src/ice_thermo_cpl.F90 | 20 ++++++++----- src/ice_thermo_oce.F90 | 5 ++++ src/oce_spp.F90 | 6 ++-- 12 files changed, 124 insertions(+), 71 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 65dbf5152..a3c31b96c 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -426,7 +426,6 @@ subroutine cavity_momentum_fluxes(dynamics, partit, mesh) USE MOD_DYN use o_PARAM , only: density_0, C_d, WP use o_ARRAYS, only: stress_surf, stress_node_surf - use i_ARRAYS, only: u_w, v_w implicit none !___________________________________________________________________________ diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 06b1a9d6d..60eda1d49 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -344,7 +344,7 @@ subroutine fesom_runloop(current_nsteps) if(use_ice) then !___compute fluxes from ocean to ice________________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(f%dynamics, f%tracers, f%partit, f%mesh) + call ocean2ice(f%ice, f%dynamics, f%tracers, f%partit, f%mesh) !___compute update of atmospheric forcing____________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' diff --git a/src/gen_bulk_formulae.F90 b/src/gen_bulk_formulae.F90 index 12075a59e..6b544c3ad 100755 --- a/src/gen_bulk_formulae.F90 +++ b/src/gen_bulk_formulae.F90 @@ -3,6 +3,7 @@ MODULE gen_bulk use mod_mesh USE MOD_PARTIT USE MOD_PARSUP + USE MOD_ICE use i_therm_param use i_arrays use g_forcing_arrays @@ -19,7 +20,7 @@ MODULE gen_bulk ! ! !_______________________________________________________________________________ -subroutine ncar_ocean_fluxes_mode_fesom14(partit, mesh) +subroutine ncar_ocean_fluxes_mode_fesom14(ice, partit, mesh) ! Compute drag coefficient and the transfer coefficients for evaporation ! and sensible heat according to LY2004. ! In this routine we assume air temperature and humidity are at the same @@ -47,9 +48,14 @@ subroutine ncar_ocean_fluxes_mode_fesom14(partit, mesh) real(kind=WP), parameter :: grav = 9.80_WP, vonkarm = 0.40_WP real(kind=WP), parameter :: q1=640380._WP, q2=-5107.4_WP ! for saturated surface specific humidity real(kind=WP), parameter :: zz = 10.0_WP - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice + real(kind=WP), dimension(:) , pointer :: T_oc_array, u_w, v_w + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) + T_oc_array => ice%srfoce_temp(:) + do i=1, partit%myDim_nod2d+partit%eDim_nod2d t=tair(i) + tmelt ! degree celcium to Kelvin ts=t_oc_array(i) + tmelt ! @@ -114,7 +120,7 @@ end subroutine ncar_ocean_fluxes_mode_fesom14 ! ! !_______________________________________________________________________________ -subroutine ncar_ocean_fluxes_mode(partit, mesh) +subroutine ncar_ocean_fluxes_mode(ice, partit, mesh) ! Compute drag coefficient and the transfer coefficients for evaporation ! and sensible heat according to LY2004. ! with updates from Large et al. 2009 for the computation of the wind drag @@ -153,8 +159,14 @@ subroutine ncar_ocean_fluxes_mode(partit, mesh) real(kind=WP) :: test, cd_prev, inc_ratio=1.0e-4 real(kind=WP) :: t_prev, q_prev - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice + real(kind=WP), dimension(:) , pointer :: T_oc_array, u_w, v_w + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) + T_oc_array => ice%srfoce_temp(:) + do i=1,partit%myDim_nod2d+partit%eDim_nod2d if (mesh%ulevels_nod2d(i)>1) cycle @@ -342,13 +354,14 @@ subroutine cal_wind_drag_coeff(partit) end subroutine cal_wind_drag_coeff ! -SUBROUTINE nemo_ocean_fluxes_mode(partit) +SUBROUTINE nemo_ocean_fluxes_mode(ice, partit) !!---------------------------------------------------------------------- !! ** Purpose : Change model variables according to atm fluxes !! source of original code: NEMO 3.1.1 + NCAR !!---------------------------------------------------------------------- IMPLICIT NONE - type(t_partit), intent(in) :: partit + type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice integer :: i real(wp) :: rtmp ! temporal real real(wp) :: wndm ! delta of wind module and ocean curent module @@ -367,6 +380,11 @@ SUBROUTINE nemo_ocean_fluxes_mode(partit) t_zu, & ! air temp. shifted at zu [K] q_zu ! spec. hum. shifted at zu [kg/kg] real(wp) :: zevap, zqsb, zqla, zqlw + real(kind=WP), dimension(:) , pointer :: u_w, v_w, t_oc_array + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) + t_oc_array => ice%srfoce_temp(:) + !!$OMP PARALLEL !!$OMP DO do i = 1, partit%myDim_nod2D+partit%eDim_nod2d diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index b26a83767..76e827b20 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -128,14 +128,16 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) !character(500) :: file !_____________________________________________________________________________ ! pointer on necessary derived types - real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: u_ice, v_ice, u_w, v_w #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) - + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) + t1=MPI_Wtime() #ifdef __oasis if (firstcall) then @@ -341,7 +343,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) cd_atm_oce_arr=0.0_WP ch_atm_oce_arr=0.0_WP ce_atm_oce_arr=0.0_WP - call ncar_ocean_fluxes_mode(partit, mesh) + call ncar_ocean_fluxes_mode(ice, partit, mesh) elseif(AOMIP_drag_coeff) then cd_atm_oce_arr=cd_atm_ice_arr end if diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index b24cf5050..1eab293a2 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -491,21 +491,25 @@ subroutine EVPdynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: u_ice_old, v_ice_old real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m + real(kind=WP), dimension(:), pointer :: u_w, v_w, elevation #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - u_ice_old => ice%uvice_old(1,:) - v_ice_old => ice%uvice_old(2,:) - u_rhs_ice => ice%uvice_rhs(1,:) - v_rhs_ice => ice%uvice_rhs(2,:) - rhs_a => ice%data(1)%values_rhs(:) - rhs_m => ice%data(2)%values_rhs(:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + u_ice_old => ice%uvice_old(1,:) + v_ice_old => ice%uvice_old(2,:) + u_rhs_ice => ice%uvice_rhs(1,:) + v_rhs_ice => ice%uvice_rhs(2,:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) + elevation => ice%srfoce_ssh(:) !_______________________________________________________________________________ ! If Icepack is used, always update the tracers diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 9c72f825b..69fbd28d9 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -214,6 +214,7 @@ subroutine ssh2rhs(ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m + real(kind=WP), dimension(:), pointer :: elevation #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -222,6 +223,7 @@ subroutine ssh2rhs(ice, partit, mesh) m_snow => ice%data(3)%values(:) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) + elevation => ice%srfoce_ssh !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -413,7 +415,9 @@ subroutine EVPdynamics_m(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 - real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m + real(kind=WP), dimension(:), pointer :: u_w, v_w + real(kind=WP), dimension(:), pointer :: elevation #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -433,7 +437,9 @@ subroutine EVPdynamics_m(ice, partit, mesh) v_rhs_ice => ice%uvice_rhs(2,:) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) - + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) + elevation => ice%srfoce_ssh(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) @@ -960,6 +966,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice + real(kind=WP), dimension(:), pointer :: u_w, v_w #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -971,6 +978,8 @@ subroutine EVPdynamics_a(ice, partit, mesh) m_snow => ice%data(3)%values(:) u_rhs_ice => ice%uvice_rhs(1,:) v_rhs_ice => ice%uvice_rhs(2,:) + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) !___________________________________________________________________________ steps=evp_rheol_steps diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index c7633db07..cfb202a36 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -66,10 +66,10 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: ths_temp - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux @@ -86,7 +86,7 @@ MODULE i_ARRAYS #endif #endif /* (__oasis) || defined (__ifsinterface)*/ - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: S_oc_array, T_oc_array +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: S_oc_array, T_oc_array REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_x REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_y REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_x diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 5b0f913ab..b3e43d8dc 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -1,18 +1,19 @@ module ocean2ice_interface - interface - subroutine ocean2ice(dynamics, tracers, partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - use MOD_DYN - type(t_dyn) , intent(in) , target :: dynamics - type(t_tracer), intent(inout), target :: tracers - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - - end subroutine - end interface + interface + subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE MOD_DYN + USE MOD_ICE + type(t_ice) , intent(in) , target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module module oce_fluxes_interface @@ -75,14 +76,16 @@ subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) real(kind=WP) :: aux, aux1 !___________________________________________________________________________ ! pointer on necessary derived types - real(kind=WP), dimension(:), pointer :: u_ice, v_ice, a_ice + real(kind=WP), dimension(:), pointer :: u_ice, v_ice, a_ice, u_w, v_w #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) - a_ice => ice%data(1)%values(:) + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) ! ================== ! momentum flux: @@ -137,7 +140,7 @@ end subroutine oce_fluxes_mom ! ! !_______________________________________________________________________________ -subroutine ocean2ice(dynamics, tracers, partit, mesh) +subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) ! transmits the relevant fields from the ocean to the ice model @@ -145,12 +148,14 @@ subroutine ocean2ice(dynamics, tracers, partit, mesh) use i_ARRAYS use MOD_MESH use MOD_DYN + use MOD_ICE use MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP USE g_CONFIG use g_comm_auto implicit none + type(t_ice) , intent(in) , target :: ice type(t_dyn) , intent(in) , target :: dynamics type(t_tracer), intent(inout), target :: tracers type(t_partit), intent(inout), target :: partit @@ -159,16 +164,21 @@ subroutine ocean2ice(dynamics, tracers, partit, mesh) real(kind=WP) :: uw, vw, vol real(kind=WP), dimension(:,:) , pointer :: temp, salt real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w, elevation #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - temp => tracers%data(1)%values(:,:) - salt => tracers%data(2)%values(:,:) - UV => dynamics%uv(:,:,:) - + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + UV => dynamics%uv(:,:,:) + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) + T_oc_array => ice%srfoce_temp(:) + S_oc_array => ice%srfoce_salt(:) + elevation => ice%srfoce_ssh(:) + ! the arrays in the ice model are renamed - if (ice_update) then do n=1, myDim_nod2d+eDim_nod2d if (ulevels_nod2D(n)>1) cycle diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 2e69b2309..06bc15936 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -192,21 +192,21 @@ subroutine ice_array_setup(partit, mesh) ! Allocate memory for arrays used in coupling ! with ocean and atmosphere - allocate(S_oc_array(n_size), T_oc_array(n_size)) ! copies of ocean T ans S - S_oc_array = 0.0_WP - T_oc_array = 0.0_WP +! allocate(S_oc_array(n_size), T_oc_array(n_size)) ! copies of ocean T ans S +! S_oc_array = 0.0_WP +! T_oc_array = 0.0_WP allocate(fresh_wa_flux(n_size), net_heat_flux(n_size)) fresh_wa_flux = 0.0_WP net_heat_flux = 0.0_WP allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) stress_atmice_x = 0.0_WP stress_atmice_y = 0.0_WP - allocate(elevation(n_size)) ! =ssh of ocean - elevation = 0.0_WP +! allocate(elevation(n_size)) ! =ssh of ocean +! elevation = 0.0_WP allocate(stress_iceoce_x(n_size), stress_iceoce_y(n_size)) stress_iceoce_x = 0.0_WP stress_iceoce_y = 0.0_WP - allocate(U_w(n_size), V_w(n_size)) ! =uf and vf of ocean at surface nodes +! allocate(U_w(n_size), V_w(n_size)) ! =uf and vf of ocean at surface nodes #if defined (__oasis) || defined (__ifsinterface) allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 7a3be4c49..3ec666545 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -64,17 +64,23 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: u_ice, v_ice real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn + real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - thdgr => ice%thermo%thdgr - thdgrsn => ice%thermo%thdgrsn + u_ice => ice%uvice(1,:) + v_ice => ice%uvice(2,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + thdgr => ice%thermo%thdgr + thdgrsn => ice%thermo%thdgrsn + T_oc_array => ice%srfoce_temp(:) + S_oc_array => ice%srfoce_salt(:) + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) + !_____________________________________________________________________________ rsss = ref_sss diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 66c887773..8c4df04c0 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -136,6 +136,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn, thdgr_old, t_skin, ustar_aux + real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -154,6 +155,10 @@ subroutine thermodynamics(ice, partit, mesh) thdgr_old => ice%thermo%thdgr_old t_skin => ice%thermo%t_skin ustar_aux => ice%thermo%ustar + u_w => ice%srfoce_uv(1,:) + v_w => ice%srfoce_uv(2,:) + T_oc_array => ice%srfoce_temp(:) + S_oc_array => ice%srfoce_salt(:) !_____________________________________________________________________________ rsss=ref_sss diff --git a/src/oce_spp.F90 b/src/oce_spp.F90 index 2183170f8..e186193e1 100644 --- a/src/oce_spp.F90 +++ b/src/oce_spp.F90 @@ -16,7 +16,6 @@ subroutine cal_rejected_salt(ice, partit, mesh) USE MOD_PARSUP use g_comm_auto use o_tracers -use i_ARRAYS, only: S_oc_array use i_therm_param, only: rhoice, rhowat, Sice use g_config, only: dt implicit none @@ -26,12 +25,13 @@ subroutine cal_rejected_salt(ice, partit, mesh) type(t_ice) , intent(in), target :: ice type(t_mesh) , intent(in), target :: mesh type(t_partit), intent(in), target :: partit -real(kind=WP), dimension(:) , pointer :: thdgr +real(kind=WP), dimension(:) , pointer :: thdgr, S_oc_array #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -thdgr => ice%thermo%thdgr +thdgr => ice%thermo%thdgr +S_oc_array => ice%srfoce_salt aux=rhoice/rhowat*dt do row=1, myDim_nod2d +eDim_nod2D! myDim is sufficient From 1b28f405aab8378c6236b6fa0eb272e55ceb4b9f Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 22 Nov 2021 12:21:37 +0100 Subject: [PATCH 637/909] OMP im io_meandata->update_means. Same performance as MPI --- src/io_meandata.F90 | 80 ++++++++++++++++++++++++++++++++------------- 1 file changed, 58 insertions(+), 22 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index f40ef0f78..3e76c0c12 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -783,27 +783,50 @@ subroutine update_means implicit none type(Meandata), pointer :: entry integer :: n - - do n=1, io_NSTREAMS + integer :: I, J + DO n=1, io_NSTREAMS entry=>io_stream(n) !_____________ compute in 8 byte accuracy _________________________ - if (entry%accuracy == i_real8) then - if (entry%flip) then - entry%local_values_r8 = entry%local_values_r8 + transpose(entry%ptr3(1:size(entry%local_values_r8,dim=2),1:size(entry%local_values_r8,dim=1))) - else - entry%local_values_r8 = entry%local_values_r8 + entry%ptr3(1:size(entry%local_values_r8,dim=1),1:size(entry%local_values_r8,dim=2)) - end if + IF (entry%accuracy == i_real8) then + IF (entry%flip) then +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) + DO J=1, size(entry%local_values_r8,dim=2) + DO I=1, size(entry%local_values_r8,dim=1) + entry%local_values_r8(I,J)=entry%local_values_r8(I,J)+entry%ptr3(J,I) + END DO + END DO +!$OMP END PARALLEL DO + ELSE +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) + DO J=1, size(entry%local_values_r8,dim=2) + DO I=1, size(entry%local_values_r8,dim=1) + entry%local_values_r8(I,J)=entry%local_values_r8(I,J)+entry%ptr3(I,J) + END DO + END DO +!$OMP END PARALLEL DO + END IF !_____________ compute in 4 byte accuracy _________________________ - elseif (entry%accuracy == i_real4) then - if (entry%flip) then - entry%local_values_r4 = entry%local_values_r4 + transpose(real(entry%ptr3(1:size(entry%local_values_r4,dim=2),1:size(entry%local_values_r4,dim=1)),real32)) - else - entry%local_values_r4 = entry%local_values_r4 + real(entry%ptr3(1:size(entry%local_values_r4,dim=1),1:size(entry%local_values_r4,dim=2)),real32) - end if - endif - - entry%addcounter=entry%addcounter+1 - end do + ELSE IF (entry%accuracy == i_real4) then + IF (entry%flip) then +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) + DO J=1, size(entry%local_values_r4,dim=2) + DO I=1, size(entry%local_values_r4,dim=1) + entry%local_values_r4(I,J)=entry%local_values_r4(I,J)+real(entry%ptr3(J,I), real32) + END DO + END DO +!$OMP END PARALLEL DO + ELSE +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J) + DO J=1, size(entry%local_values_r4,dim=2) + DO I=1, size(entry%local_values_r4,dim=1) + entry%local_values_r4(I,J)=entry%local_values_r4(I,J)+real(entry%ptr3(I,J), real32) + END DO + END DO +!$OMP END PARALLEL DO + END IF + END IF + entry%addcounter=entry%addcounter+1 + END DO end subroutine ! !-------------------------------------------------------------------------------------------- @@ -823,6 +846,7 @@ subroutine output(istep, dynamics, tracers, partit, mesh) integer :: istep logical, save :: lfirst=.true. integer :: n, k + integer :: i, j !for OMP loops logical :: do_output type(Meandata), pointer :: entry type(t_mesh) , intent(in) , target :: mesh @@ -906,11 +930,23 @@ subroutine output(istep, dynamics, tracers, partit, mesh) end if if (entry%accuracy == i_real8) then - entry%local_values_r8_copy = entry%local_values_r8 /real(entry%addcounter,real64) ! compute_means - entry%local_values_r8 = 0._real64 ! clean_meanarrays + DO I=1, size(entry%local_values_r8,dim=1) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(J) + DO J=1, size(entry%local_values_r8,dim=2) + entry%local_values_r8_copy(I,J) = entry%local_values_r8(I,J) /real(entry%addcounter,real64) ! compute_means + entry%local_values_r8(I,J) = 0._real64 ! clean_meanarrays + END DO +!$OMP END PARALLEL DO + END DO else if (entry%accuracy == i_real4) then - entry%local_values_r4_copy = entry%local_values_r4 /real(entry%addcounter,real32) ! compute_means - entry%local_values_r4 = 0._real32 ! clean_meanarrays + DO I=1, size(entry%local_values_r4,dim=1) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(J) + DO J=1, size(entry%local_values_r4,dim=2) + entry%local_values_r4_copy(I,J) = entry%local_values_r4(I,J) /real(entry%addcounter,real32) ! compute_means + entry%local_values_r4(I,J) = 0._real32 ! clean_meanarrays + END DO +!$OMP END PARALLEL DO + END DO end if entry%addcounter = 0 ! clean_meanarrays entry%ctime_copy = ctime From 9dee72572b2761764f33ec7effd1615b3360f74c Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 22 Nov 2021 12:32:52 +0100 Subject: [PATCH 638/909] further OMP speed up in io_meandata.F90 --- src/io_meandata.F90 | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 3e76c0c12..7a17ec4f0 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -930,23 +930,23 @@ subroutine output(istep, dynamics, tracers, partit, mesh) end if if (entry%accuracy == i_real8) then - DO I=1, size(entry%local_values_r8,dim=1) -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(J) - DO J=1, size(entry%local_values_r8,dim=2) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) + DO J=1, size(entry%local_values_r8,dim=2) + DO I=1, size(entry%local_values_r8,dim=1) entry%local_values_r8_copy(I,J) = entry%local_values_r8(I,J) /real(entry%addcounter,real64) ! compute_means entry%local_values_r8(I,J) = 0._real64 ! clean_meanarrays END DO -!$OMP END PARALLEL DO END DO +!$OMP END PARALLEL DO else if (entry%accuracy == i_real4) then - DO I=1, size(entry%local_values_r4,dim=1) -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(J) - DO J=1, size(entry%local_values_r4,dim=2) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) + DO J=1, size(entry%local_values_r4,dim=2) + DO I=1, size(entry%local_values_r4,dim=1) entry%local_values_r4_copy(I,J) = entry%local_values_r4(I,J) /real(entry%addcounter,real32) ! compute_means entry%local_values_r4(I,J) = 0._real32 ! clean_meanarrays END DO -!$OMP END PARALLEL DO END DO +!$OMP END PARALLEL DO end if entry%addcounter = 0 ! clean_meanarrays entry%ctime_copy = ctime From 80be8c101ef54fc1e93c7f99352abeffb07f186f Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 22 Nov 2021 14:27:31 +0100 Subject: [PATCH 639/909] OMP in ice_oce_coupling for flux computations. some routines in gen_support.F90 have been updated as well. --- src/gen_support.F90 | 28 ++++++-- src/ice_oce_coupling.F90 | 139 ++++++++++++++++++++++++------------- src/oce_shortwave_pene.F90 | 17 +++-- 3 files changed, 122 insertions(+), 62 deletions(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index aaf46c8b3..eee296fa0 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -316,17 +316,24 @@ subroutine integrate_nod_2D(data, int2D, partit, mesh) real(kind=WP), intent(inout) :: int2D integer :: row - real(kind=WP) :: lval + real(kind=WP) :: lval_omp, lval #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - lval=0.0_WP + +lval=0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row, lval_omp) + lval_omp=0.0_WP +!$OMP DO do row=1, myDim_nod2D - !!PS lval=lval+data(row)*area(1,row) - lval=lval+data(row)*areasvol(ulevels_nod2D(row),row) + lval_omp=lval_omp+data(row)*areasvol(ulevels_nod2D(row),row) end do - +!$OMP END DO +!$OMP CRITICAL +lval=lval+lval_omp +!$OMP END CRITICAL +!$OMP END PARALLEL int2D=0.0_WP call MPI_AllREDUCE(lval, int2D, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & MPI_COMM_FESOM, MPIerr) @@ -346,19 +353,26 @@ subroutine integrate_nod_3D(data, int3D, partit, mesh) real(kind=WP), intent(inout) :: int3D integer :: k, row - real(kind=WP) :: lval + real(kind=WP) :: lval_omp, lval #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" lval=0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row, k, lval_omp) + lval_omp=0.0_WP +!$OMP DO do row=1, myDim_nod2D - !!PS do k=1, nlevels_nod2D(row)-1 do k=ulevels_nod2D(row), nlevels_nod2D(row)-1 lval=lval+data(k, row)*areasvol(k,row)*hnode_new(k,row) ! --> TEST_cavity end do end do +!$OMP END DO +!$OMP CRITICAL +lval=lval+lval_omp +!$OMP END CRITICAL +!$OMP END PARALLEL int3D=0.0_WP call MPI_AllREDUCE(lval, int3D, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & MPI_COMM_FESOM, MPIerr) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index ce4fd3eb9..3f2e38c12 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -55,7 +55,7 @@ subroutine oce_fluxes_mom(dynamics, partit, mesh) implicit none integer :: n, elem, elnodes(3),n1 - real(kind=WP) :: aux, aux1 + real(kind=WP) :: aux type(t_dyn) , intent(in) , target :: dynamics type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh @@ -75,6 +75,8 @@ subroutine oce_fluxes_mom(dynamics, partit, mesh) aice_out=a_ice) #endif +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, elem, elnodes, n1, aux) +!$OMP DO do n=1,myDim_nod2D+eDim_nod2D !_______________________________________________________________________ ! if cavity node skip it @@ -94,8 +96,9 @@ subroutine oce_fluxes_mom(dynamics, partit, mesh) stress_node_surf(1,n) = stress_iceoce_x(n)*a_ice(n) + stress_atmoce_x(n)*(1.0_WP-a_ice(n)) stress_node_surf(2,n) = stress_iceoce_y(n)*a_ice(n) + stress_atmoce_y(n)*(1.0_WP-a_ice(n)) end do - +!$OMP END DO !___________________________________________________________________________ +!$OMP DO DO elem=1,myDim_elem2D !_______________________________________________________________________ ! if cavity element skip it @@ -107,10 +110,9 @@ subroutine oce_fluxes_mom(dynamics, partit, mesh) stress_atmoce_x(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP stress_surf(2,elem)=sum(stress_iceoce_y(elnodes)*a_ice(elnodes) + & stress_atmoce_y(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP - !!PS stress_surf(1,elem)=sum(stress_node_surf(1,elnodes))/3.0_WP - !!PS stress_surf(2,elem)=sum(stress_node_surf(2,elnodes))/3.0_WP END DO - +!$OMP END DO +!$OMP END PARALLEL !___________________________________________________________________________ if (use_cavity) call cavity_momentum_fluxes(dynamics, partit, mesh) @@ -149,28 +151,36 @@ subroutine ocean2ice(dynamics, tracers, partit, mesh) UV => dynamics%uv(:,:,:) ! the arrays in the ice model are renamed - + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, elem, k, uw, vw, vol) if (ice_update) then +!$OMP DO do n=1, myDim_nod2d+eDim_nod2d if (ulevels_nod2D(n)>1) cycle T_oc_array(n) = temp(1,n) S_oc_array(n) = salt(1,n) elevation(n) = hbar(n) end do +!$OMP END DO else +!$OMP DO do n=1, myDim_nod2d+eDim_nod2d if (ulevels_nod2D(n)>1) cycle T_oc_array(n) = (T_oc_array(n)*real(ice_steps_since_upd,WP)+temp(1,n))/real(ice_steps_since_upd+1,WP) S_oc_array(n) = (S_oc_array(n)*real(ice_steps_since_upd,WP)+salt(1,n))/real(ice_steps_since_upd+1,WP) elevation(n) = (elevation(n) *real(ice_steps_since_upd,WP)+ hbar(n))/real(ice_steps_since_upd+1,WP) - !NR !PS elevation(n)=(elevation(n)*real(ice_steps_since_upd)+eta_n(n))/real(ice_steps_since_upd+1,WP) - !NR elevation(n)=(elevation(n)*real(ice_steps_since_upd)+hbar(n))/real(ice_steps_since_upd+1,WP) !PS end do -!!PS elevation(:)= (elevation(:)*real(ice_steps_since_upd)+hbar(:))/real(ice_steps_since_upd+1,WP) +!$OMP END DO end if - - u_w = 0.0_WP - v_w = 0.0_WP + +!$OMP DO + do n=1, myDim_nod2d+eDim_nod2d + u_w(n) = 0.0_WP + v_w(n) = 0.0_WP + end do +!$OMP END DO + +!$OMP DO do n=1, myDim_nod2d if (ulevels_nod2D(n)>1) cycle uw = 0.0_WP @@ -179,14 +189,10 @@ subroutine ocean2ice(dynamics, tracers, partit, mesh) do k=1, nod_in_elem2D_num(n) elem=nod_in_elem2D(k,n) if (ulevels(elem)>1) cycle - !uw = uw+ UV(1,1,elem)*elem_area(elem) - !vw = vw+ UV(2,1,elem)*elem_area(elem) vol = vol + elem_area(elem) uw = uw+ UV(1,1,elem)*elem_area(elem) vw = vw+ UV(2,1,elem)*elem_area(elem) end do - !!PS uw = uw/area(1,n)/3.0_WP - !!PS vw = vw/area(1,n)/3.0_WP uw = uw/vol vw = vw/vol @@ -198,6 +204,8 @@ subroutine ocean2ice(dynamics, tracers, partit, mesh) v_w(n)=(v_w(n)*real(ice_steps_since_upd,WP)+vw)/real(ice_steps_since_upd+1,WP) endif end do +!$OMP END DO +!$OMP END PARALLEL call exchange_nod(u_w, v_w, partit) end subroutine ocean2ice ! @@ -241,8 +249,13 @@ subroutine oce_fluxes(dynamics, tracers, partit, mesh) salt=>tracers%data(2)%values(:,:) allocate(flux(myDim_nod2D+eDim_nod2D)) - flux = 0.0_WP - + +!$OMP PARALLEL DO + do n=1, myDim_nod2d+eDim_nod2d + flux(n) = 0.0_WP + end do +!$OMP END PARALLEL DO + ! ================== ! heat and freshwater ! ================== @@ -276,19 +289,23 @@ subroutine oce_fluxes(dynamics, tracers, partit, mesh) call init_flux_atm_ocn() #else - heat_flux = -net_heat_flux - water_flux = -fresh_wa_flux -#endif - heat_flux_in=heat_flux ! sw_pene will change the heat_flux - if (use_cavity) call cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) - !!PS if (use_cavity) call cavity_heat_water_fluxes_2eq(mesh) - -!!PS where(ulevels_nod2D>1) heat_flux=0.0_WP -!!PS where(ulevels_nod2D>1) water_flux=0.0_WP - +!$OMP PARALLEL DO + do n=1, myDim_nod2d+eDim_nod2d + heat_flux(n) = -net_heat_flux(n) + water_flux(n) = -fresh_wa_flux(n) + end do +!$OMP END PARALLEL DO +#endif + +!$OMP PARALLEL DO + do n=1, myDim_nod2d+eDim_nod2d + heat_flux_in(n)=heat_flux(n) ! sw_pene will change the heat_flux + end do +!$OMP END PARALLEL DO + if (use_cavity) call cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) !___________________________________________________________________________ call exchange_nod(heat_flux, water_flux, partit) - +!$OMP BARRIER !___________________________________________________________________________ ! on freshwater inflow/outflow or virtual salinity: ! 1. In zlevel & zstar the freshwater flux is applied in the update of the @@ -305,55 +322,71 @@ subroutine oce_fluxes(dynamics, tracers, partit, mesh) ! balance virtual salt flux if (use_virt_salt) then ! will remain zero otherwise rsss=ref_sss +!$OMP PARALLEL DO do n=1, myDim_nod2D+eDim_nod2D - !!PS if (ref_sss_local) rsss = salt(1,n) if (ref_sss_local) rsss = salt(ulevels_nod2d(n),n) virtual_salt(n)=rsss*water_flux(n) end do - +!$OMP END PARALLEL DO if (use_cavity) then flux = virtual_salt where (ulevels_nod2d > 1) flux = 0.0_WP call integrate_nod(flux, net, partit, mesh) else call integrate_nod(virtual_salt, net, partit, mesh) - end if - virtual_salt=virtual_salt-net/ocean_area + end if +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + virtual_salt(n)=virtual_salt(n)-net/ocean_area + end do +!$OMP END PARALLEL DO end if - where (ulevels_nod2d == 1) - dens_flux=sw_alpha(1,:) * heat_flux_in / vcpw + sw_beta(1, :) * (relax_salt + water_flux * salt(1,:)) - elsewhere - dens_flux=0.0_WP - end where +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + if (ulevels_nod2d(n) == 1) then + dens_flux(n)=sw_alpha(1,n) * heat_flux_in(n) / vcpw + sw_beta(1, n) * (relax_salt(n) + water_flux(n) * salt(1,n)) + else + dens_flux(n)=0.0_WP + end if + end do +!$OMP END PARALLEL DO !___________________________________________________________________________ ! balance SSS restoring to climatology - if (use_cavity) then + if (use_cavity) then do n=1, myDim_nod2D+eDim_nod2D relax_salt(n) = 0.0_WP - if (ulevels_nod2d(n)>1) cycle - !!PS relax_salt(n)=surf_relax_S*(Ssurf(n)-salt(1,n)) + if (ulevels_nod2d(n) > 1) cycle relax_salt(n)=surf_relax_S*(Ssurf(n)-salt(ulevels_nod2d(n),n)) end do else +!$OMP PARALLEL DO do n=1, myDim_nod2D+eDim_nod2D - !!PS relax_salt(n)=surf_relax_S*(Ssurf(n)-salt(1,n)) relax_salt(n)=surf_relax_S*(Ssurf(n)-salt(ulevels_nod2d(n),n)) end do +!$OMP END PARALLEL DO end if ! --> if use_cavity=.true. relax_salt anyway zero where is cavity see above call integrate_nod(relax_salt, net, partit, mesh) - relax_salt=relax_salt-net/ocean_area +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + relax_salt(n)=relax_salt(n)-net/ocean_area + end do +!$OMP END PARALLEL DO !___________________________________________________________________________ ! enforce the total freshwater/salt flux be zero ! 1. water flux ! if (.not. use_virt_salt) can be used! ! we conserve only the fluxes from the database plus evaporation. - flux = evaporation-ice_sublimation & ! the ice2atmos subplimation does not contribute to the freshwater flux into the ocean - +prec_rain & - +prec_snow*(1.0_WP-a_ice_old) & - +runoff +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + flux(n) = evaporation(n)-ice_sublimation(n) & ! the ice2atmos subplimation does not contribute to the freshwater flux into the ocean + +prec_rain(n) & + +prec_snow(n)*(1.0_WP-a_ice_old(n)) & + +runoff(n) + end do +!$OMP END PARALLEL DO ! --> In case of zlevel and zstar and levitating sea ice, sea ice is just sitting ! on top of the ocean without displacement of water, there the thermodynamic ! growth rates of sea ice have to be taken into account to preserve the fresh water @@ -364,7 +397,11 @@ subroutine oce_fluxes(dynamics, tracers, partit, mesh) ! salinity flux !!PS if ( .not. use_floatice .and. .not. use_virt_salt) then if (.not. use_virt_salt) then - flux = flux-thdgr*rhoice*inv_rhowat-thdgrsn*rhosno*inv_rhowat +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + flux(n) = flux(n)-thdgr(n)*rhoice*inv_rhowat-thdgrsn(n)*rhosno*inv_rhowat + end do +!$OMP END PARALLEL DO end if ! Also balance freshwater flux that come from ocean-cavity boundary @@ -392,7 +429,11 @@ subroutine oce_fluxes(dynamics, tracers, partit, mesh) ! ocean where (ulevels_nod2d == 1) water_flux=water_flux+net/ocean_area else - water_flux=water_flux+net/ocean_area +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + water_flux(n)=water_flux(n)+net/ocean_area + end do +!$OMP END PARALLEL DO end if !___________________________________________________________________________ diff --git a/src/oce_shortwave_pene.F90 b/src/oce_shortwave_pene.F90 index d548d8156..84a178001 100644 --- a/src/oce_shortwave_pene.F90 +++ b/src/oce_shortwave_pene.F90 @@ -29,8 +29,16 @@ subroutine cal_shortwave_rad(partit, mesh) #include "associate_part_ass.h" #include "associate_mesh_ass.h" - sw_3d=0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(m, n2, n3, k, nzmax, nzmin, swsurf, aux, c, c2, c3, c4, c5, v1, v2, sc1, sc2) +!$OMP DO + do n2=1, myDim_nod2D+eDim_nod2D + do k=1, nl + sw_3d(k, n2)=0.0_WP + end do + end do +!$OMP END DO !_____________________________________________________________________________ +!$OMP DO do n2=1, myDim_nod2D+eDim_nod2D !__________________________________________________________________________ @@ -63,15 +71,12 @@ subroutine cal_shortwave_rad(partit, mesh) v1=0.321_WP+v1 sc1=1.54_WP-0.197_WP*c+0.166_WP*c2-0.252_WP*c3-0.055_WP*c4+0.042_WP*c5 sc2=7.925_WP-6.644_WP*c+3.662_WP*c2-1.815_WP*c3-0.218_WP*c4+0.502_WP*c5 - ! convert from heat flux [W/m2] to temperature flux [K m/s] swsurf=swsurf/vcpw ! vis. sw. rad. in the colume nzmax=(nlevels_nod2D(n2)) nzmin=(ulevels_nod2D(n2)) - !!PS sw_3d(1, n2)=swsurf sw_3d(nzmin, n2)=swsurf - !!PS do k=2, nzmax do k=nzmin+1, nzmax aux=(v1*exp(zbar_3d_n(k,n2)/sc1)+v2*exp(zbar_3d_n(k,n2)/sc2)) sw_3d(k, n2)=swsurf*aux @@ -95,6 +100,6 @@ subroutine cal_shortwave_rad(partit, mesh) !end if end do -!call par_ex -!stop +!$OMP END DO +!$OMP END PARALLEL end subroutine cal_shortwave_rad From 913ef83553f9a53c8f6f188cb6e5337de5c97ac2 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 22 Nov 2021 15:49:59 +0100 Subject: [PATCH 640/909] OMP in surfce forcing / flux computation --- src/gen_bulk_formulae.F90 | 29 ++++++++++++++++++----------- src/gen_forcing_couple.F90 | 35 +++++++++++++++++++++-------------- src/gen_surface_forcing.F90 | 20 ++++++++++---------- 3 files changed, 49 insertions(+), 35 deletions(-) diff --git a/src/gen_bulk_formulae.F90 b/src/gen_bulk_formulae.F90 index 12075a59e..3dbc25341 100755 --- a/src/gen_bulk_formulae.F90 +++ b/src/gen_bulk_formulae.F90 @@ -49,7 +49,10 @@ subroutine ncar_ocean_fluxes_mode_fesom14(partit, mesh) real(kind=WP), parameter :: zz = 10.0_WP type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, m, cd_n10, ce_n10, ch_n10, cd_n10_rt, cd, ce, ch, cd_rt, zeta, x2, x, psi_m, psi_h, stab, & +!$OMP t, ts, q, qs, u, u10, tv, xx, dux, dvy, tstar, qstar, ustar, bstar ) +!$OMP DO do i=1, partit%myDim_nod2d+partit%eDim_nod2d t=tair(i) + tmelt ! degree celcium to Kelvin ts=t_oc_array(i) + tmelt ! @@ -104,12 +107,12 @@ subroutine ncar_ocean_fluxes_mode_fesom14(partit, mesh) ch = ch_n10/(1.0_WP+ch_n10*xx/cd_n10_rt)*sqrt(cd/cd_n10) ! 10b (corrected code aug2007) ce = ce_n10/(1.0_WP+ce_n10*xx/cd_n10_rt)*sqrt(cd/cd_n10) ! 10c (corrected code aug2007) end do - cd_atm_oce_arr(i)=cd ch_atm_oce_arr(i)=ch ce_atm_oce_arr(i)=ce end do - +!$OMP END DO +!$OMP END PARALLEL end subroutine ncar_ocean_fluxes_mode_fesom14 ! ! @@ -156,6 +159,11 @@ subroutine ncar_ocean_fluxes_mode(partit, mesh) type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, m, cd_n10, ce_n10, ch_n10, cd_n10_rt, hl1, cd, ce, ch, cd_rt, x2, x, stab, & +!$OMP zeta_u, zeta_t, zeta_q, psi_m_u, psi_h_u, psi_m_t, psi_h_t, psi_m_q, psi_h_q, & +!$OMP ts, qs, tv, xx, dux, dvy, t, t10, q, q10, u, u10 ) +!$OMP DO do i=1,partit%myDim_nod2d+partit%eDim_nod2d if (mesh%ulevels_nod2d(i)>1) cycle ! degree celcium to Kelvin @@ -264,7 +272,6 @@ subroutine ncar_ocean_fluxes_mode(partit, mesh) !___________________________________________________________________ ! (3a) shift wind speed to 10m and neutral stability u10 = u/(1.0_WP+cd_n10_rt*(log(ncar_bulk_z_wind/10._WP)-psi_m_u)/vonkarm) ! L-Y eqn. 9a !why cd_n10_rt not cd_rt -!!PS u10 = u/(1.0_WP+cd_rt*(log(ncar_bulk_z_wind/10._WP)-psi_m_u)/vonkarm) ! L-Y eqn. 9a !why cd_n10_rt not cd_rt u10 = max(u10, u10min) ! 0.3 [m/s] floor on wind ! (3b) shift temperature and humidity to wind height t10 = t - tstar/vonkarm*(log(ncar_bulk_z_tair/ncar_bulk_z_wind)+psi_h_u-psi_h_t)! L-Y eqn. 9b @@ -315,7 +322,8 @@ subroutine ncar_ocean_fluxes_mode(partit, mesh) ch_atm_oce_arr(i)=ch ce_atm_oce_arr(i)=ce end do - +!$OMP END DO +!$OMP END PARALLEL end subroutine ncar_ocean_fluxes_mode ! !--------------------------------------------------------------------------------------------------- @@ -335,11 +343,12 @@ subroutine cal_wind_drag_coeff(partit) real(kind=WP) :: ws type(t_partit), intent(in) :: partit +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i, ws) do i=1,partit%myDim_nod2d+partit%eDim_nod2d ws=sqrt(u_wind(i)**2+v_wind(i)**2) cd_atm_ice_arr(i)=(1.1_WP+0.04_WP*ws)*1.0e-3_WP end do - +!$OMP END PARALLEL DO end subroutine cal_wind_drag_coeff ! SUBROUTINE nemo_ocean_fluxes_mode(partit) @@ -366,9 +375,8 @@ SUBROUTINE nemo_ocean_fluxes_mode(partit) Ce, & ! transfert coefficient for evaporation (Q_lat) t_zu, & ! air temp. shifted at zu [K] q_zu ! spec. hum. shifted at zu [kg/kg] - real(wp) :: zevap, zqsb, zqla, zqlw -!!$OMP PARALLEL -!!$OMP DO + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i, wdx, wdy, wndm, zst, q_sat, Cd, Ch, Ce, t_zu, q_zu) do i = 1, partit%myDim_nod2D+partit%eDim_nod2d wdx = atmdata(i_xwind,i) - u_w(i) ! wind from data - ocean current ( x direction) wdy = atmdata(i_ywind,i) - v_w(i) ! wind from data - ocean current ( y direction) @@ -383,8 +391,7 @@ SUBROUTINE nemo_ocean_fluxes_mode(partit) ch_atm_oce_arr(i)=Ch ce_atm_oce_arr(i)=Ce end do -!!$OMP END DO -!!$OMP END PARALLEL +!$OMP END PARALLEL DO END SUBROUTINE nemo_ocean_fluxes_mode !------------------------------------------------------------------------------- diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 684b8a2cd..57e7bfb2a 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -293,18 +293,23 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) #else #ifndef __ifsinterface call sbc_do(partit, mesh) - u_wind = atmdata(i_xwind,:) - v_wind = atmdata(i_ywind,:) - shum = atmdata(i_humi ,:) - longwave = atmdata(i_qlw ,:) - shortwave = atmdata(i_qsr ,:) - Tair = atmdata(i_tair ,:)-273.15_WP - prec_rain = atmdata(i_prec ,:)/1000._WP - prec_snow = atmdata(i_snow ,:)/1000._WP - press_air = atmdata(i_mslp ,:) ! unit should be Pa +!$OMP PARALLEL DO + DO n=1, myDim_nod2D+eDim_nod2D + u_wind = atmdata(i_xwind,n) + v_wind = atmdata(i_ywind,n) + shum = atmdata(i_humi ,n) + longwave = atmdata(i_qlw ,n) + shortwave = atmdata(i_qsr ,n) + Tair = atmdata(i_tair ,n)-273.15_WP + prec_rain = atmdata(i_prec ,n)/1000._WP + prec_snow = atmdata(i_snow ,n)/1000._WP + press_air = atmdata(i_mslp ,n) ! unit should be Pa + END DO +!$OMP END PARALLEL DO #endif if (use_cavity) then +!$OMP PARALLEL DO do i=1,myDim_nod2d+eDim_nod2d if (ulevels_nod2d(i)>1) then u_wind(i) = 0.0_WP @@ -316,8 +321,9 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) prec_snow(i)= 0.0_WP press_air(i)= 0.0_WP runoff(i) = 0.0_WP - end if + end if end do +!$OMP END PARALLEL DO endif ! second, compute exchange coefficients @@ -327,14 +333,15 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) end if ! 2) drag coeff. and heat exchange coeff. over ocean in case using ncar formulae if(ncar_bulk_formulae) then - cd_atm_oce_arr=0.0_WP - ch_atm_oce_arr=0.0_WP - ce_atm_oce_arr=0.0_WP +! cd_atm_oce_arr=0.0_WP +! ch_atm_oce_arr=0.0_WP +! ce_atm_oce_arr=0.0_WP call ncar_ocean_fluxes_mode(partit, mesh) elseif(AOMIP_drag_coeff) then cd_atm_oce_arr=cd_atm_ice_arr end if ! third, compute wind stress +!$OMP PARALLEL DO do i=1,myDim_nod2d+eDim_nod2d !__________________________________________________________________________ if (ulevels_nod2d(i)>1) then @@ -359,7 +366,7 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) stress_atmice_x(i) = Cd_atm_ice_arr(i)*aux*dux stress_atmice_y(i) = Cd_atm_ice_arr(i)*aux*dvy end do - +!$OMP END PARALLEL DO ! heat and fresh water fluxes are treated in i_therm and ice2ocean #endif /* (__oasis) */ diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index 885e47dde..15bd0d508 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -556,6 +556,7 @@ SUBROUTINE nc_sbc_ini(rdate, partit, mesh) do fld_idx = 1, i_totfl flf=>sbc_flfi(fld_idx) ! prepare nearest coordinates in INfile , save to bilin_indx_i/j +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i, x, y) do i = 1, myDim_nod2D+eDim_nod2D x = geo_coord_nod2D(1,i)/rad if (x < 0) x=x+360._WP @@ -588,6 +589,7 @@ SUBROUTINE nc_sbc_ini(rdate, partit, mesh) end if end if end do +!$OMP END PARALLEL DO end do lfirst=.false. end if @@ -793,8 +795,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, partit, mesh) ! end if ! bilinear space interpolation, and time interpolation , ! data is assumed to be sampled on a regular grid -!!$OMP PARALLEL -!!$OMP DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(ii, i, j, ip1, jp1, x, y, extrp, x1, x2, y1, y2, denom, data1, data2) do ii = 1, myDim_nod2D+eDim_nod2D i = bilin_indx_i(fld_idx, ii) j = bilin_indx_j(fld_idx, ii) @@ -851,9 +852,8 @@ SUBROUTINE getcoeffld(fld_idx, rdate, partit, mesh) coef_a(fld_idx, ii) = ( data2 - data1 ) / delta_t !( nc_time(t_indx+1) - nc_time(t_indx) ) coef_b(fld_idx, ii) = data1 - coef_a(fld_idx, ii) * nc_time(t_indx) - end do !ii -!!$OMP END DO -!!$OMP END PARALLEL + end do +!$OMP END PARALLEL DO END SUBROUTINE getcoeffld SUBROUTINE data_timeinterp(rdate, partit) @@ -871,16 +871,14 @@ SUBROUTINE data_timeinterp(rdate, partit) ! assign data from interpolation to taux and tauy integer :: fld_idx, i,j,ii -!!$OMP PARALLEL -!!$OMP DO do fld_idx = 1, i_totfl +!$OMP PARALLEL DO do i = 1, partit%myDim_nod2D+partit%eDim_nod2D ! store processed forcing data for fesom computation atmdata(fld_idx,i) = rdate * coef_a(fld_idx,i) + coef_b(fld_idx,i) end do !nod2D - end do !fld_idx -!!$OMP END DO -!!$OMP END PARALLEL +!$OMP END PARALLEL DO + end do END SUBROUTINE data_timeinterp SUBROUTINE sbc_ini(partit, mesh) @@ -1110,10 +1108,12 @@ SUBROUTINE sbc_do(partit, mesh) end do if (do_rotation) then +!$OMP PARALLEL DO do i=1, myDim_nod2D+eDim_nod2D call vector_g2r(coef_a(i_xwind,i), coef_a(i_ywind,i), coord_nod2D(1,i), coord_nod2D(2,i), 0) call vector_g2r(coef_b(i_xwind,i), coef_b(i_ywind,i), coord_nod2D(1,i), coord_nod2D(2,i), 0) end do +!$OMP END PARALLEL DO end if !========================================================================== From 82f6b253c4d97c5e7d844b69c2bc3e1c76f7308b Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 22 Nov 2021 16:01:50 +0100 Subject: [PATCH 641/909] fixed OMP error in upd_atm_forcing --- src/gen_forcing_couple.F90 | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 57e7bfb2a..56f5d1240 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -295,15 +295,15 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) call sbc_do(partit, mesh) !$OMP PARALLEL DO DO n=1, myDim_nod2D+eDim_nod2D - u_wind = atmdata(i_xwind,n) - v_wind = atmdata(i_ywind,n) - shum = atmdata(i_humi ,n) - longwave = atmdata(i_qlw ,n) - shortwave = atmdata(i_qsr ,n) - Tair = atmdata(i_tair ,n)-273.15_WP - prec_rain = atmdata(i_prec ,n)/1000._WP - prec_snow = atmdata(i_snow ,n)/1000._WP - press_air = atmdata(i_mslp ,n) ! unit should be Pa + u_wind(n) = atmdata(i_xwind,n) + v_wind(n) = atmdata(i_ywind,n) + shum(n) = atmdata(i_humi ,n) + longwave(n) = atmdata(i_qlw ,n) + shortwave(n) = atmdata(i_qsr ,n) + Tair(n) = atmdata(i_tair ,n)-273.15_WP + prec_rain(n) = atmdata(i_prec ,n)/1000._WP + prec_snow(n) = atmdata(i_snow ,n)/1000._WP + press_air(n) = atmdata(i_mslp ,n) ! unit should be Pa END DO !$OMP END PARALLEL DO #endif From 3f2c856b59abf7c399132bf022c05c98293c77f6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 22 Nov 2021 16:18:50 +0100 Subject: [PATCH 642/909] exchange of u_w and v_w seems to work now --- src/MOD_ICE.F90 | 30 +++++++++++++++++++++--------- src/gen_bulk_formulae.F90 | 12 ++++++------ src/gen_forcing_couple.F90 | 4 ++-- src/ice_EVP.F90 | 4 ++-- src/ice_maEVP.F90 | 16 ++++++++-------- src/ice_oce_coupling.F90 | 12 ++++++------ src/ice_thermo_cpl.F90 | 4 ++-- src/ice_thermo_oce.F90 | 4 ++-- 8 files changed, 49 insertions(+), 37 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 50beb4053..45f0572c1 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -61,8 +61,9 @@ MODULE MOD_ICE ! !_______________________________________________________________________________ ! set work array derived type for ice -TYPE T_ICE_ATMCOUPL #if defined (__oasis) || defined (__ifsinterface) +TYPE T_ICE_ATMCOUPL + !___________________________________________________________________________ real(kind=WP), allocatable, dimension(:) :: oce_flx_h, ice_flx_h, tmpoce_flx_h, tmpice_flx_h #if defined (__oifs) || defined (__ifsinterface) @@ -71,7 +72,6 @@ MODULE MOD_ICE ! !!! DONT FORGET ice_temp rhs_tempdiv rhs_temp is advected for oifs !!! --> becomes additional ice ! tracer in ice%data(4)%values #endif /* (__oifs) */ -#endif /* (__oasis) */ !___________________________________________________________________________ contains procedure WRITE_T_ICE_ATMCOUPL @@ -79,6 +79,7 @@ MODULE MOD_ICE generic :: write(unformatted) => WRITE_T_ICE_ATMCOUPL generic :: read(unformatted) => READ_T_ICE_ATMCOUPL END TYPE T_ICE_ATMCOUPL +#endif /* (__oasis) */ ! ! @@ -95,7 +96,8 @@ MODULE MOD_ICE ! oce temp, salt, ssh, and uv at surface real(kind=WP), allocatable, dimension(:) :: srfoce_temp, srfoce_salt, srfoce_ssh - real(kind=WP), allocatable, dimension(:,:) :: srfoce_uv +! real(kind=WP), allocatable, dimension(:,:) :: srfoce_uv + real(kind=WP), allocatable, dimension(:) :: srfoce_u, srfoce_v ! freshwater & heatflux real(kind=WP), allocatable, dimension(:) :: flx_fw, flx_h @@ -277,13 +279,13 @@ end subroutine READ_T_ICE_THERMO ! !_______________________________________________________________________________ ! Unformatted writing for T_ICE_ATMCOUPL +#if defined (__oasis) || defined (__ifsinterface) subroutine WRITE_T_ICE_ATMCOUPL(tcoupl, unit, iostat, iomsg) IMPLICIT NONE class(T_ICE_ATMCOUPL), intent(in) :: tcoupl integer, intent(in) :: unit integer, intent(out) :: iostat character(*), intent(inout) :: iomsg -#if defined (__oasis) || defined (__ifsinterface) call write_bin_array(tcoupl%oce_flx_h, unit, iostat, iomsg) call write_bin_array(tcoupl%ice_flx_h, unit, iostat, iomsg) call write_bin_array(tcoupl%tmpoce_flx_h, unit, iostat, iomsg) @@ -292,17 +294,17 @@ subroutine WRITE_T_ICE_ATMCOUPL(tcoupl, unit, iostat, iomsg) call write_bin_array(tcoupl%ice_alb, unit, iostat, iomsg) call write_bin_array(tcoupl%enthalpyoffuse, unit, iostat, iomsg) #endif /* (__oifs) */ -#endif /* (__oasis) */ end subroutine WRITE_T_ICE_ATMCOUPL +#endif /* (__oasis) */ ! Unformatted reading for T_ICE_ATMCOUPL +#if defined (__oasis) || defined (__ifsinterface) subroutine READ_T_ICE_ATMCOUPL(tcoupl, unit, iostat, iomsg) IMPLICIT NONE class(T_ICE_ATMCOUPL), intent(inout) :: tcoupl integer, intent(in) :: unit integer, intent(out) :: iostat character(*), intent(inout) :: iomsg -#if defined (__oasis) || defined (__ifsinterface) call read_bin_array(tcoupl%oce_flx_h, unit, iostat, iomsg) call read_bin_array(tcoupl%ice_flx_h, unit, iostat, iomsg) call read_bin_array(tcoupl%tmpoce_flx_h, unit, iostat, iomsg) @@ -311,8 +313,8 @@ subroutine READ_T_ICE_ATMCOUPL(tcoupl, unit, iostat, iomsg) call read_bin_array(tcoupl%ice_alb, unit, iostat, iomsg) call read_bin_array(tcoupl%enthalpyoffuse, unit, iostat, iomsg) #endif /* (__oifs) */ -#endif /* (__oasis) */ end subroutine READ_T_ICE_ATMCOUPL +#endif /* (__oasis) */ ! ! !_______________________________________________________________________________ @@ -331,6 +333,9 @@ subroutine WRITE_T_ICE(ice, unit, iostat, iomsg) if (ice%whichEVP /= 0) call write_bin_array(ice%uvice_aux, unit, iostat, iomsg) call write_bin_array(ice%stress_atmice_xy, unit, iostat, iomsg) call write_bin_array(ice%stress_iceoce_xy, unit, iostat, iomsg) +! call write_bin_array(ice%srfoce_uv, unit, iostat, iomsg) + call write_bin_array(ice%srfoce_u, unit, iostat, iomsg) + call write_bin_array(ice%srfoce_v, unit, iostat, iomsg) call write_bin_array(ice%srfoce_temp, unit, iostat, iomsg) call write_bin_array(ice%srfoce_salt, unit, iostat, iomsg) call write_bin_array(ice%srfoce_ssh, unit, iostat, iomsg) @@ -389,6 +394,9 @@ subroutine READ_T_ICE(ice, unit, iostat, iomsg) if (ice%whichEVP /= 0) call read_bin_array(ice%uvice_aux, unit, iostat, iomsg) call read_bin_array(ice%stress_atmice_xy, unit, iostat, iomsg) call read_bin_array(ice%stress_iceoce_xy, unit, iostat, iomsg) +! call read_bin_array(ice%srfoce_uv, unit, iostat, iomsg) + call read_bin_array(ice%srfoce_u, unit, iostat, iomsg) + call read_bin_array(ice%srfoce_v, unit, iostat, iomsg) call read_bin_array(ice%srfoce_temp, unit, iostat, iomsg) call read_bin_array(ice%srfoce_salt, unit, iostat, iomsg) call read_bin_array(ice%srfoce_ssh, unit, iostat, iomsg) @@ -544,11 +552,15 @@ subroutine ice_init(ice, partit, mesh) !___________________________________________________________________________ ! initialise surface ocean arrays in ice derived type - allocate(ice%srfoce_uv( 2, node_size)) +! allocate(ice%srfoce_uv( 2, node_size)) + allocate(ice%srfoce_u( node_size)) + allocate(ice%srfoce_v( node_size)) allocate(ice%srfoce_temp( node_size)) allocate(ice%srfoce_salt( node_size)) allocate(ice%srfoce_ssh( node_size)) - ice%srfoce_uv = 0.0_WP +! ice%srfoce_uv = 0.0_WP + ice%srfoce_u = 0.0_WP + ice%srfoce_v = 0.0_WP ice%srfoce_temp = 0.0_WP ice%srfoce_salt = 0.0_WP ice%srfoce_ssh = 0.0_WP diff --git a/src/gen_bulk_formulae.F90 b/src/gen_bulk_formulae.F90 index 6b544c3ad..ce98a2b34 100755 --- a/src/gen_bulk_formulae.F90 +++ b/src/gen_bulk_formulae.F90 @@ -52,8 +52,8 @@ subroutine ncar_ocean_fluxes_mode_fesom14(ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_ice) , intent(inout), target :: ice real(kind=WP), dimension(:) , pointer :: T_oc_array, u_w, v_w - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) T_oc_array => ice%srfoce_temp(:) do i=1, partit%myDim_nod2d+partit%eDim_nod2d @@ -163,8 +163,8 @@ subroutine ncar_ocean_fluxes_mode(ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_ice) , intent(inout), target :: ice real(kind=WP), dimension(:) , pointer :: T_oc_array, u_w, v_w - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) T_oc_array => ice%srfoce_temp(:) @@ -381,8 +381,8 @@ SUBROUTINE nemo_ocean_fluxes_mode(ice, partit) q_zu ! spec. hum. shifted at zu [kg/kg] real(wp) :: zevap, zqsb, zqla, zqlw real(kind=WP), dimension(:) , pointer :: u_w, v_w, t_oc_array - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) t_oc_array => ice%srfoce_temp(:) !!$OMP PARALLEL diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 76e827b20..060e5e45e 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -135,8 +135,8 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) #include "associate_mesh_ass.h" u_ice => ice%uvice(1,:) v_ice => ice%uvice(2,:) - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) t1=MPI_Wtime() #ifdef __oasis diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 1eab293a2..19caddcf3 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -507,8 +507,8 @@ subroutine EVPdynamics(ice, partit, mesh) v_rhs_ice => ice%uvice_rhs(2,:) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) elevation => ice%srfoce_ssh(:) !_______________________________________________________________________________ diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 69fbd28d9..94ef7f327 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -57,9 +57,9 @@ subroutine EVPdynamics_a(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_ice), intent(inout), target :: ice + type(t_ice) , intent(inout), target :: ice end subroutine subroutine EVPdynamics_m(ice, partit, mesh) @@ -67,9 +67,9 @@ subroutine EVPdynamics_m(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_ice), intent(inout), target :: ice + type(t_ice) , intent(inout), target :: ice end subroutine end interface end module @@ -437,8 +437,8 @@ subroutine EVPdynamics_m(ice, partit, mesh) v_rhs_ice => ice%uvice_rhs(2,:) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) elevation => ice%srfoce_ssh(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -978,8 +978,8 @@ subroutine EVPdynamics_a(ice, partit, mesh) m_snow => ice%data(3)%values(:) u_rhs_ice => ice%uvice_rhs(1,:) v_rhs_ice => ice%uvice_rhs(2,:) - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) !___________________________________________________________________________ steps=evp_rheol_steps diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index b3e43d8dc..7d0e5faa0 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -7,7 +7,7 @@ subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) USE MOD_TRACER USE MOD_DYN USE MOD_ICE - type(t_ice) , intent(in) , target :: ice + type(t_ice) , intent(inout), target :: ice type(t_dyn) , intent(in) , target :: dynamics type(t_tracer), intent(inout), target :: tracers type(t_partit), intent(inout), target :: partit @@ -84,8 +84,8 @@ subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) u_ice => ice%uvice(1,:) v_ice => ice%uvice(2,:) a_ice => ice%data(1)%values(:) - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) ! ================== ! momentum flux: @@ -155,7 +155,7 @@ subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) USE g_CONFIG use g_comm_auto implicit none - type(t_ice) , intent(in) , target :: ice + type(t_ice) , intent(inout), target :: ice type(t_dyn) , intent(in) , target :: dynamics type(t_tracer), intent(inout), target :: tracers type(t_partit), intent(inout), target :: partit @@ -172,8 +172,8 @@ subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) temp => tracers%data(1)%values(:,:) salt => tracers%data(2)%values(:,:) UV => dynamics%uv(:,:,:) - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) T_oc_array => ice%srfoce_temp(:) S_oc_array => ice%srfoce_salt(:) elevation => ice%srfoce_ssh(:) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 3ec666545..41d03da16 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -78,8 +78,8 @@ subroutine thermodynamics(ice, partit, mesh) thdgrsn => ice%thermo%thdgrsn T_oc_array => ice%srfoce_temp(:) S_oc_array => ice%srfoce_salt(:) - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) !_____________________________________________________________________________ rsss = ref_sss diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 8c4df04c0..05664f16b 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -155,8 +155,8 @@ subroutine thermodynamics(ice, partit, mesh) thdgr_old => ice%thermo%thdgr_old t_skin => ice%thermo%t_skin ustar_aux => ice%thermo%ustar - u_w => ice%srfoce_uv(1,:) - v_w => ice%srfoce_uv(2,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) T_oc_array => ice%srfoce_temp(:) S_oc_array => ice%srfoce_salt(:) From b5f69598ddee5ce12c627527e7d4de9129b93297 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 22 Nov 2021 17:02:17 +0100 Subject: [PATCH 643/909] OMP bug fixes in surface forcing and a general bug fix: press_air was addressed iven if not allocated in update_atm_forcing (need to be fixed in the master branch as well) --- src/gen_forcing_couple.F90 | 10 +++++----- src/gen_surface_forcing.F90 | 18 ++++++++++++------ 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 56f5d1240..c32d68f29 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -303,11 +303,12 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) Tair(n) = atmdata(i_tair ,n)-273.15_WP prec_rain(n) = atmdata(i_prec ,n)/1000._WP prec_snow(n) = atmdata(i_snow ,n)/1000._WP - press_air(n) = atmdata(i_mslp ,n) ! unit should be Pa + if (l_mslp) then + press_air(n) = atmdata(i_mslp ,n) ! unit should be Pa + end if END DO !$OMP END PARALLEL DO -#endif - +#endif if (use_cavity) then !$OMP PARALLEL DO do i=1,myDim_nod2d+eDim_nod2d @@ -325,7 +326,6 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) end do !$OMP END PARALLEL DO endif - ! second, compute exchange coefficients ! 1) drag coefficient if(AOMIP_drag_coeff) then @@ -341,7 +341,7 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) cd_atm_oce_arr=cd_atm_ice_arr end if ! third, compute wind stress -!$OMP PARALLEL DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i, dux, dvy, aux) do i=1,myDim_nod2d+eDim_nod2d !__________________________________________________________________________ if (ulevels_nod2d(i)>1) then diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index 15bd0d508..d745a5700 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -528,6 +528,7 @@ SUBROUTINE nc_sbc_ini(rdate, partit, mesh) integer :: numnodes ! nu,ber of nodes in elem (3 for triangle, 4 for ... ) real(wp) :: x, y ! coordinates of elements integer :: fld_idx + integer :: warn_omp type(flfi_type), pointer :: flf type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -541,8 +542,8 @@ SUBROUTINE nc_sbc_ini(rdate, partit, mesh) ! & qns(elem2D), emp(elem2D), qsr(elem2D), & ! & STAT=sbc_alloc ) ! used to inerpolate on nodes - warn = 0 - + warn = 0 + warn_omp = 0 ! get ini year; Fill names of sbc_flfi idate=int(rdate) @@ -556,7 +557,8 @@ SUBROUTINE nc_sbc_ini(rdate, partit, mesh) do fld_idx = 1, i_totfl flf=>sbc_flfi(fld_idx) ! prepare nearest coordinates in INfile , save to bilin_indx_i/j -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i, x, y) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, x, y, warn) +!$OMP DO do i = 1, myDim_nod2D+eDim_nod2D x = geo_coord_nod2D(1,i)/rad if (x < 0) x=x+360._WP @@ -581,15 +583,19 @@ SUBROUTINE nc_sbc_ini(rdate, partit, mesh) bilin_indx_j(fld_idx, i)=0 end if end if - if (warn == 0) then + if (warn_omp == 0) then if (bilin_indx_i(fld_idx, i) < 1 .or. bilin_indx_j(fld_idx, i) < 1) then ! WRITE(*,*) ' WARNING: node/element coordinate out of forcing bounds,' ! WRITE(*,*) ' nearest value will be used as a constant field' - warn = 1 + warn_omp = 1 end if end if end do -!$OMP END PARALLEL DO +!$OMP END DO +!$OMP CRITICAL + warn=max(warn_omp, warn) +!$OMP END CRITICAL +!$OMP END PARALLEL end do lfirst=.false. end if From 7bb42641fe8b8ce74dd5072a801c5c9804bb3096 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Mon, 22 Nov 2021 17:07:46 +0100 Subject: [PATCH 644/909] bug fix with press_air with use_cavity condition --- src/gen_forcing_couple.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index c32d68f29..5bb702a89 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -320,7 +320,9 @@ subroutine update_atm_forcing(istep, tracers, partit, mesh) Tair(i) = 0.0_WP prec_rain(i)= 0.0_WP prec_snow(i)= 0.0_WP - press_air(i)= 0.0_WP + if (l_mslp) then + press_air(i)= 0.0_WP + end if runoff(i) = 0.0_WP end if end do From 88ce71161b68b5235dd3d960b76f4249694490fe Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 22 Nov 2021 17:52:00 +0100 Subject: [PATCH 645/909] do not bundle 2d ice vector variables together leave them separated otherwise you run into numerical problems with exchange_nod --- src/MOD_ICE.F90 | 88 +++++++++++++++++++++++++------------- src/cavity_param.F90 | 4 +- src/gen_forcing_couple.F90 | 4 +- src/ice_EVP.F90 | 20 ++++----- src/ice_fct.F90 | 8 ++-- src/ice_maEVP.F90 | 20 ++++----- src/ice_oce_coupling.F90 | 4 +- src/ice_setup_step.F90 | 8 ++-- src/ice_thermo_cpl.F90 | 4 +- src/ice_thermo_oce.F90 | 4 +- src/io_blowup.F90 | 4 +- src/io_meandata.F90 | 4 +- src/io_restart.F90 | 4 +- src/oce_mo_conv.F90 | 4 +- src/write_step_info.F90 | 4 +- 15 files changed, 106 insertions(+), 78 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 45f0572c1..2b8f75af4 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -89,10 +89,12 @@ MODULE MOD_ICE !___________________________________________________________________________ ! zonal & merdional ice velocity - real(kind=WP), allocatable, dimension(:,:) :: uvice, uvice_rhs, uvice_old, uvice_aux + real(kind=WP), allocatable, dimension(:) :: uice, uice_rhs, uice_old, uice_aux + real(kind=WP), allocatable, dimension(:) :: vice, vice_rhs, vice_old, vice_aux ! surface stess atm<-->ice, oce<-->ice - real(kind=WP), allocatable, dimension(:,:) :: stress_atmice_xy, stress_iceoce_xy + real(kind=WP), allocatable, dimension(:) :: stress_atmice_x, stress_iceoce_x + real(kind=WP), allocatable, dimension(:) :: stress_atmice_y, stress_iceoce_y ! oce temp, salt, ssh, and uv at surface real(kind=WP), allocatable, dimension(:) :: srfoce_temp, srfoce_salt, srfoce_ssh @@ -327,12 +329,18 @@ subroutine WRITE_T_ICE(ice, unit, iostat, iomsg) character(*), intent(inout) :: iomsg integer :: i !___________________________________________________________________________ - call write_bin_array(ice%uvice, unit, iostat, iomsg) - call write_bin_array(ice%uvice_rhs, unit, iostat, iomsg) - call write_bin_array(ice%uvice_old, unit, iostat, iomsg) - if (ice%whichEVP /= 0) call write_bin_array(ice%uvice_aux, unit, iostat, iomsg) - call write_bin_array(ice%stress_atmice_xy, unit, iostat, iomsg) - call write_bin_array(ice%stress_iceoce_xy, unit, iostat, iomsg) + call write_bin_array(ice%uice, unit, iostat, iomsg) + call write_bin_array(ice%uice_rhs, unit, iostat, iomsg) + call write_bin_array(ice%uice_old, unit, iostat, iomsg) + if (ice%whichEVP /= 0) call write_bin_array(ice%uice_aux, unit, iostat, iomsg) + call write_bin_array(ice%vice, unit, iostat, iomsg) + call write_bin_array(ice%vice_rhs, unit, iostat, iomsg) + call write_bin_array(ice%vice_old, unit, iostat, iomsg) + if (ice%whichEVP /= 0) call write_bin_array(ice%vice_aux, unit, iostat, iomsg) + call write_bin_array(ice%stress_atmice_x, unit, iostat, iomsg) + call write_bin_array(ice%stress_iceoce_x, unit, iostat, iomsg) + call write_bin_array(ice%stress_atmice_y, unit, iostat, iomsg) + call write_bin_array(ice%stress_iceoce_y, unit, iostat, iomsg) ! call write_bin_array(ice%srfoce_uv, unit, iostat, iomsg) call write_bin_array(ice%srfoce_u, unit, iostat, iomsg) call write_bin_array(ice%srfoce_v, unit, iostat, iomsg) @@ -388,12 +396,18 @@ subroutine READ_T_ICE(ice, unit, iostat, iomsg) character(*), intent(inout) :: iomsg integer :: i !___________________________________________________________________________ - call read_bin_array(ice%uvice, unit, iostat, iomsg) - call read_bin_array(ice%uvice_rhs, unit, iostat, iomsg) - call read_bin_array(ice%uvice_old, unit, iostat, iomsg) - if (ice%whichEVP /= 0) call read_bin_array(ice%uvice_aux, unit, iostat, iomsg) - call read_bin_array(ice%stress_atmice_xy, unit, iostat, iomsg) - call read_bin_array(ice%stress_iceoce_xy, unit, iostat, iomsg) + call read_bin_array(ice%uice, unit, iostat, iomsg) + call read_bin_array(ice%uice_rhs, unit, iostat, iomsg) + call read_bin_array(ice%uice_old, unit, iostat, iomsg) + if (ice%whichEVP /= 0) call read_bin_array(ice%uice_aux, unit, iostat, iomsg) + call read_bin_array(ice%vice, unit, iostat, iomsg) + call read_bin_array(ice%vice_rhs, unit, iostat, iomsg) + call read_bin_array(ice%vice_old, unit, iostat, iomsg) + if (ice%whichEVP /= 0) call read_bin_array(ice%vice_aux, unit, iostat, iomsg) + call read_bin_array(ice%stress_atmice_x, unit, iostat, iomsg) + call read_bin_array(ice%stress_iceoce_x, unit, iostat, iomsg) + call read_bin_array(ice%stress_atmice_y, unit, iostat, iomsg) + call read_bin_array(ice%stress_iceoce_y, unit, iostat, iomsg) ! call read_bin_array(ice%srfoce_uv, unit, iostat, iomsg) call read_bin_array(ice%srfoce_u, unit, iostat, iomsg) call read_bin_array(ice%srfoce_v, unit, iostat, iomsg) @@ -535,30 +549,42 @@ subroutine ice_init(ice, partit, mesh) !___________________________________________________________________________ ! allocate/initialise arrays in ice derived type ! initialise velocity and stress related arrays in ice derived type - allocate(ice%uvice( 2, node_size)) - allocate(ice%uvice_rhs( 2, node_size)) - allocate(ice%uvice_old( 2, node_size)) - allocate(ice%stress_atmice_xy( 2, node_size)) - allocate(ice%stress_iceoce_xy( 2, node_size)) - ice%uvice = 0.0_WP - ice%uvice_rhs = 0.0_WP - ice%uvice_old = 0.0_WP - ice%stress_atmice_xy = 0.0_WP - ice%stress_iceoce_xy = 0.0_WP + allocate(ice%uice( node_size)) + allocate(ice%uice_rhs( node_size)) + allocate(ice%uice_old( node_size)) + allocate(ice%vice( node_size)) + allocate(ice%vice_rhs( node_size)) + allocate(ice%vice_old( node_size)) + allocate(ice%stress_atmice_x( node_size)) + allocate(ice%stress_iceoce_x( node_size)) + allocate(ice%stress_atmice_y( node_size)) + allocate(ice%stress_iceoce_y( node_size)) + ice%uice = 0.0_WP + ice%uice_rhs = 0.0_WP + ice%uice_old = 0.0_WP + ice%stress_atmice_x = 0.0_WP + ice%stress_iceoce_x = 0.0_WP if (ice%whichEVP /= 0) then - allocate(ice%uvice_aux( 2, node_size)) - ice%uvice_aux = 0.0_WP + allocate(ice%uice_aux( node_size)) + ice%uice_aux = 0.0_WP + end if + ice%vice = 0.0_WP + ice%vice_rhs = 0.0_WP + ice%vice_old = 0.0_WP + ice%stress_atmice_y = 0.0_WP + ice%stress_iceoce_y = 0.0_WP + if (ice%whichEVP /= 0) then + allocate(ice%vice_aux( node_size)) + ice%vice_aux = 0.0_WP end if !___________________________________________________________________________ ! initialise surface ocean arrays in ice derived type -! allocate(ice%srfoce_uv( 2, node_size)) allocate(ice%srfoce_u( node_size)) allocate(ice%srfoce_v( node_size)) allocate(ice%srfoce_temp( node_size)) allocate(ice%srfoce_salt( node_size)) allocate(ice%srfoce_ssh( node_size)) -! ice%srfoce_uv = 0.0_WP ice%srfoce_u = 0.0_WP ice%srfoce_v = 0.0_WP ice%srfoce_temp = 0.0_WP @@ -692,8 +718,10 @@ subroutine ice_init_toyocean_dummy(ice, partit, mesh) !___________________________________________________________________________ ! allocate/initialise arrays in ice derived type ! initialise velocity and stress related arrays in ice derived type - allocate(ice%uvice( 2, node_size)) - ice%uvice = 0.0_WP + allocate(ice%uice( node_size)) + allocate(ice%vice( node_size)) + ice%uice = 0.0_WP + ice%vice = 0.0_WP allocate(ice%data(ice%num_itracers)) do n = 1, ice%num_itracers allocate(ice%data(n)%values( node_size)) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index a3c31b96c..aacfd27b0 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -491,8 +491,8 @@ subroutine cavity_ice_clean_vel(ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) !___________________________________________________________________________ do node=1,myDim_nod2d+eDim_nod2d diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 060e5e45e..64a1ed646 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -133,8 +133,8 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) u_w => ice%srfoce_u(:) v_w => ice%srfoce_v(:) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 19caddcf3..ab092a1d4 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -77,8 +77,8 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) eps11 => ice%work%eps11(:) eps12 => ice%work%eps12(:) eps22 => ice%work%eps22(:) @@ -389,8 +389,8 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) sigma11 => ice%work%sigma11(:) sigma12 => ice%work%sigma12(:) sigma22 => ice%work%sigma22(:) - u_rhs_ice => ice%uvice_rhs(1,:) - v_rhs_ice => ice%uvice_rhs(2,:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) @@ -496,15 +496,15 @@ subroutine EVPdynamics(ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) - u_ice_old => ice%uvice_old(1,:) - v_ice_old => ice%uvice_old(2,:) - u_rhs_ice => ice%uvice_rhs(1,:) - v_rhs_ice => ice%uvice_rhs(2,:) + u_ice_old => ice%uice_old(:) + v_ice_old => ice%vice_old(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) u_w => ice%srfoce_u(:) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index ab1a8fcfb..77b6f9392 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -111,8 +111,8 @@ subroutine ice_TG_rhs(ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) @@ -869,8 +869,8 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 94ef7f327..5d9de09c9 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -319,8 +319,8 @@ subroutine stress2rhs_m(ice, partit, mesh) sigma11 => ice%work%sigma11(:) sigma12 => ice%work%sigma12(:) sigma22 => ice%work%sigma22(:) - u_rhs_ice => ice%uvice_rhs(1,:) - v_rhs_ice => ice%uvice_rhs(2,:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) @@ -422,8 +422,8 @@ subroutine EVPdynamics_m(ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) @@ -433,8 +433,8 @@ subroutine EVPdynamics_m(ice, partit, mesh) sigma11 => ice%work%sigma11(:) sigma12 => ice%work%sigma12(:) sigma22 => ice%work%sigma22(:) - u_rhs_ice => ice%uvice_rhs(1,:) - v_rhs_ice => ice%uvice_rhs(2,:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) u_w => ice%srfoce_u(:) @@ -971,13 +971,13 @@ subroutine EVPdynamics_a(ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) - u_rhs_ice => ice%uvice_rhs(1,:) - v_rhs_ice => ice%uvice_rhs(2,:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) u_w => ice%srfoce_u(:) v_w => ice%srfoce_v(:) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 7d0e5faa0..9699bb32c 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -81,8 +81,8 @@ subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) u_w => ice%srfoce_u(:) v_w => ice%srfoce_v(:) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 06bc15936..5f0b296a1 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -265,8 +265,8 @@ subroutine ice_timestep(step, ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) !___________________________________________________________________________ t0=MPI_Wtime() #if defined (__icepack) @@ -393,8 +393,8 @@ subroutine ice_initial_state(ice, tracers, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 41d03da16..92337ac2c 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -69,8 +69,8 @@ subroutine thermodynamics(ice, partit, mesh) eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 05664f16b..1d4cf7e5d 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -142,8 +142,8 @@ subroutine thermodynamics(ice, partit, mesh) ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 282831aa5..ef5c6d5cf 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -155,8 +155,8 @@ subroutine ini_blowup_io(year, ice, dynamics, tracers, partit, mesh) call def_variable(bid, 'a_ice' , (/nod2D/) , 'ice concentration [0 to 1]', '%', ice%data(1)%values); call def_variable(bid, 'm_ice' , (/nod2D/) , 'effective ice thickness', 'm', ice%data(2)%values); call def_variable(bid, 'm_snow' , (/nod2D/) , 'effective snow thickness', 'm', ice%data(3)%values); - call def_variable(bid, 'u_ice' , (/nod2D/) , 'zonal velocity', 'm/s', ice%uvice(1,:)); - call def_variable(bid, 'v_ice' , (/nod2D/) , 'meridional velocity', 'm', ice%uvice(2,:)); + call def_variable(bid, 'u_ice' , (/nod2D/) , 'zonal velocity', 'm/s', ice%uice); + call def_variable(bid, 'v_ice' , (/nod2D/) , 'meridional velocity', 'm', ice%vice); !!PS call def_variable(bid, 'a_ice_old' , (/nod2D/) , 'ice concentration [0 to 1]', '%', a_ice_old); !PS !!PS call def_variable(bid, 'm_ice_old' , (/nod2D/) , 'effective ice thickness', 'm', m_ice_old); !PS !!PS call def_variable(bid, 'm_snow_old' , (/nod2D/) , 'effective snow thickness', 'm', m_snow_old); !PS diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index f0e9e755d..cd7b50f29 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -170,11 +170,11 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) ! output sea ice CASE ('uice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'uice', 'ice velocity x', 'm/s', ice%uvice(1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'uice', 'ice velocity x', 'm/s', ice%uice(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('vice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'vice', 'ice velocity y', 'm/s', ice%uvice(2,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'vice', 'ice velocity y', 'm/s', ice%vice(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('a_ice ') if (use_ice) then diff --git a/src/io_restart.F90 b/src/io_restart.F90 index b97e08813..ae0162539 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -200,8 +200,8 @@ subroutine ini_ice_io(year, ice, partit, mesh) call def_variable(iid, 'area', (/nod2D/), 'ice concentration [0 to 1]', '%', ice%data(1)%values(:)); call def_variable(iid, 'hice', (/nod2D/), 'effective ice thickness', 'm', ice%data(2)%values(:)); call def_variable(iid, 'hsnow', (/nod2D/), 'effective snow thickness', 'm', ice%data(3)%values(:)); - call def_variable(iid, 'uice', (/nod2D/), 'zonal velocity', 'm/s', ice%uvice(1,:)); - call def_variable(iid, 'vice', (/nod2D/), 'meridional velocity', 'm', ice%uvice(2,:)); + call def_variable(iid, 'uice', (/nod2D/), 'zonal velocity', 'm/s', ice%uice(:)); + call def_variable(iid, 'vice', (/nod2D/), 'meridional velocity', 'm', ice%vice(:)); #if defined (__oifs) call def_variable(iid, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice_alb); call def_variable(iid, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice_temp); diff --git a/src/oce_mo_conv.F90 b/src/oce_mo_conv.F90 index f46e210a5..c3cc7a844 100644 --- a/src/oce_mo_conv.F90 +++ b/src/oce_mo_conv.F90 @@ -24,8 +24,8 @@ subroutine mo_convect(ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) !___________________________________________________________________________ diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 94a141361..dc1b6ae8a 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -295,8 +295,8 @@ subroutine check_blowup(istep, ice, dynamics, tracers, partit, mesh) ssh_rhs_old => dynamics%ssh_rhs_old(:) eta_n => dynamics%eta_n(:) d_eta => dynamics%d_eta(:) - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) From 424754cc71019f19d791050d10a5c6440834c1fa Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Mon, 22 Nov 2021 17:24:07 +0000 Subject: [PATCH 646/909] First coupled IFS-FESOM from the refactored branch. --- .gitignore | 1 + src/CMakeLists.txt | 2 +- src/fesom_module.F90 | 2 ++ src/gen_surface_forcing.F90 | 3 +++ src/ifs_interface/ifs_interface.F90 | 6 +++++- 5 files changed, 12 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index b47b73763..7e9b58fcd 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ lib/*.a lib/*.so /work_* Makefile.in +mesh_part/build diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c23f7beca..34d375b07 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -101,7 +101,7 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) # target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) - target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none -g) if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" endif() diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index fdeec1c53..1eea9e7ae 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -90,10 +90,12 @@ subroutine fesom_init(fesom_total_nsteps) ! EO parameters logical mpi_is_initialized +#if !defined __ifsinterface if(command_argument_count() > 0) then call command_line_options%parse() stop end if +#endif mpi_is_initialized = .false. f%fesom_did_mpi_init = .false. diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index 885e47dde..aec6da5f0 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -937,6 +937,8 @@ SUBROUTINE sbc_ini(partit, mesh) write(*,*) "Surface boundary conditions parameters:" end if +#if !defined __ifsinterface + i_totfl=0 if (l_xwind) then if (mype==0) then @@ -1041,6 +1043,7 @@ SUBROUTINE sbc_ini(partit, mesh) ALLOCATE(sbc_flfi(i_totfl)) call nc_sbc_ini(rdate, partit, mesh) !========================================================================== +#endif ! runoff if (runoff_data_source=='CORE1' .or. runoff_data_source=='CORE2' ) then ! runoff in CORE is constant in time diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 78d758d86..fb88ff5e3 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -16,6 +16,7 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & USE par_kind !in ifs_modules.F90 USE fesom_main_storage_module, only: fesom => f ! only: MPI_COMM_FESOM, mype (previously in g_parsup) + USE fesom_module, ONLY : fesom_init USE g_config, only: dt USE g_clock, only: timenew, daynew, yearnew, month, day_in_month USE nemogcmcoup_steps, ONLY : substeps @@ -49,7 +50,8 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & READ(9,namfesomstep) CLOSE(9) - fesom%MPI_COMM_FESOM=icomm + fesom%partit%MPI_COMM_FESOM=icomm + itini = 1 CALL fesom_init(itend_fesom) !also sets mype and npes itend=itend_fesom/substeps @@ -1459,6 +1461,7 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) USE g_clock, only: yearnew, month, day_in_month USE fesom_main_storage_module, only: fesom => f ! mype + USE fesom_module, ONLY : fesom_runloop USE nemogcmcoup_steps, ONLY : substeps IMPLICIT NONE @@ -1501,6 +1504,7 @@ END SUBROUTINE nemogcmcoup_step SUBROUTINE nemogcmcoup_final USE fesom_main_storage_module, only: fesom => f ! mype + USE fesom_module, ONLY : fesom_finalize ! Finalize the FESOM model From d222252734eb7e66d7b741729f80727cbcddd631 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Tue, 23 Nov 2021 19:29:22 +0000 Subject: [PATCH 647/909] ECMWF Atos. --- env.sh | 4 ++++ env/atosecmwf/shell | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 env/atosecmwf/shell diff --git a/env.sh b/env.sh index ecb576453..46ac10042 100755 --- a/env.sh +++ b/env.sh @@ -25,6 +25,8 @@ else LOGINHOST=$1 fi +echo $LOGINHOST + if [[ $LOGINHOST =~ ^m[A-Za-z0-9]+\.hpc\.dkrz\.de$ ]]; then STRATEGY="mistral.dkrz.de" elif [[ $LOGINHOST =~ ^ollie[0-9]$ ]] || [[ $LOGINHOST =~ ^prod-[0-9]{4}$ ]]; then @@ -49,6 +51,8 @@ elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then STRATEGY="ecaccess.ecmwf.int" elif [[ $LOGINHOST =~ ^[A-Za-z0-9]+\.ecmwf\.int$ ]]; then STRATEGY="wsecmwf" +elif [[ $LOGINHOST =~ \.bullx$ ]]; then + STRATEGY="atosecmwf" else echo "can not determine environment for host: "$LOGINHOST [ $BEING_EXECUTED = true ] && exit 1 diff --git a/env/atosecmwf/shell b/env/atosecmwf/shell new file mode 100644 index 000000000..3c5efc9c9 --- /dev/null +++ b/env/atosecmwf/shell @@ -0,0 +1,39 @@ +# used at ECMWF + +module unload metview +module unload emos +module unload eccodes +module unload fftw +module unload openmpi +module unload boost +module unload fcm +module unload hdf5 +module unload netcdf +module unload netcdf4 +module unload python3 +module unload nag +module unload gnu +module unload clang +module unload intel +module unload cmake +module unload prgenv +module unload gcc + +# Load modules +module load prgenv/intel +module load intel/2021.2.0 +module load hpcx-openmpi/2.9.0 +module load intel-mkl/19.0.5 +module load fftw/3.3.9 +module load netcdf4/4.7.4 +module load hdf5/1.10.6 +module load boost/1.71.0 +module load eigen/3.3.7 +module load cmake/3.20.2 +module load ninja/1.10.0 +module load fcm/2019.05.0 + +export NETCDF_DIR=$NETCDF4_DIR + +export FC=mpif90 CC=mpicc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly +#export FC=mpif90 CC=gcc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly From 650251c0610a69f618ccd98a231ca864373facd8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 11:58:03 +0100 Subject: [PATCH 648/909] exchange a_icel, m_icel, m_snowl against ice derived type variables --- src/ice_fct.F90 | 25 ++++++++++++++++++++----- src/ice_modules.F90 | 2 +- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 77b6f9392..77be09f9d 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -195,10 +195,10 @@ subroutine ice_fct_init(ice, partit, mesh) n_size=myDim_nod2D+eDim_nod2D ! Initialization of arrays necessary to implement FCT algorithm - allocate(m_icel(n_size), a_icel(n_size), m_snowl(n_size)) ! low-order solutions - m_icel=0.0_WP - a_icel=0.0_WP - m_snowl=0.0_WP +! allocate(m_icel(n_size), a_icel(n_size), m_snowl(n_size)) ! low-order solutions +! m_icel=0.0_WP +! a_icel=0.0_WP +! m_snowl=0.0_WP #if defined (__oifs) allocate(m_templ(n_size)) allocate(dm_temp(n_size)) @@ -284,6 +284,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms + real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -294,6 +295,9 @@ subroutine ice_solve_low_order(ice, partit, mesh) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) rhs_ms => ice%data(3)%values_rhs(:) + a_icel => ice%data(1)%valuesl(:) + m_icel => ice%data(2)%valuesl(:) + m_snowl => ice%data(3)%valuesl(:) !___________________________________________________________________________ gamma=ice_gamma_fct ! Added diffusivity parameter @@ -356,6 +360,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) !_____________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms + real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -363,7 +368,9 @@ subroutine ice_solve_high_order(ice, partit, mesh) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) rhs_ms => ice%data(3)%values_rhs(:) - + a_icel => ice%data(1)%valuesl(:) + m_icel => ice%data(2)%valuesl(:) + m_snowl => ice%data(3)%valuesl(:) !_____________________________________________________________________________ ! Does Taylor-Galerkin solution ! @@ -459,6 +466,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -466,6 +474,9 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) + a_icel => ice%data(1)%valuesl(:) + m_icel => ice%data(2)%valuesl(:) + m_snowl => ice%data(3)%valuesl(:) !___________________________________________________________________________ gamma=ice_gamma_fct ! It should coinside with gamma in @@ -981,6 +992,7 @@ subroutine ice_update_for_div(ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_adiv, rhs_mdiv, rhs_msdiv + real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -991,6 +1003,9 @@ subroutine ice_update_for_div(ice, partit, mesh) rhs_adiv => ice%data(1)%values_div_rhs(:) rhs_mdiv => ice%data(2)%values_div_rhs(:) rhs_msdiv => ice%data(3)%values_div_rhs(:) + a_icel => ice%data(1)%valuesl(:) + m_icel => ice%data(2)%valuesl(:) + m_snowl => ice%data(3)%valuesl(:) !___________________________________________________________________________ ! Does Taylor-Galerkin solution diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index cfb202a36..bb46f9b94 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -93,7 +93,7 @@ MODULE i_ARRAYS REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_y ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: t_skin ! FCT implementation - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_icel, a_icel, m_snowl +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_icel, a_icel, m_snowl REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus From ff993963a631da5864c6154d5578a9e463c21de2 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 12:03:46 +0100 Subject: [PATCH 649/909] exchange da_ice, dm_ice, dm_snow against ice derived type variables --- src/ice_fct.F90 | 21 +++++++++++++++++---- src/ice_modules.F90 | 2 +- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 77be09f9d..be705bb6a 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -214,10 +214,10 @@ subroutine ice_fct_init(ice, partit, mesh) dm_temp=0.0_WP #endif /* (__oifs) */ - allocate(dm_ice(n_size), da_ice(n_size), dm_snow(n_size)) ! increments of high - dm_ice = 0.0_WP ! order solutions - da_ice = 0.0_WP - dm_snow = 0.0_WP +! allocate(dm_ice(n_size), da_ice(n_size), dm_snow(n_size)) ! increments of high +! dm_ice = 0.0_WP ! order solutions +! da_ice = 0.0_WP +! dm_snow = 0.0_WP ! Fill in the mass matrix call ice_mass_matrix_fill(ice, partit, mesh) @@ -361,6 +361,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl + real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -371,6 +372,10 @@ subroutine ice_solve_high_order(ice, partit, mesh) a_icel => ice%data(1)%valuesl(:) m_icel => ice%data(2)%valuesl(:) m_snowl => ice%data(3)%valuesl(:) + da_ice => ice%data(1)%dvalues(:) + dm_ice => ice%data(2)%dvalues(:) + dm_snow => ice%data(3)%dvalues(:) + !_____________________________________________________________________________ ! Does Taylor-Galerkin solution ! @@ -467,6 +472,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl + real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -477,6 +483,9 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) a_icel => ice%data(1)%valuesl(:) m_icel => ice%data(2)%valuesl(:) m_snowl => ice%data(3)%valuesl(:) + da_ice => ice%data(1)%dvalues(:) + dm_ice => ice%data(2)%dvalues(:) + dm_snow => ice%data(3)%dvalues(:) !___________________________________________________________________________ gamma=ice_gamma_fct ! It should coinside with gamma in @@ -993,6 +1002,7 @@ subroutine ice_update_for_div(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_adiv, rhs_mdiv, rhs_msdiv real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl + real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1006,6 +1016,9 @@ subroutine ice_update_for_div(ice, partit, mesh) a_icel => ice%data(1)%valuesl(:) m_icel => ice%data(2)%valuesl(:) m_snowl => ice%data(3)%valuesl(:) + da_ice => ice%data(1)%dvalues(:) + dm_ice => ice%data(2)%dvalues(:) + dm_snow => ice%data(3)%dvalues(:) !___________________________________________________________________________ ! Does Taylor-Galerkin solution diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index bb46f9b94..c3db01185 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -94,7 +94,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: t_skin ! FCT implementation ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_icel, a_icel, m_snowl - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix From cb20ce972fba419c84911698348c2f89cbc70f09 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 12:24:38 +0100 Subject: [PATCH 650/909] exchange icefluxes, icepplus, icepminus against ice derived type variables --- src/ice_fct.F90 | 22 +++++++++++++--------- src/ice_modules.F90 | 4 ++-- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index be705bb6a..787c79a04 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -203,11 +203,11 @@ subroutine ice_fct_init(ice, partit, mesh) allocate(m_templ(n_size)) allocate(dm_temp(n_size)) #endif /* (__oifs) */ - allocate(icefluxes(myDim_elem2D,3)) - allocate(icepplus(n_size), icepminus(n_size)) - icefluxes = 0.0_WP - icepplus = 0.0_WP - icepminus= 0.0_WP +! allocate(icefluxes(myDim_elem2D,3)) +! allocate(icepplus(n_size), icepminus(n_size)) +! icefluxes = 0.0_WP +! icepplus = 0.0_WP +! icepminus= 0.0_WP #if defined (__oifs) m_templ=0.0_WP @@ -470,9 +470,11 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) real(kind=WP) :: vol, flux, ae, gamma !___________________________________________________________________________ ! pointer on necessary derived types - real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow - real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl - real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow + real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: a_icel, m_icel, m_snowl + real(kind=WP), dimension(:) , pointer :: da_ice, dm_ice, dm_snow + real(kind=WP), dimension(:) , pointer :: icepplus, icepminus + real(kind=WP), dimension(:,:), pointer :: icefluxes #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -486,7 +488,9 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) da_ice => ice%data(1)%dvalues(:) dm_ice => ice%data(2)%dvalues(:) dm_snow => ice%data(3)%dvalues(:) - + icefluxes => ice%work%fct_fluxes(:,:) + icepplus => ice%work%fct_plus(:) + icepminus => ice%work%fct_minus(:) !___________________________________________________________________________ gamma=ice_gamma_fct ! It should coinside with gamma in ! ts_solve_low_order diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index c3db01185..c4526a072 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -95,8 +95,8 @@ MODULE i_ARRAYS ! FCT implementation ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_icel, a_icel, m_snowl ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow - REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: alpha_evp_array(:) ! of myDim_elem2D REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: beta_evp_array(:) ! of myDim_node2D+eDim_node2D From 77cf7552c505a8fd59dd99d7d252058b2f697452 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 12:30:58 +0100 Subject: [PATCH 651/909] exchange tmax, tmin against ice derived type variables --- src/ice_fct.F90 | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 787c79a04..c2a64097c 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -466,14 +466,14 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) !___________________________________________________________________________ integer :: tr_array_id integer :: icoef(3,3),n,q, elem,elnodes(3),row - real(kind=WP), allocatable, dimension(:) :: tmax, tmin +! real(kind=WP), allocatable, dimension(:) :: tmax, tmin real(kind=WP) :: vol, flux, ae, gamma !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:) , pointer :: a_icel, m_icel, m_snowl real(kind=WP), dimension(:) , pointer :: da_ice, dm_ice, dm_snow - real(kind=WP), dimension(:) , pointer :: icepplus, icepminus + real(kind=WP), dimension(:) , pointer :: icepplus, icepminus, tmax, tmin real(kind=WP), dimension(:,:), pointer :: icefluxes #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -491,6 +491,9 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) icefluxes => ice%work%fct_fluxes(:,:) icepplus => ice%work%fct_plus(:) icepminus => ice%work%fct_minus(:) + tmax => ice%work%fct_tmax(:) + tmin => ice%work%fct_tmin(:) + !___________________________________________________________________________ gamma=ice_gamma_fct ! It should coinside with gamma in ! ts_solve_low_order @@ -503,7 +506,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) ! we need its antidiffusive contribution to ! each of its 3 nodes - allocate(tmax(myDim_nod2D), tmin(myDim_nod2D)) +! allocate(tmax(myDim_nod2D), tmin(myDim_nod2D)) tmax = 0.0_WP tmin = 0.0_WP @@ -774,7 +777,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) call exchange_nod(ice_temp, partit) #endif /* (__oifs) */ - deallocate(tmin, tmax) +! deallocate(tmin, tmax) end subroutine ice_fem_fct ! ! From 34cc39c1bca3c9e1d4ef8b907069495b70aad7e7 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 12:42:56 +0100 Subject: [PATCH 652/909] exchange mass_matrix against ice derived type variables --- src/ice_fct.F90 | 12 ++++++++++-- src/ice_modules.F90 | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index c2a64097c..dfee3fcf3 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -285,6 +285,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl + real(kind=WP), dimension(:), pointer :: mass_matrix #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -298,6 +299,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) a_icel => ice%data(1)%valuesl(:) m_icel => ice%data(2)%valuesl(:) m_snowl => ice%data(3)%valuesl(:) + mass_matrix => ice%work%fct_massmatrix !___________________________________________________________________________ gamma=ice_gamma_fct ! Added diffusivity parameter @@ -362,6 +364,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow + real(kind=WP), dimension(:), pointer :: mass_matrix #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -375,6 +378,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) da_ice => ice%data(1)%dvalues(:) dm_ice => ice%data(2)%dvalues(:) dm_snow => ice%data(3)%dvalues(:) + mass_matrix => ice%work%fct_massmatrix !_____________________________________________________________________________ ! Does Taylor-Galerkin solution @@ -799,15 +803,17 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) integer, allocatable :: col_pos(:) real(kind=WP) :: aa integer :: flag=0,iflag=0 - type(t_ice), intent(inout), target :: ice + type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh !_____________________________________________________________________________ ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: mass_matrix #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + mass_matrix => ice%work%fct_massmatrix ! ! a) allocate(mass_matrix(sum(nn_num(1:myDim_nod2D)))) @@ -1010,6 +1016,7 @@ subroutine ice_update_for_div(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: rhs_adiv, rhs_mdiv, rhs_msdiv real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow + real(kind=WP), dimension(:), pointer :: mass_matrix #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1026,6 +1033,7 @@ subroutine ice_update_for_div(ice, partit, mesh) da_ice => ice%data(1)%dvalues(:) dm_ice => ice%data(2)%dvalues(:) dm_snow => ice%data(3)%dvalues(:) + mass_matrix => ice%work%fct_massmatrix(:) !___________________________________________________________________________ ! Does Taylor-Galerkin solution diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index c4526a072..726d0c734 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -97,7 +97,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: alpha_evp_array(:) ! of myDim_elem2D REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: beta_evp_array(:) ! of myDim_node2D+eDim_node2D From e3bae191a2ea06a7efe8922c3068ab193556f2e4 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 12:57:15 +0100 Subject: [PATCH 653/909] exchange fresh_wa_flux, net_heat_flux against ice derived type variables --- src/cavity_param.F90 | 55 +++++++++++++++++++++++++++------------- src/ice_fct.F90 | 4 +-- src/ice_modules.F90 | 4 +-- src/ice_oce_coupling.F90 | 22 +++++++++------- src/ice_setup_step.F90 | 6 ++--- src/ice_thermo_oce.F90 | 37 ++++++++++++++------------- 6 files changed, 78 insertions(+), 50 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index aacfd27b0..e9e608275 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -1,15 +1,29 @@ module cavity_interfaces interface - subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) + subroutine cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) + USE MOD_ICE USE MOD_DYN USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - type(t_dyn), intent(in), target :: dynamics - type(t_tracer), intent(in), target :: tracers + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in) , target :: tracers type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine cavity_heat_water_fluxes_2eq(ice, tracers, partit, mesh) + USE MOD_ICE + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_tracer), intent(in) , target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh end subroutine subroutine cavity_ice_clean_vel(ice, partit, mesh) @@ -171,20 +185,21 @@ end subroutine compute_nrst_pnt2cavline ! adjusted for use in FESOM by Ralph Timmermann, 16.02.2011 ! Reviewed by ? ! adapted by P. SCholz for FESOM2.0 -subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) +subroutine cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER use MOD_DYN + use MOD_ICE use o_PARAM , only: density_0, WP use o_ARRAYS, only: heat_flux, water_flux, density_m_rho0, density_ref - use i_ARRAYS, only: net_heat_flux, fresh_wa_flux implicit none !___________________________________________________________________________ - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + type(t_tracer), intent(in) , target :: tracers + type(t_ice) , intent(inout), target :: ice type(t_dyn), intent(in), target :: dynamics real (kind=WP) :: temp,sal,tin,zice real (kind=WP) :: rhow, rhor, rho @@ -224,12 +239,15 @@ subroutine cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) ! oomw= -30. ! oofw= -2.5 real(kind=WP), dimension(:,:,:), pointer :: UVnode + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UVnode=>dynamics%uvnode(:,:,:) - + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) + !___________________________________________________________________________ do node=1,myDim_nod2D !+eDim_nod2D nzmin = ulevels_nod2D(node) @@ -364,29 +382,32 @@ end subroutine cavity_heat_water_fluxes_3eq ! Compute the heat and freshwater fluxes under ice cavity using simple 2equ. ! Coded by Adriana Huerta-Casas ! Reviewed by Qiang Wang -subroutine cavity_heat_water_fluxes_2eq(tracers, partit, mesh) +subroutine cavity_heat_water_fluxes_2eq(ice, tracers, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER + use MOD_ICE use o_PARAM , only: WP use o_ARRAYS, only: heat_flux, water_flux - use i_ARRAYS, only: net_heat_flux, fresh_wa_flux implicit none - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + type(t_tracer), intent(in) , target :: tracers + type(t_ice) , intent(inout), target :: ice integer :: node, nzmin real(kind=WP) :: gama, L, aux real(kind=WP) :: c2, c3, c4, c5, c6 real(kind=WP) :: t_i, s_i, p, t_fz - + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) + !___________________________________________________________________________ ! parameter for computing heat and water fluxes gama = 1.0e-4_WP ! heat exchange velocity [m/s] diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index dfee3fcf3..8e37ec572 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -299,7 +299,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) a_icel => ice%data(1)%valuesl(:) m_icel => ice%data(2)%valuesl(:) m_snowl => ice%data(3)%valuesl(:) - mass_matrix => ice%work%fct_massmatrix + mass_matrix => ice%work%fct_massmatrix(:) !___________________________________________________________________________ gamma=ice_gamma_fct ! Added diffusivity parameter @@ -378,7 +378,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) da_ice => ice%data(1)%dvalues(:) dm_ice => ice%data(2)%dvalues(:) dm_snow => ice%data(3)%dvalues(:) - mass_matrix => ice%work%fct_massmatrix + mass_matrix => ice%work%fct_massmatrix(:) !_____________________________________________________________________________ ! Does Taylor-Galerkin solution diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 726d0c734..44f1fa934 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -72,8 +72,8 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux #if defined (__oasis) || defined (__ifsinterface) real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 9699bb32c..be8fb7f8e 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -268,18 +268,22 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:) , pointer :: a_ice_old real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - temp => tracers%data(1)%values(:,:) - salt => tracers%data(2)%values(:,:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - a_ice_old=> ice%data(1)%values_old(:) - thdgr => ice%thermo%thdgr - thdgrsn => ice%thermo%thdgrsn + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + a_ice_old => ice%data(1)%values_old(:) + thdgr => ice%thermo%thdgr(:) + thdgrsn => ice%thermo%thdgrsn(:) + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) + !___________________________________________________________________________ allocate(flux(myDim_nod2D+eDim_nod2D)) flux = 0.0_WP @@ -321,7 +325,7 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) water_flux = -fresh_wa_flux #endif heat_flux_in=heat_flux ! sw_pene will change the heat_flux - if (use_cavity) call cavity_heat_water_fluxes_3eq(dynamics, tracers, partit, mesh) + if (use_cavity) call cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) !!PS if (use_cavity) call cavity_heat_water_fluxes_2eq(mesh) !!PS where(ulevels_nod2D>1) heat_flux=0.0_WP diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 5f0b296a1..66a1c4774 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -195,9 +195,9 @@ subroutine ice_array_setup(partit, mesh) ! allocate(S_oc_array(n_size), T_oc_array(n_size)) ! copies of ocean T ans S ! S_oc_array = 0.0_WP ! T_oc_array = 0.0_WP - allocate(fresh_wa_flux(n_size), net_heat_flux(n_size)) - fresh_wa_flux = 0.0_WP - net_heat_flux = 0.0_WP +! allocate(fresh_wa_flux(n_size), net_heat_flux(n_size)) +! fresh_wa_flux = 0.0_WP +! net_heat_flux = 0.0_WP allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) stress_atmice_x = 0.0_WP stress_atmice_y = 0.0_WP diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 1d4cf7e5d..4857e4325 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -137,28 +137,31 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn, thdgr_old, t_skin, ustar_aux real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D - u_ice => ice%uice(:) - v_ice => ice%vice(:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - a_ice_old => ice%data(1)%values_old(:) - m_ice_old => ice%data(2)%values_old(:) - m_snow_old => ice%data(3)%values_old(:) - thdgr => ice%thermo%thdgr - thdgrsn => ice%thermo%thdgrsn - thdgr_old => ice%thermo%thdgr_old - t_skin => ice%thermo%t_skin - ustar_aux => ice%thermo%ustar - u_w => ice%srfoce_u(:) - v_w => ice%srfoce_v(:) - T_oc_array => ice%srfoce_temp(:) - S_oc_array => ice%srfoce_salt(:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) + thdgr => ice%thermo%thdgr + thdgrsn => ice%thermo%thdgrsn + thdgr_old => ice%thermo%thdgr_old + t_skin => ice%thermo%t_skin + ustar_aux => ice%thermo%ustar + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + T_oc_array => ice%srfoce_temp(:) + S_oc_array => ice%srfoce_salt(:) + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) !_____________________________________________________________________________ rsss=ref_sss From 1804cba8c1c26f4026335bb92acbb93b8ff694c8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 13:00:58 +0100 Subject: [PATCH 654/909] exchange fresh_wa_flux, net_heat_flux against ice derived type variables in src/ice_thermo_cpl.F90 --- src/ice_thermo_cpl.F90 | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 92337ac2c..6c1eb9e7e 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -65,21 +65,24 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D - u_ice => ice%uice(:) - v_ice => ice%vice(:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - thdgr => ice%thermo%thdgr - thdgrsn => ice%thermo%thdgrsn - T_oc_array => ice%srfoce_temp(:) - S_oc_array => ice%srfoce_salt(:) - u_w => ice%srfoce_u(:) - v_w => ice%srfoce_v(:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + thdgr => ice%thermo%thdgr + thdgrsn => ice%thermo%thdgrsn + T_oc_array => ice%srfoce_temp(:) + S_oc_array => ice%srfoce_salt(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) !_____________________________________________________________________________ rsss = ref_sss From 5201dcd8b66125368816a3c2b6c62d4903f7d82f Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 13:25:27 +0100 Subject: [PATCH 655/909] turn back fresh_wa_flux --- src/cavity_param.F90 | 12 ++++++++---- src/ice_modules.F90 | 2 +- src/ice_oce_coupling.F90 | 5 +++-- src/ice_setup_step.F90 | 3 ++- src/ice_thermo_cpl.F90 | 5 +++-- src/ice_thermo_oce.F90 | 5 +++-- 6 files changed, 20 insertions(+), 12 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index e9e608275..0ba757a5c 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -194,6 +194,7 @@ subroutine cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) use MOD_ICE use o_PARAM , only: density_0, WP use o_ARRAYS, only: heat_flux, water_flux, density_m_rho0, density_ref + use i_ARRAYS implicit none !___________________________________________________________________________ type(t_partit), intent(inout), target :: partit @@ -239,13 +240,14 @@ subroutine cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) ! oomw= -30. ! oofw= -2.5 real(kind=WP), dimension(:,:,:), pointer :: UVnode - real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux +! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP), dimension(:) , pointer :: net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UVnode=>dynamics%uvnode(:,:,:) - fresh_wa_flux => ice%flx_fw(:) +! fresh_wa_flux => ice%flx_fw(:) net_heat_flux => ice%flx_h(:) !___________________________________________________________________________ @@ -390,6 +392,7 @@ subroutine cavity_heat_water_fluxes_2eq(ice, tracers, partit, mesh) use MOD_ICE use o_PARAM , only: WP use o_ARRAYS, only: heat_flux, water_flux + use i_ARRAYS implicit none type(t_partit), intent(inout), target :: partit @@ -400,12 +403,13 @@ subroutine cavity_heat_water_fluxes_2eq(ice, tracers, partit, mesh) real(kind=WP) :: gama, L, aux real(kind=WP) :: c2, c3, c4, c5, c6 real(kind=WP) :: t_i, s_i, p, t_fz - real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux +! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP), dimension(:) , pointer :: net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - fresh_wa_flux => ice%flx_fw(:) +! fresh_wa_flux => ice%flx_fw(:) net_heat_flux => ice%flx_h(:) !___________________________________________________________________________ diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 44f1fa934..95219c143 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -72,7 +72,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux + REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux #if defined (__oasis) || defined (__ifsinterface) real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index be8fb7f8e..47236b4e9 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -268,7 +268,8 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:) , pointer :: a_ice_old real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn - real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux +! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP), dimension(:) , pointer :: net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -281,7 +282,7 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) a_ice_old => ice%data(1)%values_old(:) thdgr => ice%thermo%thdgr(:) thdgrsn => ice%thermo%thdgrsn(:) - fresh_wa_flux => ice%flx_fw(:) +! fresh_wa_flux => ice%flx_fw(:) net_heat_flux => ice%flx_h(:) !___________________________________________________________________________ diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 66a1c4774..847b2dce0 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -196,7 +196,8 @@ subroutine ice_array_setup(partit, mesh) ! S_oc_array = 0.0_WP ! T_oc_array = 0.0_WP ! allocate(fresh_wa_flux(n_size), net_heat_flux(n_size)) -! fresh_wa_flux = 0.0_WP + allocate(fresh_wa_flux(n_size)) + fresh_wa_flux = 0.0_WP ! net_heat_flux = 0.0_WP allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) stress_atmice_x = 0.0_WP diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 6c1eb9e7e..6260e2199 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -65,7 +65,8 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w - real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux +! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP), dimension(:) , pointer :: net_heat_flux myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -81,7 +82,7 @@ subroutine thermodynamics(ice, partit, mesh) S_oc_array => ice%srfoce_salt(:) u_w => ice%srfoce_u(:) v_w => ice%srfoce_v(:) - fresh_wa_flux => ice%flx_fw(:) +! fresh_wa_flux => ice%flx_fw(:) net_heat_flux => ice%flx_h(:) !_____________________________________________________________________________ diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 4857e4325..19a9f42af 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -137,7 +137,8 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn, thdgr_old, t_skin, ustar_aux real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w - real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux +! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP), dimension(:) , pointer :: net_heat_flux myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -160,8 +161,8 @@ subroutine thermodynamics(ice, partit, mesh) v_w => ice%srfoce_v(:) T_oc_array => ice%srfoce_temp(:) S_oc_array => ice%srfoce_salt(:) - fresh_wa_flux => ice%flx_fw(:) net_heat_flux => ice%flx_h(:) +! fresh_wa_flux => ice%flx_fw(:) !_____________________________________________________________________________ rsss=ref_sss From 811baf29e11bc6027f8182acdfa54dee7d0b344b Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 13:42:03 +0100 Subject: [PATCH 656/909] turn back net_heat_flux --- src/cavity_param.F90 | 8 ++++---- src/ice_modules.F90 | 2 +- src/ice_oce_coupling.F90 | 4 ++-- src/ice_setup_step.F90 | 3 ++- src/ice_thermo_cpl.F90 | 4 ++-- src/ice_thermo_oce.F90 | 4 ++-- 6 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 0ba757a5c..3d45f6610 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -241,14 +241,14 @@ subroutine cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) ! oofw= -2.5 real(kind=WP), dimension(:,:,:), pointer :: UVnode ! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux - real(kind=WP), dimension(:) , pointer :: net_heat_flux +! real(kind=WP), dimension(:) , pointer :: net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UVnode=>dynamics%uvnode(:,:,:) ! fresh_wa_flux => ice%flx_fw(:) - net_heat_flux => ice%flx_h(:) +! net_heat_flux => ice%flx_h(:) !___________________________________________________________________________ do node=1,myDim_nod2D !+eDim_nod2D @@ -404,13 +404,13 @@ subroutine cavity_heat_water_fluxes_2eq(ice, tracers, partit, mesh) real(kind=WP) :: c2, c3, c4, c5, c6 real(kind=WP) :: t_i, s_i, p, t_fz ! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux - real(kind=WP), dimension(:) , pointer :: net_heat_flux +! real(kind=WP), dimension(:) , pointer :: net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" ! fresh_wa_flux => ice%flx_fw(:) - net_heat_flux => ice%flx_h(:) +! net_heat_flux => ice%flx_h(:) !___________________________________________________________________________ ! parameter for computing heat and water fluxes diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 95219c143..726d0c734 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -73,7 +73,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux + REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux #if defined (__oasis) || defined (__ifsinterface) real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 47236b4e9..221d54cb1 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -269,7 +269,7 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) real(kind=WP), dimension(:) , pointer :: a_ice_old real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn ! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux - real(kind=WP), dimension(:) , pointer :: net_heat_flux +! real(kind=WP), dimension(:) , pointer :: net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -283,7 +283,7 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) thdgr => ice%thermo%thdgr(:) thdgrsn => ice%thermo%thdgrsn(:) ! fresh_wa_flux => ice%flx_fw(:) - net_heat_flux => ice%flx_h(:) +! net_heat_flux => ice%flx_h(:) !___________________________________________________________________________ allocate(flux(myDim_nod2D+eDim_nod2D)) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 847b2dce0..149da6c98 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -197,8 +197,9 @@ subroutine ice_array_setup(partit, mesh) ! T_oc_array = 0.0_WP ! allocate(fresh_wa_flux(n_size), net_heat_flux(n_size)) allocate(fresh_wa_flux(n_size)) +allocate(net_heat_flux(n_size)) fresh_wa_flux = 0.0_WP -! net_heat_flux = 0.0_WP + net_heat_flux = 0.0_WP allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) stress_atmice_x = 0.0_WP stress_atmice_y = 0.0_WP diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 6260e2199..3ff94efdb 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -66,7 +66,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w ! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux - real(kind=WP), dimension(:) , pointer :: net_heat_flux +! real(kind=WP), dimension(:) , pointer :: net_heat_flux myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -83,7 +83,7 @@ subroutine thermodynamics(ice, partit, mesh) u_w => ice%srfoce_u(:) v_w => ice%srfoce_v(:) ! fresh_wa_flux => ice%flx_fw(:) - net_heat_flux => ice%flx_h(:) +! net_heat_flux => ice%flx_h(:) !_____________________________________________________________________________ rsss = ref_sss diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 19a9f42af..1f8addb29 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -138,7 +138,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn, thdgr_old, t_skin, ustar_aux real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w ! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux - real(kind=WP), dimension(:) , pointer :: net_heat_flux +! real(kind=WP), dimension(:) , pointer :: net_heat_flux myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -161,7 +161,7 @@ subroutine thermodynamics(ice, partit, mesh) v_w => ice%srfoce_v(:) T_oc_array => ice%srfoce_temp(:) S_oc_array => ice%srfoce_salt(:) - net_heat_flux => ice%flx_h(:) +! net_heat_flux => ice%flx_h(:) ! fresh_wa_flux => ice%flx_fw(:) !_____________________________________________________________________________ From 98dd6d54dfccc3213e5776da5c061e92e4f1545b Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 15:53:04 +0100 Subject: [PATCH 657/909] put back fct derived types --- src/ice_fct.F90 | 4 ++-- src/ice_modules.F90 | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 8e37ec572..a5f62d385 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -808,12 +808,12 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) type(t_mesh) , intent(in) , target :: mesh !_____________________________________________________________________________ ! pointer on necessary derived types - real(kind=WP), dimension(:), pointer :: mass_matrix +! real(kind=WP), dimension(:), pointer :: mass_matrix #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - mass_matrix => ice%work%fct_massmatrix +! mass_matrix => ice%work%fct_massmatrix ! ! a) allocate(mass_matrix(sum(nn_num(1:myDim_nod2D)))) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 726d0c734..c4526a072 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -97,7 +97,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix + REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: alpha_evp_array(:) ! of myDim_elem2D REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: beta_evp_array(:) ! of myDim_node2D+eDim_node2D From 9a4ffedbab98292b1945c7e007e91ef24ed1cb77 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 15:59:27 +0100 Subject: [PATCH 658/909] put back mass_matrix --- src/ice_fct.F90 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index a5f62d385..e55515696 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -285,7 +285,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl - real(kind=WP), dimension(:), pointer :: mass_matrix +! real(kind=WP), dimension(:), pointer :: mass_matrix #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -299,7 +299,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) a_icel => ice%data(1)%valuesl(:) m_icel => ice%data(2)%valuesl(:) m_snowl => ice%data(3)%valuesl(:) - mass_matrix => ice%work%fct_massmatrix(:) +! mass_matrix => ice%work%fct_massmatrix(:) !___________________________________________________________________________ gamma=ice_gamma_fct ! Added diffusivity parameter @@ -364,7 +364,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow - real(kind=WP), dimension(:), pointer :: mass_matrix +! real(kind=WP), dimension(:), pointer :: mass_matrix #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -378,7 +378,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) da_ice => ice%data(1)%dvalues(:) dm_ice => ice%data(2)%dvalues(:) dm_snow => ice%data(3)%dvalues(:) - mass_matrix => ice%work%fct_massmatrix(:) +! mass_matrix => ice%work%fct_massmatrix(:) !_____________________________________________________________________________ ! Does Taylor-Galerkin solution @@ -1016,7 +1016,7 @@ subroutine ice_update_for_div(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: rhs_adiv, rhs_mdiv, rhs_msdiv real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow - real(kind=WP), dimension(:), pointer :: mass_matrix +! real(kind=WP), dimension(:), pointer :: mass_matrix #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1033,7 +1033,7 @@ subroutine ice_update_for_div(ice, partit, mesh) da_ice => ice%data(1)%dvalues(:) dm_ice => ice%data(2)%dvalues(:) dm_snow => ice%data(3)%dvalues(:) - mass_matrix => ice%work%fct_massmatrix(:) +! mass_matrix => ice%work%fct_massmatrix(:) !___________________________________________________________________________ ! Does Taylor-Galerkin solution From 92c2ece543e5b3dadccba6f599fc26abd7fd0783 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 16:10:09 +0100 Subject: [PATCH 659/909] exchange again fresh_wa_flux, net_heat_flux against the ice derived type variables --- src/cavity_param.F90 | 12 ++++++------ src/ice_modules.F90 | 4 ++-- src/ice_oce_coupling.F90 | 6 +++--- src/ice_setup_step.F90 | 8 ++++---- src/ice_thermo_cpl.F90 | 6 +++--- src/ice_thermo_oce.F90 | 6 +++--- 6 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index 3d45f6610..fdd451c37 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -240,15 +240,15 @@ subroutine cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) ! oomw= -30. ! oofw= -2.5 real(kind=WP), dimension(:,:,:), pointer :: UVnode -! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux ! real(kind=WP), dimension(:) , pointer :: net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" UVnode=>dynamics%uvnode(:,:,:) -! fresh_wa_flux => ice%flx_fw(:) -! net_heat_flux => ice%flx_h(:) + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) !___________________________________________________________________________ do node=1,myDim_nod2D !+eDim_nod2D @@ -403,14 +403,14 @@ subroutine cavity_heat_water_fluxes_2eq(ice, tracers, partit, mesh) real(kind=WP) :: gama, L, aux real(kind=WP) :: c2, c3, c4, c5, c6 real(kind=WP) :: t_i, s_i, p, t_fz -! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux ! real(kind=WP), dimension(:) , pointer :: net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -! fresh_wa_flux => ice%flx_fw(:) -! net_heat_flux => ice%flx_h(:) + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) !___________________________________________________________________________ ! parameter for computing heat and water fluxes diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index c4526a072..09f2824f2 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -72,8 +72,8 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux #if defined (__oasis) || defined (__ifsinterface) real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 221d54cb1..dffd5bca8 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -268,7 +268,7 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:) , pointer :: a_ice_old real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn -! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux ! real(kind=WP), dimension(:) , pointer :: net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -282,8 +282,8 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) a_ice_old => ice%data(1)%values_old(:) thdgr => ice%thermo%thdgr(:) thdgrsn => ice%thermo%thdgrsn(:) -! fresh_wa_flux => ice%flx_fw(:) -! net_heat_flux => ice%flx_h(:) + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) !___________________________________________________________________________ allocate(flux(myDim_nod2D+eDim_nod2D)) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 149da6c98..fc364078b 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -196,10 +196,10 @@ subroutine ice_array_setup(partit, mesh) ! S_oc_array = 0.0_WP ! T_oc_array = 0.0_WP ! allocate(fresh_wa_flux(n_size), net_heat_flux(n_size)) - allocate(fresh_wa_flux(n_size)) -allocate(net_heat_flux(n_size)) - fresh_wa_flux = 0.0_WP - net_heat_flux = 0.0_WP +! allocate(fresh_wa_flux(n_size)) +! allocate(net_heat_flux(n_size)) +! fresh_wa_flux = 0.0_WP +! net_heat_flux = 0.0_WP allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) stress_atmice_x = 0.0_WP stress_atmice_y = 0.0_WP diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 3ff94efdb..15fd41fbf 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -65,7 +65,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w -! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux ! real(kind=WP), dimension(:) , pointer :: net_heat_flux myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D @@ -82,8 +82,8 @@ subroutine thermodynamics(ice, partit, mesh) S_oc_array => ice%srfoce_salt(:) u_w => ice%srfoce_u(:) v_w => ice%srfoce_v(:) -! fresh_wa_flux => ice%flx_fw(:) -! net_heat_flux => ice%flx_h(:) + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) !_____________________________________________________________________________ rsss = ref_sss diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 1f8addb29..5c20fedee 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -137,7 +137,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn, thdgr_old, t_skin, ustar_aux real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w -! real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux ! real(kind=WP), dimension(:) , pointer :: net_heat_flux myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D @@ -161,8 +161,8 @@ subroutine thermodynamics(ice, partit, mesh) v_w => ice%srfoce_v(:) T_oc_array => ice%srfoce_temp(:) S_oc_array => ice%srfoce_salt(:) -! net_heat_flux => ice%flx_h(:) -! fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) + fresh_wa_flux => ice%flx_fw(:) !_____________________________________________________________________________ rsss=ref_sss From 54b093071b96a258c894047ea2bb7691005358d7 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 16:27:43 +0100 Subject: [PATCH 660/909] exchange stress_atmice_x, stress_atmice_y, stress_iceoce_x, stress_iceoce_x against ice derived types --- src/gen_forcing_couple.F90 | 12 +++++--- src/ice_EVP.F90 | 31 ++++++++++--------- src/ice_maEVP.F90 | 63 +++++++++++++++++++++----------------- src/ice_modules.F90 | 8 ++--- src/ice_oce_coupling.F90 | 15 +++++---- src/ice_setup_step.F90 | 16 +++++----- src/ice_thermo_cpl.F90 | 1 - src/io_meandata.F90 | 8 ++--- 8 files changed, 85 insertions(+), 69 deletions(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 64a1ed646..eda1d7e2b 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -129,14 +129,18 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) !_____________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice, u_w, v_w + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) - u_w => ice%srfoce_u(:) - v_w => ice%srfoce_v(:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + stress_atmice_x => ice%stress_atmice_x(:) + stress_atmice_y => ice%stress_atmice_y(:) + t1=MPI_Wtime() #ifdef __oasis diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index ab092a1d4..b46cfb84e 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -492,24 +492,27 @@ subroutine EVPdynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_ice_old, v_ice_old real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m real(kind=WP), dimension(:), pointer :: u_w, v_w, elevation + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - u_ice_old => ice%uice_old(:) - v_ice_old => ice%vice_old(:) - u_rhs_ice => ice%uice_rhs(:) - v_rhs_ice => ice%vice_rhs(:) - rhs_a => ice%data(1)%values_rhs(:) - rhs_m => ice%data(2)%values_rhs(:) - u_w => ice%srfoce_u(:) - v_w => ice%srfoce_v(:) - elevation => ice%srfoce_ssh(:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + u_ice_old => ice%uice_old(:) + v_ice_old => ice%vice_old(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + elevation => ice%srfoce_ssh(:) + stress_atmice_x => ice%stress_atmice_x(:) + stress_atmice_y => ice%stress_atmice_y(:) !_______________________________________________________________________________ ! If Icepack is used, always update the tracers diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 5d9de09c9..35c452c8f 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -418,28 +418,32 @@ subroutine EVPdynamics_m(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m real(kind=WP), dimension(:), pointer :: u_w, v_w real(kind=WP), dimension(:), pointer :: elevation + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - eps11 => ice%work%eps11(:) - eps12 => ice%work%eps12(:) - eps22 => ice%work%eps22(:) - sigma11 => ice%work%sigma11(:) - sigma12 => ice%work%sigma12(:) - sigma22 => ice%work%sigma22(:) - u_rhs_ice => ice%uice_rhs(:) - v_rhs_ice => ice%vice_rhs(:) - rhs_a => ice%data(1)%values_rhs(:) - rhs_m => ice%data(2)%values_rhs(:) - u_w => ice%srfoce_u(:) - v_w => ice%srfoce_v(:) - elevation => ice%srfoce_ssh(:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + elevation => ice%srfoce_ssh(:) + stress_atmice_x => ice%stress_atmice_x(:) + stress_atmice_y => ice%stress_atmice_y(:) + !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) @@ -967,20 +971,23 @@ subroutine EVPdynamics_a(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice real(kind=WP), dimension(:), pointer :: u_w, v_w + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - u_rhs_ice => ice%uice_rhs(:) - v_rhs_ice => ice%vice_rhs(:) - u_w => ice%srfoce_u(:) - v_w => ice%srfoce_v(:) - + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + stress_atmice_x => ice%stress_atmice_x + stress_atmice_y => ice%stress_atmice_y + !___________________________________________________________________________ steps=evp_rheol_steps rdt=ice_dt diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 09f2824f2..85f083160 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -87,10 +87,10 @@ MODULE i_ARRAYS #endif /* (__oasis) || defined (__ifsinterface)*/ ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: S_oc_array, T_oc_array - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_x - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_y - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_x - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_y +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_x +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_y +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_x +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_y ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: t_skin ! FCT implementation ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_icel, a_icel, m_snowl diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index dffd5bca8..0eff4bb5e 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -77,16 +77,19 @@ subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice, a_ice, u_w, v_w + real(kind=WP), dimension(:), pointer :: stress_iceoce_x, stress_iceoce_y #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) - a_ice => ice%data(1)%values(:) - u_w => ice%srfoce_u(:) - v_w => ice%srfoce_v(:) - + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + stress_iceoce_x => ice%stress_iceoce_x(:) + stress_iceoce_y => ice%stress_iceoce_y(:) + ! ================== ! momentum flux: ! ================== diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index fc364078b..fb5778a83 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -200,14 +200,14 @@ subroutine ice_array_setup(partit, mesh) ! allocate(net_heat_flux(n_size)) ! fresh_wa_flux = 0.0_WP ! net_heat_flux = 0.0_WP - allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) - stress_atmice_x = 0.0_WP - stress_atmice_y = 0.0_WP -! allocate(elevation(n_size)) ! =ssh of ocean -! elevation = 0.0_WP - allocate(stress_iceoce_x(n_size), stress_iceoce_y(n_size)) - stress_iceoce_x = 0.0_WP - stress_iceoce_y = 0.0_WP +! allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) +! stress_atmice_x = 0.0_WP +! stress_atmice_y = 0.0_WP +! ! allocate(elevation(n_size)) ! =ssh of ocean +! ! elevation = 0.0_WP +! allocate(stress_iceoce_x(n_size), stress_iceoce_y(n_size)) +! stress_iceoce_x = 0.0_WP +! stress_iceoce_y = 0.0_WP ! allocate(U_w(n_size), V_w(n_size)) ! =uf and vf of ocean at surface nodes #if defined (__oasis) || defined (__ifsinterface) allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 15fd41fbf..6c1eb9e7e 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -66,7 +66,6 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux -! real(kind=WP), dimension(:) , pointer :: net_heat_flux myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index cd7b50f29..7bffcf6bc 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -215,17 +215,17 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) CASE ('fw ') call def_stream(nod2D, myDim_nod2D, 'fw', 'fresh water flux', 'm/s', water_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmice_x ') - call def_stream(nod2D, myDim_nod2D, 'atmice_x', 'stress atmice x', 'N/m2', stress_atmice_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'atmice_x', 'stress atmice x', 'N/m2', ice%stress_atmice_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmice_y ') - call def_stream(nod2D, myDim_nod2D, 'atmice_y', 'stress atmice y', 'N/m2', stress_atmice_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'atmice_y', 'stress atmice y', 'N/m2', ice%stress_atmice_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmoce_x ') call def_stream(nod2D, myDim_nod2D, 'atmoce_x', 'stress atmoce x', 'N/m2', stress_atmoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmoce_y ') call def_stream(nod2D, myDim_nod2D, 'atmoce_y', 'stress atmoce y', 'N/m2', stress_atmoce_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('iceoce_x ') - call def_stream(nod2D, myDim_nod2D, 'iceoce_x', 'stress iceoce x', 'N/m2', stress_iceoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'iceoce_x', 'stress iceoce x', 'N/m2', ice%stress_iceoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('iceoce_y ') - call def_stream(nod2D, myDim_nod2D, 'iceoce_y', 'stress iceoce y', 'N/m2', stress_iceoce_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'iceoce_y', 'stress iceoce y', 'N/m2', ice%stress_iceoce_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('alpha ') call def_stream(nod2D, myDim_nod2D, 'alpha', 'thermal expansion', 'none', sw_alpha(1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('beta ') From 2e38989293efdf5f1c2b7ba92913fe458840da11 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 16:36:50 +0100 Subject: [PATCH 661/909] exchange stress_atmice_x, stress_atmice_y, stress_iceoce_x, stress_iceoce_x against ice derived types in src/ifs_interface/ifs_interface.F90 --- src/ifs_interface/ifs_interface.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 3dd257c72..b3e55cf4c 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -616,10 +616,12 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & !#include "associate_mesh.h" ! associate only the necessary things real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D +! real(kind=wpIFS), dimension(:,:), pointer :: stress_atmice_x, stress_atmice_y myDim_nod2D => fesom%partit%myDim_nod2D eDim_nod2D => fesom%partit%eDim_nod2D coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D - + stress_atmice_x => fesom%ice%stress_atmice_x + stress_atmice_y => fesom%ice%stress_atmice_y ! =================================================================== ! ! Sort out incoming arrays from the IFS and put them on the ocean grid From c3faf4509f4549538ccbb743e7dd502dce9dd73f Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 16:42:00 +0100 Subject: [PATCH 662/909] fix stress_atmice_x, stress_atmice_y, stress_iceoce_x, stress_iceoce_x against ice derived types in src/ifs_interface/ifs_interface.F90 --- src/ifs_interface/ifs_interface.F90 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index b3e55cf4c..4eb9e991c 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -567,7 +567,8 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & USE g_rotate_grid, only: vector_r2g, vector_g2r USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & & evap_no_ifrac, sublimation !'longwave' only stand-alone, 'evaporation' filled later - USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, oce_heat_flux, ice_heat_flux +! USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, oce_heat_flux, ice_heat_flux + SE i_ARRAYS, only: oce_heat_flux, ice_heat_flux USE o_ARRAYS, only: stress_atmoce_x, stress_atmoce_y USE g_comm_auto ! exchange_nod does the halo exchange From 3bd3abf99c1ebd4ab005d11eec5eb316e04de568 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 16:47:08 +0100 Subject: [PATCH 663/909] fix bug regading stress_atmice_x, stress_atmice_y, stress_iceoce_x, stress_iceoce_x against ice derived types in src/ifs_interface/ifs_interface.F90 --- src/ifs_interface/ifs_interface.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 4eb9e991c..e0403524f 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -568,7 +568,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & & evap_no_ifrac, sublimation !'longwave' only stand-alone, 'evaporation' filled later ! USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, oce_heat_flux, ice_heat_flux - SE i_ARRAYS, only: oce_heat_flux, ice_heat_flux + USE i_ARRAYS, only: oce_heat_flux, ice_heat_flux USE o_ARRAYS, only: stress_atmoce_x, stress_atmoce_y USE g_comm_auto ! exchange_nod does the halo exchange From 38a905a62ba842c4a08a0180ee31e63549cf13f3 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 16:52:27 +0100 Subject: [PATCH 664/909] fix bug regading stress_atmice_x, stress_atmice_y, stress_iceoce_x, stress_iceoce_x against ice derived types in src/ifs_interface/ifs_interface.F90 --> forgot pointer --- src/ifs_interface/ifs_interface.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index e0403524f..4f512893e 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -617,7 +617,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & !#include "associate_mesh.h" ! associate only the necessary things real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D -! real(kind=wpIFS), dimension(:,:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=wpIFS), dimension(:,:), pointer :: stress_atmice_x, stress_atmice_y myDim_nod2D => fesom%partit%myDim_nod2D eDim_nod2D => fesom%partit%eDim_nod2D coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D From d5ad054dd4ece9f969a43e34d36ba0a3f1754052 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 16:59:28 +0100 Subject: [PATCH 665/909] WTF i should stop for today git add src/ifs_interface/ifs_interface.F90 --- src/ifs_interface/ifs_interface.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 4f512893e..b339a8cbb 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -617,7 +617,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & !#include "associate_mesh.h" ! associate only the necessary things real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D - real(kind=wpIFS), dimension(:,:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=wpIFS), dimension(:) , pointer :: stress_atmice_x, stress_atmice_y myDim_nod2D => fesom%partit%myDim_nod2D eDim_nod2D => fesom%partit%eDim_nod2D coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D From e6057f2ebe610af6e172e618caca421d7f8759b4 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 17:14:45 +0100 Subject: [PATCH 666/909] exchange u_ice_aux, v_ice_aux against icederived type --- src/ice_maEVP.F90 | 18 ++++++++++++++++-- src/ice_modules.F90 | 2 +- src/ice_setup_step.F90 | 6 +++--- 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 35c452c8f..3bc29c107 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -109,6 +109,7 @@ subroutine stress_tensor_m(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -121,6 +122,8 @@ subroutine stress_tensor_m(ice, partit, mesh) sigma11 => ice%work%sigma11(:) sigma12 => ice%work%sigma12(:) sigma22 => ice%work%sigma22(:) + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -419,6 +422,7 @@ subroutine EVPdynamics_m(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_w, v_w real(kind=WP), dimension(:), pointer :: elevation real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -443,6 +447,8 @@ subroutine EVPdynamics_m(ice, partit, mesh) elevation => ice%srfoce_ssh(:) stress_atmice_x => ice%stress_atmice_x(:) stress_atmice_y => ice%stress_atmice_y(:) + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -757,6 +763,7 @@ subroutine find_alpha_field_a(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -769,6 +776,8 @@ subroutine find_alpha_field_a(ice, partit, mesh) sigma11 => ice%work%sigma11(:) sigma12 => ice%work%sigma12(:) sigma22 => ice%work%sigma22(:) + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -851,6 +860,7 @@ subroutine stress_tensor_a(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -863,6 +873,8 @@ subroutine stress_tensor_a(ice, partit, mesh) sigma11 => ice%work%sigma11(:) sigma12 => ice%work%sigma12(:) sigma22 => ice%work%sigma22(:) + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -971,7 +983,8 @@ subroutine EVPdynamics_a(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice real(kind=WP), dimension(:), pointer :: u_w, v_w - real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -987,7 +1000,8 @@ subroutine EVPdynamics_a(ice, partit, mesh) v_w => ice%srfoce_v(:) stress_atmice_x => ice%stress_atmice_x stress_atmice_y => ice%stress_atmice_y - + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) !___________________________________________________________________________ steps=evp_rheol_steps rdt=ice_dt diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 85f083160..a45228ab5 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -67,7 +67,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: ths_temp ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index fb5778a83..b417d4a9a 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -149,14 +149,14 @@ subroutine ice_array_setup(partit, mesh) ! allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS ! allocate(thdgr_old(n_size)) !PS if (whichEVP > 0) then - allocate(u_ice_aux(n_size), v_ice_aux(n_size)) +! allocate(u_ice_aux(n_size), v_ice_aux(n_size)) allocate(alpha_evp_array(myDim_elem2D)) allocate(beta_evp_array(n_size)) alpha_evp_array=alpha_evp beta_evp_array =alpha_evp ! alpha=beta works most reliable - u_ice_aux=0.0_WP - v_ice_aux=0.0_WP +! u_ice_aux=0.0_WP +! v_ice_aux=0.0_WP end if ! allocate(rhs_mdiv(n_size), rhs_adiv(n_size), rhs_msdiv(n_size)) From 79cecb35cab95638e976322543f4d7d7f4814242 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 17:33:23 +0100 Subject: [PATCH 667/909] exchange alpha_evp_array, beta_evp_array against ice derived types --- src/MOD_ICE.F90 | 31 +++++++++++++++++++--------- src/ice_maEVP.F90 | 47 +++++++++++++++++++++++++++--------------- src/ice_modules.F90 | 4 ++-- src/ice_setup_step.F90 | 20 +++++++++--------- src/io_meandata.F90 | 4 ++-- 5 files changed, 65 insertions(+), 41 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 2b8f75af4..0f558e818 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -104,6 +104,9 @@ MODULE MOD_ICE ! freshwater & heatflux real(kind=WP), allocatable, dimension(:) :: flx_fw, flx_h + ! maEVP variables + real(kind=WP), allocatable, dimension(:) :: alpha_evp_array, beta_evp_array + !___________________________________________________________________________ ! total number of ice tracers (default=3, 1=area, 2=mice, 3=msnow, (4=ice_temp) #if defined (__oifs) || defined (__ifsinterface) @@ -341,7 +344,6 @@ subroutine WRITE_T_ICE(ice, unit, iostat, iomsg) call write_bin_array(ice%stress_iceoce_x, unit, iostat, iomsg) call write_bin_array(ice%stress_atmice_y, unit, iostat, iomsg) call write_bin_array(ice%stress_iceoce_y, unit, iostat, iomsg) -! call write_bin_array(ice%srfoce_uv, unit, iostat, iomsg) call write_bin_array(ice%srfoce_u, unit, iostat, iomsg) call write_bin_array(ice%srfoce_v, unit, iostat, iomsg) call write_bin_array(ice%srfoce_temp, unit, iostat, iomsg) @@ -349,7 +351,11 @@ subroutine WRITE_T_ICE(ice, unit, iostat, iomsg) call write_bin_array(ice%srfoce_ssh, unit, iostat, iomsg) call write_bin_array(ice%flx_fw, unit, iostat, iomsg) call write_bin_array(ice%flx_h, unit, iostat, iomsg) - + if (ice%whichEVP > 0) then + call write_bin_array(ice%alpha_evp_array, unit, iostat, iomsg) + call write_bin_array(ice%beta_evp_array, unit, iostat, iomsg) + end if + !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) ice%num_itracers do i=1, ice%num_itracers @@ -408,7 +414,6 @@ subroutine READ_T_ICE(ice, unit, iostat, iomsg) call read_bin_array(ice%stress_iceoce_x, unit, iostat, iomsg) call read_bin_array(ice%stress_atmice_y, unit, iostat, iomsg) call read_bin_array(ice%stress_iceoce_y, unit, iostat, iomsg) -! call read_bin_array(ice%srfoce_uv, unit, iostat, iomsg) call read_bin_array(ice%srfoce_u, unit, iostat, iomsg) call read_bin_array(ice%srfoce_v, unit, iostat, iomsg) call read_bin_array(ice%srfoce_temp, unit, iostat, iomsg) @@ -416,7 +421,10 @@ subroutine READ_T_ICE(ice, unit, iostat, iomsg) call read_bin_array(ice%srfoce_ssh, unit, iostat, iomsg) call read_bin_array(ice%flx_fw, unit, iostat, iomsg) call read_bin_array(ice%flx_h, unit, iostat, iomsg) - + if (ice%whichEVP > 0) then + call read_bin_array(ice%alpha_evp_array, unit, iostat, iomsg) + call read_bin_array(ice%beta_evp_array, unit, iostat, iomsg) + end if !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) ice%num_itracers do i=1, ice%num_itracers @@ -564,20 +572,23 @@ subroutine ice_init(ice, partit, mesh) ice%uice_old = 0.0_WP ice%stress_atmice_x = 0.0_WP ice%stress_iceoce_x = 0.0_WP - if (ice%whichEVP /= 0) then - allocate(ice%uice_aux( node_size)) - ice%uice_aux = 0.0_WP - end if ice%vice = 0.0_WP ice%vice_rhs = 0.0_WP ice%vice_old = 0.0_WP ice%stress_atmice_y = 0.0_WP ice%stress_iceoce_y = 0.0_WP if (ice%whichEVP /= 0) then + allocate(ice%uice_aux( node_size)) allocate(ice%vice_aux( node_size)) + ice%uice_aux = 0.0_WP ice%vice_aux = 0.0_WP - end if - + end if + if (ice%whichEVP == 2) then + allocate(ice%alpha_evp_array( node_size)) + allocate(ice%beta_evp_array( node_size)) + ice%alpha_evp_array = 0.0_WP + ice%beta_evp_array = 0.0_WP + end if !___________________________________________________________________________ ! initialise surface ocean arrays in ice derived type allocate(ice%srfoce_u( node_size)) diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 3bc29c107..d0a5b2c8b 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -40,12 +40,14 @@ subroutine find_alpha_field_a(ice, partit, mesh) type(t_mesh) , intent(in) , target :: mesh end subroutine - subroutine find_beta_field_a(partit, mesh) + subroutine find_beta_field_a(ice, partit, mesh) + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - type(t_mesh), intent(in), target :: mesh + type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh end subroutine end interface end module @@ -764,6 +766,7 @@ subroutine find_alpha_field_a(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux + real(kind=WP), dimension(:), pointer :: alpha_evp_array #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -778,7 +781,7 @@ subroutine find_alpha_field_a(ice, partit, mesh) sigma22 => ice%work%sigma22(:) u_ice_aux => ice%uice_aux(:) v_ice_aux => ice%vice_aux(:) - + alpha_evp_array => ice%alpha_evp_array(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ellipse**2) @@ -861,20 +864,22 @@ subroutine stress_tensor_a(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux + real(kind=WP), dimension(:), pointer :: alpha_evp_array #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - eps11 => ice%work%eps11(:) - eps12 => ice%work%eps12(:) - eps22 => ice%work%eps22(:) - sigma11 => ice%work%sigma11(:) - sigma12 => ice%work%sigma12(:) - sigma22 => ice%work%sigma22(:) - u_ice_aux => ice%uice_aux(:) - v_ice_aux => ice%vice_aux(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) + alpha_evp_array => ice%alpha_evp_array(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -985,6 +990,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_w, v_w real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux + real(kind=WP), dimension(:), pointer :: beta_evp_array #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1002,6 +1008,8 @@ subroutine EVPdynamics_a(ice, partit, mesh) stress_atmice_y => ice%stress_atmice_y u_ice_aux => ice%uice_aux(:) v_ice_aux => ice%vice_aux(:) + beta_evp_array => ice%beta_evp_array(:) + !___________________________________________________________________________ steps=evp_rheol_steps rdt=ice_dt @@ -1073,7 +1081,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) call find_alpha_field_a(ice, partit, mesh) ! alpha_evp_array is initialized with alpha_evp; ! At this stage we already have non-trivial velocities. - call find_beta_field_a(partit, mesh) + call find_beta_field_a(ice, partit, mesh) end subroutine EVPdynamics_a ! ! @@ -1082,25 +1090,30 @@ end subroutine EVPdynamics_a ! reason we need it in addition to alpha_evp_array (we work with ! alpha=beta, and keep different names for generality; mEVP can work with ! alpha \ne beta, but not aEVP). -subroutine find_beta_field_a(partit, mesh) +subroutine find_beta_field_a(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH + USE MOD_ICE use o_param USE i_param use i_arrays Implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice !___________________________________________________________________________ integer :: n !___________________________________________________________________________ ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: alpha_evp_array, beta_evp_array #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - + alpha_evp_array => ice%alpha_evp_array(:) + beta_evp_array => ice%beta_evp_array(:) + !___________________________________________________________________________ DO n=1, myDim_nod2D !________________________________________________________________________ diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index a45228ab5..ea7e182ec 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -98,8 +98,8 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: alpha_evp_array(:) ! of myDim_elem2D - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: beta_evp_array(:) ! of myDim_node2D+eDim_node2D +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: alpha_evp_array(:) ! of myDim_elem2D +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: beta_evp_array(:) ! of myDim_node2D+eDim_node2D ! Mean arrays REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_mean, V_ice_mean diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index b417d4a9a..9e9cfa818 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -148,16 +148,16 @@ subroutine ice_array_setup(partit, mesh) ! allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS ! allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS ! allocate(thdgr_old(n_size)) !PS - if (whichEVP > 0) then -! allocate(u_ice_aux(n_size), v_ice_aux(n_size)) - allocate(alpha_evp_array(myDim_elem2D)) - allocate(beta_evp_array(n_size)) - - alpha_evp_array=alpha_evp - beta_evp_array =alpha_evp ! alpha=beta works most reliable -! u_ice_aux=0.0_WP -! v_ice_aux=0.0_WP - end if +! if (whichEVP > 0) then +! ! allocate(u_ice_aux(n_size), v_ice_aux(n_size)) +! allocate(alpha_evp_array(myDim_elem2D)) +! allocate(beta_evp_array(n_size)) +! +! alpha_evp_array=alpha_evp +! beta_evp_array =alpha_evp ! alpha=beta works most reliable +! ! u_ice_aux=0.0_WP +! ! v_ice_aux=0.0_WP +! end if ! allocate(rhs_mdiv(n_size), rhs_adiv(n_size), rhs_msdiv(n_size)) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 7bffcf6bc..6aee933a2 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -509,8 +509,8 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) end if if (whichEVP==2) then - call def_stream(elem2D, myDim_elem2D, 'alpha_EVP', 'alpha in EVP', 'n/a', alpha_evp_array, 1, 'd', i_real4, partit, mesh) - call def_stream(nod2D, myDim_nod2D, 'beta_EVP', 'beta in EVP', 'n/a', beta_evp_array, 1, 'd', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'alpha_EVP', 'alpha in EVP', 'n/a', ice%alpha_evp_array, 1, 'd', i_real4, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'beta_EVP', 'beta in EVP', 'n/a', ice%beta_evp_array, 1, 'd', i_real4, partit, mesh) end if !___________________________________________________________________________ From 013d7153ad7367db86ecc26635564a6dbf2886a7 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 22:53:09 +0100 Subject: [PATCH 668/909] exchange, ice_temp, rhs_temp, m_templ, dm_temp, rhs_tempdiv against ice derived type variables --- src/MOD_ICE.F90 | 1 + src/gen_forcing_couple.F90 | 15 +++-- src/ice_fct.F90 | 134 ++++++++++++++++++++++++------------- src/ice_modules.F90 | 6 +- src/ice_setup_step.F90 | 25 ++++--- src/ice_thermo_cpl.F90 | 15 +++-- src/ice_thermo_oce.F90 | 23 ++++--- src/io_meandata.F90 | 2 +- src/io_restart.F90 | 2 +- 9 files changed, 147 insertions(+), 76 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 0f558e818..ebc86a5c3 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -626,6 +626,7 @@ subroutine ice_init(ice, partit, mesh) ice%data(n)%values_div_rhs = 0.0_WP ice%data(n)%dvalues = 0.0_WP ice%data(n)%valuesl = 0.0_WP + if (n==4) ice%data(n)%values = 265.15_WP end do !___________________________________________________________________________ diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index eda1d7e2b..dd386a588 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -129,7 +129,10 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) !_____________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice, u_w, v_w - real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp +#endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -140,10 +143,12 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) v_w => ice%srfoce_v(:) stress_atmice_x => ice%stress_atmice_x(:) stress_atmice_y => ice%stress_atmice_y(:) - +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) +#endif t1=MPI_Wtime() -#ifdef __oasis +#ifdef (__oasis) || defined (__ifsinterface) if (firstcall) then allocate(exchange(myDim_nod2D+eDim_nod2D), mask(myDim_nod2D+eDim_nod2D)) allocate(a2o_fcorr_stat(nrecv,6)) @@ -155,7 +160,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) do i=1,nsend exchange =0. if (i.eq.1) then -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) ! AWI-CM3 outgoing state vectors do n=1,myDim_nod2D+eDim_nod2D exchange(n)=tracers%data(1)%values(1, n)+tmelt ! sea surface temperature [K] @@ -281,7 +286,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) mask=1. call force_flux_consv(runoff, mask, i, 0,action, partit, mesh) end if -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) elseif (i.eq.13) then if (action) then diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index e55515696..7dfc9bab5 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -107,26 +107,32 @@ subroutine ice_TG_rhs(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, rhs_temp +#endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - rhs_a => ice%data(1)%values_rhs(:) - rhs_m => ice%data(2)%values_rhs(:) - rhs_ms => ice%data(3)%values_rhs(:) - + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + rhs_temp => ice%data(4)%values_rhs(:) +#endif !___________________________________________________________________________ ! Taylor-Galerkin (Lax-Wendroff) rhs DO row=1, myDim_nod2D rhs_m(row)=0._WP rhs_a(row)=0._WP rhs_ms(row)=0._WP -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) ths_temp(row)=0._WP #endif /* (__oifs) */ END DO @@ -163,7 +169,7 @@ subroutine ice_TG_rhs(ice, partit, mesh) rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes)) rhs_a(row)=rhs_a(row)+sum(entries*a_ice(elnodes)) rhs_ms(row)=rhs_ms(row)+sum(entries*m_snow(elnodes)) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) rhs_temp(row)=rhs_temp(row)+sum(entries*ice_temp(elnodes)) #endif /* (__oifs) */ END DO @@ -199,9 +205,9 @@ subroutine ice_fct_init(ice, partit, mesh) ! m_icel=0.0_WP ! a_icel=0.0_WP ! m_snowl=0.0_WP -#if defined (__oifs) - allocate(m_templ(n_size)) - allocate(dm_temp(n_size)) +#if defined (__oifs) || defined (__ifsinterface) +! allocate(m_templ(n_size)) +! allocate(dm_temp(n_size)) #endif /* (__oifs) */ ! allocate(icefluxes(myDim_elem2D,3)) ! allocate(icepplus(n_size), icepminus(n_size)) @@ -209,9 +215,9 @@ subroutine ice_fct_init(ice, partit, mesh) ! icepplus = 0.0_WP ! icepminus= 0.0_WP -#if defined (__oifs) - m_templ=0.0_WP - dm_temp=0.0_WP +#if defined (__oifs) || defined (__ifsinterface) +! m_templ=0.0_WP +! dm_temp=0.0_WP #endif /* (__oifs) */ ! allocate(dm_ice(n_size), da_ice(n_size), dm_snow(n_size)) ! increments of high @@ -246,7 +252,7 @@ subroutine ice_fct_solve(ice, partit, mesh) call ice_fem_fct(1, ice, partit, mesh) ! m_ice call ice_fem_fct(2, ice, partit, mesh) ! a_ice call ice_fem_fct(3, ice, partit, mesh) ! m_snow -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) call ice_fem_fct(4, ice, partit, mesh) ! ice_temp #endif /* (__oifs) */ @@ -286,6 +292,9 @@ subroutine ice_solve_low_order(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl ! real(kind=WP), dimension(:), pointer :: mass_matrix +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, rhs_temp, m_templ +#endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -300,7 +309,11 @@ subroutine ice_solve_low_order(ice, partit, mesh) m_icel => ice%data(2)%valuesl(:) m_snowl => ice%data(3)%valuesl(:) ! mass_matrix => ice%work%fct_massmatrix(:) - +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + rhs_temp => ice%data(4)%values_rhs(:) + m_templ => ice%data(4)%valuesl(:) +#endif !___________________________________________________________________________ gamma=ice_gamma_fct ! Added diffusivity parameter ! Adjust it to ensure posivity of solution @@ -323,7 +336,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) m_snowl(row)=(rhs_ms(row)+gamma*sum(mass_matrix(clo:clo2)* & m_snow(location(1:cn))))/area(1,row) + & (1.0_WP-gamma)*m_snow(row) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) m_templ(row)=(rhs_temp(row)+gamma*sum(mass_matrix(clo:clo2)* & ice_temp(location(1:cn))))/area(1,row) + & (1.0_WP-gamma)*ice_temp(row) @@ -333,7 +346,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) ! Low-order solution must be known to neighbours call exchange_nod(m_icel,a_icel,m_snowl, partit) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) call exchange_nod(m_templ, partit) #endif /* (__oifs) */ @@ -365,6 +378,9 @@ subroutine ice_solve_high_order(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow ! real(kind=WP), dimension(:), pointer :: mass_matrix +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: rhs_temp, m_templ, dm_temp +#endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -379,7 +395,11 @@ subroutine ice_solve_high_order(ice, partit, mesh) dm_ice => ice%data(2)%dvalues(:) dm_snow => ice%data(3)%dvalues(:) ! mass_matrix => ice%work%fct_massmatrix(:) - +#if defined (__oifs) || defined (__ifsinterface) + rhs_temp => ice%data(4)%values_rhs(:) + m_templ => ice%data(4)%valuesl(:) + dm_temp => ice%data(4)%dvalues(:) +#endif !_____________________________________________________________________________ ! Does Taylor-Galerkin solution ! @@ -392,14 +412,14 @@ subroutine ice_solve_high_order(ice, partit, mesh) dm_ice(row)=rhs_m(row)/area(1,row) da_ice(row)=rhs_a(row)/area(1,row) dm_snow(row)=rhs_ms(row)/area(1,row) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) dm_temp(row)=rhs_temp(row)/area(1,row) #endif /* (__oifs) */ end do call exchange_nod(dm_ice, da_ice, dm_snow, partit) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ !iterate @@ -419,7 +439,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) a_icel(row)=da_ice(row)+rhs_new/area(1,row) rhs_new=rhs_ms(row) - sum(mass_matrix(clo:clo2)*dm_snow(location(1:cn))) m_snowl(row)=dm_snow(row)+rhs_new/area(1,row) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) rhs_new=rhs_temp(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) m_templ(row)=dm_temp(row)+rhs_new/area(1,row) #endif /* (__oifs) */ @@ -432,13 +452,13 @@ subroutine ice_solve_high_order(ice, partit, mesh) dm_ice(row)=m_icel(row) da_ice(row)=a_icel(row) dm_snow(row)=m_snowl(row) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) dm_temp(row)=m_templ(row) #endif /* (__oifs) */ end do call exchange_nod(dm_ice, da_ice, dm_snow, partit) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ @@ -479,6 +499,9 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: da_ice, dm_ice, dm_snow real(kind=WP), dimension(:) , pointer :: icepplus, icepminus, tmax, tmin real(kind=WP), dimension(:,:), pointer :: icefluxes +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, m_templ, dm_temp +#endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -497,7 +520,11 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) icepminus => ice%work%fct_minus(:) tmax => ice%work%fct_tmax(:) tmin => ice%work%fct_tmin(:) - +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + m_templ => ice%data(4)%valuesl(:) + dm_temp => ice%data(4)%dvalues(:) +#endif !___________________________________________________________________________ gamma=ice_gamma_fct ! It should coinside with gamma in ! ts_solve_low_order @@ -552,7 +579,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) end do end if -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) if (tr_array_id==4) then do q=1,3 icefluxes(elem,q)=-sum(icoef(:,q)*(gamma*ice_temp(elnodes) + & @@ -609,7 +636,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) end do end if -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) if (tr_array_id==4) then do row=1, myDim_nod2D if (ulevels_nod2d(row)>1) cycle @@ -755,7 +782,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) end do end if -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) if(tr_array_id==4) then do n=1,myDim_nod2D if(ulevels_nod2D(n)>1) cycle !LK89140 @@ -773,11 +800,11 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) end do end do end if -#endif /* (__oifs) */ +#endif /* (__oifs) */ || defined (__ifsinterface) call exchange_nod(m_ice, a_ice, m_snow, partit) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) call exchange_nod(ice_temp, partit) #endif /* (__oifs) */ @@ -898,6 +925,9 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms real(kind=WP), dimension(:), pointer :: rhs_adiv, rhs_mdiv, rhs_msdiv +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, rhs_temp, rhs_tempdiv +#endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -913,7 +943,11 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) rhs_adiv => ice%data(1)%values_div_rhs(:) rhs_mdiv => ice%data(2)%values_div_rhs(:) rhs_msdiv => ice%data(3)%values_div_rhs(:) - +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + rhs_temp => ice%data(4)%values_rhs(:) + rhs_tempdiv => ice%data(4)%values_div_rhs(:) +#endif !___________________________________________________________________________ ! Computes the rhs in a Taylor-Galerkin way (with upwind type of ! correction for the advection operator) @@ -924,13 +958,13 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) rhs_m(row)=0.0_WP rhs_a(row)=0.0_WP rhs_ms(row)=0.0_WP -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) rhs_temp(row)=0.0_WP #endif /* (__oifs) */ rhs_mdiv(row)=0.0_WP rhs_adiv(row)=0.0_WP rhs_msdiv(row)=0.0_WP -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) rhs_tempdiv(row)=0.0_WP #endif /* (__oifs) */ END DO @@ -967,21 +1001,21 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) cx1=vol*ice_dt*c4*(sum(m_ice(elnodes))+m_ice(elnodes(n))+sum(entries2*m_ice(elnodes)))/12.0_WP cx2=vol*ice_dt*c4*(sum(a_ice(elnodes))+a_ice(elnodes(n))+sum(entries2*a_ice(elnodes)))/12.0_WP cx3=vol*ice_dt*c4*(sum(m_snow(elnodes))+m_snow(elnodes(n))+sum(entries2*m_snow(elnodes)))/12.0_WP -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) cx4=vol*ice_dt*c4*(sum(ice_temp(elnodes))+ice_temp(elnodes(n))+sum(entries2*ice_temp(elnodes)))/12.0_WP #endif /* (__oifs) */ rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes))+cx1 rhs_a(row)=rhs_a(row)+sum(entries*a_ice(elnodes))+cx2 rhs_ms(row)=rhs_ms(row)+sum(entries*m_snow(elnodes))+cx3 -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) rhs_temp(row)=rhs_temp(row)+sum(entries*ice_temp(elnodes))+cx4 #endif /* (__oifs) */ rhs_mdiv(row)=rhs_mdiv(row)-cx1 rhs_adiv(row)=rhs_adiv(row)-cx2 rhs_msdiv(row)=rhs_msdiv(row)-cx3 -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) rhs_tempdiv(row)=rhs_tempdiv(row)-cx4 #endif /* (__oifs) */ @@ -1017,6 +1051,9 @@ subroutine ice_update_for_div(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow ! real(kind=WP), dimension(:), pointer :: mass_matrix +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, m_templ, dm_temp, rhs_tempdiv +#endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1034,7 +1071,12 @@ subroutine ice_update_for_div(ice, partit, mesh) dm_ice => ice%data(2)%dvalues(:) dm_snow => ice%data(3)%dvalues(:) ! mass_matrix => ice%work%fct_massmatrix(:) - +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + m_templ => ice%data(4)%valuesl(:) + dm_temp => ice%data(4)%dvalues(:) + rhs_tempdiv => ice%data(4)%values_div_rhs(:) +#endif !___________________________________________________________________________ ! Does Taylor-Galerkin solution ! @@ -1047,14 +1089,14 @@ subroutine ice_update_for_div(ice, partit, mesh) dm_ice(row) =rhs_mdiv(row) /area(1,row) da_ice(row) =rhs_adiv(row) /area(1,row) dm_snow(row)=rhs_msdiv(row)/area(1,row) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) dm_temp(row)=rhs_tempdiv(row)/area(1,row) #endif /* (__oifs) */ end do call exchange_nod(dm_ice, partit) call exchange_nod(da_ice, partit) call exchange_nod(dm_snow, partit) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ @@ -1075,7 +1117,7 @@ subroutine ice_update_for_div(ice, partit, mesh) a_icel(row)=da_ice(row)+rhs_new/area(1,row) rhs_new=rhs_msdiv(row) - sum(mass_matrix(clo:clo2)*dm_snow(location(1:cn))) m_snowl(row)=dm_snow(row)+rhs_new/area(1,row) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) rhs_new=rhs_tempdiv(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) m_templ(row)=dm_temp(row)+rhs_new/area(1,row) #endif /* (__oifs) */ @@ -1088,21 +1130,21 @@ subroutine ice_update_for_div(ice, partit, mesh) dm_ice(row)=m_icel(row) da_ice(row)=a_icel(row) dm_snow(row)=m_snowl(row) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) dm_temp(row)=m_templ(row) #endif /* (__oifs) */ end do call exchange_nod(dm_ice, partit) call exchange_nod(da_ice, partit) call exchange_nod(dm_snow, partit) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ end do m_ice=m_ice+dm_ice a_ice=a_ice+da_ice m_snow=m_snow+dm_snow -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) ice_temp=ice_temp+dm_temp #endif /* (__oifs) */ diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index ea7e182ec..1c87bb436 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -75,12 +75,14 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux #if defined (__oasis) || defined (__ifsinterface) - real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling +! real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling + real(kind=WP),target, allocatable, dimension(:) :: ice_alb ! new fields for OIFS coupling real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux real(kind=WP),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux !temporary flux fields !(for flux correction) - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_temp, m_templ, dm_temp, rhs_tempdiv +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_temp, m_templ, dm_temp, rhs_tempdiv +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_templ, dm_temp, rhs_tempdiv #if defined (__oifs) || defined (__ifsinterface) real(kind=WP),target, allocatable, dimension(:) :: enthalpyoffuse #endif diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 9e9cfa818..2caa05e8d 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -213,12 +213,14 @@ subroutine ice_array_setup(partit, mesh) allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) #if defined (__oifs) || defined (__ifsinterface) - allocate(ice_alb(n_size), ice_temp(n_size), enthalpyoffuse(n_size)) - allocate(rhs_tempdiv(n_size), rhs_temp(n_size)) +! allocate(ice_alb(n_size), ice_temp(n_size), enthalpyoffuse(n_size)) +allocate(ice_alb(n_size), enthalpyoffuse(n_size)) +! allocate(rhs_tempdiv(n_size), rhs_temp(n_size)) + allocate(rhs_tempdiv(n_size)) ice_alb=0.6_WP - ice_temp=265.15_WP +! ice_temp=265.15_WP rhs_tempdiv=0._WP - rhs_temp=0._WP +! rhs_temp=0._WP enthalpyoffuse=0._WP #endif /* (__oifs) || defined (__ifsinterface) */ oce_heat_flux=0._WP @@ -263,12 +265,19 @@ subroutine ice_timestep(step, ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, a_ice +#endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) +#if defined (__oifs) || defined (__ifsinterface) + a_ice => ice%data(1)%values(:) + ice_temp => ice%data(4)%values(:) +#endif !___________________________________________________________________________ t0=MPI_Wtime() #if defined (__icepack) @@ -303,7 +312,7 @@ subroutine ice_timestep(step, ice, partit, mesh) ! call ice_fct_solve ! call cut_off ! new FCT routines from Sergey Danilov 08.05.2018 -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) do i=1,myDim_nod2D+eDim_nod2D ice_temp(i) = ice_temp(i)*a_ice(i) end do @@ -317,7 +326,7 @@ subroutine ice_timestep(step, ice, partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_update_for_div...'//achar(27)//'[0m' call ice_update_for_div(ice, partit, mesh) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) do i=1,myDim_nod2D+eDim_nod2D if (a_ice(i)>0.0_WP) ice_temp(i) = ice_temp(i)/a_ice(i) end do diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 6c1eb9e7e..a4a5dd969 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -66,6 +66,9 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:) , pointer :: ice_temp +#endif myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -83,7 +86,9 @@ subroutine thermodynamics(ice, partit, mesh) v_w => ice%srfoce_v(:) fresh_wa_flux => ice%flx_fw(:) net_heat_flux => ice%flx_h(:) - +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) +#endif !_____________________________________________________________________________ rsss = ref_sss @@ -101,7 +106,7 @@ subroutine thermodynamics(ice, partit, mesh) h = m_ice(inod) hsn = m_snow(inod) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) a2ohf = oce_heat_flux(inod) + shortwave(inod) + enthalpyoffuse(inod) #else a2ohf = oce_heat_flux(inod) + shortwave(inod) @@ -124,7 +129,7 @@ subroutine thermodynamics(ice, partit, mesh) rsf = 0._WP end if -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) !---- different lead closing parameter for NH and SH if (geo_coord_nod2D(2, inod)>0) then h0min = 0.3 @@ -297,7 +302,7 @@ subroutine ice_growth !---- snow melt rate over sea ice (dsnow <= 0) !---- if there is atmospheric melting over sea ice, first melt any !---- snow that is present, but do not melt more snow than available -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) !---- new condition added - surface temperature must be !---- larger than 273K to melt snow if (t.gt.273_WP) then @@ -309,7 +314,7 @@ subroutine ice_growth #else dsnow = A*min(Qatmice-Qicecon,0._WP) dsnow = max(dsnow*rhoice/rhosno,-hsn) -#endif +#endif !---- update snow thickness after atmospheric snow melt hsn = hsn + dsnow diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 5c20fedee..c29d899a4 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -39,14 +39,20 @@ subroutine cut_off(ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp +#endif /* (__oifs) */ #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) +#endif /* (__oifs) */ + !___________________________________________________________________________ ! lower cutoff: a_ice where(a_ice>1.0_WP) a_ice=1.0_WP @@ -54,7 +60,7 @@ subroutine cut_off(ice, partit, mesh) ! upper cutoff: a_ice where(a_ice<0.1e-8_WP) a_ice=0.0_WP -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) m_ice=0.0_WP m_snow=0.0_WP ice_temp=273.15_WP @@ -65,7 +71,7 @@ subroutine cut_off(ice, partit, mesh) ! lower cutoff: m_ice where(m_ice<0.1e-8_WP) m_ice=0.0_WP -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) m_snow=0.0_WP a_ice=0.0_WP ice_temp=273.15_WP @@ -73,15 +79,16 @@ subroutine cut_off(ice, partit, mesh) end where !___________________________________________________________________________ -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) where(ice_temp>273.15_WP) ice_temp=273.15_WP #endif /* (__oifs) */ -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) where(ice_temp < 173.15_WP .and. a_ice >= 0.1e-8_WP) ice_temp=271.35_WP #endif /* (__oifs) */ end subroutine cut_off + #if !defined (__oasis) && !defined (__ifsinterface) !=================================================================== ! Sea-ice thermodynamics routines diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 6aee933a2..37ba4be2b 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -396,7 +396,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) CASE ('alb ') call def_stream(nod2D, myDim_nod2D, 'alb', 'ice albedo', 'none', ice_alb(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ist ') - call def_stream(nod2D, myDim_nod2D, 'ist', 'ice surface temperature', 'K', ice_temp(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'ist', 'ice surface temperature', 'K', ice%data(4)%values(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('qsi ') call def_stream(nod2D, myDim_nod2D, 'qsi', 'ice heat flux', 'W/m^2', ice_heat_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('qso ') diff --git a/src/io_restart.F90 b/src/io_restart.F90 index ae0162539..8f4e1185d 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -204,7 +204,7 @@ subroutine ini_ice_io(year, ice, partit, mesh) call def_variable(iid, 'vice', (/nod2D/), 'meridional velocity', 'm', ice%vice(:)); #if defined (__oifs) call def_variable(iid, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice_alb); - call def_variable(iid, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice_temp); + call def_variable(iid, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice%data(4)%values); #endif /* (__oifs) */ end subroutine ini_ice_io From e04f264a9e4f5951938547ba83a7b5d9513a5c88 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 22:59:11 +0100 Subject: [PATCH 669/909] fix directive --- src/gen_forcing_couple.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index dd386a588..f4bed2242 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -148,7 +148,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) #endif t1=MPI_Wtime() -#ifdef (__oasis) || defined (__ifsinterface) +#if defined (__oasis) || defined (__ifsinterface) if (firstcall) then allocate(exchange(myDim_nod2D+eDim_nod2D), mask(myDim_nod2D+eDim_nod2D)) allocate(a2o_fcorr_stat(nrecv,6)) From af55f82d8d30cb039cd79b254242586363fc7472 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 23:03:39 +0100 Subject: [PATCH 670/909] fix bug for coupling --- src/ice_setup_step.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 2caa05e8d..ff951948d 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -216,10 +216,10 @@ subroutine ice_array_setup(partit, mesh) ! allocate(ice_alb(n_size), ice_temp(n_size), enthalpyoffuse(n_size)) allocate(ice_alb(n_size), enthalpyoffuse(n_size)) ! allocate(rhs_tempdiv(n_size), rhs_temp(n_size)) - allocate(rhs_tempdiv(n_size)) +! allocate(rhs_tempdiv(n_size)) ice_alb=0.6_WP ! ice_temp=265.15_WP - rhs_tempdiv=0._WP +! rhs_tempdiv=0._WP ! rhs_temp=0._WP enthalpyoffuse=0._WP #endif /* (__oifs) || defined (__ifsinterface) */ From 2242743a406c1091bc5d077e266b44124e94ccf8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 23:09:55 +0100 Subject: [PATCH 671/909] fix bug for coupling --- src/gen_forcing_couple.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index f4bed2242..9c5cec8ed 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -148,7 +148,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) #endif t1=MPI_Wtime() -#if defined (__oasis) || defined (__ifsinterface) +#if defined (__oasis) if (firstcall) then allocate(exchange(myDim_nod2D+eDim_nod2D), mask(myDim_nod2D+eDim_nod2D)) allocate(a2o_fcorr_stat(nrecv,6)) From 343c3ee47e0d040a9a81394cfa199fa02e3f7fec Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 23:17:02 +0100 Subject: [PATCH 672/909] fix typo in ice_fct: line136 --- src/ice_fct.F90 | 2 +- src/ice_modules.F90 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 7dfc9bab5..7ce6846e8 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -133,7 +133,7 @@ subroutine ice_TG_rhs(ice, partit, mesh) rhs_a(row)=0._WP rhs_ms(row)=0._WP #if defined (__oifs) || defined (__ifsinterface) - ths_temp(row)=0._WP + rhs_temp(row)=0._WP #endif /* (__oifs) */ END DO diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 1c87bb436..a6f7d3fa8 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -65,7 +65,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: thdgr_old ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: ths_temp +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: ths_temp ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv From 47027dea54dc02232ff572260679596017c0ae63 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 23:38:31 +0100 Subject: [PATCH 673/909] exchange oce_heat_flux, ice_heat_flux variables with ice derived type --- src/gen_forcing_couple.F90 | 27 +++++++++++++++++++-------- src/ice_modules.F90 | 2 +- src/ice_setup_step.F90 | 12 ++++++------ src/ice_thermo_cpl.F90 | 7 +++++++ src/ifs_interface/ifs_interface.F90 | 6 +++++- src/io_meandata.F90 | 4 ++-- 6 files changed, 40 insertions(+), 18 deletions(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 9c5cec8ed..a501f97db 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -130,6 +130,10 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice, u_w, v_w real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y +#if defined (__oasis) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: oce_heat_flux, ice_heat_flux + real(kind=WP), dimension(:), pointer :: tmp_oce_heat_flux, tmp_ice_heat_flux +#endif #if defined (__oifs) || defined (__ifsinterface) real(kind=WP), dimension(:), pointer :: ice_temp #endif @@ -137,16 +141,23 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) - u_w => ice%srfoce_u(:) - v_w => ice%srfoce_v(:) - stress_atmice_x => ice%stress_atmice_x(:) - stress_atmice_y => ice%stress_atmice_y(:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + stress_atmice_x => ice%stress_atmice_x(:) + stress_atmice_y => ice%stress_atmice_y(:) #if defined (__oifs) || defined (__ifsinterface) - ice_temp => ice%data(4)%values(:) + ice_temp => ice%data(4)%values(:) #endif - +#if defined (__oasis) || defined (__ifsinterface) + oce_heat_flux => ice%atmcoupl%oce_flx_h(:) + ice_heat_flux => ice%atmcoupl%ice_flx_h(:) + tmp_oce_heat_flux=> ice%atmcoupl%tmpoce_flx_h(:) + tmp_ice_heat_flux=> ice%atmcoupl%tmpice_flx_h(:) +#endif + + !_____________________________________________________________________________ t1=MPI_Wtime() #if defined (__oasis) if (firstcall) then diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index a6f7d3fa8..487ff654d 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -77,7 +77,7 @@ MODULE i_ARRAYS #if defined (__oasis) || defined (__ifsinterface) ! real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling real(kind=WP),target, allocatable, dimension(:) :: ice_alb ! new fields for OIFS coupling - real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux +! real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux real(kind=WP),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux !temporary flux fields !(for flux correction) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index ff951948d..598e3efe3 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -210,8 +210,8 @@ subroutine ice_array_setup(partit, mesh) ! stress_iceoce_y = 0.0_WP ! allocate(U_w(n_size), V_w(n_size)) ! =uf and vf of ocean at surface nodes #if defined (__oasis) || defined (__ifsinterface) - allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) - allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) +! allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) +! allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) #if defined (__oifs) || defined (__ifsinterface) ! allocate(ice_alb(n_size), ice_temp(n_size), enthalpyoffuse(n_size)) allocate(ice_alb(n_size), enthalpyoffuse(n_size)) @@ -223,10 +223,10 @@ subroutine ice_array_setup(partit, mesh) ! rhs_temp=0._WP enthalpyoffuse=0._WP #endif /* (__oifs) || defined (__ifsinterface) */ - oce_heat_flux=0._WP - ice_heat_flux=0._WP - tmp_oce_heat_flux=0._WP - tmp_ice_heat_flux=0._WP +! oce_heat_flux=0._WP +! ice_heat_flux=0._WP +! tmp_oce_heat_flux=0._WP +! tmp_ice_heat_flux=0._WP #endif /* (__oasis) || defined (__ifsinterface) */ end subroutine ice_array_setup ! diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index a4a5dd969..e3cf68ed0 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -69,6 +69,9 @@ subroutine thermodynamics(ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) real(kind=WP), dimension(:) , pointer :: ice_temp #endif +#if defined (__oasis) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: oce_heat_flux, ice_heat_flux +#endif myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -89,6 +92,10 @@ subroutine thermodynamics(ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) ice_temp => ice%data(4)%values(:) #endif +#if defined (__oasis) || defined (__ifsinterface) + oce_heat_flux => ice%atmcoupl%oce_flx_h(:) + ice_heat_flux => ice%atmcoupl%ice_flx_h(:) +#endif !_____________________________________________________________________________ rsss = ref_sss diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index b339a8cbb..f63bfe83b 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -568,7 +568,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & & evap_no_ifrac, sublimation !'longwave' only stand-alone, 'evaporation' filled later ! USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, oce_heat_flux, ice_heat_flux - USE i_ARRAYS, only: oce_heat_flux, ice_heat_flux +! USE i_ARRAYS, only: oce_heat_flux, ice_heat_flux USE o_ARRAYS, only: stress_atmoce_x, stress_atmoce_y USE g_comm_auto ! exchange_nod does the halo exchange @@ -618,11 +618,15 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! associate only the necessary things real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D real(kind=wpIFS), dimension(:) , pointer :: stress_atmice_x, stress_atmice_y + real(kind=wpIFS), dimension(:) , pointer :: oce_heat_flux, ice_heat_flux myDim_nod2D => fesom%partit%myDim_nod2D eDim_nod2D => fesom%partit%eDim_nod2D coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D stress_atmice_x => fesom%ice%stress_atmice_x stress_atmice_y => fesom%ice%stress_atmice_y + oce_heat_flux => fesom%ice%atmcoupl%oce_flx_h(:) + ice_heat_flux => fesom%ice%atmcoupl%ice_flx_h(:) + ! =================================================================== ! ! Sort out incoming arrays from the IFS and put them on the ocean grid diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 37ba4be2b..0f6232be1 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -398,9 +398,9 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) CASE ('ist ') call def_stream(nod2D, myDim_nod2D, 'ist', 'ice surface temperature', 'K', ice%data(4)%values(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('qsi ') - call def_stream(nod2D, myDim_nod2D, 'qsi', 'ice heat flux', 'W/m^2', ice_heat_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'qsi', 'ice heat flux', 'W/m^2', ice%atmcoupl%ice_flx_h(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('qso ') - call def_stream(nod2D, myDim_nod2D, 'qso', 'oce heat flux', 'W/m^2', oce_heat_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'qso', 'oce heat flux', 'W/m^2', ice%atmcoupl%oce_flx_h(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) #endif !___________________________________________________________________________________________________________________________________ From 73b6addf629ca3fe0a5f48ba7a455a4f73256aa6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 25 Nov 2021 23:43:02 +0100 Subject: [PATCH 674/909] fix bug --- src/ice_modules.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 487ff654d..8975a1331 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -78,7 +78,7 @@ MODULE i_ARRAYS ! real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling real(kind=WP),target, allocatable, dimension(:) :: ice_alb ! new fields for OIFS coupling ! real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux - real(kind=WP),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux +! real(kind=WP),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux !temporary flux fields !(for flux correction) ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_temp, m_templ, dm_temp, rhs_tempdiv From 6d0c17b9a5907390d9bb6229bc8f32be1a7c7322 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 00:01:13 +0100 Subject: [PATCH 675/909] exchange ice_alb, enthalpyoffuse against ice derived type --- src/MOD_ICE.F90 | 2 +- src/gen_forcing_couple.F90 | 4 +++- src/ice_modules.F90 | 4 ++-- src/ice_setup_step.F90 | 6 +++--- src/ice_thermo_cpl.F90 | 8 +++++--- src/io_meandata.F90 | 4 ++-- src/io_restart.F90 | 4 ++-- 7 files changed, 18 insertions(+), 14 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index ebc86a5c3..6f5e1612d 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -692,7 +692,7 @@ subroutine ice_init(ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) allocate(ice%atmcoupl%ice_alb( node_size)) allocate(ice%atmcoupl%enthalpyoffuse(node_size)) - ice%atmcoupl%ice_alb = 0.0_WP + ice%atmcoupl%ice_alb = 0.6_WP ice%atmcoupl%enthalpyoffuse= 0.0_WP #endif /* (__oifs) */ #endif /* (__oasis) */ diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index a501f97db..dc67f8c2a 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -135,7 +135,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) real(kind=WP), dimension(:), pointer :: tmp_oce_heat_flux, tmp_ice_heat_flux #endif #if defined (__oifs) || defined (__ifsinterface) - real(kind=WP), dimension(:), pointer :: ice_temp + real(kind=WP), dimension(:), pointer :: ice_temp, ice_alb, enthalpyoffuse #endif #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -149,6 +149,8 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) stress_atmice_y => ice%stress_atmice_y(:) #if defined (__oifs) || defined (__ifsinterface) ice_temp => ice%data(4)%values(:) + ice_alb => ice%atmcoupl%ice_alb(:) + enthalpyoffuse => ice%atmcoupl%enthalpyoffuse(:) #endif #if defined (__oasis) || defined (__ifsinterface) oce_heat_flux => ice%atmcoupl%oce_flx_h(:) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 8975a1331..0b6a7dde5 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -76,7 +76,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux #if defined (__oasis) || defined (__ifsinterface) ! real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling - real(kind=WP),target, allocatable, dimension(:) :: ice_alb ! new fields for OIFS coupling +! real(kind=WP),target, allocatable, dimension(:) :: ice_alb ! new fields for OIFS coupling ! real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux ! real(kind=WP),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux !temporary flux fields @@ -84,7 +84,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_temp, m_templ, dm_temp, rhs_tempdiv ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_templ, dm_temp, rhs_tempdiv #if defined (__oifs) || defined (__ifsinterface) - real(kind=WP),target, allocatable, dimension(:) :: enthalpyoffuse +! real(kind=WP),target, allocatable, dimension(:) :: enthalpyoffuse #endif #endif /* (__oasis) || defined (__ifsinterface)*/ diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 598e3efe3..2e6d028dc 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -214,14 +214,14 @@ subroutine ice_array_setup(partit, mesh) ! allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) #if defined (__oifs) || defined (__ifsinterface) ! allocate(ice_alb(n_size), ice_temp(n_size), enthalpyoffuse(n_size)) -allocate(ice_alb(n_size), enthalpyoffuse(n_size)) +! allocate(ice_alb(n_size), enthalpyoffuse(n_size)) ! allocate(rhs_tempdiv(n_size), rhs_temp(n_size)) ! allocate(rhs_tempdiv(n_size)) - ice_alb=0.6_WP +! ice_alb=0.6_WP ! ice_temp=265.15_WP ! rhs_tempdiv=0._WP ! rhs_temp=0._WP - enthalpyoffuse=0._WP +! enthalpyoffuse=0._WP #endif /* (__oifs) || defined (__ifsinterface) */ ! oce_heat_flux=0._WP ! ice_heat_flux=0._WP diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index e3cf68ed0..02ba58934 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -67,7 +67,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux #if defined (__oifs) || defined (__ifsinterface) - real(kind=WP), dimension(:) , pointer :: ice_temp + real(kind=WP), dimension(:) , pointer :: ice_temp, ice_alb, enthalpyoffuse #endif #if defined (__oasis) || defined (__ifsinterface) real(kind=WP), dimension(:), pointer :: oce_heat_flux, ice_heat_flux @@ -81,8 +81,8 @@ subroutine thermodynamics(ice, partit, mesh) a_ice => ice%data(1)%values(:) m_ice => ice%data(2)%values(:) m_snow => ice%data(3)%values(:) - thdgr => ice%thermo%thdgr - thdgrsn => ice%thermo%thdgrsn + thdgr => ice%thermo%thdgr(:) + thdgrsn => ice%thermo%thdgrsn(:) T_oc_array => ice%srfoce_temp(:) S_oc_array => ice%srfoce_salt(:) u_w => ice%srfoce_u(:) @@ -91,6 +91,8 @@ subroutine thermodynamics(ice, partit, mesh) net_heat_flux => ice%flx_h(:) #if defined (__oifs) || defined (__ifsinterface) ice_temp => ice%data(4)%values(:) + ice_alb => ice%atmcoupl%ice_alb(:) + enthalpyoffuse=> ice%atmcoupl%enthalpyoffuse(:) #endif #if defined (__oasis) || defined (__ifsinterface) oce_heat_flux => ice%atmcoupl%oce_flx_h(:) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 0f6232be1..123514aee 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -394,13 +394,13 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) #if defined (__oifs) CASE ('alb ') - call def_stream(nod2D, myDim_nod2D, 'alb', 'ice albedo', 'none', ice_alb(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'alb', 'ice albedo', 'none', ice%atmcoupl%ice_alb(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ist ') call def_stream(nod2D, myDim_nod2D, 'ist', 'ice surface temperature', 'K', ice%data(4)%values(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('qsi ') call def_stream(nod2D, myDim_nod2D, 'qsi', 'ice heat flux', 'W/m^2', ice%atmcoupl%ice_flx_h(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('qso ') - call def_stream(nod2D, myDim_nod2D, 'qso', 'oce heat flux', 'W/m^2', ice%atmcoupl%oce_flx_h(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'qso', 'oce heat flux', 'W/m^2', ice%atmcoupl%oce_flx_h(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) #endif !___________________________________________________________________________________________________________________________________ diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 8f4e1185d..98e1da355 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -203,8 +203,8 @@ subroutine ini_ice_io(year, ice, partit, mesh) call def_variable(iid, 'uice', (/nod2D/), 'zonal velocity', 'm/s', ice%uice(:)); call def_variable(iid, 'vice', (/nod2D/), 'meridional velocity', 'm', ice%vice(:)); #if defined (__oifs) - call def_variable(iid, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice_alb); - call def_variable(iid, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice%data(4)%values); + call def_variable(iid, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice%atmcoupl%ice_alb); + call def_variable(iid, 'ice_temp', (/nod2D/), 'ice surface temperature', 'K', ice%data(4)%values); #endif /* (__oifs) */ end subroutine ini_ice_io From d2a095d8bebcabf733ae887de1fb7a71a9d95cf9 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 00:07:48 +0100 Subject: [PATCH 676/909] kick out subroutine ice_array_setup and subroutine ice_fct_init --- src/ice_fct.F90 | 108 ++++++++--------- src/ice_setup_step.F90 | 264 +++++++++++++++++++++-------------------- 2 files changed, 187 insertions(+), 185 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 7ce6846e8..27b44cd1a 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -175,60 +175,60 @@ subroutine ice_TG_rhs(ice, partit, mesh) END DO end do end subroutine ice_TG_rhs -! -!---------------------------------------------------------------------------- -! -subroutine ice_fct_init(ice, partit, mesh) - USE MOD_ICE - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_MESH - use o_PARAM - use i_ARRAYS - use ice_fct_interfaces - implicit none - integer :: n_size - type(t_ice), intent(inout), target :: ice - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - !_____________________________________________________________________________ - ! pointer on necessary derived types -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - - n_size=myDim_nod2D+eDim_nod2D - - ! Initialization of arrays necessary to implement FCT algorithm -! allocate(m_icel(n_size), a_icel(n_size), m_snowl(n_size)) ! low-order solutions -! m_icel=0.0_WP -! a_icel=0.0_WP -! m_snowl=0.0_WP -#if defined (__oifs) || defined (__ifsinterface) -! allocate(m_templ(n_size)) -! allocate(dm_temp(n_size)) -#endif /* (__oifs) */ -! allocate(icefluxes(myDim_elem2D,3)) -! allocate(icepplus(n_size), icepminus(n_size)) -! icefluxes = 0.0_WP -! icepplus = 0.0_WP -! icepminus= 0.0_WP - -#if defined (__oifs) || defined (__ifsinterface) -! m_templ=0.0_WP -! dm_temp=0.0_WP -#endif /* (__oifs) */ - -! allocate(dm_ice(n_size), da_ice(n_size), dm_snow(n_size)) ! increments of high -! dm_ice = 0.0_WP ! order solutions -! da_ice = 0.0_WP -! dm_snow = 0.0_WP - - ! Fill in the mass matrix - call ice_mass_matrix_fill(ice, partit, mesh) - if (mype==0) write(*,*) 'Ice FCT is initialized' -end subroutine ice_fct_init +! ! +! !---------------------------------------------------------------------------- +! ! +! subroutine ice_fct_init(ice, partit, mesh) +! USE MOD_ICE +! USE MOD_PARTIT +! USE MOD_PARSUP +! use MOD_MESH +! use o_PARAM +! use i_ARRAYS +! use ice_fct_interfaces +! implicit none +! integer :: n_size +! type(t_ice), intent(inout), target :: ice +! type(t_partit), intent(inout), target :: partit +! type(t_mesh), intent(in), target :: mesh +! !_____________________________________________________________________________ +! ! pointer on necessary derived types +! #include "associate_part_def.h" +! #include "associate_mesh_def.h" +! #include "associate_part_ass.h" +! #include "associate_mesh_ass.h" +! +! n_size=myDim_nod2D+eDim_nod2D +! +! ! Initialization of arrays necessary to implement FCT algorithm +! ! allocate(m_icel(n_size), a_icel(n_size), m_snowl(n_size)) ! low-order solutions +! ! m_icel=0.0_WP +! ! a_icel=0.0_WP +! ! m_snowl=0.0_WP +! #if defined (__oifs) || defined (__ifsinterface) +! ! allocate(m_templ(n_size)) +! ! allocate(dm_temp(n_size)) +! #endif /* (__oifs) */ +! ! allocate(icefluxes(myDim_elem2D,3)) +! ! allocate(icepplus(n_size), icepminus(n_size)) +! ! icefluxes = 0.0_WP +! ! icepplus = 0.0_WP +! ! icepminus= 0.0_WP +! +! #if defined (__oifs) || defined (__ifsinterface) +! ! m_templ=0.0_WP +! ! dm_temp=0.0_WP +! #endif /* (__oifs) */ +! +! ! allocate(dm_ice(n_size), da_ice(n_size), dm_snow(n_size)) ! increments of high +! ! dm_ice = 0.0_WP ! order solutions +! ! da_ice = 0.0_WP +! ! dm_snow = 0.0_WP +! +! ! Fill in the mass matrix +! call ice_mass_matrix_fill(ice, partit, mesh) +! if (mype==0) write(*,*) 'Ice FCT is initialized' +! end subroutine ice_fct_init ! !---------------------------------------------------------------------------- ! diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 2e6d028dc..d43ae1130 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -75,6 +75,7 @@ subroutine ice_setup(ice, tracers, partit, mesh) use MOD_ICE use ice_array_setup_interface use ice_initial_state_interface + use ice_fct_interfaces implicit none type(t_ice), intent(inout), target :: ice type(t_mesh), intent(in), target :: mesh @@ -86,150 +87,151 @@ subroutine ice_setup(ice, tracers, partit, mesh) if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_init'//achar(27)//'[0m' call ice_init(ice, partit, mesh) - ! ================ DO not change + !___________________________________________________________________________ + ! DO not change ice_dt=real(ice_ave_steps,WP)*dt ! ice_dt=dt Tevp_inv=3.0_WP/ice_dt Clim_evp=Clim_evp*(evp_rheol_steps/ice_dt)**2/Tevp_inv ! This is combination - ! it always enters - ! ================ - if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_array_setup'//achar(27)//'[0m' - call ice_array_setup(partit, mesh) - + ! it always ent + + !___________________________________________________________________________ if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_fct_init'//achar(27)//'[0m' - call ice_fct_init(ice, partit, mesh) - ! ================ + call ice_mass_matrix_fill(ice, partit, mesh) +! call ice_fct_init(ice, partit, mesh) + + !___________________________________________________________________________ ! Initialization routine, user input is required - ! ================ !call ice_init_fields_test if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_initial_state'//achar(27)//'[0m' call ice_initial_state(ice, tracers, partit, mesh) ! Use it unless running test example + if(partit%mype==0) write(*,*) 'Ice is initialized' end subroutine ice_setup ! -! -!_______________________________________________________________________________ -subroutine ice_array_setup(partit, mesh) -! -! inializing sea ice model -! -! Variables that serve for exchange with atmosphere are nodal, to keep -! back compatibility with FESOM input routines - -use o_param -use i_param -use MOD_MESH -USE MOD_PARTIT -USE MOD_PARSUP -use i_arrays -USE g_CONFIG - -implicit none -type(t_partit), intent(inout), target :: partit -type(t_mesh), intent(in), target :: mesh -integer :: n_size, e_size, mn, k, n, n1, n2 - -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" - -n_size=myDim_nod2D+eDim_nod2D -e_size=myDim_elem2D+eDim_elem2D - -! Allocate memory for variables of ice model -! allocate(u_ice(n_size), v_ice(n_size)) -! allocate(U_rhs_ice(n_size), V_rhs_ice(n_size)) -! allocate(sigma11(e_size), sigma12(e_size), sigma22(e_size)) -! allocate(eps11(e_size), eps12(e_size), eps22(e_size)) -! allocate(m_ice(n_size), a_ice(n_size), m_snow(n_size)) -! allocate(rhs_m(n_size), rhs_a(n_size), rhs_ms(n_size)) -! allocate(t_skin(n_size)) -! allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS -! allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS -! allocate(thdgr_old(n_size)) !PS -! if (whichEVP > 0) then -! ! allocate(u_ice_aux(n_size), v_ice_aux(n_size)) -! allocate(alpha_evp_array(myDim_elem2D)) -! allocate(beta_evp_array(n_size)) +! ! +! !_______________________________________________________________________________ +! subroutine ice_array_setup(partit, mesh) +! ! +! ! inializing sea ice model +! ! +! ! Variables that serve for exchange with atmosphere are nodal, to keep +! ! back compatibility with FESOM input routines +! +! use o_param +! use i_param +! use MOD_MESH +! USE MOD_PARTIT +! USE MOD_PARSUP +! use i_arrays +! USE g_CONFIG ! -! alpha_evp_array=alpha_evp -! beta_evp_array =alpha_evp ! alpha=beta works most reliable -! ! u_ice_aux=0.0_WP -! ! v_ice_aux=0.0_WP -! end if - -! allocate(rhs_mdiv(n_size), rhs_adiv(n_size), rhs_msdiv(n_size)) - -! m_ice_old=0.0_WP !PS -! a_ice_old=0.0_WP !PS -! m_snow_old=0.0_WP !PS -! thdgr_old=0.0_WP !PS -! U_ice_old=0.0_WP !PS -! V_ice_old=0.0_WP !PS - -! rhs_m=0.0_WP -! rhs_ms=0.0_WP -! rhs_a=0.0_WP -! m_ice=0.0_WP -! a_ice=0.0_WP -! m_snow=0.0_WP -! U_rhs_ice=0.0_WP -! V_rhs_ice=0.0_WP -! U_ice=0.0_WP -! V_ice=0.0_WP -! sigma11=0.0_WP -! sigma22=0.0_WP -! sigma12=0.0_WP -! eps11=0.0_WP -! eps12=0.0_WP -! eps22=0.0_WP -! t_skin=0.0_WP -! rhs_mdiv=0.0_WP -! rhs_adiv=0.0_WP -! rhs_msdiv=0.0_WP - - -! Allocate memory for arrays used in coupling -! with ocean and atmosphere -! allocate(S_oc_array(n_size), T_oc_array(n_size)) ! copies of ocean T ans S -! S_oc_array = 0.0_WP -! T_oc_array = 0.0_WP -! allocate(fresh_wa_flux(n_size), net_heat_flux(n_size)) -! allocate(fresh_wa_flux(n_size)) -! allocate(net_heat_flux(n_size)) -! fresh_wa_flux = 0.0_WP -! net_heat_flux = 0.0_WP -! allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) -! stress_atmice_x = 0.0_WP -! stress_atmice_y = 0.0_WP -! ! allocate(elevation(n_size)) ! =ssh of ocean -! ! elevation = 0.0_WP -! allocate(stress_iceoce_x(n_size), stress_iceoce_y(n_size)) -! stress_iceoce_x = 0.0_WP -! stress_iceoce_y = 0.0_WP -! allocate(U_w(n_size), V_w(n_size)) ! =uf and vf of ocean at surface nodes -#if defined (__oasis) || defined (__ifsinterface) -! allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) -! allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) -#if defined (__oifs) || defined (__ifsinterface) -! allocate(ice_alb(n_size), ice_temp(n_size), enthalpyoffuse(n_size)) -! allocate(ice_alb(n_size), enthalpyoffuse(n_size)) -! allocate(rhs_tempdiv(n_size), rhs_temp(n_size)) -! allocate(rhs_tempdiv(n_size)) -! ice_alb=0.6_WP -! ice_temp=265.15_WP -! rhs_tempdiv=0._WP -! rhs_temp=0._WP -! enthalpyoffuse=0._WP -#endif /* (__oifs) || defined (__ifsinterface) */ -! oce_heat_flux=0._WP -! ice_heat_flux=0._WP -! tmp_oce_heat_flux=0._WP -! tmp_ice_heat_flux=0._WP -#endif /* (__oasis) || defined (__ifsinterface) */ -end subroutine ice_array_setup -! +! implicit none +! type(t_partit), intent(inout), target :: partit +! type(t_mesh), intent(in), target :: mesh +! integer :: n_size, e_size, mn, k, n, n1, n2 +! +! #include "associate_part_def.h" +! #include "associate_mesh_def.h" +! #include "associate_part_ass.h" +! #include "associate_mesh_ass.h" +! +! n_size=myDim_nod2D+eDim_nod2D +! e_size=myDim_elem2D+eDim_elem2D +! +! ! Allocate memory for variables of ice model +! ! allocate(u_ice(n_size), v_ice(n_size)) +! ! allocate(U_rhs_ice(n_size), V_rhs_ice(n_size)) +! ! allocate(sigma11(e_size), sigma12(e_size), sigma22(e_size)) +! ! allocate(eps11(e_size), eps12(e_size), eps22(e_size)) +! ! allocate(m_ice(n_size), a_ice(n_size), m_snow(n_size)) +! ! allocate(rhs_m(n_size), rhs_a(n_size), rhs_ms(n_size)) +! ! allocate(t_skin(n_size)) +! ! allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS +! ! allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS +! ! allocate(thdgr_old(n_size)) !PS +! ! if (whichEVP > 0) then +! ! ! allocate(u_ice_aux(n_size), v_ice_aux(n_size)) +! ! allocate(alpha_evp_array(myDim_elem2D)) +! ! allocate(beta_evp_array(n_size)) +! ! +! ! alpha_evp_array=alpha_evp +! ! beta_evp_array =alpha_evp ! alpha=beta works most reliable +! ! ! u_ice_aux=0.0_WP +! ! ! v_ice_aux=0.0_WP +! ! end if +! +! ! allocate(rhs_mdiv(n_size), rhs_adiv(n_size), rhs_msdiv(n_size)) +! +! ! m_ice_old=0.0_WP !PS +! ! a_ice_old=0.0_WP !PS +! ! m_snow_old=0.0_WP !PS +! ! thdgr_old=0.0_WP !PS +! ! U_ice_old=0.0_WP !PS +! ! V_ice_old=0.0_WP !PS +! +! ! rhs_m=0.0_WP +! ! rhs_ms=0.0_WP +! ! rhs_a=0.0_WP +! ! m_ice=0.0_WP +! ! a_ice=0.0_WP +! ! m_snow=0.0_WP +! ! U_rhs_ice=0.0_WP +! ! V_rhs_ice=0.0_WP +! ! U_ice=0.0_WP +! ! V_ice=0.0_WP +! ! sigma11=0.0_WP +! ! sigma22=0.0_WP +! ! sigma12=0.0_WP +! ! eps11=0.0_WP +! ! eps12=0.0_WP +! ! eps22=0.0_WP +! ! t_skin=0.0_WP +! ! rhs_mdiv=0.0_WP +! ! rhs_adiv=0.0_WP +! ! rhs_msdiv=0.0_WP +! +! +! ! Allocate memory for arrays used in coupling +! ! with ocean and atmosphere +! ! allocate(S_oc_array(n_size), T_oc_array(n_size)) ! copies of ocean T ans S +! ! S_oc_array = 0.0_WP +! ! T_oc_array = 0.0_WP +! ! allocate(fresh_wa_flux(n_size), net_heat_flux(n_size)) +! ! allocate(fresh_wa_flux(n_size)) +! ! allocate(net_heat_flux(n_size)) +! ! fresh_wa_flux = 0.0_WP +! ! net_heat_flux = 0.0_WP +! ! allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) +! ! stress_atmice_x = 0.0_WP +! ! stress_atmice_y = 0.0_WP +! ! ! allocate(elevation(n_size)) ! =ssh of ocean +! ! ! elevation = 0.0_WP +! ! allocate(stress_iceoce_x(n_size), stress_iceoce_y(n_size)) +! ! stress_iceoce_x = 0.0_WP +! ! stress_iceoce_y = 0.0_WP +! ! allocate(U_w(n_size), V_w(n_size)) ! =uf and vf of ocean at surface nodes +! #if defined (__oasis) || defined (__ifsinterface) +! ! allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) +! ! allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) +! #if defined (__oifs) || defined (__ifsinterface) +! ! allocate(ice_alb(n_size), ice_temp(n_size), enthalpyoffuse(n_size)) +! ! allocate(ice_alb(n_size), enthalpyoffuse(n_size)) +! ! allocate(rhs_tempdiv(n_size), rhs_temp(n_size)) +! ! allocate(rhs_tempdiv(n_size)) +! ! ice_alb=0.6_WP +! ! ice_temp=265.15_WP +! ! rhs_tempdiv=0._WP +! ! rhs_temp=0._WP +! ! enthalpyoffuse=0._WP +! #endif /* (__oifs) || defined (__ifsinterface) */ +! ! oce_heat_flux=0._WP +! ! ice_heat_flux=0._WP +! ! tmp_oce_heat_flux=0._WP +! ! tmp_ice_heat_flux=0._WP +! #endif /* (__oasis) || defined (__ifsinterface) */ +! end subroutine ice_array_setup +! ! ! ! !_______________________________________________________________________________ From c858f49ebda1d0ab59ace692de7dd5de4ccc2388 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 00:19:59 +0100 Subject: [PATCH 677/909] try again to put ice derived mass matrix --- src/ice_fct.F90 | 26 +++++++++++++------------- src/ice_modules.F90 | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 27b44cd1a..09741426b 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -291,7 +291,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl -! real(kind=WP), dimension(:), pointer :: mass_matrix + real(kind=WP), dimension(:), pointer :: mass_matrix #if defined (__oifs) || defined (__ifsinterface) real(kind=WP), dimension(:), pointer :: ice_temp, rhs_temp, m_templ #endif @@ -308,11 +308,11 @@ subroutine ice_solve_low_order(ice, partit, mesh) a_icel => ice%data(1)%valuesl(:) m_icel => ice%data(2)%valuesl(:) m_snowl => ice%data(3)%valuesl(:) -! mass_matrix => ice%work%fct_massmatrix(:) + mass_matrix => ice%work%fct_massmatrix(:) #if defined (__oifs) || defined (__ifsinterface) - ice_temp => ice%data(4)%values(:) - rhs_temp => ice%data(4)%values_rhs(:) - m_templ => ice%data(4)%valuesl(:) + ice_temp => ice%data(4)%values(:) + rhs_temp => ice%data(4)%values_rhs(:) + m_templ => ice%data(4)%valuesl(:) #endif !___________________________________________________________________________ gamma=ice_gamma_fct ! Added diffusivity parameter @@ -377,7 +377,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow -! real(kind=WP), dimension(:), pointer :: mass_matrix + real(kind=WP), dimension(:), pointer :: mass_matrix #if defined (__oifs) || defined (__ifsinterface) real(kind=WP), dimension(:), pointer :: rhs_temp, m_templ, dm_temp #endif @@ -394,7 +394,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) da_ice => ice%data(1)%dvalues(:) dm_ice => ice%data(2)%dvalues(:) dm_snow => ice%data(3)%dvalues(:) -! mass_matrix => ice%work%fct_massmatrix(:) + mass_matrix => ice%work%fct_massmatrix(:) #if defined (__oifs) || defined (__ifsinterface) rhs_temp => ice%data(4)%values_rhs(:) m_templ => ice%data(4)%valuesl(:) @@ -835,16 +835,16 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) type(t_mesh) , intent(in) , target :: mesh !_____________________________________________________________________________ ! pointer on necessary derived types -! real(kind=WP), dimension(:), pointer :: mass_matrix + real(kind=WP), dimension(:), pointer :: mass_matrix #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" -! mass_matrix => ice%work%fct_massmatrix + mass_matrix => ice%work%fct_massmatrix(:) ! ! a) - allocate(mass_matrix(sum(nn_num(1:myDim_nod2D)))) - mass_matrix =0.0_WP +! allocate(mass_matrix(sum(nn_num(1:myDim_nod2D)))) +! mass_matrix =0.0_WP allocate(col_pos(myDim_nod2D+eDim_nod2D)) DO elem=1,myDim_elem2D @@ -1050,7 +1050,7 @@ subroutine ice_update_for_div(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: rhs_adiv, rhs_mdiv, rhs_msdiv real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow -! real(kind=WP), dimension(:), pointer :: mass_matrix + real(kind=WP), dimension(:), pointer :: mass_matrix #if defined (__oifs) || defined (__ifsinterface) real(kind=WP), dimension(:), pointer :: ice_temp, m_templ, dm_temp, rhs_tempdiv #endif @@ -1070,7 +1070,7 @@ subroutine ice_update_for_div(ice, partit, mesh) da_ice => ice%data(1)%dvalues(:) dm_ice => ice%data(2)%dvalues(:) dm_snow => ice%data(3)%dvalues(:) -! mass_matrix => ice%work%fct_massmatrix(:) + mass_matrix => ice%work%fct_massmatrix(:) #if defined (__oifs) || defined (__ifsinterface) ice_temp => ice%data(4)%values(:) m_templ => ice%data(4)%valuesl(:) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 0b6a7dde5..ee87ad184 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -99,7 +99,7 @@ MODULE i_ARRAYS ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix +! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: alpha_evp_array(:) ! of myDim_elem2D ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: beta_evp_array(:) ! of myDim_node2D+eDim_node2D From 83203604a53f692d9a9ee56a839ef2ef9caf8475 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 12:28:44 +0100 Subject: [PATCH 678/909] exchange paramters ice_dt, Tevp_inv with derived type --- src/ice_EVP.F90 | 22 +++++++++++----------- src/ice_fct.F90 | 18 +++++++++--------- src/ice_maEVP.F90 | 6 +++--- src/ice_modules.F90 | 4 ++-- src/ice_setup_step.F90 | 6 +++--- src/ice_thermo_oce.F90 | 2 +- 6 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index b46cfb84e..10b3aaa8f 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -88,9 +88,9 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) !___________________________________________________________________________ vale = 1.0_WP/(ellipse**2) - dte = ice_dt/(1.0_WP*evp_rheol_steps) - det1 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) - det2 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) !*ellipse**2 + dte = ice%ice_dt/(1.0_WP*evp_rheol_steps) + det1 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) + det2 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) !*ellipse**2 do el=1,myDim_elem2D !_______________________________________________________________________ @@ -144,9 +144,9 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) !zeta=Clim_evp*voltriangle(el) !end if - zeta = zeta*Tevp_inv + zeta = zeta*ice%Tevp_inv - r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*Tevp_inv + r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*ice%Tevp_inv r2 = zeta*(eps11(el)-eps22(el))*vale r3 = zeta*eps12(el)*vale @@ -193,9 +193,9 @@ end subroutine stress_tensor ! ! vale = 1.0_WP/(ellipse**2) ! -! dte = ice_dt/(1.0_WP*evp_rheol_steps) -! det1 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) -! det2 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) !*ellipse**2 +! dte = ice%ice_dt/(1.0_WP*evp_rheol_steps) +! det1 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) +! det2 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) !*ellipse**2 ! ! ! do el=1,myDim_elem2D @@ -254,9 +254,9 @@ end subroutine stress_tensor ! !zeta=Clim_evp*voltriangle(el) ! !end if ! -! zeta = zeta*Tevp_inv +! zeta = zeta*ice%Tevp_inv ! -! r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*Tevp_inv +! r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*ice%Tevp_inv ! r2 = zeta*(eps11(el)-eps22(el))*vale ! r3 = zeta*eps12(el)*vale ! @@ -526,7 +526,7 @@ subroutine EVPdynamics(ice, partit, mesh) vsno_out=m_snow) #endif - rdt=ice_dt/(1.0*evp_rheol_steps) + rdt=ice%ice_dt/(1.0*evp_rheol_steps) ax=cos(theta_io) ay=sin(theta_io) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 09741426b..99fc8aab9 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -161,10 +161,10 @@ subroutine ice_TG_rhs(ice, partit, mesh) !entries(q)= vol*dt*((dx(n)*um+dy(n)*vm)/3.0_WP - & ! diff*(dx(n)*dx(q)+ dy(n)*dy(q))- & ! 0.5*dt*(um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))) - entries(q)= vol*ice_dt*((dx(n)*(um+u_ice(elnodes(q)))+ & + entries(q)= vol*ice%ice_dt*((dx(n)*(um+u_ice(elnodes(q)))+ & dy(n)*(vm+v_ice(elnodes(q))))/12.0_WP - & diff*(dx(n)*dx(q)+ dy(n)*dy(q))- & - 0.5_WP*ice_dt*(um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0_WP) + 0.5_WP*ice%ice_dt*(um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0_WP) END DO rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes)) rhs_a(row)=rhs_a(row)+sum(entries*a_ice(elnodes)) @@ -990,19 +990,19 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) row=elnodes(n) !!PS if(ulevels_nod2D(row)>1) cycle !LK89140 DO q = 1,3 - entries(q)= vol*ice_dt*((1.0_WP-0.5_WP*ice_dt*c4)*(dx(n)*(um+u_ice(elnodes(q)))+ & + entries(q)= vol*ice%ice_dt*((1.0_WP-0.5_WP*ice%ice_dt*c4)*(dx(n)*(um+u_ice(elnodes(q)))+ & dy(n)*(vm+v_ice(elnodes(q))))/12.0_WP - & - 0.5_WP*ice_dt*(c1*dx(n)*dx(q)+c2*dy(n)*dy(q)+c3*(dx(n)*dy(q)+dx(q)*dy(n)))) + 0.5_WP*ice%ice_dt*(c1*dx(n)*dx(q)+c2*dy(n)*dy(q)+c3*(dx(n)*dy(q)+dx(q)*dy(n)))) !um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0) - entries2(q)=0.5_WP*ice_dt*(dx(n)*(um+u_ice(elnodes(q)))+ & + entries2(q)=0.5_WP*ice%ice_dt*(dx(n)*(um+u_ice(elnodes(q)))+ & dy(n)*(vm+v_ice(elnodes(q)))-dx(q)*(um+u_ice(row))- & dy(q)*(vm+v_ice(row))) END DO - cx1=vol*ice_dt*c4*(sum(m_ice(elnodes))+m_ice(elnodes(n))+sum(entries2*m_ice(elnodes)))/12.0_WP - cx2=vol*ice_dt*c4*(sum(a_ice(elnodes))+a_ice(elnodes(n))+sum(entries2*a_ice(elnodes)))/12.0_WP - cx3=vol*ice_dt*c4*(sum(m_snow(elnodes))+m_snow(elnodes(n))+sum(entries2*m_snow(elnodes)))/12.0_WP + cx1=vol*ice%ice_dt*c4*(sum(m_ice(elnodes))+m_ice(elnodes(n))+sum(entries2*m_ice(elnodes)))/12.0_WP + cx2=vol*ice%ice_dt*c4*(sum(a_ice(elnodes))+a_ice(elnodes(n))+sum(entries2*a_ice(elnodes)))/12.0_WP + cx3=vol*ice%ice_dt*c4*(sum(m_snow(elnodes))+m_snow(elnodes(n))+sum(entries2*m_snow(elnodes)))/12.0_WP #if defined (__oifs) || defined (__ifsinterface) - cx4=vol*ice_dt*c4*(sum(ice_temp(elnodes))+ice_temp(elnodes(n))+sum(entries2*ice_temp(elnodes)))/12.0_WP + cx4=vol*ice%ice_dt*c4*(sum(ice_temp(elnodes))+ice_temp(elnodes(n))+sum(entries2*ice_temp(elnodes)))/12.0_WP #endif /* (__oifs) */ rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes))+cx1 diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index d0a5b2c8b..b9d9208c8 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -457,7 +457,7 @@ subroutine EVPdynamics_m(ice, partit, mesh) vale=1.0_WP/(ellipse**2) det2=1.0_WP/(1.0_WP+alpha_evp) det1=alpha_evp*det2 - rdt=ice_dt + rdt=ice%ice_dt steps=evp_rheol_steps u_ice_aux=u_ice ! Initialize solver variables @@ -824,7 +824,7 @@ subroutine find_alpha_field_a(ice, partit, mesh) ! with thickness (msum) #endif !adjust c_aevp such, that alpha_evp_array and beta_evp_array become in acceptable range - alpha_evp_array(elem)=max(50.0_WP,sqrt(ice_dt*c_aevp*pressure/rhoice/elem_area(elem))) + alpha_evp_array(elem)=max(50.0_WP,sqrt(ice%ice_dt*c_aevp*pressure/rhoice/elem_area(elem))) ! /voltriangle(elem) for FESOM1.4 ! We do not allow alpha to be too small! end do !--> do elem=1,myDim_elem2D @@ -1012,7 +1012,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) !___________________________________________________________________________ steps=evp_rheol_steps - rdt=ice_dt + rdt=ice%ice_dt u_ice_aux=u_ice ! Initialize solver variables v_ice_aux=v_ice call ssh2rhs(ice, partit, mesh) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index ee87ad184..5574e292f 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -28,7 +28,7 @@ MODULE i_PARAM ! in ice fct advection REAL(kind=WP) :: ice_diff=10.0_WP ! diffusion to stabilize ! ice advection - REAL(kind=WP) :: Tevp_inv +! REAL(kind=WP) :: Tevp_inv real(kind=WP) :: theta_io=0.0_WP ! rotation angle ! (ice-ocean), available ! in EVP @@ -40,7 +40,7 @@ MODULE i_PARAM logical :: ice_free_slip=.false. integer :: whichEVP=0 !0=standart; 1=mEVP; 2=aEVP - real(kind=WP) :: ice_dt !ice step=ice_ave_steps*oce_step +! real(kind=WP) :: ice_dt !ice step=ice_ave_steps*oce_step NAMELIST /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, Cd_oce_ice, & ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, alpha_evp, beta_evp, c_aevp diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index d43ae1130..d2fcf4035 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -89,10 +89,10 @@ subroutine ice_setup(ice, tracers, partit, mesh) !___________________________________________________________________________ ! DO not change - ice_dt=real(ice_ave_steps,WP)*dt + ice%ice_dt=real(ice_ave_steps,WP)*dt ! ice_dt=dt - Tevp_inv=3.0_WP/ice_dt - Clim_evp=Clim_evp*(evp_rheol_steps/ice_dt)**2/Tevp_inv ! This is combination + ice%Tevp_inv=3.0_WP/ice%ice_dt + Clim_evp=Clim_evp*(evp_rheol_steps/ice%ice_dt)**2/ice%Tevp_inv ! This is combination ! it always ent !___________________________________________________________________________ diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index c29d899a4..5fdb7d64b 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -247,7 +247,7 @@ subroutine thermodynamics(ice, partit, mesh) endif call therm_ice(h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & - ug,ustar,T_oc,S_oc,h_ml,t,ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & + ug,ustar,T_oc,S_oc,h_ml,t,ice%ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & rsf, ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) m_ice_old(i) = m_ice(i) !PS From 98a9fcf893a091aa99aebcb518854c87180c6def Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 12:43:58 +0100 Subject: [PATCH 679/909] exchange paramters Pstar, ellipse, c_pressure, delta_min, Clim_evp with derived type --- src/ice_EVP.F90 | 18 +++++++++--------- src/ice_maEVP.F90 | 26 +++++++++++++------------- src/ice_modules.F90 | 16 ++++++++++------ src/ice_setup_step.F90 | 6 +++--- 4 files changed, 35 insertions(+), 31 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 10b3aaa8f..d0036cc87 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -87,7 +87,7 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) sigma22 => ice%work%sigma22(:) !___________________________________________________________________________ - vale = 1.0_WP/(ellipse**2) + vale = 1.0_WP/(ice%ellipse**2) dte = ice%ice_dt/(1.0_WP*evp_rheol_steps) det1 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) det2 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) !*ellipse**2 @@ -130,7 +130,7 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) ! ===== if delta is too small or zero, viscosity will too large (unlimited) ! (limit delta_inv) - delta_inv = 1.0_WP/max(delta,delta_min) + delta_inv = 1.0_WP/max(delta,ice%delta_min) zeta = ice_strength(el)*delta_inv ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta ! is too large in some regions and CFL criterion is violated. @@ -191,7 +191,7 @@ end subroutine stress_tensor ! #include "associate_part_ass.h" ! #include "associate_mesh_ass.h" ! -! vale = 1.0_WP/(ellipse**2) +! vale = 1.0_WP/(ice%ellipse**2) ! ! dte = ice%ice_dt/(1.0_WP*evp_rheol_steps) ! det1 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) @@ -237,9 +237,9 @@ end subroutine stress_tensor ! ! ! ===== if delta is too small or zero, viscosity will too large (unlimited) ! ! (limit delta_inv) -! delta_inv = 1.0_WP/max(delta,delta_min) +! delta_inv = 1.0_WP/max(delta,ice%delta_min) ! -! !!PS delta_inv = delta/(delta+delta_min) +! !!PS delta_inv = delta/(delta+ice%delta_min) ! ! zeta = ice_strength(el)*delta_inv ! ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta @@ -588,9 +588,9 @@ subroutine EVPdynamics(ice, partit, mesh) !_______________________________________________________________ ! Hunke and Dukowicz c*h*p* #if defined (__icepack) - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) #else - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) #endif ice_strength(el) = 0.5_WP*ice_strength(el) @@ -638,9 +638,9 @@ subroutine EVPdynamics(ice, partit, mesh) ! ===== Hunke and Dukowicz c*h*p* #if defined (__icepack) - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) #else - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) #endif ice_strength(el) = 0.5_WP*ice_strength(el) diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index b9d9208c8..723200509 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -129,7 +129,7 @@ subroutine stress_tensor_m(ice, partit, mesh) !___________________________________________________________________________ val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) + vale=1.0_WP/(ice%ellipse**2) det2=1.0_WP/(1.0_WP+alpha_evp) det1=alpha_evp*det2 do elem=1,myDim_elem2D @@ -165,11 +165,11 @@ subroutine stress_tensor_m(ice, partit, mesh) delta=sqrt(delta) #if defined (__icepack) - pressure = sum(strength(elnodes))*val3/max(delta,delta_min) + pressure = sum(strength(elnodes))*val3/max(delta,ice%delta_min) #else - pressure=pstar*msum*exp(-c_pressure*(1.0_WP-asum))/max(delta,delta_min) + pressure=ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum))/max(delta,ice%delta_min) #endif - r1=pressure*(eps1-max(delta,delta_min)) + r1=pressure*(eps1-max(delta,ice%delta_min)) r2=pressure*eps2*vale r3=pressure*eps12(elem)*vale si1=sigma11(elem)+sigma22(elem) @@ -454,7 +454,7 @@ subroutine EVPdynamics_m(ice, partit, mesh) !___________________________________________________________________________ val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) + vale=1.0_WP/(ice%ellipse**2) det2=1.0_WP/(1.0_WP+alpha_evp) det1=alpha_evp*det2 rdt=ice%ice_dt @@ -572,7 +572,7 @@ subroutine EVPdynamics_m(ice, partit, mesh) if(msum > 0.01) then ice_el(el) = .true. asum=sum(a_ice(elnodes))*val3 - pressure_fac(el) = det2*pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + pressure_fac(el) = det2*ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) endif end do @@ -619,7 +619,7 @@ subroutine EVPdynamics_m(ice, partit, mesh) ! ====== moduli: delta = sqrt(eps1**2+vale*(eps2**2+4.0_WP*eps12(el)**2)) - pressure = pressure_fac(el)/(delta+delta_min) + pressure = pressure_fac(el)/(delta+ice%delta_min) ! si1 = det1*(sigma11(el)+sigma22(el)) + pressure*(eps1-delta) ! si2 = det1*(sigma11(el)-sigma22(el)) + pressure*eps2*vale @@ -784,7 +784,7 @@ subroutine find_alpha_field_a(ice, partit, mesh) alpha_evp_array => ice%alpha_evp_array(:) !___________________________________________________________________________ val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) + vale=1.0_WP/(ice%ellipse**2) do elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ @@ -818,9 +818,9 @@ subroutine find_alpha_field_a(ice, partit, mesh) delta=sqrt(delta) #if defined (__icepack) - pressure = sum(strength(elnodes))*val3/(delta+delta_min)/msum + pressure = sum(strength(elnodes))*val3/(delta+ice%delta_min)/msum #else - pressure = pstar*exp(-c_pressure*(1.0_WP-asum))/(delta+delta_min) ! no multiplication + pressure = ice%pstar*exp(-ice%c_pressure*(1.0_WP-asum))/(delta+ice%delta_min) ! no multiplication ! with thickness (msum) #endif !adjust c_aevp such, that alpha_evp_array and beta_evp_array become in acceptable range @@ -883,7 +883,7 @@ subroutine stress_tensor_a(ice, partit, mesh) !___________________________________________________________________________ val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) + vale=1.0_WP/(ice%ellipse**2) do elem=1,myDim_elem2D !__________________________________________________________________________ ! if element has any cavity node skip it @@ -922,9 +922,9 @@ subroutine stress_tensor_a(ice, partit, mesh) delta=sqrt(delta) #if defined (__icepack) - pressure = sum(strength(elnodes))*val3/(delta+delta_min) + pressure = sum(strength(elnodes))*val3/(delta+ice%delta_min) #else - pressure=pstar*msum*exp(-c_pressure*(1.0_WP-asum))/(delta+delta_min) + pressure=ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum))/(delta+ice%delta_min) #endif r1=pressure*(eps1-delta) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 5574e292f..e8bdb083c 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -16,11 +16,11 @@ MODULE i_PARAM SAVE ! ice model parameters: ! RHEOLOGY - REAL(kind=WP) :: Pstar = 30000.0_WP ![N/m^2] - REAL(kind=WP) :: ellipse =2.0_WP ! - REAL(kind=WP) :: c_pressure =20.0_WP ! - REAL(kind=WP) :: delta_min=1.0e-11 ! [s^(-1)] - REAL(kind=WP) :: Clim_evp=615 ! kg/m^2 +! REAL(kind=WP) :: Pstar = 30000.0_WP ![N/m^2] +! REAL(kind=WP) :: ellipse =2.0_WP ! +! REAL(kind=WP) :: c_pressure =20.0_WP ! +! REAL(kind=WP) :: delta_min=1.0e-11 ! [s^(-1)] +! REAL(kind=WP) :: Clim_evp=615 ! kg/m^2 REAL(kind=WP) :: zeta_min=4.0e+8 ! kg/s INTEGER :: evp_rheol_steps=120 ! EVP rheology ! cybcycling steps @@ -42,9 +42,13 @@ MODULE i_PARAM integer :: whichEVP=0 !0=standart; 1=mEVP; 2=aEVP ! real(kind=WP) :: ice_dt !ice step=ice_ave_steps*oce_step -NAMELIST /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, Cd_oce_ice, & +! NAMELIST /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, Cd_oce_ice, & +! ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, alpha_evp, beta_evp, c_aevp + +NAMELIST /ice_dyn/ whichEVP, evp_rheol_steps, Cd_oce_ice, & ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, alpha_evp, beta_evp, c_aevp + END MODULE i_PARAM ! !============================================================================= diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index d2fcf4035..bcec8ce45 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -89,10 +89,10 @@ subroutine ice_setup(ice, tracers, partit, mesh) !___________________________________________________________________________ ! DO not change - ice%ice_dt=real(ice_ave_steps,WP)*dt + ice%ice_dt = real(ice_ave_steps,WP)*dt ! ice_dt=dt - ice%Tevp_inv=3.0_WP/ice%ice_dt - Clim_evp=Clim_evp*(evp_rheol_steps/ice%ice_dt)**2/ice%Tevp_inv ! This is combination + ice%Tevp_inv = 3.0_WP/ice%ice_dt + ice%Clim_evp = ice%Clim_evp*(evp_rheol_steps/ice%ice_dt)**2/ice%Tevp_inv ! This is combination ! it always ent !___________________________________________________________________________ From 68b50d07239c41280182fc2a3eb5aa777abd4987 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 13:06:45 +0100 Subject: [PATCH 680/909] exchange paramters zeta_min, evp_rheol_steps, ice_gamma_fct, ice_diff, theta_io, alpha_evp, beta_evp with derived type --- src/MOD_ICE.F90 | 4 ++-- src/ice_EVP.F90 | 24 ++++++++++++------------ src/ice_fct.F90 | 6 +++--- src/ice_maEVP.F90 | 22 +++++++++++----------- src/ice_modules.F90 | 20 ++++++++++++-------- src/ice_setup_step.F90 | 2 +- src/io_meandata.F90 | 8 +++++--- 7 files changed, 46 insertions(+), 40 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 6f5e1612d..8ab374911 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -586,8 +586,8 @@ subroutine ice_init(ice, partit, mesh) if (ice%whichEVP == 2) then allocate(ice%alpha_evp_array( node_size)) allocate(ice%beta_evp_array( node_size)) - ice%alpha_evp_array = 0.0_WP - ice%beta_evp_array = 0.0_WP + ice%alpha_evp_array = ice%alpha_evp + ice%beta_evp_array = ice%alpha_evp end if !___________________________________________________________________________ ! initialise surface ocean arrays in ice derived type diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index d0036cc87..ffba80ca0 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -88,7 +88,7 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) !___________________________________________________________________________ vale = 1.0_WP/(ice%ellipse**2) - dte = ice%ice_dt/(1.0_WP*evp_rheol_steps) + dte = ice%ice_dt/(1.0_WP*ice%evp_rheol_steps) det1 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) det2 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) !*ellipse**2 @@ -123,7 +123,7 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) ! ===== viscosity zeta should exceed zeta_min ! (done via limiting delta from above) - !if(delta>pressure/zeta_min) delta=pressure/zeta_min + !if(delta>pressure/ice%zeta_min) delta=pressure/ice%zeta_min !It does not work properly by !creating response where ice_strength is small ! Uncomment and test if necessary @@ -140,8 +140,8 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) ! remains stable), using it reduces viscosities too strongly. ! It is therefore commented - !if (zeta>Clim_evp*voltriangle(el)) then - !zeta=Clim_evp*voltriangle(el) + !if (zeta>ice%clim_evp*voltriangle(el)) then + !zeta=ice%clim_evp*voltriangle(el) !end if zeta = zeta*ice%Tevp_inv @@ -230,7 +230,7 @@ end subroutine stress_tensor ! ! ===== viscosity zeta should exceed zeta_min ! ! (done via limiting delta from above) ! -! !if(delta>pressure/zeta_min) delta=pressure/zeta_min +! !if(delta>pressure/ice%zeta_min) delta=pressure/ice%zeta_min ! !It does not work properly by ! !creating response where ice_strength is small ! ! Uncomment and test if necessary @@ -250,8 +250,8 @@ end subroutine stress_tensor ! ! remains stable), using it reduces viscosities too strongly. ! ! It is therefore commented ! -! !if (zeta>Clim_evp*voltriangle(el)) then -! !zeta=Clim_evp*voltriangle(el) +! !if (zeta>ice%clim_evp*voltriangle(el)) then +! !zeta=ice%clim_evp*voltriangle(el) ! !end if ! ! zeta = zeta*ice%Tevp_inv @@ -526,9 +526,9 @@ subroutine EVPdynamics(ice, partit, mesh) vsno_out=m_snow) #endif - rdt=ice%ice_dt/(1.0*evp_rheol_steps) - ax=cos(theta_io) - ay=sin(theta_io) + rdt=ice%ice_dt/(1.0*ice%evp_rheol_steps) + ax=cos(ice%theta_io) + ay=sin(ice%theta_io) !___________________________________________________________________________ ! Precompute values that are never changed during the iteration @@ -670,7 +670,7 @@ subroutine EVPdynamics(ice, partit, mesh) rdg_conv_elem(:) = 0.0_WP rdg_shear_elem(:) = 0.0_WP #endif - do shortstep=1, evp_rheol_steps + do shortstep=1, ice%evp_rheol_steps !_______________________________________________________________________ call stress_tensor(ice_strength, ice, partit, mesh) call stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) @@ -725,5 +725,5 @@ subroutine EVPdynamics(ice, partit, mesh) end do !_______________________________________________________________________ call exchange_nod(U_ice,V_ice,partit) - END DO !--> do shortstep=1, evp_rheol_steps + END DO !--> do shortstep=1, ice%evp_rheol_steps end subroutine EVPdynamics diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 99fc8aab9..995a95c2e 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -154,7 +154,7 @@ subroutine ice_TG_rhs(ice, partit, mesh) vm=sum(V_ice(elnodes)) !diffusivity - diff=ice_diff*sqrt(elem_area(elem)/scale_area) + diff=ice%ice_diff*sqrt(elem_area(elem)/scale_area) DO n=1,3 row=elnodes(n) DO q = 1,3 @@ -315,7 +315,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) m_templ => ice%data(4)%valuesl(:) #endif !___________________________________________________________________________ - gamma=ice_gamma_fct ! Added diffusivity parameter + gamma=ice%ice_gamma_fct ! Added diffusivity parameter ! Adjust it to ensure posivity of solution do row=1,myDim_nod2D !_______________________________________________________________________ @@ -526,7 +526,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) dm_temp => ice%data(4)%dvalues(:) #endif !___________________________________________________________________________ - gamma=ice_gamma_fct ! It should coinside with gamma in + gamma=ice%ice_gamma_fct ! It should coinside with gamma in ! ts_solve_low_order !========================== diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 723200509..3be411b8f 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -130,8 +130,8 @@ subroutine stress_tensor_m(ice, partit, mesh) !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ice%ellipse**2) - det2=1.0_WP/(1.0_WP+alpha_evp) - det1=alpha_evp*det2 + det2=1.0_WP/(1.0_WP+ice%alpha_evp) + det1=ice%alpha_evp*det2 do elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ @@ -455,10 +455,10 @@ subroutine EVPdynamics_m(ice, partit, mesh) !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ice%ellipse**2) - det2=1.0_WP/(1.0_WP+alpha_evp) - det1=alpha_evp*det2 + det2=1.0_WP/(1.0_WP+ice%alpha_evp) + det1=ice%alpha_evp*det2 rdt=ice%ice_dt - steps=evp_rheol_steps + steps=ice%evp_rheol_steps u_ice_aux=u_ice ! Initialize solver variables v_ice_aux=v_ice @@ -683,14 +683,14 @@ subroutine EVPdynamics_m(ice, partit, mesh) drag = rdt*Cd_oce_ice*umod*density_0*inv_thickness(i) !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) - rhsu = u_ice(i)+drag*u_w(i)+rdt*(inv_thickness(i)*stress_atmice_x(i)+u_rhs_ice(i)) + beta_evp*u_ice_aux(i) - rhsv = v_ice(i)+drag*v_w(i)+rdt*(inv_thickness(i)*stress_atmice_y(i)+v_rhs_ice(i)) + beta_evp*v_ice_aux(i) + rhsu = u_ice(i)+drag*u_w(i)+rdt*(inv_thickness(i)*stress_atmice_x(i)+u_rhs_ice(i)) + ice%beta_evp*u_ice_aux(i) + rhsv = v_ice(i)+drag*v_w(i)+rdt*(inv_thickness(i)*stress_atmice_y(i)+v_rhs_ice(i)) + ice%beta_evp*v_ice_aux(i) !solve (Coriolis and water stress are treated implicitly) - det = bc_index_nod2D(i) / ((1.0_WP+beta_evp+drag)**2 + (rdt*coriolis_node(i))**2) + det = bc_index_nod2D(i) / ((1.0_WP+ice%beta_evp+drag)**2 + (rdt*coriolis_node(i))**2) - u_ice_aux(i) = det*((1.0_WP+beta_evp+drag)*rhsu +rdt*coriolis_node(i)*rhsv) - v_ice_aux(i) = det*((1.0_WP+beta_evp+drag)*rhsv -rdt*coriolis_node(i)*rhsu) + u_ice_aux(i) = det*((1.0_WP+ice%beta_evp+drag)*rhsu +rdt*coriolis_node(i)*rhsv) + v_ice_aux(i) = det*((1.0_WP+ice%beta_evp+drag)*rhsv -rdt*coriolis_node(i)*rhsu) end if end do ! --> do i=1, myDim_nod2d @@ -1011,7 +1011,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) beta_evp_array => ice%beta_evp_array(:) !___________________________________________________________________________ - steps=evp_rheol_steps + steps=ice%evp_rheol_steps rdt=ice%ice_dt u_ice_aux=u_ice ! Initialize solver variables v_ice_aux=v_ice diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index e8bdb083c..61ef839bd 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -21,18 +21,22 @@ MODULE i_PARAM ! REAL(kind=WP) :: c_pressure =20.0_WP ! ! REAL(kind=WP) :: delta_min=1.0e-11 ! [s^(-1)] ! REAL(kind=WP) :: Clim_evp=615 ! kg/m^2 - REAL(kind=WP) :: zeta_min=4.0e+8 ! kg/s - INTEGER :: evp_rheol_steps=120 ! EVP rheology + + +! REAL(kind=WP) :: zeta_min=4.0e+8 ! kg/s +! INTEGER :: evp_rheol_steps=120 ! EVP rheology ! cybcycling steps - REAL(kind=WP) :: ice_gamma_fct=0.25_WP ! smoothing parameter +! REAL(kind=WP) :: ice_gamma_fct=0.25_WP ! smoothing parameter ! in ice fct advection - REAL(kind=WP) :: ice_diff=10.0_WP ! diffusion to stabilize +! REAL(kind=WP) :: ice_diff=10.0_WP ! diffusion to stabilize ! ice advection ! REAL(kind=WP) :: Tevp_inv - real(kind=WP) :: theta_io=0.0_WP ! rotation angle +! real(kind=WP) :: theta_io=0.0_WP ! rotation angle ! (ice-ocean), available ! in EVP - real(kind=WP) :: alpha_evp=250, beta_evp=250 +! real(kind=WP) :: alpha_evp=250, beta_evp=250 + + real(kind=WP) :: c_aevp=0.15 ! 0.1--0.2, but should be adjusted experimentally ! Ice forcing averaging integer :: ice_ave_steps=1 !ice step=ice_ave_steps*oce_step @@ -45,8 +49,8 @@ MODULE i_PARAM ! NAMELIST /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, Cd_oce_ice, & ! ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, alpha_evp, beta_evp, c_aevp -NAMELIST /ice_dyn/ whichEVP, evp_rheol_steps, Cd_oce_ice, & -ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, alpha_evp, beta_evp, c_aevp +NAMELIST /ice_dyn/ whichEVP, Cd_oce_ice, & +ice_ave_steps, c_aevp END MODULE i_PARAM diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index bcec8ce45..244c88fb3 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -92,7 +92,7 @@ subroutine ice_setup(ice, tracers, partit, mesh) ice%ice_dt = real(ice_ave_steps,WP)*dt ! ice_dt=dt ice%Tevp_inv = 3.0_WP/ice%ice_dt - ice%Clim_evp = ice%Clim_evp*(evp_rheol_steps/ice%ice_dt)**2/ice%Tevp_inv ! This is combination + ice%Clim_evp = ice%Clim_evp*(ice%evp_rheol_steps/ice%ice_dt)**2/ice%Tevp_inv ! This is combination ! it always ent !___________________________________________________________________________ diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 123514aee..4786346df 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -584,12 +584,13 @@ function mesh_dimname_from_dimsize(size, partit, mesh) result(name) ! !-------------------------------------------------------------------------------------------- ! -subroutine create_new_file(entry, dynamics, partit, mesh) +subroutine create_new_file(entry, ice, dynamics, partit, mesh) use g_clock use mod_mesh USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN + USE MOD_ICE use fesom_version_info_module use g_config use i_PARAM @@ -600,6 +601,7 @@ subroutine create_new_file(entry, dynamics, partit, mesh) type(t_mesh) , intent(in) :: mesh type(t_partit), intent(in) :: partit type(t_dyn) , intent(in) :: dynamics + type(t_ice) , intent(in) :: ice type(Meandata), intent(inout) :: entry character(len=*), parameter :: global_attributes_prefix = "FESOM_" @@ -673,7 +675,7 @@ subroutine create_new_file(entry, dynamics, partit, mesh) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'include_fleapyear', NF_INT, 1, include_fleapyear), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice' , NF_INT, 1, use_floatice), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP' , NF_INT, 1, whichEVP), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps' , NF_INT, 1, evp_rheol_steps), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps' , NF_INT, 1, ice%evp_rheol_steps), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'opt_visc' , NF_INT, 1, dynamics%opt_visc), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_wsplit' , NF_INT, 1, dynamics%use_wsplit), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell' , NF_INT, 1, use_partial_cell), __LINE__) @@ -887,7 +889,7 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh) entry%filename = filepath ! use any existing file with this name or create a new one if( nf_open(entry%filename, nf_write, entry%ncid) /= nf_noerr ) then - call create_new_file(entry, dynamics, partit, mesh) + call create_new_file(entry, ice, dynamics, partit, mesh) call assert_nf( nf_open(entry%filename, nf_write, entry%ncid), __LINE__) end if call assoc_ids(entry) From 13ecf2bed751807c8c198cfad4394f313f8f452a Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 14:01:27 +0100 Subject: [PATCH 681/909] exlude reading namelist.ice, ice_dyn from subroutine model setup --- src/gen_model_setup.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index df60f8d56..bc860b413 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -66,7 +66,7 @@ subroutine setup_model(partit) if(use_ice) then nmlfile ='namelist.ice' ! name of ice namelist file open (newunit=fileunit, file=nmlfile) - read (fileunit, NML=ice_dyn) +! read (fileunit, NML=ice_dyn) read (fileunit, NML=ice_therm) close (fileunit) endif From 54b758b69232457d07d9685683904ddfaa18b59a Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 27 Sep 2021 16:58:06 +0200 Subject: [PATCH 682/909] add optional environment for running awicm3 on mistral using OpenMPI --- env/mistral.dkrz.de/shell-intel+openmpi | 50 +++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 env/mistral.dkrz.de/shell-intel+openmpi diff --git a/env/mistral.dkrz.de/shell-intel+openmpi b/env/mistral.dkrz.de/shell-intel+openmpi new file mode 100644 index 000000000..d4227b601 --- /dev/null +++ b/env/mistral.dkrz.de/shell-intel+openmpi @@ -0,0 +1,50 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others + +module load gcc/4.8.2 +export LD_LIBRARY_PATH=/sw/rhel6-x64/gcc/gcc-4.8.2/lib64:$LD_LIBRARY_PATH # avoid GLIBCXX_3.4.15 not found error +module unload intel && module load intel/18.0.4 + +export FC=mpif90 CC=mpicc CXX=mpicxx; module unload intelmpi; module load openmpi/2.0.2p2_hpcx-intel14 + +# from https://www.dkrz.de/up/systems/mistral/running-jobs/mpi-runtime-settings +export I_MPI_FABRICS=shm:dapl +export I_MPI_FALLBACK=disable +export I_MPI_SLURM_EXT=0 # disable optimized startup algorithm for intel MPI +export I_MPI_LARGE_SCALE_THRESHOLD=8192 # set to a value larger than the number of MPI-tasks used !!! +export I_MPI_DYNAMIC_CONNECTION=1 +export I_MPI_CHECK_DAPL_PROVIDER_COMPATIBILITY=0 +export I_MPI_HARD_FINALIZE=1 +export I_MPI_ADJUST_GATHER=1 # do not use =3 (Shumilin's algorithm) + +export MXM_LOG_LEVEL=ERROR # try to disable "Conflicting CPU frequencies detected" messages from OpenMPI + +module unload netcdf && module load netcdf_c/4.3.2-gcc48 +module unload cmake && module load cmake +# we will get a segfault at runtime if we use a gcc from any of the provided gcc modules +export PATH=/sw/rhel6-x64/gcc/binutils-2.24-gccsys/bin:${PATH} + +export NETCDF_Fortran_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_fortran-4.4.2-intel14/include +export NETCDF_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_c-4.3.2-intel14/include +export NETCDF_CXX_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_cxx-4.2.1-gcc48/include + +export HDF5_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/hdf5/hdf5-1.8.14-threadsafe-intel14/include + + + +### not sure if the following settings are really necessary to run the coupled awicm3 + +export GRIBROOT=/pf/a/a270092/ecmwf/grib_api_intel_modulegcc +export NETCDFFROOT=/sw/rhel6-x64/netcdf/netcdf_fortran-4.4.3-intel14 +export NETCDFROOT=/sw/rhel6-x64/netcdf/netcdf_c-4.4.0-gcc48 +export HDF5ROOT=/sw/rhel6-x64/hdf5/hdf5-1.8.14-threadsafe-gcc48 +export SZIPROOT=/sw/rhel6-x64/sys/libaec-0.3.2-gcc48 +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${NETCDFFROOT}/lib:${HDF5ROOT}/lib:${NETCDFROOT}/lib:${SZIPROOT}/lib:${GRIBROOT}/lib + +export FESOM_USE_CPLNG='active' + +export DR_HOOK=1 +export DR_HOOK_IGNORE_SIGNALS='-1' +export DR_HOOK_OPT=prof +export DR_HOOK_PROFILE_LIMIT=0.5 +export OIFS_DUMMY_ACTION=ABORT +export HDF5_DISABLE_VERSION_CHECK=1 From 36c00eea52e2257c3d41e0c7b9a9a9c719311b73 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 14:41:13 +0100 Subject: [PATCH 683/909] exchange paramters c_aevp, ice_ave_steps, cd_oce_ice, whichEVP with ice derived type --- src/MOD_ICE.F90 | 24 ++++++++++++++++++++---- src/fesom_module.F90 | 6 +++--- src/ice_EVP.F90 | 2 +- src/ice_maEVP.F90 | 6 +++--- src/ice_modules.F90 | 14 +++++++------- src/ice_oce_coupling.F90 | 2 +- src/ice_setup_step.F90 | 5 ++--- src/ice_thermo_cpl.F90 | 2 +- src/ice_thermo_oce.F90 | 2 +- src/io_meandata.F90 | 7 +++---- src/oce_mesh.F90 | 20 ++++++++++---------- 11 files changed, 52 insertions(+), 38 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 8ab374911..38ebcaa2e 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -475,7 +475,7 @@ subroutine ice_init(ice, partit, mesh) USE MOD_MESH type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh) , intent(inout), target :: mesh end subroutine end interface end module @@ -492,9 +492,9 @@ subroutine ice_init(ice, partit, mesh) IMPLICIT NONE type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh) , intent(inout), target :: mesh !___________________________________________________________________________ - integer :: elem_size, node_size, n + integer :: elem_size, node_size, n, ed(2) integer, save :: nm_unit = 105 ! unit to open namelist file, skip 100-102 for cray integer :: iost !___________________________________________________________________________ @@ -695,7 +695,23 @@ subroutine ice_init(ice, partit, mesh) ice%atmcoupl%ice_alb = 0.6_WP ice%atmcoupl%enthalpyoffuse= 0.0_WP #endif /* (__oifs) */ -#endif /* (__oasis) */ +#endif /* (__oasis) */ + + !___________________________________________________________________________ + ! --> took from oce_mesh.F90 --> subroutine mesh_auxiliary_arrays(partit, mesh) + ! to here since namelist.ice is now read in ice_init where whichEVP is not available + ! when mesh_auxiliary_arrays is called + !array of 2D boundary conditions is used in ice_maEVP + if (ice%whichEVP > 0) then + allocate(mesh%bc_index_nod2D(myDim_nod2D+eDim_nod2D)) + mesh%bc_index_nod2D=1._WP + do n=1, myDim_edge2D + ed=mesh%edges(:, n) + if (myList_edge2D(n) <= mesh%edge2D_in) cycle + mesh%bc_index_nod2D(ed)=0._WP + end do + end if + end subroutine ice_init ! ! diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 60eda1d49..be7530914 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -176,9 +176,9 @@ subroutine fesom_init(fesom_total_nsteps) if (use_ice) then if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ice_setup'//achar(27)//'[0m' call ice_setup(f%ice, f%tracers, f%partit, f%mesh) - ice_steps_since_upd = ice_ave_steps-1 + ice_steps_since_upd = f%ice%ice_ave_steps-1 ice_update=.true. - if (f%mype==0) write(*,*) 'EVP scheme option=', whichEVP + if (f%mype==0) write(*,*) 'EVP scheme option=', f%ice%whichEVP else ! create a dummy ice derived type with only a_ice, m_ice, m_snow and ! uvice since oce_timesteps still needs in moment @@ -352,7 +352,7 @@ subroutine fesom_runloop(current_nsteps) call update_atm_forcing(n, f%ice, f%tracers, f%partit, f%mesh) f%t1_frc = MPI_Wtime() !___compute ice step________________________________________________ - if (ice_steps_since_upd>=ice_ave_steps-1) then + if (ice_steps_since_upd>=f%ice%ice_ave_steps-1) then ice_update=.true. ice_steps_since_upd = 0 else diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index ffba80ca0..989c2ccc5 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -685,7 +685,7 @@ subroutine EVPdynamics(ice, partit, mesh) !___________________________________________________________________ if (a_ice(n) >= 0.01_WP) then ! Skip if ice is absent umod = sqrt((U_ice(n)-U_w(n))**2+(V_ice(n)-V_w(n))**2) - drag = Cd_oce_ice*umod*density_0*inv_mass(n) + drag = ice%cd_oce_ice*umod*density_0*inv_mass(n) rhsu = U_ice(n) +rdt*(drag*(ax*U_w(n) - ay*V_w(n))+ & inv_mass(n)*stress_atmice_x(n) + U_rhs_ice(n)) diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 3be411b8f..f3cb23b43 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -680,7 +680,7 @@ subroutine EVPdynamics_m(ice, partit, mesh) !============= stress2rhs_m ends ====================== ! do i=1,myDim_nod2D umod = sqrt((u_ice_aux(i)-u_w(i))**2+(v_ice_aux(i)-v_w(i))**2) - drag = rdt*Cd_oce_ice*umod*density_0*inv_thickness(i) + drag = rdt*ice%cd_oce_ice*umod*density_0*inv_thickness(i) !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) rhsu = u_ice(i)+drag*u_w(i)+rdt*(inv_thickness(i)*stress_atmice_x(i)+u_rhs_ice(i)) + ice%beta_evp*u_ice_aux(i) @@ -824,7 +824,7 @@ subroutine find_alpha_field_a(ice, partit, mesh) ! with thickness (msum) #endif !adjust c_aevp such, that alpha_evp_array and beta_evp_array become in acceptable range - alpha_evp_array(elem)=max(50.0_WP,sqrt(ice%ice_dt*c_aevp*pressure/rhoice/elem_area(elem))) + alpha_evp_array(elem)=max(50.0_WP,sqrt(ice%ice_dt*ice%c_aevp*pressure/rhoice/elem_area(elem))) ! /voltriangle(elem) for FESOM1.4 ! We do not allow alpha to be too small! end do !--> do elem=1,myDim_elem2D @@ -1036,7 +1036,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) inv_thickness=1.0_WP/thickness umod=sqrt((u_ice_aux(i)-u_w(i))**2+(v_ice_aux(i)-v_w(i))**2) - drag=rdt*Cd_oce_ice*umod*density_0*inv_thickness + drag=rdt*ice%cd_oce_ice*umod*density_0*inv_thickness !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) rhsu=u_ice(i)+drag*u_w(i)+rdt*(inv_thickness*stress_atmice_x(i)+u_rhs_ice(i)) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 61ef839bd..89ab5ad2c 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -37,20 +37,20 @@ MODULE i_PARAM ! real(kind=WP) :: alpha_evp=250, beta_evp=250 - real(kind=WP) :: c_aevp=0.15 ! 0.1--0.2, but should be adjusted experimentally +! real(kind=WP) :: c_aevp=0.15 ! 0.1--0.2, but should be adjusted experimentally ! Ice forcing averaging - integer :: ice_ave_steps=1 !ice step=ice_ave_steps*oce_step - real(kind=WP) :: cd_oce_ice = 5.5e-3 ! drag coef. oce - ice +! integer :: ice_ave_steps=1 !ice step=ice_ave_steps*oce_step +! real(kind=WP) :: cd_oce_ice = 5.5e-3 ! drag coef. oce - ice - logical :: ice_free_slip=.false. - integer :: whichEVP=0 !0=standart; 1=mEVP; 2=aEVP +! logical :: ice_free_slip=.false. +! integer :: whichEVP=0 !0=standart; 1=mEVP; 2=aEVP ! real(kind=WP) :: ice_dt !ice step=ice_ave_steps*oce_step ! NAMELIST /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, Cd_oce_ice, & ! ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, alpha_evp, beta_evp, c_aevp -NAMELIST /ice_dyn/ whichEVP, Cd_oce_ice, & -ice_ave_steps, c_aevp +! NAMELIST /ice_dyn/ whichEVP, Cd_oce_ice, & +! ice_ave_steps END MODULE i_PARAM diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 0eff4bb5e..3e8caecff 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -107,7 +107,7 @@ subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) !_______________________________________________________________________ if(a_ice(n)>0.001_WP) then - aux=sqrt((u_ice(n)-u_w(n))**2+(v_ice(n)-v_w(n))**2)*density_0*Cd_oce_ice + aux=sqrt((u_ice(n)-u_w(n))**2+(v_ice(n)-v_w(n))**2)*density_0*ice%cd_oce_ice stress_iceoce_x(n) = aux * (u_ice(n)-u_w(n)) stress_iceoce_y(n) = aux * (v_ice(n)-v_w(n)) else diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 244c88fb3..ab5c5eba4 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -89,7 +89,7 @@ subroutine ice_setup(ice, tracers, partit, mesh) !___________________________________________________________________________ ! DO not change - ice%ice_dt = real(ice_ave_steps,WP)*dt + ice%ice_dt = real(ice%ice_ave_steps,WP)*dt ! ice_dt=dt ice%Tevp_inv = 3.0_WP/ice%ice_dt ice%Clim_evp = ice%Clim_evp*(ice%evp_rheol_steps/ice%ice_dt)**2/ice%Tevp_inv ! This is combination @@ -244,7 +244,6 @@ subroutine ice_timestep(step, ice, partit, mesh) use i_arrays use o_param use g_CONFIG - use i_PARAM, only: whichEVP use ice_EVPdynamics_interface use ice_maEVPdynamics_interface use ice_fct_interfaces @@ -288,7 +287,7 @@ subroutine ice_timestep(step, ice, partit, mesh) !___________________________________________________________________________ ! ===== Dynamics - SELECT CASE (whichEVP) + SELECT CASE (ice%whichEVP) CASE (0) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics...'//achar(27)//'[0m' call EVPdynamics (ice, partit, mesh) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 02ba58934..214cf7c43 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -127,7 +127,7 @@ subroutine thermodynamics(ice, partit, mesh) snow = prec_snow(inod) runo = runoff(inod) - ustar = sqrt(Cd_oce_ice)*sqrt((u_ice(inod)-u_w(inod))**2+(v_ice(inod)-v_w(inod))**2) + ustar = sqrt(ice%cd_oce_ice)*sqrt((u_ice(inod)-u_w(inod))**2+(v_ice(inod)-v_w(inod))**2) T_oc = T_oc_array(inod) S_oc = S_oc_array(inod) if (ref_sss_local) rsss = S_oc diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 5fdb7d64b..d37feac43 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -187,7 +187,7 @@ subroutine thermodynamics(ice, partit, mesh) if(ulevels_nod2d(i)>1) cycle ustar=((u_ice(i)-u_w(i))**2+ & (v_ice(i)-v_w(i))**2) - ustar_aux(i)=sqrt(ustar*Cd_oce_ice) + ustar_aux(i)=sqrt(ustar*ice%cd_oce_ice) END DO call exchange_nod(ustar_aux, partit) ! ================ diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 4786346df..a7d454795 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -105,7 +105,6 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) use g_cvmix_kpp use g_cvmix_tidal use diagnostics - use i_PARAM, only: whichEVP implicit none integer :: i, j integer, save :: nm_io_unit = 103 ! unit to open namelist file, skip 100-102 for cray @@ -505,10 +504,10 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) end if !___________________________________________________________________________________________________________________________________ - if (whichEVP==1) then + if (ice%whichEVP==1) then end if - if (whichEVP==2) then + if (ice%whichEVP==2) then call def_stream(elem2D, myDim_elem2D, 'alpha_EVP', 'alpha in EVP', 'n/a', ice%alpha_evp_array, 1, 'd', i_real4, partit, mesh) call def_stream(nod2D, myDim_nod2D, 'beta_EVP', 'beta in EVP', 'n/a', ice%beta_evp_array, 1, 'd', i_real4, partit, mesh) end if @@ -674,7 +673,7 @@ subroutine create_new_file(entry, ice, dynamics, partit, mesh) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'force_rotation' , NF_INT, 1, force_rotation), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'include_fleapyear', NF_INT, 1, include_fleapyear), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice' , NF_INT, 1, use_floatice), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP' , NF_INT, 1, whichEVP), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP' , NF_INT, 1, ice%whichEVP), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps' , NF_INT, 1, ice%evp_rheol_steps), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'opt_visc' , NF_INT, 1, dynamics%opt_visc), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_wsplit' , NF_INT, 1, dynamics%use_wsplit), __LINE__) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index b4222164c..a80f03850 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -2460,16 +2460,16 @@ SUBROUTINE mesh_auxiliary_arrays(partit, mesh) END DO deallocate(center_y, center_x) - !array of 2D boundary conditions is used in ice_maEVP - if (whichEVP > 0) then - allocate(mesh%bc_index_nod2D(myDim_nod2D+eDim_nod2D)) - mesh%bc_index_nod2D=1._WP - do n=1, myDim_edge2D - ed=mesh%edges(:, n) - if (myList_edge2D(n) <= mesh%edge2D_in) cycle - mesh%bc_index_nod2D(ed)=0._WP - end do - end if +! !array of 2D boundary conditions is used in ice_maEVP +! if (whichEVP > 0) then +! allocate(mesh%bc_index_nod2D(myDim_nod2D+eDim_nod2D)) +! mesh%bc_index_nod2D=1._WP +! do n=1, myDim_edge2D +! ed=mesh%edges(:, n) +! if (myList_edge2D(n) <= mesh%edge2D_in) cycle +! mesh%bc_index_nod2D(ed)=0._WP +! end do +! end if #if defined (__oasis) nn=0 From 9a329deafa11f0b4be568f62a5ee52f28c4906e4 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 14:59:28 +0100 Subject: [PATCH 684/909] exchange paramters ice_update, ice_steps_since_upd with ice derived type --- src/MOD_ICE.F90 | 2 +- src/fesom_module.F90 | 16 ++++++++-------- src/ice_oce_coupling.F90 | 19 +++++++++---------- 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 38ebcaa2e..79d923d00 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -154,8 +154,8 @@ MODULE MOD_ICE real(kind=WP) :: ice_dt ! ice step=ice_ave_steps*oce_step real(kind=WP) :: Tevp_inv - integer :: ice_steps_since_upd=0 + integer :: ice_steps_since_upd=0 logical :: ice_update = .true. !___________________________________________________________________________ contains diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index be7530914..cb00df671 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -176,8 +176,8 @@ subroutine fesom_init(fesom_total_nsteps) if (use_ice) then if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ice_setup'//achar(27)//'[0m' call ice_setup(f%ice, f%tracers, f%partit, f%mesh) - ice_steps_since_upd = f%ice%ice_ave_steps-1 - ice_update=.true. + f%ice%ice_steps_since_upd = f%ice%ice_ave_steps-1 + f%ice%ice_update=.true. if (f%mype==0) write(*,*) 'EVP scheme option=', f%ice%whichEVP else ! create a dummy ice derived type with only a_ice, m_ice, m_snow and @@ -352,15 +352,15 @@ subroutine fesom_runloop(current_nsteps) call update_atm_forcing(n, f%ice, f%tracers, f%partit, f%mesh) f%t1_frc = MPI_Wtime() !___compute ice step________________________________________________ - if (ice_steps_since_upd>=f%ice%ice_ave_steps-1) then - ice_update=.true. - ice_steps_since_upd = 0 + if (f%ice%ice_steps_since_upd>=f%ice%ice_ave_steps-1) then + f%ice%ice_update=.true. + f%ice%ice_steps_since_upd = 0 else - ice_update=.false. - ice_steps_since_upd=ice_steps_since_upd+1 + f%ice%ice_update=.false. + f%ice%ice_steps_since_upd=f%ice%ice_steps_since_upd+1 endif if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ice_timestep(n)'//achar(27)//'[0m' - if (ice_update) call ice_timestep(n, f%ice, f%partit, f%mesh) + if (f%ice%ice_update) call ice_timestep(n, f%ice, f%partit, f%mesh) !___compute fluxes to the ocean: heat, freshwater, momentum_________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' call oce_fluxes_mom(f%ice, f%dynamics, f%partit, f%mesh) ! momentum only diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 3e8caecff..60bc2117a 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -182,7 +182,7 @@ subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) elevation => ice%srfoce_ssh(:) ! the arrays in the ice model are renamed - if (ice_update) then + if (ice%ice_update) then do n=1, myDim_nod2d+eDim_nod2d if (ulevels_nod2D(n)>1) cycle T_oc_array(n) = temp(1,n) @@ -192,13 +192,12 @@ subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) else do n=1, myDim_nod2d+eDim_nod2d if (ulevels_nod2D(n)>1) cycle - T_oc_array(n) = (T_oc_array(n)*real(ice_steps_since_upd,WP)+temp(1,n))/real(ice_steps_since_upd+1,WP) - S_oc_array(n) = (S_oc_array(n)*real(ice_steps_since_upd,WP)+salt(1,n))/real(ice_steps_since_upd+1,WP) - elevation(n) = (elevation(n) *real(ice_steps_since_upd,WP)+ hbar(n))/real(ice_steps_since_upd+1,WP) - !NR !PS elevation(n)=(elevation(n)*real(ice_steps_since_upd)+eta_n(n))/real(ice_steps_since_upd+1,WP) - !NR elevation(n)=(elevation(n)*real(ice_steps_since_upd)+hbar(n))/real(ice_steps_since_upd+1,WP) !PS + T_oc_array(n) = (T_oc_array(n)*real(ice%ice_steps_since_upd,WP)+temp(1,n))/real(ice%ice_steps_since_upd+1,WP) + S_oc_array(n) = (S_oc_array(n)*real(ice%ice_steps_since_upd,WP)+salt(1,n))/real(ice%ice_steps_since_upd+1,WP) + elevation(n) = (elevation(n) *real(ice%ice_steps_since_upd,WP)+ hbar(n))/real(ice%ice_steps_since_upd+1,WP) + !NR elevation(n) = (elevation(n) *real(ice%ice_steps_since_upd,WP)+ eta_n(n))/real(ice%ice_steps_since_upd+1,WP) + !NR elevation(n) = (elevation(n) *real(ice%ice_steps_since_upd,WP)+ hbar(n))/real(ice%ice_steps_since_upd+1,WP) end do -!!PS elevation(:)= (elevation(:)*real(ice_steps_since_upd)+hbar(:))/real(ice_steps_since_upd+1,WP) end if u_w = 0.0_WP @@ -222,12 +221,12 @@ subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) uw = uw/vol vw = vw/vol - if (ice_update) then + if (ice%ice_update) then u_w(n)=uw v_w(n)=vw else - u_w(n)=(u_w(n)*real(ice_steps_since_upd,WP)+uw)/real(ice_steps_since_upd+1,WP) - v_w(n)=(v_w(n)*real(ice_steps_since_upd,WP)+vw)/real(ice_steps_since_upd+1,WP) + u_w(n)=(u_w(n)*real(ice%ice_steps_since_upd,WP)+uw)/real(ice%ice_steps_since_upd+1,WP) + v_w(n)=(v_w(n)*real(ice%ice_steps_since_upd,WP)+vw)/real(ice%ice_steps_since_upd+1,WP) endif end do call exchange_nod(u_w, v_w, partit) From 809bc2762b2f7b040f8dae0d157297d4eca762bf Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 15:45:38 +0100 Subject: [PATCH 685/909] kickout modules i_arrays and i_param --- src/cavity_param.F90 | 2 - src/fesom_module.F90 | 2 - src/gen_bulk_formulae.F90 | 2 - src/gen_forcing_couple.F90 | 2 - src/gen_forcing_init.F90 | 1 - src/gen_model_setup.F90 | 1 - src/gen_modules_cvmix_kpp.F90 | 1 - src/gen_modules_cvmix_pp.F90 | 1 - src/gen_modules_diag.F90 | 1 - src/ice_EVP.F90 | 10 -- src/ice_fct.F90 | 14 -- src/ice_maEVP.F90 | 16 --- src/ice_modules.F90 | 234 +++++++++++++++++----------------- src/ice_oce_coupling.F90 | 4 - src/ice_setup_step.F90 | 6 - src/ice_thermo_oce.F90 | 3 - src/io_blowup.F90 | 5 +- src/io_meandata.F90 | 1 - src/io_restart.F90 | 1 - src/oce_ale.F90 | 2 - src/oce_ale_mixing_kpp.F90 | 1 - src/oce_ale_mixing_pp.F90 | 1 - src/oce_ale_pressure_bv.F90 | 1 - src/oce_ale_tracer.F90 | 1 - src/oce_ale_vel_rhs.F90 | 1 - src/oce_mesh.F90 | 1 - src/oce_mo_conv.F90 | 1 - src/oce_shortwave_pene.F90 | 2 - src/write_step_info.F90 | 2 - 29 files changed, 119 insertions(+), 201 deletions(-) diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index fdd451c37..ac5409844 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -194,7 +194,6 @@ subroutine cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) use MOD_ICE use o_PARAM , only: density_0, WP use o_ARRAYS, only: heat_flux, water_flux, density_m_rho0, density_ref - use i_ARRAYS implicit none !___________________________________________________________________________ type(t_partit), intent(inout), target :: partit @@ -392,7 +391,6 @@ subroutine cavity_heat_water_fluxes_2eq(ice, tracers, partit, mesh) use MOD_ICE use o_PARAM , only: WP use o_ARRAYS, only: heat_flux, water_flux - use i_ARRAYS implicit none type(t_partit), intent(inout), target :: partit diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index cb00df671..15fbc588a 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -9,8 +9,6 @@ module fesom_main_storage_module USE MOD_DYN USE o_ARRAYS USE o_PARAM - USE i_PARAM - use i_ARRAYS use g_clock use g_config use g_comm_auto diff --git a/src/gen_bulk_formulae.F90 b/src/gen_bulk_formulae.F90 index ce98a2b34..835d42628 100755 --- a/src/gen_bulk_formulae.F90 +++ b/src/gen_bulk_formulae.F90 @@ -5,7 +5,6 @@ MODULE gen_bulk USE MOD_PARSUP USE MOD_ICE use i_therm_param - use i_arrays use g_forcing_arrays use g_forcing_param, only: ncar_bulk_z_wind, ncar_bulk_z_tair, ncar_bulk_z_shum use o_param, only: WP @@ -339,7 +338,6 @@ subroutine cal_wind_drag_coeff(partit) ! Reviewed by ?? !-------------------------------------------------- - use i_arrays use g_forcing_arrays implicit none diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index dc67f8c2a..a0331322f 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -80,8 +80,6 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) use MOD_TRACER use MOD_ICE use o_arrays - use i_arrays - use i_param use i_therm_param use g_forcing_param use g_forcing_arrays diff --git a/src/gen_forcing_init.F90 b/src/gen_forcing_init.F90 index 393a32303..3f5652c5b 100755 --- a/src/gen_forcing_init.F90 +++ b/src/gen_forcing_init.F90 @@ -40,7 +40,6 @@ subroutine forcing_array_setup(partit, mesh) use mod_mesh USE MOD_PARTIT USE MOD_PARSUP - use i_arrays use g_forcing_arrays use g_forcing_param use g_config diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index bc860b413..88d8c3018 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -3,7 +3,6 @@ subroutine setup_model(partit) USE MOD_PARTIT USE MOD_PARSUP use o_param - use i_param use i_therm_param use g_forcing_param use g_config diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index c6f63dd43..044fd27fb 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -30,7 +30,6 @@ module g_cvmix_kpp USE MOD_MESH use o_arrays use g_comm_auto - use i_arrays use g_forcing_arrays use g_support use o_mixing_KPP_mod diff --git a/src/gen_modules_cvmix_pp.F90 b/src/gen_modules_cvmix_pp.F90 index 27ba2ca05..08b3f8b7d 100644 --- a/src/gen_modules_cvmix_pp.F90 +++ b/src/gen_modules_cvmix_pp.F90 @@ -30,7 +30,6 @@ module g_cvmix_pp USE MOD_DYN use o_arrays use g_comm_auto - use i_arrays implicit none !___________________________________________________________________________ diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 139d271f3..d9f924a0c 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -10,7 +10,6 @@ module diagnostics use g_comm_auto use o_ARRAYS use g_forcing_arrays - use i_ARRAYS use o_mixing_KPP_mod use g_rotate_grid use g_support diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 989c2ccc5..4297764f2 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -51,8 +51,6 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_param - use i_param - use i_arrays use g_CONFIG #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength @@ -170,8 +168,6 @@ end subroutine stress_tensor ! ! velocity field. They are stored as elemental arrays (sigma11, sigma22 and ! ! sigma12). The ocean velocity is at nodal locations. ! use o_param -! use i_param -! use i_arrays ! USE g_CONFIG ! USE MOD_MESH ! USE MOD_PARTIT @@ -277,9 +273,7 @@ end subroutine stress_tensor ! ! The divergence is computed in a cysly over edges. It is slower that the ! ! approach in stress2rhs_e inherited from FESOM ! USE o_PARAM -! USE i_PARAM ! USE i_therm_param -! USE i_arrays ! use g_config, only: use_cavity ! USE MOD_MESH ! USE MOD_PARTIT @@ -367,9 +361,7 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH USE o_PARAM - USE i_PARAM USE i_THERM_PARAM - USE i_arrays IMPLICIT NONE type(t_ice), intent(inout), target :: ice type(t_partit), intent(inout), target :: partit @@ -452,8 +444,6 @@ subroutine EVPdynamics(ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH USE o_PARAM - USE i_ARRAYS - USE i_PARAM USE i_therm_param USE o_ARRAYS USE g_CONFIG diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 995a95c2e..ada6593ff 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -91,8 +91,6 @@ subroutine ice_TG_rhs(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_ICE - use i_Arrays - use i_PARAM use o_PARAM USE g_CONFIG implicit none @@ -184,7 +182,6 @@ end subroutine ice_TG_rhs ! USE MOD_PARSUP ! use MOD_MESH ! use o_PARAM -! use i_ARRAYS ! use ice_fct_interfaces ! implicit none ! integer :: n_size @@ -276,8 +273,6 @@ subroutine ice_solve_low_order(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - use i_ARRAYS - use i_PARAM use g_comm_auto implicit none type(t_ice), intent(inout), target :: ice @@ -361,7 +356,6 @@ subroutine ice_solve_high_order(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - use i_ARRAYS use o_PARAM use g_comm_auto implicit none @@ -479,8 +473,6 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - use i_arrays - use i_param use o_PARAM use g_comm_auto implicit none @@ -820,8 +812,6 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) USE MOD_PARSUP use MOD_TRACER use MOD_ICE - use i_PARAM - use i_ARRAYS ! implicit none integer :: n, n1, n2, row @@ -907,8 +897,6 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_ICE - use i_Arrays - use i_PARAM use o_PARAM USE g_CONFIG implicit none @@ -1031,8 +1019,6 @@ subroutine ice_update_for_div(ice, partit, mesh) USE MOD_PARSUP use MOD_TRACER use MOD_ICE - use i_Arrays - use i_PARAM use o_PARAM USE g_CONFIG use g_comm_auto diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index f3cb23b43..7a3737c73 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -89,10 +89,8 @@ subroutine stress_tensor_m(ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_param - use i_param use mod_mesh use g_config - use i_arrays #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif @@ -202,10 +200,8 @@ subroutine ssh2rhs(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use o_param - use i_param use mod_mesh use g_config - use i_arrays use i_therm_param implicit none type(t_ice) , intent(inout), target :: ice @@ -295,11 +291,9 @@ subroutine stress2rhs_m(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use o_param - use i_param use i_therm_param use mod_mesh use g_config - use i_arrays implicit none type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit @@ -384,10 +378,8 @@ subroutine EVPdynamics_m(ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_param - use i_param use i_therm_param use g_config - use i_arrays use o_arrays use g_comm_auto #if defined (__icepack) @@ -744,10 +736,8 @@ subroutine find_alpha_field_a(ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_param - use i_param use i_therm_param use g_config - use i_arrays #if defined (__icepack) use icedrv_main, only: strength #endif @@ -841,10 +831,8 @@ subroutine stress_tensor_a(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use o_param - use i_param use mod_mesh use g_config - use i_arrays #if defined (__icepack) use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif @@ -962,9 +950,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_param - use i_arrays USE o_arrays - use i_param use o_PARAM use i_therm_param use g_config, only: use_cavity @@ -1096,8 +1082,6 @@ subroutine find_beta_field_a(ice, partit, mesh) USE MOD_MESH USE MOD_ICE use o_param - USE i_param - use i_arrays Implicit none type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 89ab5ad2c..5ad4b8510 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -1,120 +1,120 @@ -! ===================== -! Sea ice -! Finite-volume implementation -! Modules for coupled version -! Only EVP solver is available in this distrib. memory setup -! ====================== -! Ice velocity is defined at nodes -!=========================================================================== -! -MODULE i_PARAM - ! - ! Ice specific parameters - ! - USE o_PARAM - IMPLICIT NONE - SAVE - ! ice model parameters: - ! RHEOLOGY -! REAL(kind=WP) :: Pstar = 30000.0_WP ![N/m^2] -! REAL(kind=WP) :: ellipse =2.0_WP ! -! REAL(kind=WP) :: c_pressure =20.0_WP ! -! REAL(kind=WP) :: delta_min=1.0e-11 ! [s^(-1)] -! REAL(kind=WP) :: Clim_evp=615 ! kg/m^2 - - -! REAL(kind=WP) :: zeta_min=4.0e+8 ! kg/s -! INTEGER :: evp_rheol_steps=120 ! EVP rheology - ! cybcycling steps -! REAL(kind=WP) :: ice_gamma_fct=0.25_WP ! smoothing parameter - ! in ice fct advection -! REAL(kind=WP) :: ice_diff=10.0_WP ! diffusion to stabilize - ! ice advection -! REAL(kind=WP) :: Tevp_inv -! real(kind=WP) :: theta_io=0.0_WP ! rotation angle - ! (ice-ocean), available - ! in EVP -! real(kind=WP) :: alpha_evp=250, beta_evp=250 - - -! real(kind=WP) :: c_aevp=0.15 ! 0.1--0.2, but should be adjusted experimentally - ! Ice forcing averaging -! integer :: ice_ave_steps=1 !ice step=ice_ave_steps*oce_step -! real(kind=WP) :: cd_oce_ice = 5.5e-3 ! drag coef. oce - ice - -! logical :: ice_free_slip=.false. -! integer :: whichEVP=0 !0=standart; 1=mEVP; 2=aEVP -! real(kind=WP) :: ice_dt !ice step=ice_ave_steps*oce_step - -! NAMELIST /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, Cd_oce_ice, & -! ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, alpha_evp, beta_evp, c_aevp - -! NAMELIST /ice_dyn/ whichEVP, Cd_oce_ice, & -! ice_ave_steps - - -END MODULE i_PARAM -! -!============================================================================= -! -MODULE i_ARRAYS -! -! Arrays used to store ice variables and organize coupling -! -USE o_PARAM -implicit none -save - logical :: ice_update = .true. ! - integer :: ice_steps_since_upd = 0 ! - real(kind=WP),allocatable,dimension(:,:) :: ice_grad_vel -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice, V_ice -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice, a_ice, m_snow -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_old, V_ice_old, m_ice_old, a_ice_old, m_snow_old,thdgr_old !PS -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: thdgr_old -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: ths_temp -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux -#if defined (__oasis) || defined (__ifsinterface) -! real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling -! real(kind=WP),target, allocatable, dimension(:) :: ice_alb ! new fields for OIFS coupling -! real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux -! real(kind=WP),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux - !temporary flux fields - !(for flux correction) -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_temp, m_templ, dm_temp, rhs_tempdiv -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_templ, dm_temp, rhs_tempdiv -#if defined (__oifs) || defined (__ifsinterface) -! real(kind=WP),target, allocatable, dimension(:) :: enthalpyoffuse -#endif -#endif /* (__oasis) || defined (__ifsinterface)*/ - -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: S_oc_array, T_oc_array -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_x -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_y -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_x -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_y -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: t_skin - ! FCT implementation -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_icel, a_icel, m_snowl -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: alpha_evp_array(:) ! of myDim_elem2D -! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: beta_evp_array(:) ! of myDim_node2D+eDim_node2D - -! Mean arrays - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_mean, V_ice_mean - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice_mean, a_ice_mean, m_snow_mean - END MODULE i_ARRAYS +! ! ! ===================== +! ! ! Sea ice +! ! ! Finite-volume implementation +! ! ! Modules for coupled version +! ! ! Only EVP solver is available in this distrib. memory setup +! ! ! ====================== +! ! ! Ice velocity is defined at nodes +! ! !=========================================================================== +! ! ! +! ! MODULE i_PARAM +! ! ! +! ! ! Ice specific parameters +! ! ! +! ! USE o_PARAM +! ! IMPLICIT NONE +! ! SAVE +! ! ! ice model parameters: +! ! ! RHEOLOGY +! ! ! REAL(kind=WP) :: Pstar = 30000.0_WP ![N/m^2] +! ! ! REAL(kind=WP) :: ellipse =2.0_WP ! +! ! ! REAL(kind=WP) :: c_pressure =20.0_WP ! +! ! ! REAL(kind=WP) :: delta_min=1.0e-11 ! [s^(-1)] +! ! ! REAL(kind=WP) :: Clim_evp=615 ! kg/m^2 +! ! +! ! +! ! ! REAL(kind=WP) :: zeta_min=4.0e+8 ! kg/s +! ! ! INTEGER :: evp_rheol_steps=120 ! EVP rheology +! ! ! cybcycling steps +! ! ! REAL(kind=WP) :: ice_gamma_fct=0.25_WP ! smoothing parameter +! ! ! in ice fct advection +! ! ! REAL(kind=WP) :: ice_diff=10.0_WP ! diffusion to stabilize +! ! ! ice advection +! ! ! REAL(kind=WP) :: Tevp_inv +! ! ! real(kind=WP) :: theta_io=0.0_WP ! rotation angle +! ! ! (ice-ocean), available +! ! ! in EVP +! ! ! real(kind=WP) :: alpha_evp=250, beta_evp=250 +! ! +! ! +! ! ! real(kind=WP) :: c_aevp=0.15 ! 0.1--0.2, but should be adjusted experimentally +! ! ! Ice forcing averaging +! ! ! integer :: ice_ave_steps=1 !ice step=ice_ave_steps*oce_step +! ! ! real(kind=WP) :: cd_oce_ice = 5.5e-3 ! drag coef. oce - ice +! ! +! ! ! logical :: ice_free_slip=.false. +! ! ! integer :: whichEVP=0 !0=standart; 1=mEVP; 2=aEVP +! ! ! real(kind=WP) :: ice_dt !ice step=ice_ave_steps*oce_step +! ! +! ! ! NAMELIST /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, Cd_oce_ice, & +! ! ! ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, alpha_evp, beta_evp, c_aevp +! ! +! ! ! NAMELIST /ice_dyn/ whichEVP, Cd_oce_ice, & +! ! ! ice_ave_steps +! ! +! ! +! ! END MODULE i_PARAM +! ! ! +! ! !============================================================================= +! ! ! +! ! MODULE i_ARRAYS +! ! ! +! ! ! Arrays used to store ice variables and organize coupling +! ! ! +! ! USE o_PARAM +! ! implicit none +! ! save +! ! ! logical :: ice_update = .true. ! +! ! ! integer :: ice_steps_since_upd = 0 ! +! ! ! real(kind=WP),allocatable,dimension(:,:) :: ice_grad_vel +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice, V_ice +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice, a_ice, m_snow +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_old, V_ice_old, m_ice_old, a_ice_old, m_snow_old,thdgr_old !PS +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: thdgr_old +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: ths_temp +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux +! ! #if defined (__oasis) || defined (__ifsinterface) +! ! ! real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling +! ! ! real(kind=WP),target, allocatable, dimension(:) :: ice_alb ! new fields for OIFS coupling +! ! ! real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux +! ! ! real(kind=WP),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux +! ! !temporary flux fields +! ! !(for flux correction) +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_temp, m_templ, dm_temp, rhs_tempdiv +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_templ, dm_temp, rhs_tempdiv +! ! #if defined (__oifs) || defined (__ifsinterface) +! ! ! real(kind=WP),target, allocatable, dimension(:) :: enthalpyoffuse +! ! #endif +! ! #endif /* (__oasis) || defined (__ifsinterface)*/ +! ! +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: S_oc_array, T_oc_array +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_x +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_y +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_x +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_y +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: t_skin +! ! ! FCT implementation +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_icel, a_icel, m_snowl +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: alpha_evp_array(:) ! of myDim_elem2D +! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: beta_evp_array(:) ! of myDim_node2D+eDim_node2D +! ! +! ! ! Mean arrays +! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_mean, V_ice_mean +! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice_mean, a_ice_mean, m_snow_mean +! ! END MODULE i_ARRAYS !===================================================================== module i_therm_param USE o_PARAM diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 60bc2117a..34ac3ae52 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -59,8 +59,6 @@ subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) USE MOD_MESH use o_PARAM use o_ARRAYS - use i_ARRAYS - use i_PARAM USE g_CONFIG use g_comm_auto #if defined (__icepack) @@ -148,7 +146,6 @@ subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) ! transmits the relevant fields from the ocean to the ice model use o_PARAM - use i_ARRAYS use MOD_MESH use MOD_DYN use MOD_ICE @@ -243,7 +240,6 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) use MOD_MESH USE g_CONFIG use o_ARRAYS - use i_ARRAYS use g_comm_auto use g_forcing_param, only: use_virt_salt use g_forcing_arrays diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index ab5c5eba4..2f502873b 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -65,8 +65,6 @@ subroutine ice_timestep(istep, ice, partit, mesh) ! ice initialization + array allocation + time stepping subroutine ice_setup(ice, tracers, partit, mesh) use o_param - use i_param - use i_arrays use g_CONFIG use mod_mesh USE MOD_PARTIT @@ -119,11 +117,9 @@ end subroutine ice_setup ! ! back compatibility with FESOM input routines ! ! use o_param -! use i_param ! use MOD_MESH ! USE MOD_PARTIT ! USE MOD_PARSUP -! use i_arrays ! USE g_CONFIG ! ! implicit none @@ -241,7 +237,6 @@ subroutine ice_timestep(step, ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_ICE - use i_arrays use o_param use g_CONFIG use ice_EVPdynamics_interface @@ -384,7 +379,6 @@ subroutine ice_initial_state(ice, tracers, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - use i_ARRAYs use o_PARAM use o_arrays use g_CONFIG diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index d37feac43..f3ad6aa7c 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -26,7 +26,6 @@ subroutine cut_off(ice, partit, mesh) !=================================================================== subroutine cut_off(ice, partit, mesh) use o_param - use i_arrays use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP @@ -115,8 +114,6 @@ subroutine thermodynamics(ice, partit, mesh) USE MOD_MESH use o_param use i_therm_param - use i_param - use i_arrays use g_config use g_forcing_param use g_forcing_arrays diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index ef5c6d5cf..ef96a6465 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -8,9 +8,8 @@ MODULE io_BLOWUP USE MOD_TRACER USE MOD_DYN USE MOD_ICE - use o_arrays - use i_arrays - implicit none + use o_arrays + implicit none #include "netcdf.inc" !___________________________________________________________________________ type nc_dims diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index a7d454795..3594ff57d 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -592,7 +592,6 @@ subroutine create_new_file(entry, ice, dynamics, partit, mesh) USE MOD_ICE use fesom_version_info_module use g_config - use i_PARAM use o_PARAM implicit none diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 98e1da355..cf3b335e6 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -9,7 +9,6 @@ MODULE io_RESTART use MOD_DYN use MOD_ICE use o_arrays - use i_arrays use g_cvmix_tke use g_cvmix_idemix implicit none diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 1e1e082a7..9b56dbb18 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1935,7 +1935,6 @@ subroutine vert_vel_ale(dynamics, partit, mesh) USE MOD_DYN use g_comm_auto use io_RESTART !!PS - use i_arrays !!PS use g_forcing_arrays !!PS implicit none type(t_dyn) , intent(inout), target :: dynamics @@ -2827,7 +2826,6 @@ subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) USE MOD_PARSUP use g_comm_auto use io_RESTART !PS - use i_ARRAYS !PS use o_mixing_KPP_mod use g_cvmix_tke use g_cvmix_idemix diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 1bc4c676e..3f8a7aad4 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -14,7 +14,6 @@ MODULE o_mixing_KPP_mod USE MOD_DYN USE o_ARRAYS USE g_config - USE i_arrays USE g_forcing_arrays USE g_comm_auto USE g_support diff --git a/src/oce_ale_mixing_pp.F90 b/src/oce_ale_mixing_pp.F90 index 36cf7d519..e816dcbad 100644 --- a/src/oce_ale_mixing_pp.F90 +++ b/src/oce_ale_mixing_pp.F90 @@ -22,7 +22,6 @@ subroutine oce_mixing_pp(dynamics, partit, mesh) USE o_PARAM USE o_ARRAYS USE g_config -use i_arrays IMPLICIT NONE type(t_mesh), intent(in), target :: mesh diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index d4d482171..12c3c954d 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -208,7 +208,6 @@ subroutine pressure_bv(tracers, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE o_ARRAYS - use i_arrays USE o_mixing_KPP_mod, only: dbsfc USE diagnostics, only: ldiag_dMOC use densityJM_components_interface diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 8b77301d5..fdba81109 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -431,7 +431,6 @@ subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) use MOD_DYN use o_PARAM use o_ARRAYS, only: Ki, Kv, heat_flux, water_flux, slope_tapered - use i_ARRAYS USE MOD_PARTIT USE MOD_PARSUP use g_CONFIG diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index a6df4c9a4..f3f954c46 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -39,7 +39,6 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_ARRAYS, only: coriolis, ssh_gp, pgf_x, pgf_y - use i_ARRAYS use i_therm_param use o_PARAM use g_CONFIG diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index a80f03850..0bfebf092 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -2168,7 +2168,6 @@ SUBROUTINE mesh_auxiliary_arrays(partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE o_PARAM -USE i_PARAM USE o_ARRAYS USE g_ROTATE_grid use g_comm_auto diff --git a/src/oce_mo_conv.F90 b/src/oce_mo_conv.F90 index c3cc7a844..7045794d2 100644 --- a/src/oce_mo_conv.F90 +++ b/src/oce_mo_conv.F90 @@ -9,7 +9,6 @@ subroutine mo_convect(ice, partit, mesh) USE MOD_ICE USE o_ARRAYS USE g_config - use i_arrays use g_comm_auto IMPLICIT NONE type(t_ice), intent(in), target :: ice diff --git a/src/oce_shortwave_pene.F90 b/src/oce_shortwave_pene.F90 index 19961ddf5..f4ba8e51a 100644 --- a/src/oce_shortwave_pene.F90 +++ b/src/oce_shortwave_pene.F90 @@ -13,8 +13,6 @@ subroutine cal_shortwave_rad(ice, partit, mesh) USE g_CONFIG use g_forcing_arrays use g_comm_auto - use i_param - use i_arrays use i_therm_param IMPLICIT NONE type(t_ice) , intent(inout), target :: ice diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index dc1b6ae8a..d7007d0ac 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -49,7 +49,6 @@ subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) use o_PARAM use o_ARRAYS, only: water_flux, heat_flux, & pgf_x, pgf_y, Av, Kv - use i_ARRAYS use g_comm_auto use g_support implicit none @@ -259,7 +258,6 @@ subroutine check_blowup(istep, ice, dynamics, tracers, partit, mesh) use o_PARAM use o_ARRAYS, only: water_flux, stress_surf, & heat_flux, Kv, Av - use i_ARRAYS use g_comm_auto use io_BLOWUP use g_forcing_arrays From 2d238cde876c30498fdbb3ee31e8751fe35b4b92 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 15:54:15 +0100 Subject: [PATCH 686/909] kickout modules i_arrays and i_param in ice_thermo_cpl.F90 --- src/ice_thermo_cpl.F90 | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 214cf7c43..e77057d35 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -21,8 +21,6 @@ subroutine thermodynamics(ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH use i_therm_param - use i_param - use i_arrays use g_config use g_forcing_param use g_forcing_arrays From 2665c4ec3fd77c8bbc210843afd92fdd36e2d708 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 16:00:12 +0100 Subject: [PATCH 687/909] clean up ice_setup_step.F90 --- src/ice_setup_step.F90 | 204 +++++++---------------------------------- 1 file changed, 33 insertions(+), 171 deletions(-) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 2f502873b..e2c557f38 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -1,29 +1,15 @@ - -module ice_array_setup_interface - interface - subroutine ice_array_setup(partit, mesh) - use mod_mesh - USE MOD_PARTIT - USE MOD_PARSUP - use mod_tracer - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - end subroutine - end interface -end module - module ice_initial_state_interface interface subroutine ice_initial_state(ice, tracers, partit, mesh) - use mod_mesh + USE MOD_ICE + USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer - USE MOD_ICE - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers + USE MOD_MESH type(t_ice) , intent(inout), target :: ice + type(t_tracer), intent(in) , target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh end subroutine end interface end module @@ -31,15 +17,15 @@ subroutine ice_initial_state(ice, tracers, partit, mesh) module ice_setup_interface interface subroutine ice_setup(ice, tracers, partit, mesh) - use mod_mesh + USE MOD_ICE + USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer - USE MOD_ICE + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_tracer), intent(in) , target :: tracers type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_tracer), intent(in), target :: tracers - type(t_ice), intent(inout), target :: ice + type(t_mesh) , intent(in) , target :: mesh end subroutine end interface end module @@ -47,15 +33,14 @@ subroutine ice_setup(ice, tracers, partit, mesh) module ice_timestep_interface interface subroutine ice_timestep(istep, ice, partit, mesh) - use mod_mesh + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer - USE MOD_ICE - integer, intent(in) :: istep + USE MOD_MESH + integer intent(in) :: istep + type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - type(t_ice), intent(inout), target :: ice + type(t_mesh) , intent(in) , target :: mesh end subroutine end interface end module @@ -64,21 +49,21 @@ subroutine ice_timestep(istep, ice, partit, mesh) !_______________________________________________________________________________ ! ice initialization + array allocation + time stepping subroutine ice_setup(ice, tracers, partit, mesh) - use o_param - use g_CONFIG - use mod_mesh + USE MOD_ICE + USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - use mod_tracer - use MOD_ICE + USE MOD_MESH + use o_param + use g_CONFIG use ice_array_setup_interface use ice_initial_state_interface use ice_fct_interfaces implicit none - type(t_ice), intent(inout), target :: ice - type(t_mesh), intent(in), target :: mesh + type(t_ice) , intent(inout), target :: ice + type(t_tracer), intent(in) , target :: tracers + type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in), target :: tracers !___________________________________________________________________________ ! initialise ice derived type @@ -90,13 +75,12 @@ subroutine ice_setup(ice, tracers, partit, mesh) ice%ice_dt = real(ice%ice_ave_steps,WP)*dt ! ice_dt=dt ice%Tevp_inv = 3.0_WP/ice%ice_dt - ice%Clim_evp = ice%Clim_evp*(ice%evp_rheol_steps/ice%ice_dt)**2/ice%Tevp_inv ! This is combination - ! it always ent - + ! This is combination it always enters + ice%Clim_evp = ice%Clim_evp*(ice%evp_rheol_steps/ice%ice_dt)**2/ice%Tevp_inv + !___________________________________________________________________________ if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_fct_init'//achar(27)//'[0m' call ice_mass_matrix_fill(ice, partit, mesh) -! call ice_fct_init(ice, partit, mesh) !___________________________________________________________________________ ! Initialization routine, user input is required @@ -107,136 +91,14 @@ subroutine ice_setup(ice, tracers, partit, mesh) if(partit%mype==0) write(*,*) 'Ice is initialized' end subroutine ice_setup ! -! ! -! !_______________________________________________________________________________ -! subroutine ice_array_setup(partit, mesh) -! ! -! ! inializing sea ice model -! ! -! ! Variables that serve for exchange with atmosphere are nodal, to keep -! ! back compatibility with FESOM input routines -! -! use o_param -! use MOD_MESH -! USE MOD_PARTIT -! USE MOD_PARSUP -! USE g_CONFIG -! -! implicit none -! type(t_partit), intent(inout), target :: partit -! type(t_mesh), intent(in), target :: mesh -! integer :: n_size, e_size, mn, k, n, n1, n2 -! -! #include "associate_part_def.h" -! #include "associate_mesh_def.h" -! #include "associate_part_ass.h" -! #include "associate_mesh_ass.h" -! -! n_size=myDim_nod2D+eDim_nod2D -! e_size=myDim_elem2D+eDim_elem2D -! -! ! Allocate memory for variables of ice model -! ! allocate(u_ice(n_size), v_ice(n_size)) -! ! allocate(U_rhs_ice(n_size), V_rhs_ice(n_size)) -! ! allocate(sigma11(e_size), sigma12(e_size), sigma22(e_size)) -! ! allocate(eps11(e_size), eps12(e_size), eps22(e_size)) -! ! allocate(m_ice(n_size), a_ice(n_size), m_snow(n_size)) -! ! allocate(rhs_m(n_size), rhs_a(n_size), rhs_ms(n_size)) -! ! allocate(t_skin(n_size)) -! ! allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS -! ! allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS -! ! allocate(thdgr_old(n_size)) !PS -! ! if (whichEVP > 0) then -! ! ! allocate(u_ice_aux(n_size), v_ice_aux(n_size)) -! ! allocate(alpha_evp_array(myDim_elem2D)) -! ! allocate(beta_evp_array(n_size)) -! ! -! ! alpha_evp_array=alpha_evp -! ! beta_evp_array =alpha_evp ! alpha=beta works most reliable -! ! ! u_ice_aux=0.0_WP -! ! ! v_ice_aux=0.0_WP -! ! end if -! -! ! allocate(rhs_mdiv(n_size), rhs_adiv(n_size), rhs_msdiv(n_size)) -! -! ! m_ice_old=0.0_WP !PS -! ! a_ice_old=0.0_WP !PS -! ! m_snow_old=0.0_WP !PS -! ! thdgr_old=0.0_WP !PS -! ! U_ice_old=0.0_WP !PS -! ! V_ice_old=0.0_WP !PS -! -! ! rhs_m=0.0_WP -! ! rhs_ms=0.0_WP -! ! rhs_a=0.0_WP -! ! m_ice=0.0_WP -! ! a_ice=0.0_WP -! ! m_snow=0.0_WP -! ! U_rhs_ice=0.0_WP -! ! V_rhs_ice=0.0_WP -! ! U_ice=0.0_WP -! ! V_ice=0.0_WP -! ! sigma11=0.0_WP -! ! sigma22=0.0_WP -! ! sigma12=0.0_WP -! ! eps11=0.0_WP -! ! eps12=0.0_WP -! ! eps22=0.0_WP -! ! t_skin=0.0_WP -! ! rhs_mdiv=0.0_WP -! ! rhs_adiv=0.0_WP -! ! rhs_msdiv=0.0_WP -! -! -! ! Allocate memory for arrays used in coupling -! ! with ocean and atmosphere -! ! allocate(S_oc_array(n_size), T_oc_array(n_size)) ! copies of ocean T ans S -! ! S_oc_array = 0.0_WP -! ! T_oc_array = 0.0_WP -! ! allocate(fresh_wa_flux(n_size), net_heat_flux(n_size)) -! ! allocate(fresh_wa_flux(n_size)) -! ! allocate(net_heat_flux(n_size)) -! ! fresh_wa_flux = 0.0_WP -! ! net_heat_flux = 0.0_WP -! ! allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) -! ! stress_atmice_x = 0.0_WP -! ! stress_atmice_y = 0.0_WP -! ! ! allocate(elevation(n_size)) ! =ssh of ocean -! ! ! elevation = 0.0_WP -! ! allocate(stress_iceoce_x(n_size), stress_iceoce_y(n_size)) -! ! stress_iceoce_x = 0.0_WP -! ! stress_iceoce_y = 0.0_WP -! ! allocate(U_w(n_size), V_w(n_size)) ! =uf and vf of ocean at surface nodes -! #if defined (__oasis) || defined (__ifsinterface) -! ! allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) -! ! allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) -! #if defined (__oifs) || defined (__ifsinterface) -! ! allocate(ice_alb(n_size), ice_temp(n_size), enthalpyoffuse(n_size)) -! ! allocate(ice_alb(n_size), enthalpyoffuse(n_size)) -! ! allocate(rhs_tempdiv(n_size), rhs_temp(n_size)) -! ! allocate(rhs_tempdiv(n_size)) -! ! ice_alb=0.6_WP -! ! ice_temp=265.15_WP -! ! rhs_tempdiv=0._WP -! ! rhs_temp=0._WP -! ! enthalpyoffuse=0._WP -! #endif /* (__oifs) || defined (__ifsinterface) */ -! ! oce_heat_flux=0._WP -! ! ice_heat_flux=0._WP -! ! tmp_oce_heat_flux=0._WP -! ! tmp_ice_heat_flux=0._WP -! #endif /* (__oasis) || defined (__ifsinterface) */ -! end subroutine ice_array_setup -! ! -! ! !_______________________________________________________________________________ ! Sea ice model step subroutine ice_timestep(step, ice, partit, mesh) - use mod_mesh + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP - USE MOD_ICE + USE MOD_MESH use o_param use g_CONFIG use ice_EVPdynamics_interface @@ -383,10 +245,10 @@ subroutine ice_initial_state(ice, tracers, partit, mesh) use o_arrays use g_CONFIG implicit none - type(t_ice), intent(inout), target :: ice - type(t_tracer), intent(in), target :: tracers + type(t_ice) , intent(inout), target :: ice + type(t_tracer), intent(in) , target :: tracers type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ integer :: i character(MAX_PATH) :: filename From 50662979f95b611dcd83a1d17cd67ff7c086f9bf Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 16:42:36 +0100 Subject: [PATCH 688/909] clean up src/ice_EVP.F90 --- src/ice_EVP.F90 | 253 +++++++----------------------------------------- 1 file changed, 33 insertions(+), 220 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 4297764f2..6b2e65938 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -5,10 +5,10 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - type(t_ice), intent(inout), target :: ice + type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP) , intent(in) :: ice_strength(partit%mydim_elem2D) end subroutine subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) @@ -16,10 +16,10 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - type(t_ice), intent(inout), target :: ice + type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP) , intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) end subroutine end interface end module @@ -31,9 +31,9 @@ subroutine EVPdynamics(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - type(t_ice), intent(inout), target :: ice + type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh end subroutine end interface end module @@ -57,8 +57,8 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) #endif implicit none type(t_partit), intent(inout), target :: partit - type(t_ice), intent(inout), target :: ice - type(t_mesh), intent(in), target :: mesh + type(t_ice) , intent(inout), target :: ice + type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) real(kind=WP) :: eta, xi, delta, aa @@ -75,14 +75,14 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) - eps11 => ice%work%eps11(:) - eps12 => ice%work%eps12(:) - eps22 => ice%work%eps22(:) - sigma11 => ice%work%sigma11(:) - sigma12 => ice%work%sigma12(:) - sigma22 => ice%work%sigma22(:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) !___________________________________________________________________________ vale = 1.0_WP/(ice%ellipse**2) @@ -162,193 +162,6 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) endif end do end subroutine stress_tensor -! !=================================================================== -! subroutine stress_tensor_no1(ice_strength, partit, mesh) -! ! EVP rheology. The routine computes stress tensor components based on ice -! ! velocity field. They are stored as elemental arrays (sigma11, sigma22 and -! ! sigma12). The ocean velocity is at nodal locations. -! use o_param -! USE g_CONFIG -! USE MOD_MESH -! USE MOD_PARTIT -! USE MOD_PARSUP -! implicit none -! type(t_mesh), intent(in), target :: mesh -! type(t_partit), intent(inout), target :: partit -! real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) -! real(kind=WP) :: eta, xi, delta, aa -! integer :: el, elnodes(3) -! real(kind=WP) :: asum, msum, vale, dx(3), dy(3) -! real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte -! real(kind=WP) :: zeta, delta_inv, d1, d2 -! -! #include "associate_part_def.h" -! #include "associate_mesh_def.h" -! #include "associate_part_ass.h" -! #include "associate_mesh_ass.h" -! -! vale = 1.0_WP/(ice%ellipse**2) -! -! dte = ice%ice_dt/(1.0_WP*evp_rheol_steps) -! det1 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) -! det2 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) !*ellipse**2 -! -! -! do el=1,myDim_elem2D -! !__________________________________________________________________________ -! ! if element contains cavity node skip it -! if (ulevels(el) > 1) cycle -! ! ===== Check if there is ice on elem -! -! ! There is no ice in elem -! ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE -! if (ice_strength(el) > 0.) then -! ! ===== -! ! ===== Deformation rate tensor on element elem: -! !du/dx -! -! eps11(el) = sum(mesh%gradient_sca(1:3,el)*U_ice(mesh%elem2D_nodes(1:3,el))) & -! -mesh% metric_factor(el) * sum(V_ice(mesh%elem2D_nodes(1:3,el)))/3.0_WP -! -! eps22(el) = sum(mesh%gradient_sca(4:6, el)*V_ice(mesh%elem2D_nodes(1:3,el))) -! -! eps12(el) = 0.5_WP*(sum(mesh%gradient_sca(4:6,el)*U_ice(mesh%elem2D_nodes(1:3,el))) & -! + sum(mesh%gradient_sca(1:3,el)*V_ice(mesh%elem2D_nodes(1:3,el))) & -! + mesh%metric_factor(el) * sum(U_ice(mesh%elem2D_nodes(1:3,el)))/3.0_WP) -! ! ===== moduli: -! delta = sqrt((eps11(el)*eps11(el) + eps22(el)*eps22(el))*(1.0_WP+vale) + 4.0_WP*vale*eps12(el)*eps12(el) + & -! 2.0_WP*eps11(el)*eps22(el)*(1.0_WP-vale)) -! -! ! ======================================= -! ! ===== Here the EVP rheology piece starts -! ! ======================================= -! -! ! ===== viscosity zeta should exceed zeta_min -! ! (done via limiting delta from above) -! -! !if(delta>pressure/ice%zeta_min) delta=pressure/ice%zeta_min -! !It does not work properly by -! !creating response where ice_strength is small -! ! Uncomment and test if necessary -! -! ! ===== if delta is too small or zero, viscosity will too large (unlimited) -! ! (limit delta_inv) -! delta_inv = 1.0_WP/max(delta,ice%delta_min) -! -! !!PS delta_inv = delta/(delta+ice%delta_min) -! -! zeta = ice_strength(el)*delta_inv -! ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta -! ! is too large in some regions and CFL criterion is violated. -! ! The regularization below was introduced by Hunke, -! ! but seemingly is not used in the current CICE. -! ! Without it divergence and zeta can be noisy (but code -! ! remains stable), using it reduces viscosities too strongly. -! ! It is therefore commented -! -! !if (zeta>ice%clim_evp*voltriangle(el)) then -! !zeta=ice%clim_evp*voltriangle(el) -! !end if -! -! zeta = zeta*ice%Tevp_inv -! -! r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*ice%Tevp_inv -! r2 = zeta*(eps11(el)-eps22(el))*vale -! r3 = zeta*eps12(el)*vale -! -! si1 = det1*(sigma11(el) + sigma22(el) + dte*r1) -! si2 = det2*(sigma11(el) - sigma22(el) + dte*r2) -! -! sigma12(el) = det2*(sigma12(el)+dte*r3) -! sigma11(el) = 0.5_WP*(si1+si2) -! sigma22(el) = 0.5_WP*(si1-si2) -! endif -! end do -! end subroutine stress_tensor_no1 -!=================================================================== -! subroutine stress2rhs_e(partit, mesh) -! ! EVP implementation: -! ! Computes the divergence of stress tensor and puts the result into the -! ! rhs vectors. Velocity is at nodes. -! ! The divergence is computed in a cysly over edges. It is slower that the -! ! approach in stress2rhs_e inherited from FESOM -! USE o_PARAM -! USE i_therm_param -! use g_config, only: use_cavity -! USE MOD_MESH -! USE MOD_PARTIT -! USE MOD_PARSUP -! -! IMPLICIT NONE -! type(t_mesh), intent(in), target :: mesh -! type(t_partit), intent(inout), target :: partit -! INTEGER :: n, elem, ed, elnodes(3), el(2), ednodes(2) -! REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 -! #include "associate_part_def.h" -! #include "associate_mesh_def.h" -! #include "associate_part_ass.h" -! #include "associate_mesh_ass.h" -! -! DO n=1, myDim_nod2D -! U_rhs_ice(n)=0.0_WP -! V_rhs_ice(n)=0.0_WP -! END DO -! -! ! Stress divergence -! DO ed=1,myDim_edge2D -! ednodes=edges(:,ed) -! el=edge_tri(:,ed) -! if(myList_edge2D(ed)>edge2D_in) cycle -! -! ! stress boundary condition at ocean cavity boundary edge ==0 -! if (use_cavity) then -! if ( (ulevels(el(1))>1) .or. ( el(2)>0 .and. ulevels(el(2))>1) ) cycle -! end if -! -! ! elements on both sides -! uc = - sigma12(el(1))*edge_cross_dxdy(1,ed) + sigma11(el(1))*edge_cross_dxdy(2,ed) & -! + sigma12(el(2))*edge_cross_dxdy(3,ed) - sigma11(el(2))*edge_cross_dxdy(4,ed) -! -! vc = - sigma22(el(1))*edge_cross_dxdy(1,ed) + sigma12(el(1))*edge_cross_dxdy(2,ed) & -! + sigma22(el(2))*edge_cross_dxdy(3,ed) - sigma12(el(2))*edge_cross_dxdy(4,ed) -! -! U_rhs_ice(ednodes(1)) = U_rhs_ice(ednodes(1)) + uc -! U_rhs_ice(ednodes(2)) = U_rhs_ice(ednodes(2)) - uc -! V_rhs_ice(ednodes(1)) = V_rhs_ice(ednodes(1)) + vc -! V_rhs_ice(ednodes(2)) = V_rhs_ice(ednodes(2)) - vc -! END DO -! -! DO n=1, myDim_nod2D -! !___________________________________________________________________________ -! ! if cavity node skip it -! if ( ulevels_nod2d(n) > 1 ) cycle -! -! !___________________________________________________________________________ -! mass = area(1,n)*(rhoice*m_ice(n)+rhosno*m_snow(n)) -! if(mass > 1.e-3_WP) then -! U_rhs_ice(n) = U_rhs_ice(n) / mass -! V_rhs_ice(n) = V_rhs_ice(n) / mass -! else -! U_rhs_ice(n)=0.0_WP -! V_rhs_ice(n)=0.0_WP -! end if -! END DO -! ! -! ! elevation gradient contribution -! ! -! do elem=1,myDim_elem2D -! !__________________________________________________________________________ -! ! if element contains cavity node skip it -! if (ulevels(elem) > 1) cycle -! -! !__________________________________________________________________________ -! elnodes=elem2D_nodes(:,elem) -! uc=elem_area(elem)*g*sum(gradient_sca(1:3,elem)*elevation(elnodes))/3.0_WP -! vc=elem_area(elem)*g*sum(gradient_sca(4:6,elem)*elevation(elnodes))/3.0_WP -! U_rhs_ice(elnodes)=U_rhs_ice(elnodes) - uc/area(1,elnodes) -! V_rhs_ice(elnodes)=V_rhs_ice(elnodes) - vc/area(1,elnodes) -! END DO -! end subroutine stress2rhs_e ! ! !_______________________________________________________________________________ @@ -363,9 +176,9 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) USE o_PARAM USE i_THERM_PARAM IMPLICIT NONE - type(t_ice), intent(inout), target :: ice + type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ REAL(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) INTEGER :: n, el, k @@ -378,13 +191,13 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - sigma11 => ice%work%sigma11(:) - sigma12 => ice%work%sigma12(:) - sigma22 => ice%work%sigma22(:) - u_rhs_ice => ice%uice_rhs(:) - v_rhs_ice => ice%vice_rhs(:) - rhs_a => ice%data(1)%values_rhs(:) - rhs_m => ice%data(2)%values_rhs(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) !___________________________________________________________________________ val3=1/3.0_WP @@ -454,9 +267,9 @@ subroutine EVPdynamics(ice, partit, mesh) use icedrv_main, only: icepack_to_fesom #endif IMPLICIT NONE - type(t_ice), intent(inout), target :: ice + type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ integer :: steps, shortstep real(kind=WP) :: rdt, asum, msum, r_a, r_b @@ -504,7 +317,7 @@ subroutine EVPdynamics(ice, partit, mesh) stress_atmice_x => ice%stress_atmice_x(:) stress_atmice_y => ice%stress_atmice_y(:) - !_______________________________________________________________________________ + !___________________________________________________________________________ ! If Icepack is used, always update the tracers #if defined (__icepack) a_ice_old(:) = a_ice(:) @@ -516,6 +329,7 @@ subroutine EVPdynamics(ice, partit, mesh) vsno_out=m_snow) #endif + !___________________________________________________________________________ rdt=ice%ice_dt/(1.0*ice%evp_rheol_steps) ax=cos(ice%theta_io) ay=sin(ice%theta_io) @@ -652,8 +466,7 @@ subroutine EVPdynamics(ice, partit, mesh) rhs_a(n) = rhs_a(n)/area(1,n) rhs_m(n) = rhs_m(n)/area(1,n) enddo - ! - + !___________________________________________________________________________ ! End of Precomputing --> And the ice stepping starts #if defined (__icepack) @@ -716,4 +529,4 @@ subroutine EVPdynamics(ice, partit, mesh) !_______________________________________________________________________ call exchange_nod(U_ice,V_ice,partit) END DO !--> do shortstep=1, ice%evp_rheol_steps -end subroutine EVPdynamics +end subroutine EVPdynamics \ No newline at end of file From d2d109ccc5001906df0537db41d709baebf87ef1 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 16:44:52 +0100 Subject: [PATCH 689/909] fix issue in src/ice_setup_step.F90 --- src/ice_setup_step.F90 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index e2c557f38..d3622829f 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -37,7 +37,7 @@ subroutine ice_timestep(istep, ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - integer intent(in) :: istep + integer , intent(in) :: istep type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh @@ -110,10 +110,10 @@ subroutine ice_timestep(step, ice, partit, mesh) use icedrv_main, only: step_icepack #endif implicit none - integer, intent(in) :: step - type(t_ice), intent(inout), target :: ice + integer , intent(in) :: step + type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ integer :: i REAL(kind=WP) :: t0,t1, t2, t3 From 6234b7acb9dc4541eba0dbc79596a22cca77cf2a Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 16:47:55 +0100 Subject: [PATCH 690/909] fix issue in src/ice_setup_step.F90 --- src/ice_setup_step.F90 | 1 - 1 file changed, 1 deletion(-) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index d3622829f..8d16d6b9c 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -56,7 +56,6 @@ subroutine ice_setup(ice, tracers, partit, mesh) USE MOD_MESH use o_param use g_CONFIG - use ice_array_setup_interface use ice_initial_state_interface use ice_fct_interfaces implicit none From 8f066e75d71e6ec6ea6ac88f046e01ddf2eb82f8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 17:26:13 +0100 Subject: [PATCH 691/909] clean up src/ice_oce_coupling.F90 --- src/ice_oce_coupling.F90 | 49 ++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 34ac3ae52..6c0111f81 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -1,12 +1,12 @@ module ocean2ice_interface interface subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) - use mod_mesh + USE MOD_ICE + USE MOD_DYN + USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - USE MOD_TRACER - USE MOD_DYN - USE MOD_ICE + USE MOD_MESH type(t_ice) , intent(inout), target :: ice type(t_dyn) , intent(in) , target :: dynamics type(t_tracer), intent(inout), target :: tracers @@ -49,9 +49,8 @@ subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) ! ! !_______________________________________________________________________________ +! transmits the relevant fields from the ice to the ocean model subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) - ! transmits the relevant fields from the ice to the ocean model - ! USE MOD_ICE USE MOD_DYN USE MOD_PARTIT @@ -97,7 +96,9 @@ subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) call icepack_to_fesom(nx_in=(myDim_nod2D+eDim_nod2D), & aice_out=a_ice) #endif - + + !___________________________________________________________________________ + ! compute total surface stress (iceoce+atmoce) on nodes do n=1,myDim_nod2D+eDim_nod2D !_______________________________________________________________________ ! if cavity node skip it @@ -113,12 +114,12 @@ subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) stress_iceoce_y(n)=0.0_WP end if - ! total surface stress (iceoce+atmoce) on nodes stress_node_surf(1,n) = stress_iceoce_x(n)*a_ice(n) + stress_atmoce_x(n)*(1.0_WP-a_ice(n)) stress_node_surf(2,n) = stress_iceoce_y(n)*a_ice(n) + stress_atmoce_y(n)*(1.0_WP-a_ice(n)) end do !___________________________________________________________________________ + ! compute total surface stress (iceoce+atmoce) on elements DO elem=1,myDim_elem2D !_______________________________________________________________________ ! if cavity element skip it @@ -141,18 +142,16 @@ end subroutine oce_fluxes_mom ! ! !_______________________________________________________________________________ +! transmits the relevant fields from the ocean to the ice model subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) - - ! transmits the relevant fields from the ocean to the ice model - - use o_PARAM - use MOD_MESH - use MOD_DYN - use MOD_ICE - use MOD_TRACER + USE MOD_ICE + USE MOD_DYN + USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - USE g_CONFIG + USE MOD_MESH + use o_PARAM + use g_CONFIG use g_comm_auto implicit none type(t_ice) , intent(inout), target :: ice @@ -178,6 +177,7 @@ subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) S_oc_array => ice%srfoce_salt(:) elevation => ice%srfoce_ssh(:) + !___________________________________________________________________________ ! the arrays in the ice model are renamed if (ice%ice_update) then do n=1, myDim_nod2d+eDim_nod2d @@ -192,11 +192,13 @@ subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) T_oc_array(n) = (T_oc_array(n)*real(ice%ice_steps_since_upd,WP)+temp(1,n))/real(ice%ice_steps_since_upd+1,WP) S_oc_array(n) = (S_oc_array(n)*real(ice%ice_steps_since_upd,WP)+salt(1,n))/real(ice%ice_steps_since_upd+1,WP) elevation(n) = (elevation(n) *real(ice%ice_steps_since_upd,WP)+ hbar(n))/real(ice%ice_steps_since_upd+1,WP) - !NR elevation(n) = (elevation(n) *real(ice%ice_steps_since_upd,WP)+ eta_n(n))/real(ice%ice_steps_since_upd+1,WP) - !NR elevation(n) = (elevation(n) *real(ice%ice_steps_since_upd,WP)+ hbar(n))/real(ice%ice_steps_since_upd+1,WP) + !NR elevation(n) = (elevation(n) *real(ice%ice_steps_since_upd,WP)+ eta_n(n))/real(ice%ice_steps_since_upd+1,WP) + !NR elevation(n) = (elevation(n) *real(ice%ice_steps_since_upd,WP)+ hbar(n))/real(ice%ice_steps_since_upd+1,WP) end do end if + !___________________________________________________________________________ + ! surface ocean velocity at nodes u_w = 0.0_WP v_w = 0.0_WP do n=1, myDim_nod2d @@ -233,12 +235,12 @@ end subroutine ocean2ice !_______________________________________________________________________________ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) USE MOD_ICE - use MOD_DYN - use MOD_TRACER + USE MOD_DYN + USE MOD_TRACER USE MOD_PARTIT USE MOD_PARSUP - use MOD_MESH - USE g_CONFIG + USE MOD_MESH + use g_CONFIG use o_ARRAYS use g_comm_auto use g_forcing_param, only: use_virt_salt @@ -267,7 +269,6 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) real(kind=WP), dimension(:) , pointer :: a_ice_old real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux -! real(kind=WP), dimension(:) , pointer :: net_heat_flux #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" From e0c9905d23be7f92d732475eb48152967dda56c6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 17:26:41 +0100 Subject: [PATCH 692/909] clean up src/ice_maEVP.F90 --- src/ice_maEVP.F90 | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 7a3737c73..78f770d97 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -761,17 +761,18 @@ subroutine find_alpha_field_a(ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - eps11 => ice%work%eps11(:) - eps12 => ice%work%eps12(:) - eps22 => ice%work%eps22(:) - sigma11 => ice%work%sigma11(:) - sigma12 => ice%work%sigma12(:) - sigma22 => ice%work%sigma22(:) - u_ice_aux => ice%uice_aux(:) - v_ice_aux => ice%vice_aux(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) alpha_evp_array => ice%alpha_evp_array(:) + !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ice%ellipse**2) @@ -873,11 +874,11 @@ subroutine stress_tensor_a(ice, partit, mesh) val3=1.0_WP/3.0_WP vale=1.0_WP/(ice%ellipse**2) do elem=1,myDim_elem2D - !__________________________________________________________________________ + !_______________________________________________________________________ ! if element has any cavity node skip it if (ulevels(elem) > 1) cycle - !__________________________________________________________________________ + !_______________________________________________________________________ det2=1.0_WP/(1.0_WP+alpha_evp_array(elem)) ! Take alpha from array det1=alpha_evp_array(elem)*det2 @@ -1013,7 +1014,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) call stress2rhs_m(ice, partit, mesh) ! _m=_a, so no _m version is the only one! do i=1,myDim_nod2D - !_______________________________________________________________________ + !___________________________________________________________________ ! if element has any cavity node skip it if (ulevels_nod2d(i)>1) cycle @@ -1038,17 +1039,17 @@ subroutine EVPdynamics_a(ice, partit, mesh) v_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsv-fc*rhsu) end do - !___________________________________________________________________________ + !_______________________________________________________________________ ! apply sea ice velocity boundary condition do ed=1,myDim_edge2D - !_______________________________________________________________________ + !___________________________________________________________________ ! apply coastal sea ice velocity boundary conditions if(myList_edge2D(ed) > edge2D_in) then u_ice_aux(edges(:,ed))=0.0_WP v_ice_aux(edges(:,ed))=0.0_WP end if - !_______________________________________________________________________ + !___________________________________________________________________ ! apply sea ice velocity boundary conditions at cavity-ocean edge if (use_cavity) then if ( (ulevels(edge_tri(1,ed))>1) .or. & From cbb623c31a7ff5cbc89d8511d2be58513d1dba1c Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 26 Nov 2021 17:45:55 +0100 Subject: [PATCH 693/909] clean up src/ice_fct.F90 --- src/ice_fct.F90 | 621 +++++++++++++++++++++--------------------------- 1 file changed, 266 insertions(+), 355 deletions(-) diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index ada6593ff..4a84a4298 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -72,20 +72,22 @@ subroutine ice_update_for_div(ice, partit, mesh) end subroutine end interface end module - -! +! +! +!_______________________________________________________________________________ ! This file collect subroutines implementing FE-FCT ! advection scheme by Loehner et al. ! There is a tunable parameter ice_gamma_fct. ! Increasing it leads to positivity preserving solution. - +! ! Driving routine is fct_ice_solve. It calles other routines ! that do low-order and figh order solutions and then combine them in a flux ! corrected way. Taylor-Galerkin scheme is used as a high-order one. - +! ! The code is adapted from FESOM ! -! ===================================================================== +! +!_______________________________________________________________________________ subroutine ice_TG_rhs(ice, partit, mesh) use MOD_MESH USE MOD_PARTIT @@ -173,62 +175,9 @@ subroutine ice_TG_rhs(ice, partit, mesh) END DO end do end subroutine ice_TG_rhs -! ! -! !---------------------------------------------------------------------------- -! ! -! subroutine ice_fct_init(ice, partit, mesh) -! USE MOD_ICE -! USE MOD_PARTIT -! USE MOD_PARSUP -! use MOD_MESH -! use o_PARAM -! use ice_fct_interfaces -! implicit none -! integer :: n_size -! type(t_ice), intent(inout), target :: ice -! type(t_partit), intent(inout), target :: partit -! type(t_mesh), intent(in), target :: mesh -! !_____________________________________________________________________________ -! ! pointer on necessary derived types -! #include "associate_part_def.h" -! #include "associate_mesh_def.h" -! #include "associate_part_ass.h" -! #include "associate_mesh_ass.h" -! -! n_size=myDim_nod2D+eDim_nod2D -! -! ! Initialization of arrays necessary to implement FCT algorithm -! ! allocate(m_icel(n_size), a_icel(n_size), m_snowl(n_size)) ! low-order solutions -! ! m_icel=0.0_WP -! ! a_icel=0.0_WP -! ! m_snowl=0.0_WP -! #if defined (__oifs) || defined (__ifsinterface) -! ! allocate(m_templ(n_size)) -! ! allocate(dm_temp(n_size)) -! #endif /* (__oifs) */ -! ! allocate(icefluxes(myDim_elem2D,3)) -! ! allocate(icepplus(n_size), icepminus(n_size)) -! ! icefluxes = 0.0_WP -! ! icepplus = 0.0_WP -! ! icepminus= 0.0_WP -! -! #if defined (__oifs) || defined (__ifsinterface) -! ! m_templ=0.0_WP -! ! dm_temp=0.0_WP -! #endif /* (__oifs) */ -! -! ! allocate(dm_ice(n_size), da_ice(n_size), dm_snow(n_size)) ! increments of high -! ! dm_ice = 0.0_WP ! order solutions -! ! da_ice = 0.0_WP -! ! dm_snow = 0.0_WP -! -! ! Fill in the mass matrix -! call ice_mass_matrix_fill(ice, partit, mesh) -! if (mype==0) write(*,*) 'Ice FCT is initialized' -! end subroutine ice_fct_init ! -!---------------------------------------------------------------------------- ! +!_______________________________________________________________________________ subroutine ice_fct_solve(ice, partit, mesh) USE MOD_ICE USE MOD_PARTIT @@ -340,135 +289,128 @@ subroutine ice_solve_low_order(ice, partit, mesh) ! Low-order solution must be known to neighbours call exchange_nod(m_icel,a_icel,m_snowl, partit) - #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(m_templ, partit) #endif /* (__oifs) */ - - end subroutine ice_solve_low_order ! ! !_______________________________________________________________________________ subroutine ice_solve_high_order(ice, partit, mesh) - USE MOD_ICE - USE MOD_TRACER - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_MESH - use o_PARAM - use g_comm_auto - implicit none - type(t_ice), intent(inout), target :: ice - type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh - !_____________________________________________________________________________ - integer :: n,i,clo,clo2,cn,location(100),row - real(kind=WP) :: rhs_new - integer :: num_iter_solve=3 - !_____________________________________________________________________________ - ! pointer on necessary derived types - real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms - real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl - real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow - real(kind=WP), dimension(:), pointer :: mass_matrix + USE MOD_ICE + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_PARAM + use g_comm_auto + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n,i,clo,clo2,cn,location(100),row + real(kind=WP) :: rhs_new + integer :: num_iter_solve=3 + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms + real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl + real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow + real(kind=WP), dimension(:), pointer :: mass_matrix #if defined (__oifs) || defined (__ifsinterface) - real(kind=WP), dimension(:), pointer :: rhs_temp, m_templ, dm_temp + real(kind=WP), dimension(:), pointer :: rhs_temp, m_templ, dm_temp #endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - rhs_a => ice%data(1)%values_rhs(:) - rhs_m => ice%data(2)%values_rhs(:) - rhs_ms => ice%data(3)%values_rhs(:) - a_icel => ice%data(1)%valuesl(:) - m_icel => ice%data(2)%valuesl(:) - m_snowl => ice%data(3)%valuesl(:) - da_ice => ice%data(1)%dvalues(:) - dm_ice => ice%data(2)%dvalues(:) - dm_snow => ice%data(3)%dvalues(:) - mass_matrix => ice%work%fct_massmatrix(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) + a_icel => ice%data(1)%valuesl(:) + m_icel => ice%data(2)%valuesl(:) + m_snowl => ice%data(3)%valuesl(:) + da_ice => ice%data(1)%dvalues(:) + dm_ice => ice%data(2)%dvalues(:) + dm_snow => ice%data(3)%dvalues(:) + mass_matrix => ice%work%fct_massmatrix(:) #if defined (__oifs) || defined (__ifsinterface) - rhs_temp => ice%data(4)%values_rhs(:) - m_templ => ice%data(4)%valuesl(:) - dm_temp => ice%data(4)%dvalues(:) + rhs_temp => ice%data(4)%values_rhs(:) + m_templ => ice%data(4)%valuesl(:) + dm_temp => ice%data(4)%dvalues(:) #endif - !_____________________________________________________________________________ - ! Does Taylor-Galerkin solution - ! - !the first approximation - do row=1,myDim_nod2D - !___________________________________________________________________________ - ! if cavity node skip it - if (ulevels_nod2d(row)>1) cycle - - dm_ice(row)=rhs_m(row)/area(1,row) - da_ice(row)=rhs_a(row)/area(1,row) - dm_snow(row)=rhs_ms(row)/area(1,row) + !___________________________________________________________________________ + ! Does Taylor-Galerkin solution + ! + !the first approximation + do row=1,myDim_nod2D + ! if cavity node skip it + if (ulevels_nod2d(row)>1) cycle + + dm_ice(row)=rhs_m(row)/area(1,row) + da_ice(row)=rhs_a(row)/area(1,row) + dm_snow(row)=rhs_ms(row)/area(1,row) #if defined (__oifs) || defined (__ifsinterface) - dm_temp(row)=rhs_temp(row)/area(1,row) + dm_temp(row)=rhs_temp(row)/area(1,row) #endif /* (__oifs) */ - end do - - call exchange_nod(dm_ice, da_ice, dm_snow, partit) + end do + call exchange_nod(dm_ice, da_ice, dm_snow, partit) #if defined (__oifs) || defined (__ifsinterface) - call exchange_nod(dm_temp, partit) + call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ - !iterate - do n=1,num_iter_solve-1 - do row=1,myDim_nod2D - !___________________________________________________________________________ - ! if cavity node skip it - if (ulevels_nod2d(row)>1) cycle - - clo=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1)+1 - clo2=ssh_stiff%rowptr(row+1)-ssh_stiff%rowptr(1) - cn=clo2-clo+1 - location(1:cn)=nn_pos(1:cn,row) - rhs_new=rhs_m(row) - sum(mass_matrix(clo:clo2)*dm_ice(location(1:cn))) - m_icel(row)=dm_ice(row)+rhs_new/area(1,row) - rhs_new=rhs_a(row) - sum(mass_matrix(clo:clo2)*da_ice(location(1:cn))) - a_icel(row)=da_ice(row)+rhs_new/area(1,row) - rhs_new=rhs_ms(row) - sum(mass_matrix(clo:clo2)*dm_snow(location(1:cn))) - m_snowl(row)=dm_snow(row)+rhs_new/area(1,row) + !___________________________________________________________________________ + !iterate + do n=1,num_iter_solve-1 + do row=1,myDim_nod2D + ! if cavity node skip it + if (ulevels_nod2d(row)>1) cycle + !___________________________________________________________________ + clo = ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1)+1 + clo2 = ssh_stiff%rowptr(row+1)-ssh_stiff%rowptr(1) + cn = clo2-clo+1 + location(1:cn)=nn_pos(1:cn,row) + !___________________________________________________________________ + rhs_new = rhs_m(row) - sum(mass_matrix(clo:clo2)*dm_ice(location(1:cn))) + m_icel(row) = dm_ice(row)+rhs_new/area(1,row) + rhs_new = rhs_a(row) - sum(mass_matrix(clo:clo2)*da_ice(location(1:cn))) + a_icel(row) = da_ice(row)+rhs_new/area(1,row) + rhs_new = rhs_ms(row) - sum(mass_matrix(clo:clo2)*dm_snow(location(1:cn))) + m_snowl(row)= dm_snow(row)+rhs_new/area(1,row) #if defined (__oifs) || defined (__ifsinterface) - rhs_new=rhs_temp(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) - m_templ(row)=dm_temp(row)+rhs_new/area(1,row) + rhs_new = rhs_temp(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) + m_templ(row)= dm_temp(row)+rhs_new/area(1,row) #endif /* (__oifs) */ - end do - do row=1,myDim_nod2D + end do !_______________________________________________________________________ - ! if cavity node skip it - if (ulevels_nod2d(row)>1) cycle - - dm_ice(row)=m_icel(row) - da_ice(row)=a_icel(row) - dm_snow(row)=m_snowl(row) + do row=1,myDim_nod2D + ! if cavity node skip it + if (ulevels_nod2d(row)>1) cycle + dm_ice(row)=m_icel(row) + da_ice(row)=a_icel(row) + dm_snow(row)=m_snowl(row) #if defined (__oifs) || defined (__ifsinterface) - dm_temp(row)=m_templ(row) + dm_temp(row)=m_templ(row) #endif /* (__oifs) */ - end do - call exchange_nod(dm_ice, da_ice, dm_snow, partit) - + end do + + !_______________________________________________________________________ + call exchange_nod(dm_ice, da_ice, dm_snow, partit) #if defined (__oifs) || defined (__ifsinterface) - call exchange_nod(dm_temp, partit) + call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ - - end do + end do end subroutine ice_solve_high_order ! ! !_______________________________________________________________________________ +! Flux corrected transport algorithm for tracer advection +! It is based on Loehner et al. (Finite-element flux-corrected +! transport (FEM-FCT) for the Euler and Navier-Stokes equation, +! Int. J. Numer. Meth. Fluids, 7 (1987), 1093--1109) as described by Kuzmin and +! Turek. (kuzmin@math.uni-dortmund.de) subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) - ! Flux corrected transport algorithm for tracer advection - ! - ! It is based on Loehner et al. (Finite-element flux-corrected - ! transport (FEM-FCT) for the Euler and Navier-Stokes equation, - ! Int. J. Numer. Meth. Fluids, 7 (1987), 1093--1109) as described by Kuzmin and - ! Turek. (kuzmin@math.uni-dortmund.de) - ! USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -476,14 +418,13 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) use o_PARAM use g_comm_auto implicit none - type(t_ice), intent(inout), target :: ice + type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit - type(t_mesh), intent(in), target :: mesh + type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - integer :: tr_array_id - integer :: icoef(3,3),n,q, elem,elnodes(3),row -! real(kind=WP), allocatable, dimension(:) :: tmax, tmin - real(kind=WP) :: vol, flux, ae, gamma + integer :: tr_array_id + integer :: icoef(3,3),n,q, elem,elnodes(3),row + real(kind=WP) :: vol, flux, ae, gamma !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow @@ -492,44 +433,41 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: icepplus, icepminus, tmax, tmin real(kind=WP), dimension(:,:), pointer :: icefluxes #if defined (__oifs) || defined (__ifsinterface) - real(kind=WP), dimension(:), pointer :: ice_temp, m_templ, dm_temp + real(kind=WP), dimension(:) , pointer :: ice_temp, m_templ, dm_temp #endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - a_icel => ice%data(1)%valuesl(:) - m_icel => ice%data(2)%valuesl(:) - m_snowl => ice%data(3)%valuesl(:) - da_ice => ice%data(1)%dvalues(:) - dm_ice => ice%data(2)%dvalues(:) - dm_snow => ice%data(3)%dvalues(:) - icefluxes => ice%work%fct_fluxes(:,:) - icepplus => ice%work%fct_plus(:) - icepminus => ice%work%fct_minus(:) - tmax => ice%work%fct_tmax(:) - tmin => ice%work%fct_tmin(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + a_icel => ice%data(1)%valuesl(:) + m_icel => ice%data(2)%valuesl(:) + m_snowl => ice%data(3)%valuesl(:) + da_ice => ice%data(1)%dvalues(:) + dm_ice => ice%data(2)%dvalues(:) + dm_snow => ice%data(3)%dvalues(:) + icefluxes => ice%work%fct_fluxes(:,:) + icepplus => ice%work%fct_plus(:) + icepminus => ice%work%fct_minus(:) + tmax => ice%work%fct_tmax(:) + tmin => ice%work%fct_tmin(:) #if defined (__oifs) || defined (__ifsinterface) - ice_temp => ice%data(4)%values(:) - m_templ => ice%data(4)%valuesl(:) - dm_temp => ice%data(4)%dvalues(:) + ice_temp => ice%data(4)%values(:) + m_templ => ice%data(4)%valuesl(:) + dm_temp => ice%data(4)%dvalues(:) #endif !___________________________________________________________________________ - gamma=ice%ice_gamma_fct ! It should coinside with gamma in - ! ts_solve_low_order + ! It should coinside with gamma in ts_solve_low_order + gamma=ice%ice_gamma_fct - !========================== + !___________________________________________________________________________ ! Compute elemental antidiffusive fluxes to nodes - !========================== ! This is the most unpleasant part --- ! it takes memory and time. For every element ! we need its antidiffusive contribution to ! each of its 3 nodes - -! allocate(tmax(myDim_nod2D), tmin(myDim_nod2D)) tmax = 0.0_WP tmin = 0.0_WP @@ -581,24 +519,22 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) #endif /* (__oifs) */ end do - !========================== + !___________________________________________________________________________ ! Screening the low-order solution - !========================== ! TO BE ADDED IF FOUND NECESSARY ! Screening means comparing low-order solutions with the ! solution on the previous time step and using whichever ! is greater/smaller in computations of max/min below - !========================== + !___________________________________________________________________________ ! Cluster min/max - !========================== if (tr_array_id==1) then do row=1, myDim_nod2D if (ulevels_nod2d(row)>1) cycle n=nn_num(row) tmax(row)=maxval(m_icel(nn_pos(1:n,row))) tmin(row)=minval(m_icel(nn_pos(1:n,row))) - ! Admissible increments + ! Admissible increments tmax(row)=tmax(row)-m_icel(row) tmin(row)=tmin(row)-m_icel(row) end do @@ -610,7 +546,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) n=nn_num(row) tmax(row)=maxval(a_icel(nn_pos(1:n,row))) tmin(row)=minval(a_icel(nn_pos(1:n,row))) - ! Admissible increments + ! Admissible increments tmax(row)=tmax(row)-a_icel(row) tmin(row)=tmin(row)-a_icel(row) end do @@ -622,7 +558,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) n=nn_num(row) tmax(row)=maxval(m_snowl(nn_pos(1:n,row))) tmin(row)=minval(m_snowl(nn_pos(1:n,row))) - ! Admissible increments + ! Admissible increments tmax(row)=tmax(row)-m_snowl(row) tmin(row)=tmin(row)-m_snowl(row) end do @@ -642,20 +578,16 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) end if #endif /* (__oifs) */ - !========================= + !___________________________________________________________________________ ! Sums of positive/negative fluxes to node row - !========================= icepplus=0._WP icepminus=0._WP do elem=1, myDim_elem2D - !_______________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - - !_______________________________________________________________________ ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ + elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) flux=icefluxes(elem,q) @@ -667,11 +599,9 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) end do end do - !======================== + !___________________________________________________________________________ ! The least upper bound for the correction factors - !======================== do n=1,myDim_nod2D - !_______________________________________________________________________ ! if cavity cycle over if(ulevels_nod2D(n)>1) cycle !LK89140 @@ -692,18 +622,14 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) ! pminus and pplus are to be known to neighbouting PE call exchange_nod(icepminus, icepplus, partit) - !======================== + !___________________________________________________________________________ ! Limiting - !======================== do elem=1, myDim_elem2D - !_______________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - - !_______________________________________________________________________ ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ + elnodes=elem2D_nodes(:,elem) ae=1.0_WP do q=1,3 n=elnodes(q) @@ -714,21 +640,18 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) icefluxes(elem,:)=ae*icefluxes(elem,:) end do - !========================== + !___________________________________________________________________________ ! Update the solution - !========================== if(tr_array_id==1) then do n=1,myDim_nod2D if(ulevels_nod2D(n)>1) cycle !LK89140 m_ice(n)=m_icel(n) end do do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - - !___________________________________________________________________ ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 + elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) m_ice(n)=m_ice(n)+icefluxes(elem,q) @@ -742,12 +665,10 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) a_ice(n)=a_icel(n) end do do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - - !___________________________________________________________________ ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 + elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) a_ice(n)=a_ice(n)+icefluxes(elem,q) @@ -761,12 +682,10 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) m_snow(n)=m_snowl(n) end do do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - - !___________________________________________________________________ ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 + elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) m_snow(n)=m_snow(n)+icefluxes(elem,q) @@ -781,60 +700,52 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) ice_temp(n)=m_templ(n) end do do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - !___________________________________________________________________ ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 + elnodes=elem2D_nodes(:,elem) do q=1,3 - n=elnodes(q) - ice_temp(n)=ice_temp(n)+icefluxes(elem,q) + n=elnodes(q) + ice_temp(n)=ice_temp(n)+icefluxes(elem,q) end do end do end if #endif /* (__oifs) */ || defined (__ifsinterface) call exchange_nod(m_ice, a_ice, m_snow, partit) - #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(ice_temp, partit) -#endif /* (__oifs) */ - -! deallocate(tmin, tmax) +#endif /* (__oifs) */ end subroutine ice_fem_fct ! ! !_______________________________________________________________________________ -SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) ! Used in ice_fct inherited from FESOM - use MOD_MESH - USE MOD_PARTIT - USE MOD_PARSUP - use MOD_TRACER - use MOD_ICE - ! - implicit none - integer :: n, n1, n2, row - - integer :: elem, elnodes(3), q, offset, col, ipos - integer, allocatable :: col_pos(:) - real(kind=WP) :: aa - integer :: flag=0,iflag=0 - type(t_ice) , intent(inout), target :: ice - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(in) , target :: mesh - !_____________________________________________________________________________ - ! pointer on necessary derived types - real(kind=WP), dimension(:), pointer :: mass_matrix +SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, n1, n2, row + integer :: elem, elnodes(3), q, offset, col, ipos + integer, allocatable :: col_pos(:) + real(kind=WP) :: aa + integer :: flag=0,iflag=0 + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: mass_matrix #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - mass_matrix => ice%work%fct_massmatrix(:) + mass_matrix => ice%work%fct_massmatrix(:) ! ! a) -! allocate(mass_matrix(sum(nn_num(1:myDim_nod2D)))) -! mass_matrix =0.0_WP allocate(col_pos(myDim_nod2D+eDim_nod2D)) DO elem=1,myDim_elem2D @@ -867,7 +778,6 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) ! TEST: area==sum of row entries in mass_matrix: DO q=1,myDim_nod2D - !___________________________________________________________________ ! if cavity cycle over if(ulevels_nod2d(q)>1) cycle @@ -890,13 +800,13 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) deallocate(col_pos) END SUBROUTINE ice_mass_matrix_fill ! -!========================================================= ! +!_______________________________________________________________________________ subroutine ice_TG_rhs_div(ice, partit, mesh) - use MOD_MESH + USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP - USE MOD_ICE + USE MOD_MESH use o_PARAM USE g_CONFIG implicit none @@ -920,107 +830,109 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - rhs_a => ice%data(1)%values_rhs(:) - rhs_m => ice%data(2)%values_rhs(:) - rhs_ms => ice%data(3)%values_rhs(:) - rhs_adiv => ice%data(1)%values_div_rhs(:) - rhs_mdiv => ice%data(2)%values_div_rhs(:) - rhs_msdiv => ice%data(3)%values_div_rhs(:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) + rhs_adiv => ice%data(1)%values_div_rhs(:) + rhs_mdiv => ice%data(2)%values_div_rhs(:) + rhs_msdiv => ice%data(3)%values_div_rhs(:) #if defined (__oifs) || defined (__ifsinterface) - ice_temp => ice%data(4)%values(:) - rhs_temp => ice%data(4)%values_rhs(:) - rhs_tempdiv => ice%data(4)%values_div_rhs(:) + ice_temp => ice%data(4)%values(:) + rhs_temp => ice%data(4)%values_rhs(:) + rhs_tempdiv => ice%data(4)%values_div_rhs(:) #endif !___________________________________________________________________________ - ! Computes the rhs in a Taylor-Galerkin way (with upwind type of - ! correction for the advection operator) - ! In this version I tr to split divergent term off, so that FCT works without it. - - DO row=1, myDim_nod2D - !! row=myList_nod2D(m) - rhs_m(row)=0.0_WP - rhs_a(row)=0.0_WP - rhs_ms(row)=0.0_WP + ! Computes the rhs in a Taylor-Galerkin way (with upwind type of + ! correction for the advection operator) + ! In this version I tr to split divergent term off, so that FCT works without it. + do row=1, myDim_nod2D + !! row=myList_nod2D(m) + rhs_m(row)=0.0_WP + rhs_a(row)=0.0_WP + rhs_ms(row)=0.0_WP #if defined (__oifs) || defined (__ifsinterface) - rhs_temp(row)=0.0_WP + rhs_temp(row)=0.0_WP #endif /* (__oifs) */ - rhs_mdiv(row)=0.0_WP - rhs_adiv(row)=0.0_WP - rhs_msdiv(row)=0.0_WP + rhs_mdiv(row)=0.0_WP + rhs_adiv(row)=0.0_WP + rhs_msdiv(row)=0.0_WP #if defined (__oifs) || defined (__ifsinterface) - rhs_tempdiv(row)=0.0_WP + rhs_tempdiv(row)=0.0_WP #endif /* (__oifs) */ - END DO - do elem=1,myDim_elem2D !assembling rhs over elements - elnodes=elem2D_nodes(:,elem) - !___________________________________________________________________________ - ! if cavity element skip it - if (ulevels(elem)>1) cycle - - !derivatives - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - vol=elem_area(elem) - um=sum(u_ice(elnodes)) - vm=sum(v_ice(elnodes)) - ! this is exact computation (no assumption of u=const on elements used - ! in the standard version) - c1=(um*um+sum(u_ice(elnodes)*u_ice(elnodes)))/12.0_WP - c2=(vm*vm+sum(v_ice(elnodes)*v_ice(elnodes)))/12.0_WP - c3=(um*vm+sum(v_ice(elnodes)*u_ice(elnodes)))/12.0_WP - c4=sum(dx*u_ice(elnodes)+dy*v_ice(elnodes)) - DO n=1,3 - row=elnodes(n) -!!PS if(ulevels_nod2D(row)>1) cycle !LK89140 - DO q = 1,3 - entries(q)= vol*ice%ice_dt*((1.0_WP-0.5_WP*ice%ice_dt*c4)*(dx(n)*(um+u_ice(elnodes(q)))+ & - dy(n)*(vm+v_ice(elnodes(q))))/12.0_WP - & - 0.5_WP*ice%ice_dt*(c1*dx(n)*dx(q)+c2*dy(n)*dy(q)+c3*(dx(n)*dy(q)+dx(q)*dy(n)))) - !um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0) - entries2(q)=0.5_WP*ice%ice_dt*(dx(n)*(um+u_ice(elnodes(q)))+ & - dy(n)*(vm+v_ice(elnodes(q)))-dx(q)*(um+u_ice(row))- & - dy(q)*(vm+v_ice(row))) - END DO - cx1=vol*ice%ice_dt*c4*(sum(m_ice(elnodes))+m_ice(elnodes(n))+sum(entries2*m_ice(elnodes)))/12.0_WP - cx2=vol*ice%ice_dt*c4*(sum(a_ice(elnodes))+a_ice(elnodes(n))+sum(entries2*a_ice(elnodes)))/12.0_WP - cx3=vol*ice%ice_dt*c4*(sum(m_snow(elnodes))+m_snow(elnodes(n))+sum(entries2*m_snow(elnodes)))/12.0_WP + end do + + do elem=1,myDim_elem2D !assembling rhs over elements + elnodes=elem2D_nodes(:,elem) + + ! if cavity element skip it + if (ulevels(elem)>1) cycle + + !derivatives + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + vol=elem_area(elem) + um=sum(u_ice(elnodes)) + vm=sum(v_ice(elnodes)) + ! this is exact computation (no assumption of u=const on elements used + ! in the standard version) + c1=(um*um+sum(u_ice(elnodes)*u_ice(elnodes)))/12.0_WP + c2=(vm*vm+sum(v_ice(elnodes)*v_ice(elnodes)))/12.0_WP + c3=(um*vm+sum(v_ice(elnodes)*u_ice(elnodes)))/12.0_WP + c4=sum(dx*u_ice(elnodes)+dy*v_ice(elnodes)) + do n=1,3 + row=elnodes(n) + !!PS if(ulevels_nod2D(row)>1) cycle !LK89140 + do q = 1,3 + entries(q)= vol*ice%ice_dt*((1.0_WP-0.5_WP*ice%ice_dt*c4)*(dx(n)*(um+u_ice(elnodes(q)))+ & + dy(n)*(vm+v_ice(elnodes(q))))/12.0_WP - & + 0.5_WP*ice%ice_dt*(c1*dx(n)*dx(q)+c2*dy(n)*dy(q)+c3*(dx(n)*dy(q)+dx(q)*dy(n)))) + !um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0) + entries2(q)=0.5_WP*ice%ice_dt*(dx(n)*(um+u_ice(elnodes(q)))+ & + dy(n)*(vm+v_ice(elnodes(q)))-dx(q)*(um+u_ice(row))- & + dy(q)*(vm+v_ice(row))) + end do + + !___________________________________________________________________ + cx1=vol*ice%ice_dt*c4*(sum(m_ice(elnodes))+m_ice(elnodes(n))+sum(entries2*m_ice(elnodes)))/12.0_WP + cx2=vol*ice%ice_dt*c4*(sum(a_ice(elnodes))+a_ice(elnodes(n))+sum(entries2*a_ice(elnodes)))/12.0_WP + cx3=vol*ice%ice_dt*c4*(sum(m_snow(elnodes))+m_snow(elnodes(n))+sum(entries2*m_snow(elnodes)))/12.0_WP #if defined (__oifs) || defined (__ifsinterface) - cx4=vol*ice%ice_dt*c4*(sum(ice_temp(elnodes))+ice_temp(elnodes(n))+sum(entries2*ice_temp(elnodes)))/12.0_WP + cx4=vol*ice%ice_dt*c4*(sum(ice_temp(elnodes))+ice_temp(elnodes(n))+sum(entries2*ice_temp(elnodes)))/12.0_WP #endif /* (__oifs) */ - rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes))+cx1 - rhs_a(row)=rhs_a(row)+sum(entries*a_ice(elnodes))+cx2 - rhs_ms(row)=rhs_ms(row)+sum(entries*m_snow(elnodes))+cx3 + !___________________________________________________________________ + rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes))+cx1 + rhs_a(row)=rhs_a(row)+sum(entries*a_ice(elnodes))+cx2 + rhs_ms(row)=rhs_ms(row)+sum(entries*m_snow(elnodes))+cx3 #if defined (__oifs) || defined (__ifsinterface) - rhs_temp(row)=rhs_temp(row)+sum(entries*ice_temp(elnodes))+cx4 + rhs_temp(row)=rhs_temp(row)+sum(entries*ice_temp(elnodes))+cx4 #endif /* (__oifs) */ - rhs_mdiv(row)=rhs_mdiv(row)-cx1 - rhs_adiv(row)=rhs_adiv(row)-cx2 - rhs_msdiv(row)=rhs_msdiv(row)-cx3 + !___________________________________________________________________ + rhs_mdiv(row)=rhs_mdiv(row)-cx1 + rhs_adiv(row)=rhs_adiv(row)-cx2 + rhs_msdiv(row)=rhs_msdiv(row)-cx3 #if defined (__oifs) || defined (__ifsinterface) - rhs_tempdiv(row)=rhs_tempdiv(row)-cx4 + rhs_tempdiv(row)=rhs_tempdiv(row)-cx4 #endif /* (__oifs) */ - - END DO - end do + end do + end do end subroutine ice_TG_rhs_div ! ! !_______________________________________________________________________________ subroutine ice_update_for_div(ice, partit, mesh) - use MOD_MESH + use MOD_ICE USE MOD_PARTIT USE MOD_PARSUP - use MOD_TRACER - use MOD_ICE + USE MOD_MESH use o_PARAM - USE g_CONFIG + use g_CONFIG use g_comm_auto implicit none type(t_ice) , intent(inout), target :: ice @@ -1065,13 +977,12 @@ subroutine ice_update_for_div(ice, partit, mesh) #endif !___________________________________________________________________________ ! Does Taylor-Galerkin solution - ! - !the first approximation + ! the first approximation do row=1,myDim_nod2D - !! row=myList_nod2D(m) - !___________________________________________________________________________ + !! row=myList_nod2D(m) ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle + dm_ice(row) =rhs_mdiv(row) /area(1,row) da_ice(row) =rhs_adiv(row) /area(1,row) dm_snow(row)=rhs_msdiv(row)/area(1,row) @@ -1086,38 +997,39 @@ subroutine ice_update_for_div(ice, partit, mesh) call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ + !___________________________________________________________________________ !iterate do n=1,num_iter_solve-1 do row=1,myDim_nod2D - !___________________________________________________________________________ ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle !! row=myList_nod2D(m) + !___________________________________________________________________ clo=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1)+1 clo2=ssh_stiff%rowptr(row+1)-ssh_stiff%rowptr(1) cn=clo2-clo+1 location(1:cn)=nn_pos(1:cn, row) - rhs_new=rhs_mdiv(row) - sum(mass_matrix(clo:clo2)*dm_ice(location(1:cn))) - m_icel(row)=dm_ice(row)+rhs_new/area(1,row) - rhs_new=rhs_adiv(row) - sum(mass_matrix(clo:clo2)*da_ice(location(1:cn))) - a_icel(row)=da_ice(row)+rhs_new/area(1,row) - rhs_new=rhs_msdiv(row) - sum(mass_matrix(clo:clo2)*dm_snow(location(1:cn))) - m_snowl(row)=dm_snow(row)+rhs_new/area(1,row) + + !___________________________________________________________________ + rhs_new = rhs_mdiv(row) - sum(mass_matrix(clo:clo2)*dm_ice(location(1:cn))) + m_icel(row) = dm_ice(row)+rhs_new/area(1,row) + rhs_new = rhs_adiv(row) - sum(mass_matrix(clo:clo2)*da_ice(location(1:cn))) + a_icel(row) = da_ice(row)+rhs_new/area(1,row) + rhs_new = rhs_msdiv(row) - sum(mass_matrix(clo:clo2)*dm_snow(location(1:cn))) + m_snowl(row)= dm_snow(row)+rhs_new/area(1,row) #if defined (__oifs) || defined (__ifsinterface) - rhs_new=rhs_tempdiv(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) - m_templ(row)=dm_temp(row)+rhs_new/area(1,row) + rhs_new = rhs_tempdiv(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) + m_templ(row)= dm_temp(row)+rhs_new/area(1,row) #endif /* (__oifs) */ end do do row=1,myDim_nod2D - !___________________________________________________________________________ ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle - !! row=myList_nod2D(m) - dm_ice(row)=m_icel(row) - da_ice(row)=a_icel(row) - dm_snow(row)=m_snowl(row) + dm_ice(row) = m_icel(row) + da_ice(row) = a_icel(row) + dm_snow(row) = m_snowl(row) #if defined (__oifs) || defined (__ifsinterface) - dm_temp(row)=m_templ(row) + dm_temp(row) = m_templ(row) #endif /* (__oifs) */ end do call exchange_nod(dm_ice, partit) @@ -1127,12 +1039,11 @@ subroutine ice_update_for_div(ice, partit, mesh) call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ end do - m_ice=m_ice+dm_ice - a_ice=a_ice+da_ice - m_snow=m_snow+dm_snow + m_ice = m_ice+dm_ice + a_ice = a_ice+da_ice + m_snow = m_snow+dm_snow #if defined (__oifs) || defined (__ifsinterface) - ice_temp=ice_temp+dm_temp + ice_temp= ice_temp+dm_temp #endif /* (__oifs) */ - end subroutine ice_update_for_div ! ============================================================= From 645e7aa99f5ed5a3b472d75423d3685cf5bb6cd2 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sun, 28 Nov 2021 22:23:35 +0100 Subject: [PATCH 694/909] add ice_thermo parameters to ice derived type --> but not using them yet --- src/MOD_ICE.F90 | 67 ++++++++++++++++++++++++++++++++++++++------- src/ice_modules.F90 | 64 +------------------------------------------ 2 files changed, 58 insertions(+), 73 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 79d923d00..f2364da8b 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -51,6 +51,36 @@ MODULE MOD_ICE !___________________________________________________________________________ real(kind=WP), allocatable, dimension(:) :: t_skin, thdgr, thdgrsn, thdgr_old, ustar !___________________________________________________________________________ + real(kind=WP) :: rhoair=1.3 , inv_rhoair=1./1.3 ! Air density & inverse , LY2004 !1.3 AOMIP + real(kind=WP) :: rhowat=1025., inv_rhowat=1./1025.! Water density & inverse + real(kind=WP) :: rhoice=910. , inv_rhoice=1./910. ! Ice density & inverse, AOMIP + real(kind=WP) :: rhosno=290. , inv_rhosno=1./290. ! Snow density & inverse, AOMIP + ! Specific heat of air, ice, snow [J/(kg * K)] + real(kind=WP) :: cpair=1005., cpice=2106., cpsno=2090. +! real(kind=WP) :: cc=rhowat*4190.0 ! Volumetr. heat cap. of water [J/m**3/K](cc = rhowat*cp_water) +! real(kind=WP) :: cl=rhoice*3.34e5 ! Volumetr. latent heat of ice fusion [J/m**3](cl=rhoice*Lf) +! --> cl and cc are setted in subroutine ice_init(...) + real(kind=WP) :: cc=1025.*4190.0 ! Volumetr. heat cap. of water [J/m**3/K](cc = rhowat*cp_water) + real(kind=WP) :: cl=910.*3.34e5 ! Volumetr. latent heat of ice fusion [J/m**3](cl=rhoice*Lf) + real(kind=WP) :: clhw=2.501e6 ! Specific latent heat [J/kg]: water -> water vapor + real(kind=WP) :: clhi=2.835e6 ! sea ice-> water vapor + real(kind=WP) :: tmelt=273.15 ! 0 deg C expressed in K + real(kind=WP) :: boltzmann=5.67E-8 ! S. Boltzmann const.*longw. emissivity + integer :: iclasses=7 ! Number of ice thickness gradations for ice growth calcs. + real(kind=WP) :: hmin= 0.01 ! Cut-off ice thickness !! + real(kind=WP) :: Armin=0.01 ! Minimum ice concentration !! + + ! --- namelist parameter /ice_therm/ + real(kind=WP) :: con= 2.1656, consn = 0.31 ! Thermal conductivities: ice & snow; W/m/K + real(kind=WP) :: Sice = 4.0 ! Ice salinity 3.2--5.0 ppt. + real(kind=WP) :: h0=1.0 ! Lead closing parameter [m] ! 0.5 + real(kind=WP) :: emiss_ice=0.97 ! Emissivity of Snow/Ice, + real(kind=WP) :: emiss_wat=0.97 ! Emissivity of open water + real(kind=WP) :: albsn = 0.81 ! Albedo: frozen snow + real(kind=WP) :: albsnm= 0.77 ! melting snow + real(kind=WP) :: albi = 0.70 ! frozen ice + real(kind=WP) :: albim = 0.68 ! melting ice + real(kind=WP) :: albw = 0.066 ! open water, LY2004 contains procedure WRITE_T_ICE_THERMO procedure READ_T_ICE_THERMO @@ -129,7 +159,8 @@ MODULE MOD_ICE !___________________________________________________________________________ ! put ice arrays for coupled model type(t_ice_atmcoupl) :: atmcoupl -#endif /* (__oasis) */ +#endif /* (__oasis) */ + !___________________________________________________________________________ ! set ice model parameters: ! --- RHEOLOGY --- @@ -505,6 +536,11 @@ subroutine ice_init(ice, partit, mesh) namelist /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, & Cd_oce_ice, ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, & alpha_evp, beta_evp, c_aevp + + real(kind=WP) :: Sice, h0, emiss_ice, emiss_wat, albsn, albsnm, albi, & + albim, albw, con, consn + namelist /ice_therm/ Sice, h0, emiss_ice, emiss_wat, albsn, albsnm, albi, & + albim, albw, con, consn !___________________________________________________________________________ ! pointer on necessary derived types #include "associate_part_def.h" @@ -522,11 +558,12 @@ subroutine ice_init(ice, partit, mesh) call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if - read(nm_unit, nml=ice_dyn, iostat=iost) + read(nm_unit, nml=ice_dyn , iostat=iost) + read(nm_unit, nml=ice_therm, iostat=iost) close(nm_unit) !___________________________________________________________________________ - ! set parameters in ice derived type from namelist.ice + ! set parameters in ice derived type from namelist.ice --> namelist /ice_dyn/ ice%whichEVP = whichEVP ice%pstar = Pstar ice%ellipse = ellipse @@ -542,17 +579,26 @@ subroutine ice_init(ice, partit, mesh) ice%beta_evp = beta_evp ice%c_aevp = c_aevp - !!PS no namelist paramter in moment - !!PS ice%zeta_min = zeta_min - !!PS ice%Tevp_inv = Tevp_inv - !!PS ice%ice_free_slip = ice_free_slip - !!PS ice%ice_dt = ice_dt - !!PS ice%Tevp_inv = Tevp_inv + ! set parameters in ice derived type from namelist.ice --> namelist /ice_therm/ + ice%thermo%con = con + ice%thermo%consn = consn + ice%thermo%Sice = Sice + ice%thermo%h0 = h0 + ice%thermo%emiss_ice= emiss_ice + ice%thermo%emiss_wat= emiss_wat + ice%thermo%albsn = albsn + ice%thermo%albsnm = albsnm + ice%thermo%albi = albi + ice%thermo%albim = albim + ice%thermo%albw = albw + + ice%thermo%cc=ice%thermo%rhowat*4190.0 ! Volumetr. heat cap. of water [J/m**3/K](cc = rhowat*cp_water) + ice%thermo%cl=ice%thermo%rhoice*3.34e5 ! Volumetr. latent heat of ice fusion [J/m**3](cl=rhoice*Lf) !___________________________________________________________________________ ! define local vertice & elem array size elem_size=myDim_elem2D+eDim_elem2D - node_size=myDim_nod2D+eDim_nod2D + node_size=myDim_nod2D +eDim_nod2D !___________________________________________________________________________ ! allocate/initialise arrays in ice derived type @@ -589,6 +635,7 @@ subroutine ice_init(ice, partit, mesh) ice%alpha_evp_array = ice%alpha_evp ice%beta_evp_array = ice%alpha_evp end if + !___________________________________________________________________________ ! initialise surface ocean arrays in ice derived type allocate(ice%srfoce_u( node_size)) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 5ad4b8510..391b69a89 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -52,69 +52,7 @@ ! ! ! NAMELIST /ice_dyn/ whichEVP, Cd_oce_ice, & ! ! ! ice_ave_steps ! ! -! ! -! ! END MODULE i_PARAM -! ! ! -! ! !============================================================================= -! ! ! -! ! MODULE i_ARRAYS -! ! ! -! ! ! Arrays used to store ice variables and organize coupling -! ! ! -! ! USE o_PARAM -! ! implicit none -! ! save -! ! ! logical :: ice_update = .true. ! -! ! ! integer :: ice_steps_since_upd = 0 ! -! ! ! real(kind=WP),allocatable,dimension(:,:) :: ice_grad_vel -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice, V_ice -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice, a_ice, m_snow -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_old, V_ice_old, m_ice_old, a_ice_old, m_snow_old,thdgr_old !PS -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: thdgr_old -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: ths_temp -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux -! ! #if defined (__oasis) || defined (__ifsinterface) -! ! ! real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling -! ! ! real(kind=WP),target, allocatable, dimension(:) :: ice_alb ! new fields for OIFS coupling -! ! ! real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux -! ! ! real(kind=WP),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux -! ! !temporary flux fields -! ! !(for flux correction) -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_temp, m_templ, dm_temp, rhs_tempdiv -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_templ, dm_temp, rhs_tempdiv -! ! #if defined (__oifs) || defined (__ifsinterface) -! ! ! real(kind=WP),target, allocatable, dimension(:) :: enthalpyoffuse -! ! #endif -! ! #endif /* (__oasis) || defined (__ifsinterface)*/ -! ! -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: S_oc_array, T_oc_array -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_x -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_y -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_x -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_y -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: t_skin -! ! ! FCT implementation -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_icel, a_icel, m_snowl -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: alpha_evp_array(:) ! of myDim_elem2D -! ! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: beta_evp_array(:) ! of myDim_node2D+eDim_node2D -! ! -! ! ! Mean arrays -! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_mean, V_ice_mean -! ! REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice_mean, a_ice_mean, m_snow_mean -! ! END MODULE i_ARRAYS + !===================================================================== module i_therm_param USE o_PARAM From 2764ad6fd585f579201ff08dddc2a7f6eb56de20 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 29 Nov 2021 10:40:39 +0100 Subject: [PATCH 695/909] fix ciepack issues with new ice derived type --- src/ice_EVP.F90 | 12 +++- src/ice_maEVP.F90 | 10 +++- src/icepack_drivers/icedrv_advection.F90 | 76 ++++++++++++------------ src/icepack_drivers/icedrv_main.F90 | 9 ++- src/icepack_drivers/icedrv_set.F90 | 23 +++---- src/icepack_drivers/icedrv_step.F90 | 5 +- src/icepack_drivers/icedrv_transfer.F90 | 25 +++++--- 7 files changed, 93 insertions(+), 67 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 6b2e65938..f5c59c4c1 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -295,7 +295,10 @@ subroutine EVPdynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_ice_old, v_ice_old real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m real(kind=WP), dimension(:), pointer :: u_w, v_w, elevation - real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y +#if defined (__icepack) + real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old +#endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -316,7 +319,12 @@ subroutine EVPdynamics(ice, partit, mesh) elevation => ice%srfoce_ssh(:) stress_atmice_x => ice%stress_atmice_x(:) stress_atmice_y => ice%stress_atmice_y(:) - +#if defined (__icepack) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) +#endif + !___________________________________________________________________________ ! If Icepack is used, always update the tracers #if defined (__icepack) diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 78f770d97..5cf4992b3 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -417,6 +417,9 @@ subroutine EVPdynamics_m(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: elevation real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux +#if defined (__icepack) + real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old +#endif #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -443,7 +446,12 @@ subroutine EVPdynamics_m(ice, partit, mesh) stress_atmice_y => ice%stress_atmice_y(:) u_ice_aux => ice%uice_aux(:) v_ice_aux => ice%vice_aux(:) - +#if defined (__icepack) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) +#endif + !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ice%ellipse**2) diff --git a/src/icepack_drivers/icedrv_advection.F90 b/src/icepack_drivers/icedrv_advection.F90 index 554b0c51e..8c0b72a91 100644 --- a/src/icepack_drivers/icedrv_advection.F90 +++ b/src/icepack_drivers/icedrv_advection.F90 @@ -35,17 +35,17 @@ contains - subroutine tg_rhs_icepack(mesh, trc) + subroutine tg_rhs_icepack(ice, mesh, trc) use mod_mesh - use i_param + use MOD_ICE use o_param use g_config implicit none ! Input - output - + type(t_ice), target, intent(in) :: ice type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc @@ -79,14 +79,14 @@ subroutine tg_rhs_icepack(mesh, trc) ! Diffusivity - diff = ice_diff * sqrt( elem_area(elem) / scale_area ) + diff =ice% ice_diff * sqrt( elem_area(elem) / scale_area ) do n = 1, 3 row = elnodes(n) do q = 1, 3 - entries(q) = vol*ice_dt*((dx(n)*(um+uvel(elnodes(q))) + & + entries(q) = vol*ice%ice_dt*((dx(n)*(um+uvel(elnodes(q))) + & dy(n)*(vm+vvel(elnodes(q))))/12.0_WP - & diff*(dx(n)*dx(q)+ dy(n)*dy(q)) - & - 0.5_WP*ice_dt*(um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0_WP) + 0.5_WP*ice%ice_dt*(um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0_WP) enddo rhs_tr(row)=rhs_tr(row)+sum(entries*trc(elnodes)) enddo @@ -138,8 +138,7 @@ end subroutine init_advection_icepack subroutine fill_mass_matrix_icepack(mesh) use mod_mesh - use i_param - + implicit none integer(kind=int_kind) :: n, n1, n2, row @@ -198,7 +197,7 @@ end subroutine fill_mass_matrix_icepack !======================================================================= - subroutine solve_low_order_icepack(mesh, trc) + subroutine solve_low_order_icepack(ice, mesh, trc) !============================ ! Low-order solution @@ -210,20 +209,20 @@ subroutine solve_low_order_icepack(mesh, trc) ! is implemented as the difference between the consistent and lumped mass ! matrices acting on the field from the previous time step. The consistent ! mass matrix on the lhs is replaced with the lumped one. - + USE MOD_ICE use mod_mesh - use i_param - + implicit none integer(kind=int_kind) :: row, clo, clo2, cn, location(100) real (kind=dbl_kind) :: gamma + type(t_ice), target, intent(in) :: ice type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc #include "associate_mesh.h" - gamma = ice_gamma_fct ! Added diffusivity parameter + gamma = ice%ice_gamma_fct ! Added diffusivity parameter ! Adjust it to ensure posivity of solution do row = 1, nx_nh @@ -247,8 +246,7 @@ end subroutine solve_low_order_icepack subroutine solve_high_order_icepack(mesh, trc) use mod_mesh - use i_param - + implicit none integer(kind=int_kind) :: n,i,clo,clo2,cn,location(100),row @@ -288,7 +286,7 @@ end subroutine solve_high_order_icepack !======================================================================= - subroutine fem_fct_icepack(mesh, trc) + subroutine fem_fct_icepack(ice, mesh, trc) !============================ ! Flux corrected transport algorithm for tracer advection @@ -298,20 +296,20 @@ subroutine fem_fct_icepack(mesh, trc) ! transport (FEM-FCT) for the Euler and Navier-Stokes equation, ! Int. J. Numer. Meth. Fluids, 7 (1987), 1093--1109) as described by Kuzmin and ! Turek. (kuzmin@math.uni-dortmund.de) - + USE MOD_ICE use mod_mesh use o_param - use i_param - + integer(kind=int_kind) :: icoef(3,3), n, q, elem, elnodes(3), row real (kind=dbl_kind), allocatable, dimension(:) :: tmax, tmin real (kind=dbl_kind) :: vol, flux, ae, gamma type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc - + type(t_ice), target, intent(in) :: ice + #include "associate_mesh.h" - gamma = ice_gamma_fct ! It should coinside with gamma in + gamma = ice%ice_gamma_fct ! It should coinside with gamma in ! ts_solve_low_order !========================== @@ -445,17 +443,17 @@ end subroutine fem_fct_icepack !======================================================================= - subroutine tg_rhs_div_icepack(mesh, trc) - + subroutine tg_rhs_div_icepack(ice, mesh, trc) + USE MOD_ICE use mod_mesh use o_param - use i_param - + implicit none real (kind=dbl_kind) :: diff, entries(3), um, vm, vol, dx(3), dy(3) integer(kind=int_kind) :: n, q, row, elem, elnodes(3) real (kind=dbl_kind) :: c_1, c_2, c_3, c_4, c_x, entries2(3) + type(t_ice), target, intent(in) :: ice type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc @@ -494,14 +492,14 @@ subroutine tg_rhs_div_icepack(mesh, trc) row = elnodes(n) do q = 1, 3 - entries(q) = vol*ice_dt*((c1-p5*ice_dt*c_4)*(dx(n)*(um+uvel(elnodes(q)))+ & + entries(q) = vol*ice%ice_dt*((c1-p5*ice%ice_dt*c_4)*(dx(n)*(um+uvel(elnodes(q)))+ & dy(n)*(vm+vvel(elnodes(q))))/12.0_dbl_kind - & - p5*ice_dt*(c_1*dx(n)*dx(q)+c_2*dy(n)*dy(q)+c_3*(dx(n)*dy(q)+dx(q)*dy(n)))) - entries2(q) = p5*ice_dt*(dx(n)*(um+uvel(elnodes(q))) + & + p5*ice%ice_dt*(c_1*dx(n)*dx(q)+c_2*dy(n)*dy(q)+c_3*(dx(n)*dy(q)+dx(q)*dy(n)))) + entries2(q) = p5*ice%ice_dt*(dx(n)*(um+uvel(elnodes(q))) + & dy(n)*(vm+vvel(elnodes(q)))-dx(q)*(um+uvel(row)) - & dy(q)*(vm+vvel(row))) enddo - c_x = vol*ice_dt*c_4*(sum(trc(elnodes))+trc(elnodes(n))+sum(entries2*trc(elnodes))) / 12.0_dbl_kind + c_x = vol*ice%ice_dt*c_4*(sum(trc(elnodes))+trc(elnodes(n))+sum(entries2*trc(elnodes))) / 12.0_dbl_kind rhs_tr(row) = rhs_tr(row) + sum(entries * trc(elnodes)) + c_x rhs_trdiv(row) = rhs_trdiv(row) - c_x enddo @@ -515,8 +513,7 @@ subroutine update_for_div_icepack(mesh, trc) use mod_mesh use o_param - use i_param - + implicit none integer(kind=int_kind) :: n, i, clo, clo2, cn, & @@ -560,31 +557,33 @@ end subroutine update_for_div_icepack !======================================================================= - subroutine fct_solve_icepack(mesh, trc) + subroutine fct_solve_icepack(ice, mesh, trc) use mod_mesh - + use MOD_ICE implicit none real(kind=dbl_kind), dimension(nx), intent(inout) :: trc type(t_mesh), target, intent(in) :: mesh + type(t_ice), target, intent(in) :: ice ! Driving sequence - call tg_rhs_div_icepack(mesh, trc) + call tg_rhs_div_icepack(ice, mesh, trc) call solve_high_order_icepack(mesh, trc) ! uses arrays of low-order solutions as temp ! storage. It should preceed the call of low ! order solution. - call solve_low_order_icepack(mesh, trc) - call fem_fct_icepack(mesh, trc) + call solve_low_order_icepack(ice, mesh, trc) + call fem_fct_icepack(ice, mesh, trc) call update_for_div_icepack(mesh, trc) end subroutine fct_solve_icepack !======================================================================= - module subroutine tracer_advection_icepack(mesh) + module subroutine tracer_advection_icepack(ice, mesh) use mod_mesh + use MOD_ICE use icepack_intfc, only: icepack_aggregate use icepack_itd, only: cleanup_itd use g_config, only: dt @@ -614,6 +613,7 @@ module subroutine tracer_advection_icepack(mesh) works type(t_mesh), target, intent(in) :: mesh + type(t_ice), target, intent(in) :: ice call icepack_query_parameters(heat_capacity_out=heat_capacity, & puny_out=puny) @@ -643,7 +643,7 @@ module subroutine tracer_advection_icepack(mesh) ! Advect each tracer do nt = 1, narr - call fct_solve_icepack ( mesh, works(:,nt) ) + call fct_solve_icepack (ice, mesh, works(:,nt) ) end do call work_to_state (ntrcr, narr, works(:,:)) diff --git a/src/icepack_drivers/icedrv_main.F90 b/src/icepack_drivers/icedrv_main.F90 index 03c2e0625..0f5f938ff 100644 --- a/src/icepack_drivers/icedrv_main.F90 +++ b/src/icepack_drivers/icedrv_main.F90 @@ -753,10 +753,12 @@ module icedrv_main interface ! Read icepack namelists, setup the model parameter and write diagnostics - module subroutine set_icepack(partit) + module subroutine set_icepack(ice, partit) use mod_partit + use mod_ice implicit none type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice end subroutine set_icepack ! Set up hemispheric masks @@ -850,10 +852,12 @@ module subroutine icepack_to_fesom_single_point( & end subroutine icepack_to_fesom_single_point ! Trancers advection - module subroutine tracer_advection_icepack(mesh) + module subroutine tracer_advection_icepack(ice, mesh) use mod_mesh + use MOD_ICE implicit none type(t_mesh), intent(in), target :: mesh + type(t_ice), intent(in), target :: ice end subroutine tracer_advection_icepack ! Advection initialization @@ -868,7 +872,6 @@ module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) use mod_mesh use mod_ice use g_config, only: dt - use i_PARAM, only: whichEVP use icepack_intfc, only: icepack_ice_strength implicit none real (kind=dbl_kind), intent(out) :: & diff --git a/src/icepack_drivers/icedrv_set.F90 b/src/icepack_drivers/icedrv_set.F90 index bc7af715f..dbfc55a1d 100644 --- a/src/icepack_drivers/icedrv_set.F90 +++ b/src/icepack_drivers/icedrv_set.F90 @@ -22,16 +22,17 @@ contains - module subroutine set_icepack(partit) - - use i_param, only: whichEVP - use i_param, only: cd_oce_ice, Pstar, c_pressure - use i_therm_param, only: albw + module subroutine set_icepack(ice, partit) + use MOD_ICE +! use i_param, only: whichEVP +! use i_param, only: cd_oce_ice, Pstar, c_pressure +! use i_therm_param, only: albw implicit none ! local variables type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice character(len=char_len) :: nml_filename, diag_filename character(len=*), parameter :: subname = '(set_icepack)' @@ -432,13 +433,13 @@ module subroutine set_icepack(partit) if (mype == 0) write(nu_diag,*) '-----------------------------------' if (mype == 0) write(nu_diag,*) ' ' - if (whichEVP == 1 .or. whichEVP == 2) then + if (ice%whichEVP == 1 .or. ice%whichEVP == 2) then if (mype == 0) write (nu_diag,*) 'WARNING: whichEVP = 1 or 2' if (mype == 0) write (nu_diag,*) 'Adaptive or Modified EVP formulations' if (mype == 0) write (nu_diag,*) 'are not allowed when using Icepack (yet).' if (mype == 0) write (nu_diag,*) 'Standard EVP will be used instead' if (mype == 0) write (nu_diag,*) ' whichEVP = 0' - whichEVP = 0 + ice%whichEVP = 0 endif if (ncat == 1 .and. kitd == 1) then @@ -817,10 +818,10 @@ module subroutine set_icepack(partit) ! Make the namelists.ice and namelist.icepack consistent (icepack wins ! over fesom) - cd_oce_ice = dragio - albw = albocn - Pstar = P_star - c_pressure = C_star + ice%cd_oce_ice = dragio + ice%thermo%albw= albocn + ice%Pstar = P_star + ice%c_pressure = C_star call icepack_init_parameters(ustar_min_in=ustar_min, Cf_in=Cf, & diff --git a/src/icepack_drivers/icedrv_step.F90 b/src/icepack_drivers/icedrv_step.F90 index 1997b9b45..5139454bf 100644 --- a/src/icepack_drivers/icedrv_step.F90 +++ b/src/icepack_drivers/icedrv_step.F90 @@ -1120,7 +1120,6 @@ module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) use icepack_intfc, only: icepack_ice_strength use g_config, only: dt - use i_PARAM, only: whichEVP use mod_mesh use mod_ice use ice_EVPdynamics_interface @@ -1240,7 +1239,7 @@ module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) t2 = MPI_Wtime() - select case (whichEVP) + select case (ice%whichEVP) case (0) call EVPdynamics (ice, p_partit, mesh) case (1) @@ -1268,7 +1267,7 @@ module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) t2 = MPI_Wtime() - call tracer_advection_icepack(mesh) + call tracer_advection_icepack(ice, mesh) t3 = MPI_Wtime() time_advec = t3 - t2 diff --git a/src/icepack_drivers/icedrv_transfer.F90 b/src/icepack_drivers/icedrv_transfer.F90 index fd6fca29f..15d36eb1b 100644 --- a/src/icepack_drivers/icedrv_transfer.F90 +++ b/src/icepack_drivers/icedrv_transfer.F90 @@ -18,12 +18,12 @@ module subroutine fesom_to_icepack(ice, mesh) use g_forcing_param, only: ncar_bulk_z_wind, ncar_bulk_z_tair, & ncar_bulk_z_shum use g_sbf, only: l_mslp - use i_arrays, only: S_oc_array, T_oc_array, & ! Ocean and sea ice fields - u_w, v_w, & - stress_atmice_x, stress_atmice_y -! u_ice, v_ice, & +! use i_arrays, only: S_oc_array, T_oc_array, & ! Ocean and sea ice fields +! u_w, v_w, & +! stress_atmice_x, stress_atmice_y +! ! u_ice, v_ice, & - use i_param, only: cd_oce_ice ! Sea ice parameters +! use i_param, only: cd_oce_ice ! Sea ice parameters use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters use icepack_intfc, only: icepack_sea_freezing_temperature @@ -63,10 +63,17 @@ module subroutine fesom_to_icepack(ice, mesh) type(t_mesh), target, intent(in) :: mesh type(t_ice), target, intent(inout) :: ice - real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: u_ice, v_ice, S_oc_array, T_oc_array, & + u_w, v_w, stress_atmice_x, stress_atmice_y #include "associate_mesh.h" - u_ice => ice%uvice(1,:) - v_ice => ice%uvice(2,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + S_oc_array => ice%srfoce_salt(:) + T_oc_array => ice%srfoce_temp(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + stress_atmice_x => ice%stress_atmice_x(:) + stress_atmice_y => ice%stress_atmice_y(:) ! Ice uvel(:) = u_ice(:) @@ -124,7 +131,7 @@ module subroutine fesom_to_icepack(ice, mesh) do i = 1, nx ! ocean - ice stress - aux = sqrt((uvel(i)-uocn(i))**2+(vvel(i)-vocn(i))**2)*rhowat*cd_oce_ice + aux = sqrt((uvel(i)-uocn(i))**2+(vvel(i)-vocn(i))**2)*rhowat*ice%cd_oce_ice strocnxT(i) = aux*(uvel(i) - uocn(i)) strocnyT(i) = aux*(vvel(i) - vocn(i)) ! freezing - melting potential From 1f2e82eb4c0422731f6e7e9e84cd1807c7b4050a Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 29 Nov 2021 10:43:10 +0100 Subject: [PATCH 696/909] forgot to add something --- src/fesom_module.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 15fbc588a..29e133c57 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -198,7 +198,7 @@ subroutine fesom_init(fesom_total_nsteps) ! Setup icepack !===================== if (f%mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' - call set_icepack(f%partit) + call set_icepack(f%ice, f%partit) call alloc_icepack call init_icepack(f%ice, f%tracers%data(1), f%mesh) if (f%mype==0) write(*,*) 'Icepack: setup complete' From 44f37344254d72684dc32b9e6ef1c65d75e23687 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 29 Nov 2021 12:04:41 +0100 Subject: [PATCH 697/909] fix small issue --- src/oce_shortwave_pene.F90 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/oce_shortwave_pene.F90 b/src/oce_shortwave_pene.F90 index dabcc565a..cfca17cd8 100644 --- a/src/oce_shortwave_pene.F90 +++ b/src/oce_shortwave_pene.F90 @@ -1,7 +1,6 @@ subroutine cal_shortwave_rad(ice, partit, mesh) ! This routine is inherited from FESOM 1.4 and adopted appropreately. It calculates - ! s - hortwave penetration into the ocean assuming the constant chlorophyll concentration. + ! shortwave penetration into the ocean assuming the constant chlorophyll concentration. ! No penetration under the ice is applied. A decent way for ice region is to be discussed. ! This routine should be called after ice2oce coupling done if ice model is used. ! Ref.: Morel and Antoine 1994, Sweeney et al. 2005 From d97e1a783851968f5ad55a448e13b9e3754939fd Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 29 Nov 2021 12:13:34 +0100 Subject: [PATCH 698/909] moved OMP lock stuff out of hte vertical loops (was a stupid idea to have it inside). some routines became 3x faster. --- src/oce_ale_vel_rhs.F90 | 24 +++++++------- src/oce_dyn.F90 | 69 ++++++++++++++++++++++++----------------- 2 files changed, 52 insertions(+), 41 deletions(-) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 87679efc5..4f82cd0a7 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -317,63 +317,63 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) ! Do not calculate on Halo nodes, as the result will not be used. ! The "if" is cheaper than the avoided computiations. if (nod(1) <= myDim_nod2d) then - do nz=min(ul1,ul2), max(nl1,nl2) - ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) #if defined(_OPENMP) call omp_set_lock(partit%plock(nod(1))) #endif + do nz=min(ul1,ul2), max(nl1,nl2) + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) + end do #if defined(_OPENMP) call omp_unset_lock(partit%plock(nod(1))) #endif - end do endif ! second edge node if (nod(2) <= myDim_nod2d) then - do nz=min(ul1,ul2), max(nl1,nl2) - ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) #if defined(_OPENMP) call omp_set_lock(partit%plock(nod(2))) #endif + do nz=min(ul1,ul2), max(nl1,nl2) + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) + end do #if defined(_OPENMP) call omp_unset_lock(partit%plock(nod(2))) #endif - end do endif else ! el2 is not a valid element --> ed is a boundary edge, there is only the contribution from el1 ! first edge node if (nod(1) <= myDim_nod2d) then - do nz=ul1, nl1 - ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) #if defined(_OPENMP) call omp_set_lock(partit%plock(nod(1))) #endif + do nz=ul1, nl1 + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + end do ! --> do nz=ul1, nl1 #if defined(_OPENMP) call omp_unset_lock(partit%plock(nod(1))) #endif - end do ! --> do nz=ul1, nl1 endif ! second edge node if (nod(2) <= myDim_nod2d) then - do nz=ul1, nl1 - ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) #if defined(_OPENMP) call omp_set_lock(partit%plock(nod(2))) #endif + do nz=ul1, nl1 + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) + end do ! --> do nz=ul1, nl1 #if defined(_OPENMP) call omp_unset_lock(partit%plock(nod(2))) #endif - end do ! --> do nz=ul1, nl1 endif endif ! --> if (el2>0) then end do ! --> do ed=1, myDim_edge2D diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 446e2a656..1bac929cd 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -267,6 +267,10 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) len=sqrt(sum(elem_area(el))) nzmax = minval(nlevels(el)) nzmin = maxval(ulevels(el)) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(el(1))) + call omp_set_lock(partit%plock(el(2))) +#endif DO nz=nzmin,nzmax-1 u1=UV(1,nz,el(1))-UV(1,nz,el(2)) v1=UV(2,nz,el(1))-UV(2,nz,el(2)) @@ -278,21 +282,15 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) !here dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. u1=u1*vi v1=v1*vi -#if defined(_OPENMP) - call omp_set_lock(partit%plock(el(1))) -#endif U_b(nz,el(1))=U_b(nz,el(1))-u1/elem_area(el(1)) V_b(nz,el(1))=V_b(nz,el(1))-v1/elem_area(el(1)) -#if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(1))) - call omp_set_lock(partit%plock(el(2))) -#endif U_b(nz,el(2))=U_b(nz,el(2))+u1/elem_area(el(2)) V_b(nz,el(2))=V_b(nz,el(2))+v1/elem_area(el(2)) + END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(2))) + call omp_unset_lock(partit%plock(el(1))) + call omp_unset_lock(partit%plock(el(2))) #endif - END DO END DO !$OMP END DO !$OMP MASTER @@ -392,6 +390,10 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) el=edge_tri(:,ed) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(el(1))) + call omp_set_lock(partit%plock(el(2))) +#endif DO nz=nzmin,nzmax-1 u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) @@ -399,7 +401,11 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) U_c(nz,el(2))=U_c(nz,el(2))+u1 V_c(nz,el(1))=V_c(nz,el(1))-v1 V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO + END DO +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(1))) + call omp_unset_lock(partit%plock(el(2))) +#endif END DO !$OMP END DO !$OMP DO @@ -431,24 +437,22 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) el=edge_tri(:,ed) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(el(1))) + call omp_set_lock(partit%plock(el(2))) +#endif DO nz=nzmin,nzmax-1 u1=(U_c(nz,el(1))-U_c(nz,el(2))) v1=(V_c(nz,el(1))-V_c(nz,el(2))) -#if defined(_OPENMP) - call omp_set_lock(partit%plock(el(1))) -#endif UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) -#if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(1))) - call omp_set_lock(partit%plock(el(2))) -#endif UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) + END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(2))) + call omp_unset_lock(partit%plock(el(1))) + call omp_unset_lock(partit%plock(el(2))) #endif - END DO END DO !$OMP END DO !$OMP END PARALLEL @@ -505,6 +509,10 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) len=sqrt(sum(elem_area(el))) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(el(1))) + call omp_set_lock(partit%plock(el(2))) +#endif DO nz=nzmin,nzmax-1 u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) @@ -520,7 +528,11 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) U_c(nz,el(2))=U_c(nz,el(2))+u1 V_c(nz,el(1))=V_c(nz,el(1))-v1 V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO + END DO +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(1))) + call omp_unset_lock(partit%plock(el(2))) +#endif END DO !$OMP END DO !$OMP MASTER @@ -535,6 +547,10 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) len=sqrt(sum(elem_area(el))) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) +#if defined(_OPENMP) + call omp_set_lock(partit%plock(el(1))) + call omp_set_lock(partit%plock(el(2))) +#endif DO nz=nzmin,nzmax-1 u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) @@ -547,21 +563,16 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) u1=vi*(U_c(nz,el(1))-U_c(nz,el(2))) v1=vi*(V_c(nz,el(1))-V_c(nz,el(2))) -#if defined(_OPENMP) - call omp_set_lock(partit%plock(el(1))) -#endif + UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) -#if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(1))) - call omp_set_lock(partit%plock(el(2))) -#endif UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) + END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(2))) + call omp_unset_lock(partit%plock(el(1))) + call omp_unset_lock(partit%plock(el(2))) #endif - END DO END DO !$OMP END DO !$OMP END PARALLEL From 4875e3f1de08c7513a5fac7bf573d8664c6763f7 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 29 Nov 2021 14:20:35 +0100 Subject: [PATCH 699/909] OMP lock taken out from the vertical loop in vert_vel_ale. It slows down by ~x3 when present --- src/oce_ale.F90 | 42 +++++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index dbc941243..7972dec1d 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1999,7 +1999,11 @@ subroutine vert_vel_ale(dynamics, partit, mesh) ! do it with gauss-law: int( div(u_vec)*dV) = int( u_vec * n_vec * dS ) nzmin = ulevels(el(1)) nzmax = nlevels(el(1))-1 - + +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) + call omp_set_lock(partit%plock(enodes(2))) +#endif do nz = nzmax, nzmin, -1 ! --> h * u_vec * n_vec ! --> e_vec = (dx,dy), n_vec = (-dy,dx); @@ -2012,20 +2016,15 @@ subroutine vert_vel_ale(dynamics, partit, mesh) if (Fer_GM) then c1=(fer_UV(2,nz,el(1))*deltaX1- & fer_UV(1,nz,el(1))*deltaY1)*helem(nz,el(1)) -#if defined(_OPENMP) - call omp_set_lock(partit%plock(enodes(1))) -#endif fer_Wvel(nz,enodes(1))=fer_Wvel(nz,enodes(1))+c1 -#if defined(_OPENMP) - call omp_unset_lock(partit%plock(enodes(1))) - call omp_set_lock (partit%plock(enodes(2))) -#endif fer_Wvel(nz,enodes(2))=fer_Wvel(nz,enodes(2))-c1 + end if + end do #if defined(_OPENMP) - call omp_unset_lock(partit%plock(enodes(2))) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_unset_lock(partit%plock(enodes(2))) #endif - end if - end do + !_______________________________________________________________________ ! if ed is not a boundary edge --> calc div(u_vec*h) for every layer @@ -2035,7 +2034,10 @@ subroutine vert_vel_ale(dynamics, partit, mesh) deltaY2=edge_cross_dxdy(4,ed) nzmin = ulevels(el(2)) nzmax = nlevels(el(2))-1 - +#if defined(_OPENMP) + call omp_set_lock(partit%plock(enodes(1))) + call omp_set_lock(partit%plock(enodes(2))) +#endif do nz = nzmax, nzmin, -1 c2=-(UV(2,nz,el(2))*deltaX2 - UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) Wvel(nz,enodes(1))=Wvel(nz,enodes(1))+c2 @@ -2043,20 +2045,14 @@ subroutine vert_vel_ale(dynamics, partit, mesh) if (Fer_GM) then c2=-(fer_UV(2,nz,el(2))*deltaX2- & fer_UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) -#if defined(_OPENMP) - call omp_set_lock(partit%plock(enodes(1))) -#endif fer_Wvel(nz,enodes(1))=fer_Wvel(nz,enodes(1))+c2 -#if defined(_OPENMP) - call omp_unset_lock(partit%plock(enodes(1))) - call omp_set_lock (partit%plock(enodes(2))) -#endif fer_Wvel(nz,enodes(2))=fer_Wvel(nz,enodes(2))-c2 -#if defined(_OPENMP) - call omp_unset_lock(partit%plock(enodes(2))) -#endif - end if + end if end do +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_unset_lock(partit%plock(enodes(2))) +#endif end if end do ! --> do ed=1, myDim_edge2D !$OMP END PARALLEL DO From d2707f91fa8e27d3f77c370bd135c41730594f4e Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Mon, 29 Nov 2021 18:15:19 +0100 Subject: [PATCH 700/909] update environment file for juwels --- env/juwels/shell | 59 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/env/juwels/shell b/env/juwels/shell index 0b5451c82..89cc14b62 100644 --- a/env/juwels/shell +++ b/env/juwels/shell @@ -1,22 +1,45 @@ ########## -module --force purge -module use /gpfs/software/juwels/otherstages -module load Stages/2019a -module load StdEnv -# For intel MPI -#module load Intel/2019.3.199-GCC-8.3.0 IntelMPI/2018.5.288 imkl/2019.3.199 -#export FC=mpiifort CC=mpiicc CXX=mpiicpc +module --force purge +module use /gpfs/software/juwels/otherstages +module load Stages/2020 +module load Intel/2020.2.254-GCC-9.3.0 +module load ParaStationMPI/5.4.7-1 +module load CMake/3.18.0 +module load imkl/2020.2.254 +module load netCDF-Fortran/4.5.3 +module load netCDF/4.7.4 +module load Perl/5.32.0 +module load netCDF -# For ParaStation MPI -module load Intel/2019.3.199-GCC-8.3.0 ParaStationMPI/5.4 imkl/2019.5.281 -export FC=mpifort CC=mpicc CXX=mpicxx +export LC_ALL=en_US.UTF-8 +export TMPDIR=/tmp +export FC=mpifort +export F77=mpifort +export MPIFC=mpifort +export FCFLAGS=-free +export CC=mpicc +export CXX=mpic++ -module load netCDF/4.6.3 -module load netCDF-Fortran/4.4.5 -module load CMake -export NETCDF_DIR=$EBROOTNETCDF -export NETCDFF_DIR=$EBROOTNETCDFMINFORTRAN -export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ -export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ -export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ +export NETCDF_Fortran_INCLUDE_DIRECTORIES=$EBROOTNETCDFMINFORTRAN/include +export NETCDF_Fortran_LIBRARIES=$EBROOTNETCDFMINFORTRAN/lib + +#module use /gpfs/software/juwels/otherstages +#module load Stages/2019a +#module load StdEnv +## For intel MPI +##module load Intel/2019.3.199-GCC-8.3.0 IntelMPI/2018.5.288 imkl/2019.3.199 +##export FC=mpiifort CC=mpiicc CXX=mpiicpc + +## For ParaStation MPI +#module load Intel/2019.3.199-GCC-8.3.0 ParaStationMPI/5.4 imkl/2019.5.281 +#export FC=mpifort CC=mpicc CXX=mpicxx + +#module load netCDF/4.6.3 +#module load netCDF-Fortran/4.4.5 +#module load CMake +#export NETCDF_DIR=$EBROOTNETCDF +#export NETCDFF_DIR=$EBROOTNETCDFMINFORTRAN +#export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ +#export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ +#export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ From e4304b42d45352c491144ebd5011a2bf1e13aa43 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 29 Nov 2021 20:06:04 +0100 Subject: [PATCH 701/909] there was a deadlock condition in oce_ale.F90 --- src/oce_ale.F90 | 4 ++-- src/oce_dyn.F90 | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 7972dec1d..a59f39051 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2021,8 +2021,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end if end do #if defined(_OPENMP) - call omp_unset_lock(partit%plock(enodes(1))) call omp_unset_lock(partit%plock(enodes(2))) + call omp_unset_lock(partit%plock(enodes(1))) #endif @@ -2050,8 +2050,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end if end do #if defined(_OPENMP) - call omp_unset_lock(partit%plock(enodes(1))) call omp_unset_lock(partit%plock(enodes(2))) + call omp_unset_lock(partit%plock(enodes(1))) #endif end if end do ! --> do ed=1, myDim_edge2D diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 1bac929cd..82769047e 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -288,8 +288,8 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) V_b(nz,el(2))=V_b(nz,el(2))+v1/elem_area(el(2)) END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(1))) call omp_unset_lock(partit%plock(el(2))) + call omp_unset_lock(partit%plock(el(1))) #endif END DO !$OMP END DO @@ -403,8 +403,8 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) V_c(nz,el(2))=V_c(nz,el(2))+v1 END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(1))) call omp_unset_lock(partit%plock(el(2))) + call omp_unset_lock(partit%plock(el(1))) #endif END DO !$OMP END DO @@ -450,8 +450,8 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(1))) call omp_unset_lock(partit%plock(el(2))) + call omp_unset_lock(partit%plock(el(1))) #endif END DO !$OMP END DO @@ -530,8 +530,8 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) V_c(nz,el(2))=V_c(nz,el(2))+v1 END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(1))) call omp_unset_lock(partit%plock(el(2))) + call omp_unset_lock(partit%plock(el(1))) #endif END DO !$OMP END DO @@ -570,8 +570,8 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(1))) call omp_unset_lock(partit%plock(el(2))) + call omp_unset_lock(partit%plock(el(1))) #endif END DO !$OMP END DO From 87eade441e481169ca498be2bc4765efc12724c1 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Mon, 29 Nov 2021 21:24:02 +0100 Subject: [PATCH 702/909] Update oce_ale.F90 --- src/oce_ale.F90 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index a59f39051..c122a253e 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1999,7 +1999,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) ! do it with gauss-law: int( div(u_vec)*dV) = int( u_vec * n_vec * dS ) nzmin = ulevels(el(1)) nzmax = nlevels(el(1))-1 - +! loop over edges (enodes(1), enodes(2)) at the interface between el(1) and el(2), +! we expect no deadlock here...but who knows :) #if defined(_OPENMP) call omp_set_lock(partit%plock(enodes(1))) call omp_set_lock(partit%plock(enodes(2))) From 2b3dff353db735fb7568527ff79b237594d2b8df Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 30 Nov 2021 12:13:16 +0100 Subject: [PATCH 703/909] removed deadlock from vert_vel_ale --- src/oce_ale.F90 | 93 ++++++++++++++++++++++++++----------------------- 1 file changed, 49 insertions(+), 44 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index c122a253e..2ffbb4e06 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1941,7 +1941,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) type(t_mesh), intent(inout), target :: mesh !___________________________________________________________________________ integer :: el(2), enodes(2), n, nz, ed, nzmin, nzmax, uln1, uln2, nln1, nln2 - real(kind=WP) :: c1, c2, deltaX1, deltaY1, deltaX2, deltaY2, dd, dd1, dddt, cflmax + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, dd, dd1, dddt, cflmax + real(kind=WP) :: c1(mesh%nl-1), c2(mesh%nl-1) real(kind=WP) :: lcflmax !for OMP realization ! --> zlevel with local zstar real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int @@ -1999,34 +2000,35 @@ subroutine vert_vel_ale(dynamics, partit, mesh) ! do it with gauss-law: int( div(u_vec)*dV) = int( u_vec * n_vec * dS ) nzmin = ulevels(el(1)) nzmax = nlevels(el(1))-1 -! loop over edges (enodes(1), enodes(2)) at the interface between el(1) and el(2), -! we expect no deadlock here...but who knows :) -#if defined(_OPENMP) - call omp_set_lock(partit%plock(enodes(1))) - call omp_set_lock(partit%plock(enodes(2))) -#endif +! we introduced c1 & c2 as arrays here to avoid deadlocks when in OpenMP mode do nz = nzmax, nzmin, -1 ! --> h * u_vec * n_vec ! --> e_vec = (dx,dy), n_vec = (-dy,dx); ! --> h * u*(-dy) + v*dx - c1=( UV(2,nz,el(1))*deltaX1 - UV(1,nz,el(1))*deltaY1 )*helem(nz,el(1)) + c1(nz)=( UV(2,nz,el(1))*deltaX1 - UV(1,nz,el(1))*deltaY1 )*helem(nz,el(1)) ! inflow(outflow) "flux" to control volume of node enodes1 - Wvel(nz,enodes(1))=Wvel(nz,enodes(1))+c1 ! is equal to outflow(inflow) "flux" to control volume of node enodes2 - Wvel(nz,enodes(2))=Wvel(nz,enodes(2))-c1 if (Fer_GM) then - c1=(fer_UV(2,nz,el(1))*deltaX1- & - fer_UV(1,nz,el(1))*deltaY1)*helem(nz,el(1)) - fer_Wvel(nz,enodes(1))=fer_Wvel(nz,enodes(1))+c1 - fer_Wvel(nz,enodes(2))=fer_Wvel(nz,enodes(2))-c1 + c2(nz)=(fer_UV(2,nz,el(1))*deltaX1- fer_UV(1,nz,el(1))*deltaY1)*helem(nz,el(1)) end if end do #if defined(_OPENMP) - call omp_unset_lock(partit%plock(enodes(2))) + call omp_set_lock (partit%plock(enodes(1))) +#endif + Wvel (nzmin:nzmax, enodes(1))= Wvel (nzmin:nzmax, enodes(1))+c1(nzmin:nzmax) + fer_Wvel(nzmin:nzmax, enodes(1))= fer_Wvel(nzmin:nzmax, enodes(1))+c2(nzmin:nzmax) +#if defined(_OPENMP) call omp_unset_lock(partit%plock(enodes(1))) #endif - +#if defined(_OPENMP) + call omp_set_lock (partit%plock(enodes(2))) +#endif + Wvel (nzmin:nzmax, enodes(2))= Wvel (nzmin:nzmax, enodes(2))-c1(nzmin:nzmax) + fer_Wvel(nzmin:nzmax, enodes(2))= fer_Wvel(nzmin:nzmax, enodes(2))-c2(nzmin:nzmax) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif !_______________________________________________________________________ ! if ed is not a boundary edge --> calc div(u_vec*h) for every layer ! for el(2) @@ -2034,26 +2036,29 @@ subroutine vert_vel_ale(dynamics, partit, mesh) deltaX2=edge_cross_dxdy(3,ed) deltaY2=edge_cross_dxdy(4,ed) nzmin = ulevels(el(2)) - nzmax = nlevels(el(2))-1 -#if defined(_OPENMP) - call omp_set_lock(partit%plock(enodes(1))) - call omp_set_lock(partit%plock(enodes(2))) -#endif + nzmax = nlevels(el(2))-1 do nz = nzmax, nzmin, -1 - c2=-(UV(2,nz,el(2))*deltaX2 - UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - Wvel(nz,enodes(1))=Wvel(nz,enodes(1))+c2 - Wvel(nz,enodes(2))=Wvel(nz,enodes(2))-c2 + c1(nz)=-(UV(2,nz,el(2))*deltaX2 - UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) if (Fer_GM) then - c2=-(fer_UV(2,nz,el(2))*deltaX2- & - fer_UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - fer_Wvel(nz,enodes(1))=fer_Wvel(nz,enodes(1))+c2 - fer_Wvel(nz,enodes(2))=fer_Wvel(nz,enodes(2))-c2 + c2(nz)=-(fer_UV(2,nz,el(2))*deltaX2-fer_UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) end if end do #if defined(_OPENMP) - call omp_unset_lock(partit%plock(enodes(2))) - call omp_unset_lock(partit%plock(enodes(1))) -#endif + call omp_set_lock (partit%plock(enodes(1))) +#endif + Wvel (nzmin:nzmax, enodes(1))= Wvel (nzmin:nzmax, enodes(1))+c1(nzmin:nzmax) + fer_Wvel(nzmin:nzmax, enodes(1))= fer_Wvel(nzmin:nzmax, enodes(1))+c2(nzmin:nzmax) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(1))) +#endif +#if defined(_OPENMP) + call omp_set_lock (partit%plock(enodes(2))) +#endif + Wvel (nzmin:nzmax, enodes(2))= Wvel (nzmin:nzmax, enodes(2))-c1(nzmin:nzmax) + fer_Wvel(nzmin:nzmax, enodes(2))= fer_Wvel(nzmin:nzmax, enodes(2))-c2(nzmin:nzmax) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(enodes(2))) +#endif end if end do ! --> do ed=1, myDim_edge2D !$OMP END PARALLEL DO @@ -2434,14 +2439,14 @@ subroutine vert_vel_ale(dynamics, partit, mesh) nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n)-1 do nz=nzmin,nzmax - c1=abs(Wvel(nz,n) *dt/hnode_new(nz,n)) - c2=abs(Wvel(nz+1,n)*dt/hnode_new(nz,n)) - ! strong condition: - ! total volume change induced by the vertical motion - ! no matter, upwind or downwind ! - CFL_z(nz, n)=CFL_z(nz,n)+c1 - CFL_z(nz+1,n)=c2 + c1(nz)=abs(Wvel(nz,n) *dt/hnode_new(nz,n)) + c2(nz)=abs(Wvel(nz+1,n)*dt/hnode_new(nz,n)) end do + ! strong condition: + ! total volume change induced by the vertical motion + ! no matter, upwind or downwind ! + CFL_z(nzmin:nzmax, n) =CFL_z(nzmin:nzmax, n) +c1 + CFL_z(nzmin+1:nzmax+1, n)=CFL_z(nzmin+1:nzmax+1, n)+c2 end do !$OMP END PARALLEL DO @@ -2496,16 +2501,16 @@ subroutine vert_vel_ale(dynamics, partit, mesh) nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) do nz=nzmin,nzmax - c1=1.0_WP - c2=0.0_WP + c1(nz)=1.0_WP + c2(nz)=0.0_WP if (dynamics%use_wsplit .and. (CFL_z(nz, n) > dynamics%wsplit_maxcfl)) then dd=max((CFL_z(nz, n)-dynamics%wsplit_maxcfl), 0.0_WP)/max(dynamics%wsplit_maxcfl, 1.e-12) - c1=1.0_WP/(1.0_WP+dd) !explicit part =1. if dd=0. - c2=dd /(1.0_WP+dd) !implicit part =1. if dd=inf + c1(nz)=1.0_WP/(1.0_WP+dd) !explicit part =1. if dd=0. + c2(nz)=dd /(1.0_WP+dd) !implicit part =1. if dd=inf end if - Wvel_e(nz,n)=c1*Wvel(nz,n) - Wvel_i(nz,n)=c2*Wvel(nz,n) end do + Wvel_e(nzmin:nzmax,n)=c1(nzmin:nzmax)*Wvel(nzmin:nzmax,n) + Wvel_i(nzmin:nzmax,n)=c2(nzmin:nzmax)*Wvel(nzmin:nzmax,n) end do !$OMP END PARALLEL DO end subroutine vert_vel_ale From e2e40b9532ab468817a1b1bc82d0ef69a9cb0edb Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 30 Nov 2021 12:29:11 +0100 Subject: [PATCH 704/909] removed deadlock from oce_dyn.F90 -> visc_filt_bcksct --- src/oce_dyn.F90 | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 82769047e..d0d1b7e20 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -229,6 +229,7 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ real(kind=8) :: u1, v1, len, vi + real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax !___________________________________________________________________________ ! pointer on necessary derived types @@ -267,10 +268,6 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) len=sqrt(sum(elem_area(el))) nzmax = minval(nlevels(el)) nzmin = maxval(ulevels(el)) -#if defined(_OPENMP) - call omp_set_lock(partit%plock(el(1))) - call omp_set_lock(partit%plock(el(2))) -#endif DO nz=nzmin,nzmax-1 u1=UV(1,nz,el(1))-UV(1,nz,el(2)) v1=UV(2,nz,el(1))-UV(2,nz,el(2)) @@ -282,15 +279,31 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) !here dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. u1=u1*vi v1=v1*vi - U_b(nz,el(1))=U_b(nz,el(1))-u1/elem_area(el(1)) - V_b(nz,el(1))=V_b(nz,el(1))-v1/elem_area(el(1)) - U_b(nz,el(2))=U_b(nz,el(2))+u1/elem_area(el(2)) - V_b(nz,el(2))=V_b(nz,el(2))+v1/elem_area(el(2)) +! U_b(nz,el(1))=U_b(nz,el(1))-u1/elem_area(el(1)) +! V_b(nz,el(1))=V_b(nz,el(1))-v1/elem_area(el(1)) +! U_b(nz,el(2))=U_b(nz,el(2))+u1/elem_area(el(2)) +! V_b(nz,el(2))=V_b(nz,el(2))+v1/elem_area(el(2)) + update_u(nz)=u1 + update_v(nz)=v1 END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(2))) + call omp_set_lock(partit%plock(el(1))) +#endif + U_b(nzmin:nzmax-1, el(1))=U_b(nzmin:nzmax-1, el(1))+update_u(nzmin:nzmax-1)/elem_area(el(1)) + V_b(nzmin:nzmax-1, el(1))=V_b(nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1)/elem_area(el(1)) +#if defined(_OPENMP) call omp_unset_lock(partit%plock(el(1))) #endif + +#if defined(_OPENMP) + call omp_set_lock(partit%plock(el(2))) +#endif + U_b(nzmin:nzmax-1, el(2))=U_b(nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) + V_b(nzmin:nzmax-1, el(2))=V_b(nzmin:nzmax-1, el(2))-update_v(nzmin:nzmax-1)/elem_area(el(2)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(2))) +#endif + END DO !$OMP END DO !$OMP MASTER From 123d0b70ce0f782b95031e9fdfc4a06eff455d1d Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 30 Nov 2021 13:09:14 +0100 Subject: [PATCH 705/909] removed some bugs in OMP from deadlock fixing --- src/oce_ale.F90 | 33 ++++++++++++++++++--------------- src/oce_dyn.F90 | 15 +++++---------- 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 2ffbb4e06..dda3ba346 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2015,17 +2015,18 @@ subroutine vert_vel_ale(dynamics, partit, mesh) #if defined(_OPENMP) call omp_set_lock (partit%plock(enodes(1))) #endif - Wvel (nzmin:nzmax, enodes(1))= Wvel (nzmin:nzmax, enodes(1))+c1(nzmin:nzmax) - fer_Wvel(nzmin:nzmax, enodes(1))= fer_Wvel(nzmin:nzmax, enodes(1))+c2(nzmin:nzmax) + Wvel (nzmin:nzmax, enodes(1))= Wvel (nzmin:nzmax, enodes(1))+c1(nzmin:nzmax) + if (Fer_GM) then + fer_Wvel(nzmin:nzmax, enodes(1))= fer_Wvel(nzmin:nzmax, enodes(1))+c2(nzmin:nzmax) + end if #if defined(_OPENMP) call omp_unset_lock(partit%plock(enodes(1))) -#endif - -#if defined(_OPENMP) call omp_set_lock (partit%plock(enodes(2))) #endif - Wvel (nzmin:nzmax, enodes(2))= Wvel (nzmin:nzmax, enodes(2))-c1(nzmin:nzmax) - fer_Wvel(nzmin:nzmax, enodes(2))= fer_Wvel(nzmin:nzmax, enodes(2))-c2(nzmin:nzmax) + Wvel (nzmin:nzmax, enodes(2))= Wvel (nzmin:nzmax, enodes(2))-c1(nzmin:nzmax) + if (Fer_GM) then + fer_Wvel(nzmin:nzmax, enodes(2))= fer_Wvel(nzmin:nzmax, enodes(2))-c2(nzmin:nzmax) + end if #if defined(_OPENMP) call omp_unset_lock(partit%plock(enodes(2))) #endif @@ -2046,16 +2047,18 @@ subroutine vert_vel_ale(dynamics, partit, mesh) #if defined(_OPENMP) call omp_set_lock (partit%plock(enodes(1))) #endif - Wvel (nzmin:nzmax, enodes(1))= Wvel (nzmin:nzmax, enodes(1))+c1(nzmin:nzmax) - fer_Wvel(nzmin:nzmax, enodes(1))= fer_Wvel(nzmin:nzmax, enodes(1))+c2(nzmin:nzmax) + Wvel (nzmin:nzmax, enodes(1))= Wvel (nzmin:nzmax, enodes(1))+c1(nzmin:nzmax) + if (Fer_GM) then + fer_Wvel(nzmin:nzmax, enodes(1))= fer_Wvel(nzmin:nzmax, enodes(1))+c2(nzmin:nzmax) + end if #if defined(_OPENMP) call omp_unset_lock(partit%plock(enodes(1))) -#endif -#if defined(_OPENMP) call omp_set_lock (partit%plock(enodes(2))) #endif - Wvel (nzmin:nzmax, enodes(2))= Wvel (nzmin:nzmax, enodes(2))-c1(nzmin:nzmax) - fer_Wvel(nzmin:nzmax, enodes(2))= fer_Wvel(nzmin:nzmax, enodes(2))-c2(nzmin:nzmax) + Wvel (nzmin:nzmax, enodes(2))= Wvel (nzmin:nzmax, enodes(2))-c1(nzmin:nzmax) + if (Fer_GM) then + fer_Wvel(nzmin:nzmax, enodes(2))= fer_Wvel(nzmin:nzmax, enodes(2))-c2(nzmin:nzmax) + end if #if defined(_OPENMP) call omp_unset_lock(partit%plock(enodes(2))) #endif @@ -2445,8 +2448,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) ! strong condition: ! total volume change induced by the vertical motion ! no matter, upwind or downwind ! - CFL_z(nzmin:nzmax, n) =CFL_z(nzmin:nzmax, n) +c1 - CFL_z(nzmin+1:nzmax+1, n)=CFL_z(nzmin+1:nzmax+1, n)+c2 + CFL_z(nzmin :nzmax, n)=CFL_z(nzmin :nzmax, n)+c1 + CFL_z(nzmin+1:nzmax+1, n)=c2 end do !$OMP END PARALLEL DO diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index d0d1b7e20..fa2c877c1 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -277,29 +277,24 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) )*len ! vi=dt*max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(u1*u1+v1*v1), dynamics%visc_gamma2*(u1*u1+v1*v1)))*len !here dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. - u1=u1*vi - v1=v1*vi ! U_b(nz,el(1))=U_b(nz,el(1))-u1/elem_area(el(1)) ! V_b(nz,el(1))=V_b(nz,el(1))-v1/elem_area(el(1)) ! U_b(nz,el(2))=U_b(nz,el(2))+u1/elem_area(el(2)) ! V_b(nz,el(2))=V_b(nz,el(2))+v1/elem_area(el(2)) - update_u(nz)=u1 - update_v(nz)=v1 + update_u(nz)=u1*vi + update_v(nz)=v1*vi END DO #if defined(_OPENMP) call omp_set_lock(partit%plock(el(1))) #endif - U_b(nzmin:nzmax-1, el(1))=U_b(nzmin:nzmax-1, el(1))+update_u(nzmin:nzmax-1)/elem_area(el(1)) + U_b(nzmin:nzmax-1, el(1))=U_b(nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1)/elem_area(el(1)) V_b(nzmin:nzmax-1, el(1))=V_b(nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1)/elem_area(el(1)) #if defined(_OPENMP) call omp_unset_lock(partit%plock(el(1))) -#endif - -#if defined(_OPENMP) - call omp_set_lock(partit%plock(el(2))) + call omp_set_lock (partit%plock(el(2))) #endif U_b(nzmin:nzmax-1, el(2))=U_b(nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) - V_b(nzmin:nzmax-1, el(2))=V_b(nzmin:nzmax-1, el(2))-update_v(nzmin:nzmax-1)/elem_area(el(2)) + V_b(nzmin:nzmax-1, el(2))=V_b(nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1)/elem_area(el(2)) #if defined(_OPENMP) call omp_unset_lock(partit%plock(el(2))) #endif From 2ea758f595e9bb2a21cfcfd2f9e96d05d65f999c Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 30 Nov 2021 13:19:09 +0100 Subject: [PATCH 706/909] fixed OMP bug in visc_filt_bcksct --- src/oce_dyn.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index fa2c877c1..142fb2016 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -260,7 +260,7 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) END DO !$OMP END PARALLEL DO -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(u1, v1, len, vi, nz, ed, el, nelem, k, elem, nzmin, nzmax) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(u1, v1, len, vi, nz, ed, el, nelem, k, elem, nzmin, nzmax, update_u, update_v) !$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle From 82c389a2208c55c126583bf196842069f4c9f9bf Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 30 Nov 2021 15:03:50 +0100 Subject: [PATCH 707/909] iminor fixes in oce_ale.F90 --- src/oce_ale.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index dda3ba346..aeec6a56b 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1982,7 +1982,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) END DO !$OMP END PARALLEL DO -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(ed, enodes, el, deltaX1, deltaY1, nz, nzmin, nzmax, c1, deltaX2, deltaY2, c2) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(ed, enodes, el, deltaX1, deltaY1, nz, nzmin, nzmax, deltaX2, deltaY2, c1, c2) do ed=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,ed) @@ -2448,8 +2448,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) ! strong condition: ! total volume change induced by the vertical motion ! no matter, upwind or downwind ! - CFL_z(nzmin :nzmax, n)=CFL_z(nzmin :nzmax, n)+c1 - CFL_z(nzmin+1:nzmax+1, n)=c2 + CFL_z(nzmin :nzmax, n)=CFL_z(nzmin :nzmax, n)+c1(nzmin:nzmax) + CFL_z(nzmin+1:nzmax+1, n)=c2(nzmin:nzmax) end do !$OMP END PARALLEL DO From 4d025f37b4d3bfd3da693cde601a1be9714ef0c8 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 30 Nov 2021 15:50:28 +0100 Subject: [PATCH 708/909] fixing reproducibility issues with the regression tests on GitHub --- src/oce_ale.F90 | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index aeec6a56b..6c122c8f3 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2437,19 +2437,17 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end do !$OMP END PARALLEL DO -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, c1, c2) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) do n=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n)-1 do nz=nzmin,nzmax - c1(nz)=abs(Wvel(nz,n) *dt/hnode_new(nz,n)) - c2(nz)=abs(Wvel(nz+1,n)*dt/hnode_new(nz,n)) + ! strong condition: + ! total volume change induced by the vertical motion + ! no matter, upwind or downwind ! + CFL_z(nz, n)=CFL_z(nz,n)+abs(Wvel(nz,n) *dt/hnode_new(nz,n)) + CFL_z(nz+1,n)= abs(Wvel(nz+1,n)*dt/hnode_new(nz,n)) end do - ! strong condition: - ! total volume change induced by the vertical motion - ! no matter, upwind or downwind ! - CFL_z(nzmin :nzmax, n)=CFL_z(nzmin :nzmax, n)+c1(nzmin:nzmax) - CFL_z(nzmin+1:nzmax+1, n)=c2(nzmin:nzmax) end do !$OMP END PARALLEL DO @@ -2499,21 +2497,19 @@ subroutine vert_vel_ale(dynamics, partit, mesh) ! wsplit_maxcfl=0 means w_exp is zero (everything computed implicitly) ! wsplit_maxcfl=inf menas w_impl is zero (everything computed explicitly) ! a guess for optimal choice of wsplit_maxcfl would be 0.95 -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, c1, c2, dd) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, dd) do n=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) do nz=nzmin,nzmax - c1(nz)=1.0_WP - c2(nz)=0.0_WP + Wvel_e(nz,n)=Wvel(nz,n) + Wvel_i(nz,n)=0.0_WP if (dynamics%use_wsplit .and. (CFL_z(nz, n) > dynamics%wsplit_maxcfl)) then dd=max((CFL_z(nz, n)-dynamics%wsplit_maxcfl), 0.0_WP)/max(dynamics%wsplit_maxcfl, 1.e-12) - c1(nz)=1.0_WP/(1.0_WP+dd) !explicit part =1. if dd=0. - c2(nz)=dd /(1.0_WP+dd) !implicit part =1. if dd=inf + Wvel_e(nz,n)=1.0_WP/(1.0_WP+dd)*Wvel(nz,n) !explicit part =1. if dd=0. + Wvel_i(nz,n)=dd /(1.0_WP+dd)*Wvel(nz,n) !implicit part =1. if dd=inf end if end do - Wvel_e(nzmin:nzmax,n)=c1(nzmin:nzmax)*Wvel(nzmin:nzmax,n) - Wvel_i(nzmin:nzmax,n)=c2(nzmin:nzmax)*Wvel(nzmin:nzmax,n) end do !$OMP END PARALLEL DO end subroutine vert_vel_ale From 6dde8bbb71ba936b440d7135e05fa6bfda1fc70b Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 30 Nov 2021 16:01:24 +0100 Subject: [PATCH 709/909] exchange use_i_therm_param with ice derived type thermodynamic parameter in src/ice_thermo_oce.F90 --- src/ice_thermo_oce.F90 | 412 +++++++++++++++++++++++------------------ 1 file changed, 230 insertions(+), 182 deletions(-) diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index f3ad6aa7c..3cd29f07f 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -89,7 +89,7 @@ subroutine cut_off(ice, partit, mesh) end subroutine cut_off #if !defined (__oasis) && !defined (__ifsinterface) -!=================================================================== +!_______________________________________________________________________________ ! Sea-ice thermodynamics routines ! ! Coded by N. Yakovlev and S. Danilov. @@ -98,8 +98,7 @@ end subroutine cut_off ! by Ralph Timmermann. ! Adjusted for general forcing data and NlFs option, cleaned up, bug fixing, ! by Qiang Wang, 13.01.2009 -!---------------------------------------------------------------------------- - +!_______________________________________________________________________________ subroutine thermodynamics(ice, partit, mesh) ! ! For every surface node, this subroutine extracts the information @@ -113,7 +112,6 @@ subroutine thermodynamics(ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_param - use i_therm_param use g_config use g_forcing_param use g_forcing_arrays @@ -236,14 +234,14 @@ subroutine thermodynamics(ice, partit, mesh) !!PS h_ml = 1.25_WP ! 10.0 or 30. used previously fw = 0.0_WP ehf = 0.0_WP - lid_Clo=h0 + lid_Clo=ice%thermo%h0 if (geo_coord_nod2D(2,i)>0) then !TODO 2 separate pars for each hemisphere lid_clo=0.5_WP else lid_clo=0.5_WP endif - call therm_ice(h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & + call therm_ice(ice%thermo,h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & ug,ustar,T_oc,S_oc,h_ml,t,ice%ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & rsf, ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) @@ -282,59 +280,77 @@ subroutine thermodynamics(ice, partit, mesh) end do end subroutine thermodynamics ! -!=================================================================== ! -subroutine therm_ice(h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & +!_______________________________________________________________________________ +subroutine therm_ice(ithermp, h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & ug,ustar,T_oc,S_oc,H_ML,t,ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & rsf, dhgrowth, dhsngrowth, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) - ! Ice Thermodynamic growth model - ! - ! Input parameters: - !------------------ - ! h - ice mass [m] - ! hsn - snow mass [m] - ! A - ice compactness - ! fsh - shortwave radiation - ! flo - longwave radiation - ! Ta - air temperature - ! qa - specific humidity - ! rain - precipitation rain - ! snow - precipitation snow - ! runo - runoff - ! ug - wind speed - ! ustar - friction velocity - ! T_oc, S_oc - ocean temperature and salinity beneath the ice (mixed layer) - ! H_ML - mixed layer depth - should be specified. - ! t - temperature of snow/ice top surface - ! ice_dt - time step [s] - ! ch - transfer coefficient for sensible heat (for open ocean) - ! ce - transfer coefficient for evaporation (for open ocean) - ! ch_i - transfer coefficient for sensible heat (for ice) - ! ce_i - transfer coefficient for evaporation (for ice) - ! lid_clo - lid closing parameter - ! Output parameters: - !------------------- - ! h - ice mass - ! hsn - snow mass - ! A - ice compactness - ! t - temperature of snow/ice top surface - ! fw - freshwater flux due to ice melting [m water/ice_dt] - ! ehf - net heat flux at the ocean surface [W/m2] !RTnew - - use i_therm_param - use g_forcing_param, only: use_virt_salt - use o_param - implicit none + ! Ice Thermodynamic growth model + ! + ! Input parameters: + !------------------ + ! h - ice mass [m] + ! hsn - snow mass [m] + ! A - ice compactness + ! fsh - shortwave radiation + ! flo - longwave radiation + ! Ta - air temperature + ! qa - specific humidity + ! rain - precipitation rain + ! snow - precipitation snow + ! runo - runoff + ! ug - wind speed + ! ustar - friction velocity + ! T_oc, S_oc - ocean temperature and salinity beneath the ice (mixed layer) + ! H_ML - mixed layer depth - should be specified. + ! t - temperature of snow/ice top surface + ! ice_dt - time step [s] + ! ch - transfer coefficient for sensible heat (for open ocean) + ! ce - transfer coefficient for evaporation (for open ocean) + ! ch_i - transfer coefficient for sensible heat (for ice) + ! ce_i - transfer coefficient for evaporation (for ice) + ! lid_clo - lid closing parameter + ! Output parameters: + !------------------- + ! h - ice mass + ! hsn - snow mass + ! A - ice compactness + ! t - temperature of snow/ice top surface + ! fw - freshwater flux due to ice melting [m water/ice_dt] + ! ehf - net heat flux at the ocean surface [W/m2] !RTnew - integer k - real(kind=WP) h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,evap_in - real(kind=WP) ug,ustar,T_oc,S_oc,H_ML,t,ice_dt,ch,ce,ch_i,ce_i,fw,ehf - real(kind=WP) dhgrowth,dhsngrowth,ahf,prec,subli,subli_i,rsf - real(kind=WP) rhow,show,rhice,shice,sh,thick,thact,lat - real(kind=WP) rh,rA,qhst,sn,hsntmp,o2ihf,evap - real(kind=WP) iflice,hflatow,hfsenow,hflwrdout - real(kind=WP), external :: TFrez ! Sea water freeze temperature. - real(kind=WP) lid_clo + USE MOD_ICE + use g_forcing_param, only: use_virt_salt + use o_param + implicit none + type(t_ice_thermo), intent(in), target :: ithermp + integer k + real(kind=WP) h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,evap_in + real(kind=WP) ug,ustar,T_oc,S_oc,H_ML,t,ice_dt,ch,ce,ch_i,ce_i,fw,ehf + real(kind=WP) dhgrowth,dhsngrowth,ahf,prec,subli,subli_i,rsf + real(kind=WP) rhow,show,rhice,shice,sh,thick,thact,lat + real(kind=WP) rh,rA,qhst,sn,hsntmp,o2ihf,evap + real(kind=WP) iflice,hflatow,hfsenow,hflwrdout + real(kind=WP), external :: TFrez ! Sea water freeze temperature. + real(kind=WP) lid_clo + !___________________________________________________________________________ + real(kind=WP), pointer :: hmin, Sice, Armin, cc, cl, con, consn, rhosno, rhoice, inv_rhowat, inv_rhosno + integer , pointer :: iclasses + hmin => ithermp%hmin + Armin => ithermp%Armin + Sice => ithermp%Sice + cc => ithermp%cc + cl => ithermp%cl + con => ithermp%con + consn => ithermp%consn + iclasses => ithermp%iclasses + rhosno => ithermp%rhosno + rhoice => ithermp%rhoice + inv_rhowat => ithermp%inv_rhowat + inv_rhosno => ithermp%inv_rhosno + + !___________________________________________________________________________ + ! Store ice thickness at start of growth routine dhgrowth=h @@ -349,7 +365,7 @@ subroutine therm_ice(h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & ! Growth rate for ice in open ocean rhow=0.0_WP evap=0.0_WP - call obudget(qa,fsh,flo,T_oc,ug,ta,ch,ce,rhow,evap,hflatow,hfsenow,hflwrdout) + call obudget(ithermp, qa,fsh,flo,T_oc,ug,ta,ch,ce,rhow,evap,hflatow,hfsenow,hflwrdout) hflatow=hflatow*(1.0_WP-A) hfsenow=hfsenow*(1.0_WP-A) hflwrdout=hflwrdout*(1.0_WP-A) @@ -495,7 +511,7 @@ subroutine therm_ice(h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & ! Flooding (snow to ice conversion) iflice=h - call flooding(h,hsn) + call flooding(ithermp, h, hsn) iflice=(h-iflice)/ice_dt ! to maintain salt conservation for the current model version @@ -510,45 +526,61 @@ subroutine therm_ice(h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & end subroutine therm_ice ! -!===================================================================================== ! -subroutine budget (hice,hsn,t,ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,fh,subli) - ! Thick ice growth rate [m ice/sec] - ! - ! INPUT: - ! hice - actual ice thickness [m] - ! hsn - snow thickness, used for albedo parameterization [m] - ! t - temperature of snow/ice surface [C] - ! ta - air temperature [C] - ! qa - specific humidity [Kg/Kg] - ! fsh - shortwave radiation [W/m**2] - ! flo - longwave radiation [W/m**2] - ! ug - wind speed [m/sec] - ! S_oc - ocean salinity for the temperature of the ice base calculation [ppt] - ! ch_i - transfer coefficient for sensible heat (for ice) - ! ce_i - transfer coefficient for evaporation (for ice) - ! - ! OUTPUT: fh - growth rate - ! - ! qiang: The formular for saturated humidity was modified according to Large/Yeager2004 - ! to allow direct comparison with the CORE results (Griffies et al. 2009). The new - ! formular does not require sea level pressure. - ! A similar change was also made for the obudget routine. - ! It was found through experiments that the results are quite similar to that from the - ! original code, and the simulated ice volume is only slightly larger after modification. - - use i_therm_param - use o_param, only: WP - implicit none - - integer iter, imax ! Number of iterations - real(kind=WP) hice,hsn,t,ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,fh - real(kind=WP) hfsen,hfrad,hflat,hftot,subli - real(kind=WP) alb ! Albedo of sea ice - real(kind=WP) q1, q2 ! coefficients for saturated specific humidity - real(kind=WP) A1,A2,A3,B,C, d1, d2, d3 - real(kind=WP), external :: TFrez - +!_______________________________________________________________________________ +subroutine budget (ithermp, hice,hsn,t,ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,fh,subli) + ! Thick ice growth rate [m ice/sec] + ! + ! INPUT: + ! hice - actual ice thickness [m] + ! hsn - snow thickness, used for albedo parameterization [m] + ! t - temperature of snow/ice surface [C] + ! ta - air temperature [C] + ! qa - specific humidity [Kg/Kg] + ! fsh - shortwave radiation [W/m**2] + ! flo - longwave radiation [W/m**2] + ! ug - wind speed [m/sec] + ! S_oc - ocean salinity for the temperature of the ice base calculation [ppt] + ! ch_i - transfer coefficient for sensible heat (for ice) + ! ce_i - transfer coefficient for evaporation (for ice) + ! + ! OUTPUT: fh - growth rate + ! + ! qiang: The formular for saturated humidity was modified according to Large/Yeager2004 + ! to allow direct comparison with the CORE results (Griffies et al. 2009). The new + ! formular does not require sea level pressure. + ! A similar change was also made for the obudget routine. + ! It was found through experiments that the results are quite similar to that from the + ! original code, and the simulated ice volume is only slightly larger after modification. + use MOD_ICE + use o_param, only: WP + implicit none + type(t_ice_thermo), intent(in), target :: ithermp + integer iter, imax ! Number of iterations + real(kind=WP) hice,hsn,t,ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,fh + real(kind=WP) hfsen,hfrad,hflat,hftot,subli + real(kind=WP) alb ! Albedo of sea ice + real(kind=WP) q1, q2 ! coefficients for saturated specific humidity + real(kind=WP) A1,A2,A3,B,C, d1, d2, d3 + real(kind=WP), external :: TFrez + !___________________________________________________________________________ + real(kind=WP), pointer :: boltzmann, emiss_ice, tmelt, cl, clhi, con, cpair, & + inv_rhowat, inv_rhoair, rhoair, albim, albi, albsn, albsnm + boltzmann => ithermp%boltzmann + emiss_ice => ithermp%emiss_ice + tmelt => ithermp%tmelt + cl => ithermp%cl + clhi => ithermp%clhi + con => ithermp%con + cpair => ithermp%cpair + inv_rhowat => ithermp%inv_rhowat + inv_rhoair => ithermp%inv_rhoair + rhoair => ithermp%rhoair + albim => ithermp%albim + albi => ithermp%albi + albsn => ithermp%albsn + albsnm => ithermp%albsnm + !___________________________________________________________________________ !!PS data q1 /11637800.0/, q2 /-5897.8/ !!PS data imax /5/ @@ -615,109 +647,125 @@ subroutine budget (hice,hsn,t,ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,fh,subli) return end subroutine budget ! -!====================================================================================== ! -subroutine obudget (qa,fsh,flo,t,ug,ta,ch,ce,fh,evap,hflatow,hfsenow,hflwrdout) - ! Ice growth rate for open ocean [m ice/sec] - ! - ! INPUT: - ! t - temperature of open water [C] - ! fsh - shortwave radiation - ! flo - longwave radiation - ! ta - air temperature [C] - ! qa - specific humidity - ! ug - wind speed [m/sec] - ! ch - transfer coefficient for sensible heat - ! ce - transfer coefficient for evaporation - ! - ! OUTPUT: fh - growth rate - ! evap - evaporation - - use i_therm_param - use o_param, only: WP - implicit none - - real(kind=WP) qa,t,ta,fsh,flo,ug,ch,ce,fh,evap - real(kind=WP) hfsenow,hfradow,hflatow,hftotow,hflwrdout,b - real(kind=WP) q1, q2 ! coefficients for saturated specific humidity - real(kind=WP) c1, c4, c5 - logical :: standard_saturation_shum_formula = .true. - integer :: ii - - !data c1, c4, c5 /3.8e-3, 17.67, 243.5/ -!!PS data c1, c4, c5 /3.8e-3, 17.27, 237.3/ -!!PS data q1 /640380./, q2 /-5107.4/ - - c1 = 3.8e-3_WP - c4 = 17.27_WP - c5 = 237.3_WP - q1 = 640380._WP - q2 = -5107.4_WP - - ! (saturated) surface specific humidity - if(standard_saturation_shum_formula) then - b=c1*exp(c4*t/(t+c5)) ! a standard one - else - b=0.98_WP*q1*inv_rhoair*exp(q2/(t+tmelt)) ! LY2004 NCAR version - end if - - ! radiation heat fluxe [W/m**2]: - hfradow= (1.0_WP-albw)*fsh & ! absorbed short wave radiation - +flo ! long wave radiation coming in !put emiss/check - hflwrdout=-emiss_wat*boltzmann*((t+tmelt)**4) ! long wave radiation going out !in LY2004 emiss=1 - hfradow=hfradow+hflwrdout - - ! sensible heat fluxe [W/m**2]: - hfsenow=rhoair*cpair*ch*ug*(ta-t) ! sensible heat - - ! latent heat fluxe [W/m**2]: - evap =rhoair*ce*ug*(qa-b) ! evaporation kg/m2/s - hflatow=clhw*evap ! latent heat W/m2 +!_______________________________________________________________________________ +subroutine obudget (ithermp, qa,fsh,flo,t,ug,ta,ch,ce,fh,evap,hflatow,hfsenow,hflwrdout) + ! Ice growth rate for open ocean [m ice/sec] + ! + ! INPUT: + ! t - temperature of open water [C] + ! fsh - shortwave radiation + ! flo - longwave radiation + ! ta - air temperature [C] + ! qa - specific humidity + ! ug - wind speed [m/sec] + ! ch - transfer coefficient for sensible heat + ! ce - transfer coefficient for evaporation + ! + ! OUTPUT: fh - growth rate + ! evap - evaporation + use MOD_ICE + use o_param, only: WP + implicit none + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) qa,t,ta,fsh,flo,ug,ch,ce,fh,evap + real(kind=WP) hfsenow,hfradow,hflatow,hftotow,hflwrdout,b + real(kind=WP) q1, q2 ! coefficients for saturated specific humidity + real(kind=WP) c1, c4, c5 + logical :: standard_saturation_shum_formula = .true. + integer :: ii + !___________________________________________________________________________ + real(kind=WP), pointer :: boltzmann, emiss_wat, inv_rhowat, inv_rhoair, rhoair, & + tmelt, cl, clhw, cpair, albw + boltzmann => ithermp%boltzmann + emiss_wat => ithermp%emiss_wat + inv_rhowat => ithermp%inv_rhowat + inv_rhoair => ithermp%inv_rhoair + rhoair => ithermp%rhoair + tmelt => ithermp%tmelt + cl => ithermp%cl + clhw => ithermp%clhw + cpair => ithermp%cpair + albw => ithermp%albw + + !___________________________________________________________________________ + c1 = 3.8e-3_WP + c4 = 17.27_WP + c5 = 237.3_WP + q1 = 640380._WP + q2 = -5107.4_WP + + ! (saturated) surface specific humidity + if(standard_saturation_shum_formula) then + b=c1*exp(c4*t/(t+c5)) ! a standard one + else + b=0.98_WP*q1*inv_rhoair*exp(q2/(t+tmelt)) ! LY2004 NCAR version + end if + + ! radiation heat fluxe [W/m**2]: + hfradow= (1.0_WP-albw)*fsh & ! absorbed short wave radiation + +flo ! long wave radiation coming in !put emiss/check + hflwrdout=-emiss_wat*boltzmann*((t+tmelt)**4) ! long wave radiation going out !in LY2004 emiss=1 + hfradow=hfradow+hflwrdout + + ! sensible heat fluxe [W/m**2]: + hfsenow=rhoair*cpair*ch*ug*(ta-t) ! sensible heat + + ! latent heat fluxe [W/m**2]: + evap =rhoair*ce*ug*(qa-b) ! evaporation kg/m2/s + hflatow=clhw*evap ! latent heat W/m2 - ! total heat fluxe [W/m**2]: - hftotow=hfradow+hfsenow+hflatow ! total heat W/m2 - - fh= -hftotow/cl ! growth rate [m ice/sec] - ! +: ML gains energy, ice melts - ! -: ML loses energy, ice grows - evap=evap*inv_rhowat ! evaporation rate [m water/s],negative up !!! + ! total heat fluxe [W/m**2]: + hftotow=hfradow+hfsenow+hflatow ! total heat W/m2 + + fh= -hftotow/cl ! growth rate [m ice/sec] + ! +: ML gains energy, ice melts + ! -: ML loses energy, ice grows + evap=evap*inv_rhowat ! evaporation rate [m water/s],negative up !!! - return + return end subroutine obudget ! -!====================================================================================== ! -subroutine flooding (h,hsn) - use i_therm_param - - real(kind=WP) h,hsn,hdraft,hflood +!_______________________________________________________________________________ +subroutine flooding (ithermp, h, hsn) + use MOD_ICE + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) h,hsn,hdraft,hflood + !___________________________________________________________________________ + real(kind=WP), pointer :: inv_rhowat, inv_rhosno, rhoice, rhosno + inv_rhowat => ithermp%inv_rhowat + inv_rhosno => ithermp%inv_rhosno + rhoice => ithermp%rhoice + rhosno => ithermp%rhosno - hdraft=(rhosno*hsn+h*rhoice)*inv_rhowat ! Archimedes: displaced water - hflood=hdraft-min(hdraft,h) ! Increase in mean ice thickness due to flooding - h=h+hflood ! Add converted snow to ice volume - hsn=hsn-hflood*rhoice*inv_rhosno ! Subtract snow from snow layer + !___________________________________________________________________________ + hdraft=(rhosno*hsn+h*rhoice)*inv_rhowat ! Archimedes: displaced water + hflood=hdraft-min(hdraft,h) ! Increase in mean ice thickness due to flooding + h=h+hflood ! Add converted snow to ice volume + hsn=hsn-hflood*rhoice*inv_rhosno ! Subtract snow from snow layer - !RT This is what all AWI sea ice models do, but - !RT I wonder whether it really is correct for the heat budget. - !RT I suggest we initially keep it to allow for a comparison with BRIOS results - !RT and rethink it at a later stage. + !RT This is what all AWI sea ice models do, but + !RT I wonder whether it really is correct for the heat budget. + !RT I suggest we initially keep it to allow for a comparison with BRIOS results + !RT and rethink it at a later stage. - return + return end subroutine flooding ! -!====================================================================================== ! +!_______________________________________________________________________________ function TFrez(S) - ! Nonlinear correlation for the water freezing temperature. - ! Millero (1978) - UNESCO. Reference - See A. Gill, 1982. - use o_param, only: WP - implicit none - real(kind=WP) :: S, TFrez + ! Nonlinear correlation for the water freezing temperature. + ! Millero (1978) - UNESCO. Reference - See A. Gill, 1982. + use o_param, only: WP + implicit none + real(kind=WP) :: S, TFrez - TFrez= -0.0575_WP*S+1.7105e-3_WP *sqrt(S**3)-2.155e-4_WP *S*S + TFrez= -0.0575_WP*S+1.7105e-3_WP *sqrt(S**3)-2.155e-4_WP *S*S end function TFrez ! -!====================================================================================== ! +!_______________________________________________________________________________ #endif /* #if !defined (__oasis) && !defined (__ifsinterface) */ From 584eba5f414add1c0cb196a35bb4bd5b1aff5534 Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Tue, 30 Nov 2021 16:08:44 +0100 Subject: [PATCH 710/909] Combine loops, when not using OpenMP --- src/oce_adv_tra_driver.F90 | 8 ++++---- src/oce_adv_tra_fct.F90 | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 87336224a..3a3a49b0d 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -140,12 +140,12 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, #endif do nz=nu12, nl12 fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e) - end do #if defined(_OPENMP) + end do call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock (partit%plock(enodes(2))) -#endif do nz=nu12, nl12 +#endif fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e) end do #if defined(_OPENMP) @@ -306,12 +306,12 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, #endif do nz=nu12, nl12 dttf_h(nz,enodes(1))=dttf_h(nz,enodes(1))+flux_h(nz,edge)*dt/areasvol(nz,enodes(1)) - end do #if defined(_OPENMP) + end do call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock (partit%plock(enodes(2))) -#endif do nz=nu12, nl12 +#endif dttf_h(nz,enodes(2))=dttf_h(nz,enodes(2))-flux_h(nz,edge)*dt/areasvol(nz,enodes(2)) end do #if defined(_OPENMP) diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 6ee03a739..1d599d60f 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -231,12 +231,12 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, do nz=nu12, nl12 fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) - end do #if defined(_OPENMP) + end do call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock (partit%plock(enodes(2))) -#endif do nz=nu12, nl12 +#endif fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) end do From b88a2e51d952c151f8bbe2c3e9d2c9ae7b8a4bb8 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 30 Nov 2021 16:16:34 +0100 Subject: [PATCH 711/909] reproducibility with the master branch fixed! R=ABS(XYZ); V=V+R is not equal (up to the rounding error) to V=V+ABS(XYZ) --- src/oce_ale.F90 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 6c122c8f3..bd1374256 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2442,11 +2442,13 @@ subroutine vert_vel_ale(dynamics, partit, mesh) nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n)-1 do nz=nzmin,nzmax + c1(1)=abs(Wvel(nz,n) *dt/hnode_new(nz,n)) !c1->c1(1) is made for the sake of reproducibility with the master branch (rounding error) + c2(1)=abs(Wvel(nz+1,n)*dt/hnode_new(nz,n)) !otherwise just add these terms (c(1) & c(2)) to CFL_z, respectively! ! strong condition: ! total volume change induced by the vertical motion ! no matter, upwind or downwind ! - CFL_z(nz, n)=CFL_z(nz,n)+abs(Wvel(nz,n) *dt/hnode_new(nz,n)) - CFL_z(nz+1,n)= abs(Wvel(nz+1,n)*dt/hnode_new(nz,n)) + CFL_z(nz, n)=CFL_z(nz,n)+c1(1) + CFL_z(nz+1,n)= c2(1) end do end do !$OMP END PARALLEL DO From f1fd825f9f3342aeecdc99cd718f8b88c8180804 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 30 Nov 2021 16:41:43 +0100 Subject: [PATCH 712/909] removing the deadlock from the rest of voscosity filters --- src/oce_dyn.F90 | 114 +++++++++++++++++++++++++----------------------- 1 file changed, 59 insertions(+), 55 deletions(-) diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 142fb2016..5353e2109 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -229,8 +229,8 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ real(kind=8) :: u1, v1, len, vi - real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax + real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs @@ -275,12 +275,6 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) max(dynamics%visc_gamma1*sqrt(u1*u1+v1*v1), & dynamics%visc_gamma2*(u1*u1+v1*v1)) & )*len -! vi=dt*max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(u1*u1+v1*v1), dynamics%visc_gamma2*(u1*u1+v1*v1)))*len - !here dynamics%visc_gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. -! U_b(nz,el(1))=U_b(nz,el(1))-u1/elem_area(el(1)) -! V_b(nz,el(1))=V_b(nz,el(1))-v1/elem_area(el(1)) -! U_b(nz,el(2))=U_b(nz,el(2))+u1/elem_area(el(2)) -! V_b(nz,el(2))=V_b(nz,el(2))+v1/elem_area(el(2)) update_u(nz)=u1*vi update_v(nz)=v1*vi END DO @@ -298,7 +292,6 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) #if defined(_OPENMP) call omp_unset_lock(partit%plock(el(2))) #endif - END DO !$OMP END DO !$OMP MASTER @@ -370,6 +363,7 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) !___________________________________________________________________________ real(kind=8) :: u1, v1, vi, len integer :: ed, el(2), elem, nz, nzmin, nzmax + real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs @@ -398,23 +392,25 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) el=edge_tri(:,ed) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) -#if defined(_OPENMP) - call omp_set_lock(partit%plock(el(1))) - call omp_set_lock(partit%plock(el(2))) -#endif DO nz=nzmin,nzmax-1 - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 + update_u(nz)=(UV(1,nz,el(1))-UV(1,nz,el(2))) + update_v(nz)=(UV(2,nz,el(1))-UV(2,nz,el(2))) END DO + END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(2))) - call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock(partit%plock(el(1))) +#endif + U_c(nzmin:nzmax-1, el(1))=U_c(nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1) + V_c(nzmin:nzmax-1, el(1))=V_c(nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock (partit%plock(el(2))) +#endif + U_c(nzmin:nzmax-1, el(2))=U_c(nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1) + V_c(nzmin:nzmax-1, el(2))=V_c(nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(2))) #endif - END DO !$OMP END DO !$OMP DO DO ed=1,myDim_elem2D @@ -445,22 +441,26 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) el=edge_tri(:,ed) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) + DO nz=nzmin,nzmax-1 + update_u(nz)=(U_c(nz,el(1))-U_c(nz,el(2))) + update_v(nz)=(V_c(nz,el(1))-V_c(nz,el(2))) + END DO #if defined(_OPENMP) call omp_set_lock(partit%plock(el(1))) - call omp_set_lock(partit%plock(el(2))) #endif - DO nz=nzmin,nzmax-1 - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO + UV_rhs(1, nzmin:nzmax-1, el(1))=UV_rhs(1, nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1)/elem_area(el(1)) + UV_rhs(2, nzmin:nzmax-1, el(1))=UV_rhs(2, nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1)/elem_area(el(1)) #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(2))) call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock (partit%plock(el(2))) #endif + UV_rhs(1, nzmin:nzmax-1, el(2))=UV_rhs(1, nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) + UV_rhs(2, nzmin:nzmax-1, el(2))=UV_rhs(2, nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1)/elem_area(el(2)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(2))) +#endif +!$OMP END DO +!$OMP DO END DO !$OMP END DO !$OMP END PARALLEL @@ -489,6 +489,7 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) !___________________________________________________________________________ real(kind=8) :: u1, v1, len, vi integer :: ed, el(2), nz, nzmin, nzmax, elem + real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs @@ -517,10 +518,6 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) len=sqrt(sum(elem_area(el))) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) -#if defined(_OPENMP) - call omp_set_lock(partit%plock(el(1))) - call omp_set_lock(partit%plock(el(2))) -#endif DO nz=nzmin,nzmax-1 u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) @@ -530,17 +527,24 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) dynamics%visc_gamma2*vi) & )*len) ! vi=sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) - u1=u1*vi - v1=v1*vi - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 + update_u(nz)=u1*vi + update_v(nz)=v1*vi END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(2))) + call omp_set_lock(partit%plock(el(1))) +#endif + U_c(nzmin:nzmax-1, el(1))=U_c(nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1) + V_c(nzmin:nzmax-1, el(1))=V_c(nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1) +#if defined(_OPENMP) call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock (partit%plock(el(2))) +#endif + U_c(nzmin:nzmax-1, el(2))=U_c(nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1) + V_c(nzmin:nzmax-1, el(2))=V_c(nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(2))) #endif + END DO !$OMP END DO !$OMP MASTER @@ -555,10 +559,6 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) len=sqrt(sum(elem_area(el))) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) -#if defined(_OPENMP) - call omp_set_lock(partit%plock(el(1))) - call omp_set_lock(partit%plock(el(2))) -#endif DO nz=nzmin,nzmax-1 u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) @@ -568,18 +568,22 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) dynamics%visc_gamma2*vi) & )*len) ! vi=-dt*sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) - u1=vi*(U_c(nz,el(1))-U_c(nz,el(2))) - v1=vi*(V_c(nz,el(1))-V_c(nz,el(2))) - - - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) + update_u(nz)=vi*(U_c(nz,el(1))-U_c(nz,el(2))) + update_v(nz)=vi*(V_c(nz,el(1))-V_c(nz,el(2))) END DO #if defined(_OPENMP) - call omp_unset_lock(partit%plock(el(2))) + call omp_set_lock(partit%plock(el(1))) +#endif + UV_rhs(1, nzmin:nzmax-1, el(1))=UV_rhs(1, nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1)/elem_area(el(1)) + UV_rhs(2, nzmin:nzmax-1, el(1))=UV_rhs(2, nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1)/elem_area(el(1)) +#if defined(_OPENMP) call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock (partit%plock(el(2))) +#endif + UV_rhs(1, nzmin:nzmax-1, el(2))=UV_rhs(2, nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) + UV_rhs(2, nzmin:nzmax-1, el(2))=UV_rhs(2, nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1)/elem_area(el(2)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(el(2))) #endif END DO !$OMP END DO From 77047123b979539cc768064d6f7fbccc2bd251b5 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Tue, 30 Nov 2021 17:02:46 +0100 Subject: [PATCH 713/909] Update oce_ale.F90 --- src/oce_ale.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index bd1374256..8db33879b 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2508,8 +2508,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) Wvel_i(nz,n)=0.0_WP if (dynamics%use_wsplit .and. (CFL_z(nz, n) > dynamics%wsplit_maxcfl)) then dd=max((CFL_z(nz, n)-dynamics%wsplit_maxcfl), 0.0_WP)/max(dynamics%wsplit_maxcfl, 1.e-12) - Wvel_e(nz,n)=1.0_WP/(1.0_WP+dd)*Wvel(nz,n) !explicit part =1. if dd=0. - Wvel_i(nz,n)=dd /(1.0_WP+dd)*Wvel(nz,n) !implicit part =1. if dd=inf + Wvel_e(nz,n)=(1.0_WP/(1.0_WP+dd))*Wvel(nz,n) !explicit part =1. if dd=0. + Wvel_i(nz,n)=(dd /(1.0_WP+dd))*Wvel(nz,n) !implicit part =1. if dd=inf end if end do end do From 7865a280426ffa1e79c79aab27eb83af2af205b6 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 30 Nov 2021 17:06:57 +0100 Subject: [PATCH 714/909] OMP bug fixes in oce_dyn.F90 --- src/oce_dyn.F90 | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 5353e2109..1fc6c126e 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -396,7 +396,6 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) update_u(nz)=(UV(1,nz,el(1))-UV(1,nz,el(2))) update_v(nz)=(UV(2,nz,el(1))-UV(2,nz,el(2))) END DO - END DO #if defined(_OPENMP) call omp_set_lock(partit%plock(el(1))) #endif @@ -411,7 +410,9 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) #if defined(_OPENMP) call omp_unset_lock(partit%plock(el(2))) #endif + END DO !$OMP END DO + !$OMP DO DO ed=1,myDim_elem2D len=sqrt(elem_area(ed)) @@ -459,8 +460,6 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) #if defined(_OPENMP) call omp_unset_lock(partit%plock(el(2))) #endif -!$OMP END DO -!$OMP DO END DO !$OMP END DO !$OMP END PARALLEL From a7f8c5e6c7b81350d5e845a40773d38da8645ded Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 30 Nov 2021 17:14:23 +0100 Subject: [PATCH 715/909] fix bug in interfaces in ../src/ice_thermo_oce.F90 --- src/ice_thermo_oce.F90 | 46 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 3cd29f07f..b7a22002b 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -22,8 +22,48 @@ subroutine cut_off(ice, partit, mesh) end interface end module +module ice_therm_interface + interface + subroutine therm_ice(ithermp, h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & + ug,ustar,T_oc,S_oc,H_ML,t,ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & + rsf, dhgrowth, dhsngrowth, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) + USE MOD_ICE + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) h, hsn, A, fsh, flo, Ta, qa, rain, snow, runo, rsss, evap_in, & + ug, ustar, T_oc, S_oc, H_ML, t, ice_dt, ch, ce, ch_i, ce_i, fw, ehf, & + dhgrowth, dhsngrowth, ahf, prec, subli, subli_i, rsf, & + rhow, show, rhice, shice, sh, thick, thact, lat, & + rh, rA, qhst, sn, hsntmp, o2ihf, evap, iflice, hflatow, & + hfsenow, hflwrdout, lid_clo + end subroutine + end interface +end module -!=================================================================== +module ice_budget_interfaces + interface + subroutine budget(ithermp, hice, hsn, t, ta, qa, fsh, flo, ug, S_oc, ch_i, ce_i, fh, subli) + USE MOD_ICE + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) hice, hsn, t, ta, qa, fsh, flo, ug, S_oc, ch_i, ce_i, fh, subli + end subroutine + + subroutine obudget(ithermp, qa, fsh, flo, t, ug, ta, ch, ce, fh, evap, hflatow, hfsenow, hflwrdout) + USE MOD_ICE + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) qa, t, ta, fsh, flo, ug, ch, ce, fh, evap, hfsenow, & + hfradow, hflatow, hftotow, hflwrdout + end subroutine + + subroutine flooding(ithermp, h, hsn) + USE MOD_ICE + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) h, hsn + end subroutine + end interface +end module +! +! +!_______________________________________________________________________________ subroutine cut_off(ice, partit, mesh) use o_param use MOD_MESH @@ -117,6 +157,7 @@ subroutine thermodynamics(ice, partit, mesh) use g_forcing_arrays use g_comm_auto use g_sbf, only: l_snow + use ice_therm_interface implicit none type(t_ice), intent(inout), target :: ice type(t_mesh), intent(in), target :: mesh @@ -322,6 +363,7 @@ subroutine therm_ice(ithermp, h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & USE MOD_ICE use g_forcing_param, only: use_virt_salt use o_param + use ice_budget_interfaces implicit none type(t_ice_thermo), intent(in), target :: ithermp integer k @@ -382,7 +424,7 @@ subroutine therm_ice(ithermp, h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & if (thick.gt.hmin) then do k=1,iclasses thact = real((2*k-1),WP)*thick/real(iclasses,WP) ! Thicknesses of actual ice class - call budget(thact,hsn,t,Ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,shice,subli_i) + call budget(ithermp, thact, hsn,t,Ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,shice,subli_i) !Thick ice K-class growth rate rhice=rhice+shice ! Add to average heat flux subli=subli+subli_i From f44fff9d70aa15d16ea83141635b3eabec28bf6e Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 30 Nov 2021 17:40:10 +0100 Subject: [PATCH 716/909] clean up ../src/ice_thermo_oce.F90 a bit --- src/ice_thermo_oce.F90 | 809 ++++++++++++++++++++--------------------- 1 file changed, 404 insertions(+), 405 deletions(-) diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index b7a22002b..5ae98e19a 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -147,185 +147,186 @@ subroutine thermodynamics(ice, partit, mesh) ! variables. !------------------------------------------------------------------------ - USE MOD_ICE - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_MESH - use o_param - use g_config - use g_forcing_param - use g_forcing_arrays - use g_comm_auto - use g_sbf, only: l_snow - use ice_therm_interface - implicit none - type(t_ice), intent(inout), target :: ice - type(t_mesh), intent(in), target :: mesh - type(t_partit), intent(inout), target :: partit - !_____________________________________________________________________________ - real(kind=WP) :: h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,rsf,evap_in - real(kind=WP) :: ug,ustar,T_oc,S_oc,h_ml,t,ch,ce,ch_i,ce_i,fw,ehf,evap - real(kind=WP) :: ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout, subli - real(kind=WP) :: lat - integer :: i, j, elem - !!PS real(kind=WP), allocatable :: ustar_aux(:) - real(kind=WP) lid_clo - !_____________________________________________________________________________ - ! pointer on necessary derived types - integer, pointer :: myDim_nod2D, eDim_nod2D - integer, dimension(:), pointer :: ulevels_nod2D - real(kind=WP), dimension(:,:),pointer :: geo_coord_nod2D - real(kind=WP), dimension(:), pointer :: u_ice, v_ice - real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow - real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old - real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn, thdgr_old, t_skin, ustar_aux - real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w - real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux -! real(kind=WP), dimension(:) , pointer :: net_heat_flux - myDim_nod2d=>partit%myDim_nod2D - eDim_nod2D =>partit%eDim_nod2D - ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D - geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D - - u_ice => ice%uice(:) - v_ice => ice%vice(:) - a_ice => ice%data(1)%values(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) - a_ice_old => ice%data(1)%values_old(:) - m_ice_old => ice%data(2)%values_old(:) - m_snow_old => ice%data(3)%values_old(:) - thdgr => ice%thermo%thdgr - thdgrsn => ice%thermo%thdgrsn - thdgr_old => ice%thermo%thdgr_old - t_skin => ice%thermo%t_skin - ustar_aux => ice%thermo%ustar - u_w => ice%srfoce_u(:) - v_w => ice%srfoce_v(:) - T_oc_array => ice%srfoce_temp(:) - S_oc_array => ice%srfoce_salt(:) - net_heat_flux => ice%flx_h(:) - fresh_wa_flux => ice%flx_fw(:) - - !_____________________________________________________________________________ - rsss=ref_sss + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use g_config + use g_forcing_param + use g_forcing_arrays + use g_comm_auto + use g_sbf, only: l_snow + use ice_therm_interface + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + !_____________________________________________________________________________ + integer :: i, j, elem + real(kind=WP) :: h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,rsf,evap_in + real(kind=WP) :: ug,ustar,T_oc,S_oc,h_ml,t,ch,ce,ch_i,ce_i,fw,ehf,evap + real(kind=WP) :: ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout, subli + real(kind=WP) :: lid_clo + real(kind=WP) :: lat + + !_____________________________________________________________________________ + ! pointer on necessary derived types + integer , pointer :: myDim_nod2D, eDim_nod2D + integer , dimension(:) , pointer :: ulevels_nod2D + real(kind=WP), dimension(:,:), pointer :: geo_coord_nod2D + real(kind=WP), dimension(:) , pointer :: u_ice, v_ice + real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: a_ice_old, m_ice_old, m_snow_old + real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn, thdgr_old, t_skin, ustar_aux + real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + myDim_nod2d => partit%myDim_nod2D + eDim_nod2D => partit%eDim_nod2D + ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D + geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) + thdgr => ice%thermo%thdgr + thdgrsn => ice%thermo%thdgrsn + thdgr_old => ice%thermo%thdgr_old + t_skin => ice%thermo%t_skin + ustar_aux => ice%thermo%ustar + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + T_oc_array => ice%srfoce_temp(:) + S_oc_array => ice%srfoce_salt(:) + net_heat_flux => ice%flx_h(:) + fresh_wa_flux => ice%flx_fw(:) - ! u_ice and v_ice are at nodes - ! u_w, v_w are at nodes (interpolated from elements) - ! u_wind and v_wind are always at nodes - ! ================ - ! Friction velocity - ! ================ -! allocate(ustar_aux(myDim_nod2D+eDim_nod2D)) + !___________________________________________________________________________ + rsss=ref_sss + + ! u_ice and v_ice are at nodes + ! u_w, v_w are at nodes (interpolated from elements) + ! u_wind and v_wind are always at nodes + !___________________________________________________________________________ + ! Friction velocity ustar_aux=0.0_WP - DO i=1, myDim_nod2D - ustar=0.0_WP - if(ulevels_nod2d(i)>1) cycle - ustar=((u_ice(i)-u_w(i))**2+ & - (v_ice(i)-v_w(i))**2) - ustar_aux(i)=sqrt(ustar*ice%cd_oce_ice) - END DO - call exchange_nod(ustar_aux, partit) - ! ================ - ! end: friction velocity - ! ================ - - do i=1, myDim_nod2d+eDim_nod2D - !__________________________________________________________________________ - ! if there is a cavity no sea ice thermodynamics is apllied - if(ulevels_nod2d(i)>1) cycle - - !__________________________________________________________________________ - h = m_ice(i) - hsn = m_snow(i) - A = a_ice(i) - fsh = shortwave(i) - flo = longwave(i) - Ta = Tair(i) - qa = shum(i) - if (.not. l_snow) then - if (Ta>=0.0_WP) then - rain=prec_rain(i) - snow=0.0_WP + do i=1, myDim_nod2D + ustar=0.0_WP + if(ulevels_nod2d(i)>1) cycle + ustar=((u_ice(i)-u_w(i))**2 + (v_ice(i)-v_w(i))**2) + ustar_aux(i)=sqrt(ustar*ice%cd_oce_ice) + end do + call exchange_nod(ustar_aux, partit) + + !___________________________________________________________________________ + do i=1, myDim_nod2d+eDim_nod2D + !_______________________________________________________________________ + ! if there is a cavity no sea ice thermodynamics is apllied + if(ulevels_nod2d(i)>1) cycle + + !_______________________________________________________________________ + ! prepare inputs for ice thermodynamics step + h = m_ice(i) + hsn = m_snow(i) + A = a_ice(i) + fsh = shortwave(i) + flo = longwave(i) + Ta = Tair(i) + qa = shum(i) + if (.not. l_snow) then + if (Ta>=0.0_WP) then + rain=prec_rain(i) + snow=0.0_WP + else + rain=0.0_WP + snow=prec_rain(i) + endif + evap_in=evaporation(i) !evap_in: positive up + !!PS evap_in=0.0_WP + else + rain = prec_rain(i) + snow = prec_snow(i) + evap_in=0.0_WP + end if + runo = runoff(i) + ug = sqrt(u_wind(i)**2+v_wind(i)**2) + ustar = ustar_aux(i) + T_oc = T_oc_array(i) + S_oc = S_oc_array(i) + if(ref_sss_local) rsss = S_oc + t = t_skin(i) + ch = Ch_atm_oce_arr(i) + ce = Ce_atm_oce_arr(i) + ch_i = Ch_atm_ice + ce_i = Ce_atm_ice + !!PS h_ml = 10.0_WP ! 10.0 or 30. used previously + !!PS h_ml = 5.0_WP ! 10.0 or 30. used previously + h_ml = 2.5_WP ! 10.0 or 30. used previously + !!PS h_ml = 1.25_WP ! 10.0 or 30. used previously + fw = 0.0_WP + ehf = 0.0_WP + lid_Clo=ice%thermo%h0 + if (geo_coord_nod2D(2,i)>0) then !TODO 2 separate pars for each hemisphere + lid_clo=0.5_WP else - rain=0.0_WP - snow=prec_rain(i) + lid_clo=0.5_WP endif - evap_in=evaporation(i) !evap_in: positive up -!!PS evap_in=0.0_WP - else - rain = prec_rain(i) - snow = prec_snow(i) - evap_in=0.0_WP - end if - runo = runoff(i) - ug = sqrt(u_wind(i)**2+v_wind(i)**2) - ustar = ustar_aux(i) - T_oc = T_oc_array(i) - S_oc = S_oc_array(i) - if(ref_sss_local) rsss = S_oc - t = t_skin(i) - ch = Ch_atm_oce_arr(i) - ce = Ce_atm_oce_arr(i) - ch_i = Ch_atm_ice - ce_i = Ce_atm_ice -!!PS h_ml = 10.0_WP ! 10.0 or 30. used previously -!!PS h_ml = 5.0_WP ! 10.0 or 30. used previously - h_ml = 2.5_WP ! 10.0 or 30. used previously -!!PS h_ml = 1.25_WP ! 10.0 or 30. used previously - fw = 0.0_WP - ehf = 0.0_WP - lid_Clo=ice%thermo%h0 - if (geo_coord_nod2D(2,i)>0) then !TODO 2 separate pars for each hemisphere - lid_clo=0.5_WP - else - lid_clo=0.5_WP - endif - - call therm_ice(ice%thermo,h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & - ug,ustar,T_oc,S_oc,h_ml,t,ice%ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & - rsf, ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) - - m_ice_old(i) = m_ice(i) !PS - m_snow_old(i) = m_snow(i) !PS - a_ice_old(i) = a_ice(i) !PS - thdgr_old(i) = thdgr(i) !PS - - m_ice(i) = h - m_snow(i) = hsn - a_ice(i) = A - - t_skin(i) = t - fresh_wa_flux(i) = fw !positive down - net_heat_flux(i) = ehf !positive down - evaporation(i) = evap !negative up - ice_sublimation(i)= subli - - thdgr(i) = ithdgr - thdgrsn(i) = ithdgrsn - flice(i) = iflice - olat_heat(i) = hflatow - osen_heat(i) = hfsenow - olwout(i) = hflwrdout - - ! real salt flux due to salinity that is contained in the sea ice 4-5 psu - real_salt_flux(i)= rsf !PS - - ! if snow file is not given snow computed from prec_rain --> but prec_snow - ! array needs to be filled --> so that the freshwater balancing adds up - if (.not. l_snow) then - prec_rain(i) = rain - prec_snow(i) = snow - end if + + !_______________________________________________________________________ + ! do ice thermodynamics + call therm_ice(ice%thermo,h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & + ug,ustar,T_oc,S_oc,h_ml,t,ice%ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & + rsf, ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) + + !_______________________________________________________________________ + ! write ice thermodyn. results into arrays + ! backup of old values + m_ice_old(i) = m_ice(i) !PS + m_snow_old(i) = m_snow(i) !PS + a_ice_old(i) = a_ice(i) !PS + thdgr_old(i) = thdgr(i) !PS + + ! new values + m_ice(i) = h + m_snow(i) = hsn + a_ice(i) = A + + t_skin(i) = t + fresh_wa_flux(i) = fw !positive down + net_heat_flux(i) = ehf !positive down + evaporation(i) = evap !negative up + ice_sublimation(i)= subli + + thdgr(i) = ithdgr + thdgrsn(i) = ithdgrsn + flice(i) = iflice + olat_heat(i) = hflatow + osen_heat(i) = hfsenow + olwout(i) = hflwrdout + + ! real salt flux due to salinity that is contained in the sea ice 4-5 psu + real_salt_flux(i) = rsf !PS + + ! if snow file is not given snow computed from prec_rain --> but prec_snow + ! array needs to be filled --> so that the freshwater balancing adds up + if (.not. l_snow) then + prec_rain(i) = rain + prec_snow(i) = snow + end if end do end subroutine thermodynamics ! ! !_______________________________________________________________________________ -subroutine therm_ice(ithermp, h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & - ug,ustar,T_oc,S_oc,H_ML,t,ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & - rsf, dhgrowth, dhsngrowth, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) +subroutine therm_ice(ithermp, h, hsn, A, fsh, flo, Ta, qa, rain, snow, runo, rsss, & + ug, ustar, T_oc, S_oc, H_ML, t, ice_dt, ch, ce, ch_i, ce_i, & + evap_in, fw, ehf, evap, rsf, dhgrowth, dhsngrowth, iflice, & + hflatow, hfsenow, hflwrdout, lid_clo, subli) ! Ice Thermodynamic growth model ! ! Input parameters: @@ -392,180 +393,179 @@ subroutine therm_ice(ithermp, h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & inv_rhosno => ithermp%inv_rhosno !___________________________________________________________________________ + ! Store ice thickness at start of growth routine + dhgrowth=h + + ! determine h(i,j)/a(i,j) = actual ice thickness. + ! if snow layer is present, add hsn weighted with quotient + ! of conductivities of ice and snow, according to 0-layer approach + ! of Semtner (1976). + ! thickness at the ice covered part + thick=hsn*(con/consn)/max(A,Armin) ! Effective snow thickness + thick=thick+h/max(A,Armin) ! Effective total snow-ice thickness + + ! Growth rate for ice in open ocean + rhow=0.0_WP + evap=0.0_WP + call obudget(ithermp, qa,fsh,flo,T_oc,ug,ta,ch,ce,rhow,evap,hflatow,hfsenow,hflwrdout) + hflatow=hflatow*(1.0_WP-A) + hfsenow=hfsenow*(1.0_WP-A) + hflwrdout=hflwrdout*(1.0_WP-A) - ! Store ice thickness at start of growth routine - dhgrowth=h - - ! determine h(i,j)/a(i,j) = actual ice thickness. - ! if snow layer is present, add hsn weighted with quotient - ! of conductivities of ice and snow, according to 0-layer approach - ! of Semtner (1976). - ! thickness at the ice covered part - thick=hsn*(con/consn)/max(A,Armin) ! Effective snow thickness - thick=thick+h/max(A,Armin) ! Effective total snow-ice thickness - - ! Growth rate for ice in open ocean - rhow=0.0_WP - evap=0.0_WP - call obudget(ithermp, qa,fsh,flo,T_oc,ug,ta,ch,ce,rhow,evap,hflatow,hfsenow,hflwrdout) - hflatow=hflatow*(1.0_WP-A) - hfsenow=hfsenow*(1.0_WP-A) - hflwrdout=hflwrdout*(1.0_WP-A) - - ! add heat loss at open ocean due to melting snow fall - !rhow=rhow+snow*1000.0/rhoice !qiang - ! ice_dt and (1-A) will be multiplied afterwards - - ! growth rate of ice in ice covered part - ! following Hibler 1984 - ! assuming ice thickness has an euqal, 7-level distribution from zero to two times h - rhice=0.0_WP - subli=0.0_WP - if (thick.gt.hmin) then - do k=1,iclasses - thact = real((2*k-1),WP)*thick/real(iclasses,WP) ! Thicknesses of actual ice class - call budget(ithermp, thact, hsn,t,Ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,shice,subli_i) - !Thick ice K-class growth rate - rhice=rhice+shice ! Add to average heat flux - subli=subli+subli_i - end do - rhice=rhice/real(iclasses,WP) ! Add to average heat flux - subli=subli/real(iclasses,WP) - end if - - ! Convert growth rates [m ice/sec] into growth per time step DT. - rhow=rhow*ice_dt - rhice=rhice*ice_dt - - ! Multiply ice growth of open water and ice - ! with the corresponding areal fractions of grid cell - show =rhow*(1.0_WP-A) - shice=rhice*A - sh =show+shice - - ! Store atmospheric heat flux, average over grid cell [W/m**2] - ahf=-cl*sh/ice_dt - - ! precipitation (into the ocean) - prec=rain+runo+snow*(1.0_WP-A) ! m water/s - - ! snow fall above ice - hsn=hsn+snow*ice_dt*A*1000.0_WP*inv_rhosno ! Add snow fall to temporary snow thickness !!! - dhsngrowth=hsn ! Store snow thickness after snow fall - - evap=evap*(1.0_WP-A) ! m water/s - subli=subli*A - - ! If there is atmospheric melting, first melt any snow that is present. - ! Atmospheric heat flux available for melting - ! sh = MINUS atm. heat flux / specific latent heat of sea ice - ! Note: (sh<0) for melting, (sh>0) for freezing - hsntmp= -min(sh,0.0_WP)*rhoice*inv_rhosno - - ! hsntmp is the decrease in snow thickness due to atmospheric melting - ! [m/DT]. Do not melt more snow than available - hsntmp=min(hsntmp,hsn) - hsn=hsn-hsntmp ! Update snow thickness after atmospheric snow melt - - ! Negative atmospheric heat flux left after melting of snow - ! Note: (sh<0) and (hsntmp>0) for melting conditions - ! hsntmp=0 for non-snow-melting conditions - rh=sh+hsntmp*rhosno/rhoice - h=max(h,0.0_WP) - - ! Compute heat content qhst of mixed layer - sea ice system - ! - ! Total heat content is the sum of - ! h ice thickness after calculation of dynamic effects - ! 178418rh change in ice thickness due to atmospheric forcing - ! and heat available in mixed layer, with - ! T_oc temperature of ocean surface layer - ! Tfrez freezing point of sea water - ! H_ML thickness of uppermost layer - ! - !RT: - ! There are three possibilities to do this. - ! 1.: Assume an instantaneous adjustment of mixed layer heat content. - ! Any heat available is then instantaneously used to melt ice. - ! (so-called ice-bath approach) - ! This is what used to be used in the Lemke sea ice-mixed layer model. - ! rh=rh-(T_oc-TFrez(S_oc))*H_ML*cc/cl - ! qhst=h+rh - ! - ! 2.: Parameterize the ocean-to-ice heat flux (o2ihf) - ! as a function of temperature difference. For a first step - ! we can assume a constant exchange coefficient gamma_t: - ! o2ihf= (T_oc-TFrez(S_oc))*gamma_t*cc*A & - ! +(T_oc-Tfrez(S_oc))*H_ML/ice_dt*cc*(1.0-A) ! [W/m2] - ! rh=rh-o2ihf*ice_dt/cl - ! qhst=h+rh ! [m] - ! - ! 3. Parameterize the ocean-to-ice heat flux (o2ihf) - ! as a function of temperature difference and the - ! friction velocity: - o2ihf= (T_oc-TFrez(S_oc))*0.006_WP*ustar*cc*A & - +(T_oc-Tfrez(S_oc))*H_ML/ice_dt*cc*(1.0_WP-A) ! [W/m2] - rh=rh-o2ihf*ice_dt/cl - qhst=h+rh ! [m] - - ! Melt snow if there is any ML heat content left (qhst<0). - ! This may be the case if advection moves ice (with snow) to regions - ! with a warm mixed layer. - sn=hsn+min(qhst,0.0_WP)*rhoice*inv_rhosno - - ! New temporary snow thickness must not be negative: - sn=max(sn,0.0_WP) - - ! Update snow and ice depth - hsn=sn - h=max(qhst,0.0_WP) - if (h.lt.1E-6_WP) h=0._WP ! Avoid very small ice thicknesses - - ! heat and fresh water fluxes - dhgrowth=h-dhgrowth ! Change in ice thickness due to thermodynamic effects - dhsngrowth=hsn-dhsngrowth ! Change in snow thickness due to thermodynamic melting - - ! (without snow fall). This is a negative value (MINUS snow melt) - - dhgrowth=dhgrowth/ice_dt ! Conversion: 'per time step' -> 'per second' - dhsngrowth=dhsngrowth/ice_dt ! Conversion: 'per time step' -> 'per second' - ! (radiation+turbulent) + freezing(-melting) sea-ice&snow - - ehf = ahf + cl*(dhgrowth+(rhosno/rhoice)*dhsngrowth) + ! add heat loss at open ocean due to melting snow fall + !rhow=rhow+snow*1000.0/rhoice !qiang + ! ice_dt and (1-A) will be multiplied afterwards + + ! growth rate of ice in ice covered part + ! following Hibler 1984 + ! assuming ice thickness has an euqal, 7-level distribution from zero to two times h + rhice=0.0_WP + subli=0.0_WP + if (thick.gt.hmin) then + do k=1,iclasses + thact = real((2*k-1),WP)*thick/real(iclasses,WP) ! Thicknesses of actual ice class + call budget(ithermp, thact, hsn,t,Ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,shice,subli_i) + !Thick ice K-class growth rate + rhice=rhice+shice ! Add to average heat flux + subli=subli+subli_i + end do + rhice=rhice/real(iclasses,WP) ! Add to average heat flux + subli=subli/real(iclasses,WP) + end if + + ! Convert growth rates [m ice/sec] into growth per time step DT. + rhow=rhow*ice_dt + rhice=rhice*ice_dt + + ! Multiply ice growth of open water and ice + ! with the corresponding areal fractions of grid cell + show =rhow*(1.0_WP-A) + shice=rhice*A + sh =show+shice + + ! Store atmospheric heat flux, average over grid cell [W/m**2] + ahf=-cl*sh/ice_dt + + ! precipitation (into the ocean) + prec=rain+runo+snow*(1.0_WP-A) ! m water/s + + ! snow fall above ice + hsn=hsn+snow*ice_dt*A*1000.0_WP*inv_rhosno ! Add snow fall to temporary snow thickness !!! + dhsngrowth=hsn ! Store snow thickness after snow fall + + evap=evap*(1.0_WP-A) ! m water/s + subli=subli*A + + ! If there is atmospheric melting, first melt any snow that is present. + ! Atmospheric heat flux available for melting + ! sh = MINUS atm. heat flux / specific latent heat of sea ice + ! Note: (sh<0) for melting, (sh>0) for freezing + hsntmp= -min(sh,0.0_WP)*rhoice*inv_rhosno + + ! hsntmp is the decrease in snow thickness due to atmospheric melting + ! [m/DT]. Do not melt more snow than available + hsntmp=min(hsntmp,hsn) + hsn=hsn-hsntmp ! Update snow thickness after atmospheric snow melt + + ! Negative atmospheric heat flux left after melting of snow + ! Note: (sh<0) and (hsntmp>0) for melting conditions + ! hsntmp=0 for non-snow-melting conditions + rh=sh+hsntmp*rhosno/rhoice + h=max(h,0.0_WP) + + ! Compute heat content qhst of mixed layer - sea ice system + ! + ! Total heat content is the sum of + ! h ice thickness after calculation of dynamic effects + ! 178418rh change in ice thickness due to atmospheric forcing + ! and heat available in mixed layer, with + ! T_oc temperature of ocean surface layer + ! Tfrez freezing point of sea water + ! H_ML thickness of uppermost layer + ! + !RT: + ! There are three possibilities to do this. + ! 1.: Assume an instantaneous adjustment of mixed layer heat content. + ! Any heat available is then instantaneously used to melt ice. + ! (so-called ice-bath approach) + ! This is what used to be used in the Lemke sea ice-mixed layer model. + ! rh=rh-(T_oc-TFrez(S_oc))*H_ML*cc/cl + ! qhst=h+rh + ! + ! 2.: Parameterize the ocean-to-ice heat flux (o2ihf) + ! as a function of temperature difference. For a first step + ! we can assume a constant exchange coefficient gamma_t: + ! o2ihf= (T_oc-TFrez(S_oc))*gamma_t*cc*A & + ! +(T_oc-Tfrez(S_oc))*H_ML/ice_dt*cc*(1.0-A) ! [W/m2] + ! rh=rh-o2ihf*ice_dt/cl + ! qhst=h+rh ! [m] + ! + ! 3. Parameterize the ocean-to-ice heat flux (o2ihf) + ! as a function of temperature difference and the + ! friction velocity: + o2ihf= (T_oc-TFrez(S_oc))*0.006_WP*ustar*cc*A & + +(T_oc-Tfrez(S_oc))*H_ML/ice_dt*cc*(1.0_WP-A) ! [W/m2] + rh=rh-o2ihf*ice_dt/cl + qhst=h+rh ! [m] + + ! Melt snow if there is any ML heat content left (qhst<0). + ! This may be the case if advection moves ice (with snow) to regions + ! with a warm mixed layer. + sn=hsn+min(qhst,0.0_WP)*rhoice*inv_rhosno + + ! New temporary snow thickness must not be negative: + sn=max(sn,0.0_WP) + + ! Update snow and ice depth + hsn=sn + h=max(qhst,0.0_WP) + if (h.lt.1E-6_WP) h=0._WP ! Avoid very small ice thicknesses + + ! heat and fresh water fluxes + dhgrowth=h-dhgrowth ! Change in ice thickness due to thermodynamic effects + dhsngrowth=hsn-dhsngrowth ! Change in snow thickness due to thermodynamic melting + + ! (without snow fall). This is a negative value (MINUS snow melt) + + dhgrowth=dhgrowth/ice_dt ! Conversion: 'per time step' -> 'per second' + dhsngrowth=dhsngrowth/ice_dt ! Conversion: 'per time step' -> 'per second' + ! (radiation+turbulent) + freezing(-melting) sea-ice&snow + + ehf = ahf + cl*(dhgrowth+(rhosno/rhoice)*dhsngrowth) + + ! (prec+runoff)+evap - freezing(+melting) ice&snow + if (.not. use_virt_salt) then + fw= prec+evap - dhgrowth*rhoice*inv_rhowat - dhsngrowth*rhosno*inv_rhowat + rsf= -dhgrowth*rhoice*inv_rhowat*Sice + else + fw= prec+evap - dhgrowth*rhoice*inv_rhowat*(rsss-Sice)/rsss - dhsngrowth*rhosno*inv_rhowat + end if + + ! Changes in compactnesses (equation 16 of Hibler 1979) + rh=-min(h,-rh) ! Make sure we do not try to melt more ice than is available + rA= rhow - o2ihf*ice_dt/cl !Qiang: it was -(T_oc-TFrez(S_oc))*H_ML*cc/cl, changed in June 2010 + !rA= rhow - (T_oc-TFrez(S_oc))*H_ML*cc/cl*(1.0-A) + A=A + 0.5_WP*min(rh,0.0_WP)*A/max(h,hmin) + max(rA,0.0_WP)*(1._WP-A)/lid_clo !/h0 + !meaning: melting freezing + + A=min(A,h*1.e6_WP) ! A -> 0 for h -> 0 + A=min(max(A,0.0_WP),1._WP) ! A >= 0, A <= 1 + + ! Flooding (snow to ice conversion) + iflice=h + call flooding(ithermp, h, hsn) + iflice=(h-iflice)/ice_dt + + ! to maintain salt conservation for the current model version + !(a way to avoid producing net salt from snow-type-ice) + if (.not. use_virt_salt) then + rsf=rsf-iflice*rhoice*inv_rhowat*Sice + else + fw=fw+iflice*rhoice*inv_rhowat*Sice/rsss + end if + + evap=evap+subli - ! (prec+runoff)+evap - freezing(+melting) ice&snow - if (.not. use_virt_salt) then - fw= prec+evap - dhgrowth*rhoice*inv_rhowat - dhsngrowth*rhosno*inv_rhowat - rsf= -dhgrowth*rhoice*inv_rhowat*Sice - else - fw= prec+evap - dhgrowth*rhoice*inv_rhowat*(rsss-Sice)/rsss - dhsngrowth*rhosno*inv_rhowat - end if - - ! Changes in compactnesses (equation 16 of Hibler 1979) - rh=-min(h,-rh) ! Make sure we do not try to melt more ice than is available - rA= rhow - o2ihf*ice_dt/cl !Qiang: it was -(T_oc-TFrez(S_oc))*H_ML*cc/cl, changed in June 2010 - !rA= rhow - (T_oc-TFrez(S_oc))*H_ML*cc/cl*(1.0-A) - A=A + 0.5_WP*min(rh,0.0_WP)*A/max(h,hmin) + max(rA,0.0_WP)*(1._WP-A)/lid_clo !/h0 - !meaning: melting freezing - - A=min(A,h*1.e6_WP) ! A -> 0 for h -> 0 - A=min(max(A,0.0_WP),1._WP) ! A >= 0, A <= 1 - - ! Flooding (snow to ice conversion) - iflice=h - call flooding(ithermp, h, hsn) - iflice=(h-iflice)/ice_dt - - ! to maintain salt conservation for the current model version - !(a way to avoid producing net salt from snow-type-ice) - if (.not. use_virt_salt) then - rsf=rsf-iflice*rhoice*inv_rhowat*Sice - else - fw=fw+iflice*rhoice*inv_rhowat*Sice/rsss - end if - - evap=evap+subli - end subroutine therm_ice ! ! @@ -622,71 +622,70 @@ subroutine budget (ithermp, hice,hsn,t,ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,fh,subli) albi => ithermp%albi albsn => ithermp%albsn albsnm => ithermp%albsnm + !___________________________________________________________________________ -!!PS data q1 /11637800.0/, q2 /-5897.8/ -!!PS data imax /5/ - - q1 = 11637800.0_WP - q2 = -5897.8_WP - imax = 5 - - ! set albedo - ! ice and snow, freezing and melting conditions are distinguished. - if (t<0.0_WP) then ! freezing condition - if (hsn.gt.0.0_WP) then ! snow cover present - alb=albsn - else ! no snow cover - alb=albi - endif - else ! melting condition - if (hsn.gt.0.0_WP) then ! snow cover present - alb=albsnm - else ! no snow cover - alb=albim - endif - endif - - d1=rhoair*cpair*Ch_i - d2=rhoair*Ce_i - d3=d2*clhi - - ! total incoming atmospheric heat flux - A1=(1.0_WP-alb)*fsh + flo + d1*ug*ta + d3*ug*qa ! in LY2004 emiss is multiplied wiht flo - ! NEWTON-RHAPSON TO GET TEMPERATURE AT THE TOP OF THE ICE LAYER - - do iter=1,imax - - B=q1*inv_rhoair*exp(q2/(t+tmelt)) ! (saturated) specific humidity over ice - A2=-d1*ug*t-d3*ug*B & - -emiss_ice*boltzmann*((t+tmelt)**4) ! sensible and latent heat,and outward radiation - A3=-d3*ug*B*q2/((t+tmelt)**2) ! gradient coefficient for the latent heat part - C=con/hice ! gradient coefficient for downward heat conductivity - A3=A3+C+d1*ug & ! gradient coefficient for sensible heat and radiation - +4.0_WP*emiss_ice*boltzmann*((t+tmelt)**3) - C=C*(TFrez(S_oc)-t) ! downward conductivity term - - t=t+(A1+A2+C)/A3 ! NEW ICE TEMPERATURE AS THE SUM OF ALL COMPONENTS - end do - - t=min(0.0_WP,t) - ! heat fluxes [W/m**2]: - - hfrad= (1.0_WP-alb)*fsh & ! absorbed short wave radiation - +flo & ! long wave radiation coming in ! in LY2004 emiss is multiplied - -emiss_ice*boltzmann*((t+tmelt)**4) ! long wave radiation going out - - hfsen=d1*ug*(ta-t) ! sensible heat - subli=d2*ug*(qa-B) ! sublimation - hflat=clhi*subli ! latent heat - - hftot=hfrad+hfsen+hflat ! total heat - - fh= -hftot/cl ! growth rate [m ice/sec] - ! +: ML gains energy, ice melts - ! -: ML loses energy, ice grows - subli=subli*inv_rhowat ! negative upward - - return + q1 = 11637800.0_WP + q2 = -5897.8_WP + imax = 5 + + !___________________________________________________________________________ + ! set albedo + ! ice and snow, freezing and melting conditions are distinguished. + if (t<0.0_WP) then ! --> freezing condition + if (hsn.gt.0.0_WP) then ! --> snow cover present + alb=albsn + else ! --> no snow cover + alb=albi + endif + else ! --> melting condition + if (hsn.gt.0.0_WP) then ! --> snow cover present + alb=albsnm + else ! --> no snow cover + alb=albim + endif + endif + + !___________________________________________________________________________ + d1=rhoair*cpair*Ch_i + d2=rhoair*Ce_i + d3=d2*clhi + + ! total incoming atmospheric heat flux + A1=(1.0_WP-alb)*fsh + flo + d1*ug*ta + d3*ug*qa ! in LY2004 emiss is multiplied wiht flo + ! NEWTON-RHAPSON TO GET TEMPERATURE AT THE TOP OF THE ICE LAYER + + do iter=1,imax + B=q1*inv_rhoair*exp(q2/(t+tmelt)) ! (saturated) specific humidity over ice + A2=-d1*ug*t-d3*ug*B & + -emiss_ice*boltzmann*((t+tmelt)**4) ! sensible and latent heat,and outward radiation + A3=-d3*ug*B*q2/((t+tmelt)**2) ! gradient coefficient for the latent heat part + C=con/hice ! gradient coefficient for downward heat conductivity + A3=A3+C+d1*ug & ! gradient coefficient for sensible heat and radiation + +4.0_WP*emiss_ice*boltzmann*((t+tmelt)**3) + C=C*(TFrez(S_oc)-t) ! downward conductivity term + + t=t+(A1+A2+C)/A3 ! NEW ICE TEMPERATURE AS THE SUM OF ALL COMPONENTS + end do + t=min(0.0_WP,t) + + !___________________________________________________________________________ + ! heat fluxes [W/m**2]: + hfrad= (1.0_WP-alb)*fsh & ! absorbed short wave radiation + +flo & ! long wave radiation coming in ! in LY2004 emiss is multiplied + -emiss_ice*boltzmann*((t+tmelt)**4) ! long wave radiation going out + + hfsen=d1*ug*(ta-t) ! sensible heat + subli=d2*ug*(qa-B) ! sublimation + hflat=clhi*subli ! latent heat + + hftot=hfrad+hfsen+hflat ! total heat + + fh= -hftot/cl ! growth rate [m ice/sec] + ! +: ML gains energy, ice melts + ! -: ML loses energy, ice grows + subli=subli*inv_rhowat ! negative upward + + return end subroutine budget ! ! From cd729ed980d1813c67fdf29535fbb61de6ad839c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 30 Nov 2021 18:39:49 +0100 Subject: [PATCH 717/909] allow more than 255 characters in source code lines when using the Fortran cray compiler --- src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0f26f04f5..02b353ff0 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -80,7 +80,7 @@ elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) - target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp -N 1023) endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) From dac0c761f8362f3c485697bb97dbd6e0aff15b6b Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 30 Nov 2021 18:41:40 +0100 Subject: [PATCH 718/909] - add a cmake switch to enable/disable the workarounds required for aleph - use -craympich-mt as a default on aleph --- src/CMakeLists.txt | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 02b353ff0..f568a9e98 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -2,8 +2,16 @@ cmake_minimum_required(VERSION 3.4) project(fesom C Fortran) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) -#add_compile_options(-craympich-mt) # alternative cray-mpich library - add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # work around issue on aleph + option(ALEPH_CRAYMPICH_WORKAROUNDS "workaround for performance issues on aleph" ON) # todo: enable this switch only on aleph, not with cray compilers in general +endif() +if(${ALEPH_CRAYMPICH_WORKAROUNDS}) + # todo: enable these options only for our targets + add_compile_options(-craympich-mt) # alternative cray-mpich library + # make sure to also set these variables in the runtime environment: + # MPICH_MAX_THREAD_SAFETY=multiple # allows highest MPI thread level (i.e. MPI_THREAD_MULTIPLE) + # MPICH_CRAY_OPT_THREAD_SYNC=0 # the Cray MPICH library falls back to using the pthread_mutex-based thread-synchronization implementation + # MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC + add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # reading restarts is slow when doing it on parallel on aleph, switch it off for now endif() #add_compile_options(-DTRANSPOSE_OUTPUT) From df383a179290929553a6473bd2e80c7a2b60f954 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Wed, 1 Dec 2021 09:28:40 +0100 Subject: [PATCH 719/909] Update oce_ale.F90 --- src/oce_ale.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 8db33879b..9396a083f 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2452,7 +2452,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end do end do !$OMP END PARALLEL DO - +cflmax=0. !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, lcflmax) lcflmax=0. !$OMP DO From 641b50575447a6b871417af3b771555f1f7ae43b Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 1 Dec 2021 09:36:58 +0100 Subject: [PATCH 720/909] CFLMAX was not initialized in vert_vel_ale (debugger never complained :( ) --- src/oce_ale.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index dbc941243..ffb00295d 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2447,7 +2447,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end do end do !$OMP END PARALLEL DO - +cflmax=0. !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, lcflmax) lcflmax=0. !$OMP DO From d160c54587b98e9ac250b0a7193b6999ae168e39 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 1 Dec 2021 10:11:14 +0100 Subject: [PATCH 721/909] fixing the small bugs and replaced the computation of maximum in OMP through REDUCTION --- src/oce_ale.F90 | 11 +++-------- src/write_step_info.F90 | 8 ++++---- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 9396a083f..d4d336ee2 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1943,7 +1943,6 @@ subroutine vert_vel_ale(dynamics, partit, mesh) integer :: el(2), enodes(2), n, nz, ed, nzmin, nzmax, uln1, uln2, nln1, nln2 real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, dd, dd1, dddt, cflmax real(kind=WP) :: c1(mesh%nl-1), c2(mesh%nl-1) - real(kind=WP) :: lcflmax !for OMP realization ! --> zlevel with local zstar real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int real(kind=WP), dimension(:), allocatable :: max_dhbar2distr, cumsum_maxdhbar, distrib_dhbar @@ -2452,17 +2451,13 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end do end do !$OMP END PARALLEL DO -cflmax=0. -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, lcflmax) - lcflmax=0. + cflmax=0. +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) REDUCTION(max:cflmax) !$OMP DO do n=1, myDim_nod2D+eDim_nod2D - lcflmax=max(lcflmax, maxval(CFL_z(:, n))) + cflmax=max(cflmax, maxval(CFL_z(:, n))) end do !$OMP END DO -!$OMP CRITICAL - cflmax=max(lcflmax, cflmax) -!$OMP END CRITICAL !$OMP END PARALLEL if (cflmax > 1.0_WP .and. flag_warn_cflz) then diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index b97127caa..86ca6b3b7 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -125,9 +125,9 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) call MPI_AllREDUCE(loc , min_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc=omp_min_max_sum1(heat_flux, 1, myDim_nod2D, 'min', partit) call MPI_AllREDUCE(loc , min_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc=omp_min_max_sum2(tracers%data(1)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0) + loc=omp_min_max_sum2(tracers%data(1)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0_WP) call MPI_AllREDUCE(loc , min_temp , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc=omp_min_max_sum2(tracers%data(2)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0) + loc=omp_min_max_sum2(tracers%data(2)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0_WP) call MPI_AllREDUCE(loc , min_salt , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) loc=omp_min_max_sum1(Wvel(1,:), 1, myDim_nod2D, 'min', partit) call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) @@ -157,9 +157,9 @@ subroutine write_step_info(istep, outfreq, dynamics, tracers, partit, mesh) call MPI_AllREDUCE(loc , max_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc=omp_min_max_sum1(heat_flux, 1, myDim_nod2D, 'max', partit) call MPI_AllREDUCE(loc , max_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc=omp_min_max_sum2(tracers%data(1)%values, 1, nl-1, 1, myDim_nod2D, 'max', partit, 0.0) + loc=omp_min_max_sum2(tracers%data(1)%values, 1, nl-1, 1, myDim_nod2D, 'max', partit, 0.0_WP) call MPI_AllREDUCE(loc , max_temp , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc=omp_min_max_sum2(tracers%data(2)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0) + loc=omp_min_max_sum2(tracers%data(2)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0_WP) call MPI_AllREDUCE(loc , max_salt , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc=omp_min_max_sum1(Wvel(1,:), 1, myDim_nod2D, 'max', partit) call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) From bca72715703dcc28b24f0ca1f67ca72df06de424 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Wed, 1 Dec 2021 10:33:59 +0100 Subject: [PATCH 722/909] Update oce_ale.F90 --- src/oce_ale.F90 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index d4d336ee2..ce5d12585 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1980,7 +1980,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end if END DO !$OMP END PARALLEL DO - + c1=0.0_WP + c2=0.0_WP !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(ed, enodes, el, deltaX1, deltaY1, nz, nzmin, nzmax, deltaX2, deltaY2, c1, c2) do ed=1, myDim_edge2D ! local indice of nodes that span up edge ed From 80f8d071ac1ad7fb366fb4c7ce0e6ba4a7ab6802 Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Wed, 1 Dec 2021 12:20:31 +0100 Subject: [PATCH 723/909] Use OMP Reduction to calculate parallel min, max & sum --- src/gen_support.F90 | 73 +++++++++++++++++++-------------------------- 1 file changed, 30 insertions(+), 43 deletions(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index eee296fa0..822b4cc8a 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -499,7 +499,7 @@ FUNCTION omp_min_max_sum1(arr, pos1, pos2, what, partit, nan) character(3), intent(in) :: what real(kind=WP), optional :: nan !to be implemented upon the need (for masked arrays) real(kind=WP) :: omp_min_max_sum1 - real(kind=WP) :: loc, val + real(kind=WP) :: val integer :: n type(t_partit),intent(in), & @@ -508,48 +508,40 @@ FUNCTION omp_min_max_sum1(arr, pos1, pos2, what, partit, nan) SELECT CASE (trim(what)) CASE ('sum') val=0.0_WP -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, loc) - loc=0.0_WP -!$OMP DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) +!$OMP DO REDUCTION(+: val) do n=pos1, pos2 - loc=loc+arr(n) + val=val+arr(n) end do !$OMP END DO -!$OMP CRITICAL - val=val+loc -!$OMP END CRITICAL !$OMP END PARALLEL + CASE ('min') val=arr(1) -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, loc) - loc=val -!$OMP DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) +!$OMP DO REDUCTION(min, val) do n=pos1, pos2 - loc=min(loc, arr(n)) + val=min(val, arr(n)) end do !$OMP END DO -!$OMP CRITICAL - val=min(val, loc) -!$OMP END CRITICAL !$OMP END PARALLEL + CASE ('max') val=arr(1) -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, loc) - loc=val -!$OMP DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) +!$OMP DO REDUCTION(max: val) do n=pos1, pos2 - loc=max(loc, arr(n)) + val=max(val, arr(n)) end do !$OMP END DO -!$OMP CRITICAL - val=max(val, loc) -!$OMP END CRITICAL !$OMP END PARALLEL - CASE DEFAULT - if (partit%mype==0) write(*,*) trim(what), ' is not implemented in omp_min_max_sum case!' - call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) - STOP + + CASE DEFAULT + if (partit%mype==0) write(*,*) trim(what), ' is not implemented in omp_min_max_sum case!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) + STOP END SELECT + omp_min_max_sum1=val END FUNCTION ! @@ -562,7 +554,7 @@ FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) character(3), intent(in) :: what real(kind=WP), optional :: nan !to be implemented upon the need (for masked arrays) real(kind=WP) :: omp_min_max_sum2 - real(kind=WP) :: loc, val, vmasked + real(kind=WP) :: val, vmasked integer :: i, j @@ -574,39 +566,34 @@ FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) if (.not. present(nan)) vmasked=huge(vmasked) !just some crazy number val=arr(1,1) !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, loc) - loc=val +!$OMP DO REDUCTION(min: val) do i=pos11, pos12 -!$OMP DO - do j=pos21, pos22 - if (arr(i,j)/=vmasked) loc=min(loc, arr(i,j)) + do j=pos21, pos22 + if (arr(i,j)/=vmasked) val=min(val, arr(i,j)) + end do end do !$OMP END DO - end do -!$OMP CRITICAL - val=min(val, loc) -!$OMP END CRITICAL !$OMP END PARALLEL + CASE ('max') if (.not. present(nan)) vmasked=tiny(vmasked) !just some crazy number val=arr(1,1) !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, loc) - loc=val +!$OMP DO REDUCTION(max: val) do i=pos11, pos12 -!$OMP DO - do j=pos21, pos22 - if (arr(i,j)/=vmasked) loc=max(loc, arr(i,j)) + do j=pos21, pos22 + if (arr(i,j)/=vmasked) val=max(val, arr(i,j)) + end do end do !$OMP END DO - end do -!$OMP CRITICAL - val=max(val, loc) -!$OMP END CRITICAL !$OMP END PARALLEL + CASE DEFAULT if (partit%mype==0) write(*,*) trim(what), ' is not implemented in omp_min_max_sum case!' call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) STOP END SELECT + omp_min_max_sum2=val END FUNCTION end module g_support From 7cb55ed48486f800c6061f2db77579266cb89a17 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 1 Dec 2021 12:20:44 +0100 Subject: [PATCH 724/909] static vs. dynamic allocation of a uxuarry arrays changes the resutls. some checks needed! --- src/oce_ale.F90 | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index ce5d12585..d46eab29a 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1942,7 +1942,9 @@ subroutine vert_vel_ale(dynamics, partit, mesh) !___________________________________________________________________________ integer :: el(2), enodes(2), n, nz, ed, nzmin, nzmax, uln1, uln2, nln1, nln2 real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, dd, dd1, dddt, cflmax - real(kind=WP) :: c1(mesh%nl-1), c2(mesh%nl-1) + ! still to be understood but if you allocate these arrays statically the results will be different: + ! real(kind=WP) :: c1(mesh%nl-1), c2(mesh%nl-1) + real(kind=WP) :: c1(50), c2(50) ! --> zlevel with local zstar real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int real(kind=WP), dimension(:), allocatable :: max_dhbar2distr, cumsum_maxdhbar, distrib_dhbar @@ -1980,8 +1982,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end if END DO !$OMP END PARALLEL DO - c1=0.0_WP - c2=0.0_WP + !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(ed, enodes, el, deltaX1, deltaY1, nz, nzmin, nzmax, deltaX2, deltaY2, c1, c2) do ed=1, myDim_edge2D ! local indice of nodes that span up edge ed From d3d90d26f8fd50f44bf14049cb58ee490242dcd4 Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Wed, 1 Dec 2021 12:26:43 +0100 Subject: [PATCH 725/909] ... --- src/gen_support.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index 822b4cc8a..753ea3e16 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -519,7 +519,7 @@ FUNCTION omp_min_max_sum1(arr, pos1, pos2, what, partit, nan) CASE ('min') val=arr(1) !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) -!$OMP DO REDUCTION(min, val) +!$OMP DO REDUCTION(min: val) do n=pos1, pos2 val=min(val, arr(n)) end do @@ -565,7 +565,7 @@ FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) CASE ('min') if (.not. present(nan)) vmasked=huge(vmasked) !just some crazy number val=arr(1,1) -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, loc) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j) !$OMP DO REDUCTION(min: val) do i=pos11, pos12 do j=pos21, pos22 @@ -578,7 +578,7 @@ FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) CASE ('max') if (.not. present(nan)) vmasked=tiny(vmasked) !just some crazy number val=arr(1,1) -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, loc) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j) !$OMP DO REDUCTION(max: val) do i=pos11, pos12 do j=pos21, pos22 From 4b249ec398ea7fe4fd6197ae10720f25ec717ac7 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 1 Dec 2021 12:27:13 +0100 Subject: [PATCH 726/909] check static allocation in oce_dyn.F90 for reproducibility issues --- src/oce_dyn.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 1fc6c126e..015f205a6 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -230,7 +230,9 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) !___________________________________________________________________________ real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax - real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) + ! still to be understood but if you allocate these arrays statically the results will be different: + ! real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) + real(kind=8) :: update_u(50), update_v(50) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs From 963b7705f81648d45928ff4485b52a41d67671e3 Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Wed, 1 Dec 2021 12:44:57 +0100 Subject: [PATCH 727/909] Use REDUCTION --- src/gen_surface_forcing.F90 | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index d745a5700..b3c79ba53 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -528,7 +528,6 @@ SUBROUTINE nc_sbc_ini(rdate, partit, mesh) integer :: numnodes ! nu,ber of nodes in elem (3 for triangle, 4 for ... ) real(wp) :: x, y ! coordinates of elements integer :: fld_idx - integer :: warn_omp type(flfi_type), pointer :: flf type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit @@ -543,7 +542,6 @@ SUBROUTINE nc_sbc_ini(rdate, partit, mesh) ! & STAT=sbc_alloc ) ! used to inerpolate on nodes warn = 0 - warn_omp = 0 ! get ini year; Fill names of sbc_flfi idate=int(rdate) @@ -557,8 +555,8 @@ SUBROUTINE nc_sbc_ini(rdate, partit, mesh) do fld_idx = 1, i_totfl flf=>sbc_flfi(fld_idx) ! prepare nearest coordinates in INfile , save to bilin_indx_i/j -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, x, y, warn) -!$OMP DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, x, y) +!$OMP DO REDUCTION(max: warn) do i = 1, myDim_nod2D+eDim_nod2D x = geo_coord_nod2D(1,i)/rad if (x < 0) x=x+360._WP @@ -583,18 +581,15 @@ SUBROUTINE nc_sbc_ini(rdate, partit, mesh) bilin_indx_j(fld_idx, i)=0 end if end if - if (warn_omp == 0) then + if (warn == 0) then if (bilin_indx_i(fld_idx, i) < 1 .or. bilin_indx_j(fld_idx, i) < 1) then ! WRITE(*,*) ' WARNING: node/element coordinate out of forcing bounds,' ! WRITE(*,*) ' nearest value will be used as a constant field' - warn_omp = 1 + warn = 1 end if end if end do !$OMP END DO -!$OMP CRITICAL - warn=max(warn_omp, warn) -!$OMP END CRITICAL !$OMP END PARALLEL end do lfirst=.false. From 31afc7d2c3bbcf0f3202dec5b856890660b4c993 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 1 Dec 2021 12:46:25 +0100 Subject: [PATCH 728/909] add more information regarding optimization pitfalls with cray-mpich --- src/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index f568a9e98..e189544ed 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -6,11 +6,11 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) endif() if(${ALEPH_CRAYMPICH_WORKAROUNDS}) # todo: enable these options only for our targets - add_compile_options(-craympich-mt) # alternative cray-mpich library + add_compile_options(-craympich-mt) # alternative cray-mpich library, about 5 % faster with cray-mpich/7.7.3 on aleph, not available for modules cray-mpich > 7.7.3; todo: test compiling and performance with cray-mpich > 7.7.3 # make sure to also set these variables in the runtime environment: # MPICH_MAX_THREAD_SAFETY=multiple # allows highest MPI thread level (i.e. MPI_THREAD_MULTIPLE) # MPICH_CRAY_OPT_THREAD_SYNC=0 # the Cray MPICH library falls back to using the pthread_mutex-based thread-synchronization implementation - # MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC + # MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC (but this one brings a huge speed gain on aleph) add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # reading restarts is slow when doing it on parallel on aleph, switch it off for now endif() #add_compile_options(-DTRANSPOSE_OUTPUT) From 742633d22dfe46964f429b78755a3ac0cdd597c9 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 1 Dec 2021 14:33:28 +0100 Subject: [PATCH 729/909] ewxchange i_therm_para parameters with ice%thermo%... derived parameters --- src/gen_bulk_formulae.F90 | 8 ++++++-- src/gen_forcing_couple.F90 | 5 +++-- src/ice_EVP.F90 | 8 +++++--- src/ice_maEVP.F90 | 26 +++++++++++++++++++------- 4 files changed, 33 insertions(+), 14 deletions(-) diff --git a/src/gen_bulk_formulae.F90 b/src/gen_bulk_formulae.F90 index fdf2932dc..3f3ee05cb 100755 --- a/src/gen_bulk_formulae.F90 +++ b/src/gen_bulk_formulae.F90 @@ -4,7 +4,6 @@ MODULE gen_bulk USE MOD_PARTIT USE MOD_PARSUP USE MOD_ICE - use i_therm_param use g_forcing_arrays use g_forcing_param, only: ncar_bulk_z_wind, ncar_bulk_z_tair, ncar_bulk_z_shum use o_param, only: WP @@ -51,9 +50,12 @@ subroutine ncar_ocean_fluxes_mode_fesom14(ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_ice) , intent(inout), target :: ice real(kind=WP), dimension(:) , pointer :: T_oc_array, u_w, v_w + real(kind=WP) , pointer :: inv_rhoair, tmelt u_w => ice%srfoce_u(:) v_w => ice%srfoce_v(:) T_oc_array => ice%srfoce_temp(:) + inv_rhoair => ice%thermo%inv_rhoair + tmelt => ice%thermo%tmelt !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, m, cd_n10, ce_n10, ch_n10, cd_n10_rt, cd, ce, ch, cd_rt, zeta, x2, x, psi_m, psi_h, stab, & !$OMP t, ts, q, qs, u, u10, tv, xx, dux, dvy, tstar, qstar, ustar, bstar ) !$OMP DO @@ -164,10 +166,12 @@ subroutine ncar_ocean_fluxes_mode(ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_ice) , intent(inout), target :: ice real(kind=WP), dimension(:) , pointer :: T_oc_array, u_w, v_w + real(kind=WP) , pointer :: inv_rhoair, tmelt u_w => ice%srfoce_u(:) v_w => ice%srfoce_v(:) T_oc_array => ice%srfoce_temp(:) - + inv_rhoair => ice%thermo%inv_rhoair + tmelt => ice%thermo%tmelt !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, m, cd_n10, ce_n10, ch_n10, cd_n10_rt, hl1, cd, ce, ch, cd_rt, x2, x, stab, & diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index c903983ab..0e0465cdf 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -80,7 +80,6 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) use MOD_TRACER use MOD_ICE use o_arrays - use i_therm_param use g_forcing_param use g_forcing_arrays use g_clock @@ -135,6 +134,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) real(kind=WP), dimension(:), pointer :: ice_temp, ice_alb, enthalpyoffuse #endif + real(kind=WP) , pointer :: rhoair #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -156,7 +156,8 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) tmp_oce_heat_flux=> ice%atmcoupl%tmpoce_flx_h(:) tmp_ice_heat_flux=> ice%atmcoupl%tmpice_flx_h(:) #endif - + rhoair => ice%thermo%rhoair + !_____________________________________________________________________________ t1=MPI_Wtime() #if defined (__oasis) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index f5c59c4c1..1784c9eda 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -174,7 +174,6 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH USE o_PARAM - USE i_THERM_PARAM IMPLICIT NONE type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit @@ -257,7 +256,6 @@ subroutine EVPdynamics(ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH USE o_PARAM - USE i_therm_param USE o_ARRAYS USE g_CONFIG USE g_comm_auto @@ -299,6 +297,7 @@ subroutine EVPdynamics(ice, partit, mesh) #if defined (__icepack) real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old #endif + real(kind=WP) , pointer :: inv_rhowat, rhosno, rhoice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -324,7 +323,10 @@ subroutine EVPdynamics(ice, partit, mesh) m_ice_old => ice%data(2)%values_old(:) m_snow_old => ice%data(3)%values_old(:) #endif - + rhosno => ice%thermo%rhosno + rhoice => ice%thermo%rhoice + inv_rhowat => ice%thermo%inv_rhowat + !___________________________________________________________________________ ! If Icepack is used, always update the tracers #if defined (__icepack) diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 5cf4992b3..467d54464 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -202,7 +202,6 @@ subroutine ssh2rhs(ice, partit, mesh) use o_param use mod_mesh use g_config - use i_therm_param implicit none type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit @@ -216,6 +215,7 @@ subroutine ssh2rhs(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: m_ice, m_snow real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m real(kind=WP), dimension(:), pointer :: elevation + real(kind=WP) , pointer :: rhoice, rhosno, inv_rhowat #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -225,6 +225,9 @@ subroutine ssh2rhs(ice, partit, mesh) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) elevation => ice%srfoce_ssh + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + inv_rhowat => ice%thermo%inv_rhowat !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -291,7 +294,6 @@ subroutine stress2rhs_m(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP use o_param - use i_therm_param use mod_mesh use g_config implicit none @@ -308,6 +310,7 @@ subroutine stress2rhs_m(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m + real(kind=WP) , pointer :: rhoice, rhosno #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -322,6 +325,8 @@ subroutine stress2rhs_m(ice, partit, mesh) v_rhs_ice => ice%vice_rhs(:) rhs_a => ice%data(1)%values_rhs(:) rhs_m => ice%data(2)%values_rhs(:) + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -378,7 +383,6 @@ subroutine EVPdynamics_m(ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_param - use i_therm_param use g_config use o_arrays use g_comm_auto @@ -420,6 +424,7 @@ subroutine EVPdynamics_m(ice, partit, mesh) #if defined (__icepack) real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old #endif + real(kind=WP) , pointer :: rhoice, rhosno, inv_rhowat #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -451,7 +456,10 @@ subroutine EVPdynamics_m(ice, partit, mesh) m_ice_old => ice%data(2)%values_old(:) m_snow_old => ice%data(3)%values_old(:) #endif - + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + inv_rhowat => ice%thermo%inv_rhowat + !___________________________________________________________________________ val3=1.0_WP/3.0_WP vale=1.0_WP/(ice%ellipse**2) @@ -460,9 +468,10 @@ subroutine EVPdynamics_m(ice, partit, mesh) rdt=ice%ice_dt steps=ice%evp_rheol_steps + !___________________________________________________________________________ u_ice_aux=u_ice ! Initialize solver variables v_ice_aux=v_ice - + #if defined (__icepack) a_ice_old(:) = a_ice(:) m_ice_old(:) = a_ice(:) @@ -744,7 +753,6 @@ subroutine find_alpha_field_a(ice, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_param - use i_therm_param use g_config #if defined (__icepack) use icedrv_main, only: strength @@ -765,6 +773,7 @@ subroutine find_alpha_field_a(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux real(kind=WP), dimension(:), pointer :: alpha_evp_array + real(kind=WP) , pointer :: rhoice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -780,6 +789,7 @@ subroutine find_alpha_field_a(ice, partit, mesh) u_ice_aux => ice%uice_aux(:) v_ice_aux => ice%vice_aux(:) alpha_evp_array => ice%alpha_evp_array(:) + rhoice => ice%thermo%rhoice !___________________________________________________________________________ val3=1.0_WP/3.0_WP @@ -961,7 +971,6 @@ subroutine EVPdynamics_a(ice, partit, mesh) use o_param USE o_arrays use o_PARAM - use i_therm_param use g_config, only: use_cavity use g_comm_auto use ice_maEVP_interfaces @@ -986,6 +995,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux real(kind=WP), dimension(:), pointer :: beta_evp_array + real(kind=WP) , pointer :: rhoice, rhosno #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -1004,6 +1014,8 @@ subroutine EVPdynamics_a(ice, partit, mesh) u_ice_aux => ice%uice_aux(:) v_ice_aux => ice%vice_aux(:) beta_evp_array => ice%beta_evp_array(:) + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno !___________________________________________________________________________ steps=ice%evp_rheol_steps From b6af78bf123589fa4c5fbf3c1022c96bd6bf25d8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 1 Dec 2021 14:45:45 +0100 Subject: [PATCH 730/909] ewxchange i_therm_para parameters with ice%thermo%... derived parameters --- src/ice_oce_coupling.F90 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 277df9e75..d77daf814 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -253,7 +253,6 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) use g_forcing_param, only: use_virt_salt use g_forcing_arrays use g_support - use i_therm_param use cavity_interfaces #if defined (__icepack) use icedrv_main, only: icepack_to_fesom, & @@ -276,6 +275,7 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) real(kind=WP), dimension(:) , pointer :: a_ice_old real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP) , pointer :: rhoice, rhosno, inv_rhowat #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -290,6 +290,9 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) thdgrsn => ice%thermo%thdgrsn(:) fresh_wa_flux => ice%flx_fw(:) net_heat_flux => ice%flx_h(:) + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + inv_rhowat => ice%thermo%inv_rhowat !___________________________________________________________________________ allocate(flux(myDim_nod2D+eDim_nod2D)) From d40018d7a6b605c9f8b236a739b9f8468c3e18b0 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 1 Dec 2021 14:50:59 +0100 Subject: [PATCH 731/909] ewxchange i_therm_para parameters with ice%thermo%... derived parameters in src/oce_ale_vel_rhs.F90 --- src/oce_ale_vel_rhs.F90 | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index f3f954c46..30f363b25 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -39,7 +39,7 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_ARRAYS, only: coriolis, ssh_gp, pgf_x, pgf_y - use i_therm_param +! use i_therm_param use o_PARAM use g_CONFIG use g_forcing_param, only: use_virt_salt @@ -64,16 +64,20 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UV_rhs real(kind=WP), dimension(:) , pointer :: eta_n real(kind=WP), dimension(:) , pointer :: m_ice, m_snow, a_ice + real(kind=WP) , pointer :: rhoice, rhosno, inv_rhowat #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - UV => dynamics%uv(:,:,:) - UV_rhs => dynamics%uv_rhs(:,:,:) - UV_rhsAB => dynamics%uv_rhsAB(:,:,:) - eta_n => dynamics%eta_n(:) - m_ice => ice%data(2)%values(:) - m_snow => ice%data(3)%values(:) + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + UV_rhsAB => dynamics%uv_rhsAB(:,:,:) + eta_n => dynamics%eta_n(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + inv_rhowat=> ice%thermo%inv_rhowat !___________________________________________________________________________ use_pice=0 From e303791b90aefdb37c9edfc9cf49fc597ddcaa7c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 1 Dec 2021 15:16:31 +0100 Subject: [PATCH 732/909] - detect aleph login nodes from env.sh switcher - add environment file for aleph --- env.sh | 2 ++ env/aleph/shell | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 env/aleph/shell diff --git a/env.sh b/env.sh index f568651e3..6660dca5d 100755 --- a/env.sh +++ b/env.sh @@ -47,6 +47,8 @@ elif [[ $LOGINHOST =~ ^jwlogin[0-9][0-9].juwels$ ]]; then STRATEGY="juwels" elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then STRATEGY="ecaccess.ecmwf.int" +elif [[ $LOGINHOST =~ ^stco-esl[0-9]+$ ]]; then + STRATEGY="aleph" else echo "can not determine environment for host: "$LOGINHOST [ $BEING_EXECUTED = true ] && exit 1 diff --git a/env/aleph/shell b/env/aleph/shell new file mode 100644 index 000000000..20090664f --- /dev/null +++ b/env/aleph/shell @@ -0,0 +1,33 @@ +module unload craype +module load craype/2.6.2 + +module load PrgEnv-cray/6.0.4 +module load alps pbs +module load cray-mpich/7.7.3 +module load craype-x86-skylake +module load cmake/3.14.0 +module load cray-hdf5-parallel/1.10.2.0 +module load cray-netcdf-hdf5parallel/4.6.1.3 +module load fftw/2.1.5.9 +module load proj4/5.1.0 + +export HDF5ROOT=$HDF5_ROOT +export NETCDFFROOT=$NETCDF_DIR +export NETCDFROOT=$NETCDF_DIR +export NETCDF_Fortran_INCLUDE_DIRECTORIES=$NETCDFROOT/include +export NETCDF_CXX_INCLUDE_DIRECTORIES=$NETCDFROOT/include +export NETCDF_CXX_LIBRARIES=$NETCDFROOT/lib +export PERL5LIB=/usr/lib64/perl5 +export XML2ROOT=/usr +export ZLIBROOT=/usr +export TMPDIR=/tmp +export PMI_LABEL_ERROUT=1 +export DR_HOOK_IGNORE_SIGNALS=-1 + + +# enable full MPI thread support level (MPI_THREAD_MULTIPLE) +export MPICH_MAX_THREAD_SAFETY=multiple # to also switch to an alternative (probably with faster locking) multi threading implementation of the cray-mpich library, use the compiler flag -craympich-mt +export MPICH_CRAY_OPT_THREAD_SYNC=0 # the Cray MPICH library falls back to using the pthread_mutex-based thread-synchronization implementation +export MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC (but this one brings a huge speed gain on aleph) + +export FC=ftn CC=cc CXX=CC From d9a6d76052646a423b0ea9906475f7d0d059672c Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 1 Dec 2021 15:24:58 +0100 Subject: [PATCH 733/909] ewxchange i_therm_para parameters with ice%thermo%... derived parameters src/oce_ale_vel_rhs.F90, src/oce_shortwave_pene.F90, src/oce_spp.F90 --- src/oce_ale_vel_rhs.F90 | 1 - src/oce_shortwave_pene.F90 | 3 ++- src/oce_spp.F90 | 5 ++++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 30f363b25..5b9fb0c7d 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -39,7 +39,6 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) USE MOD_PARSUP USE MOD_MESH use o_ARRAYS, only: coriolis, ssh_gp, pgf_x, pgf_y -! use i_therm_param use o_PARAM use g_CONFIG use g_forcing_param, only: use_virt_salt diff --git a/src/oce_shortwave_pene.F90 b/src/oce_shortwave_pene.F90 index cfca17cd8..a40361c01 100644 --- a/src/oce_shortwave_pene.F90 +++ b/src/oce_shortwave_pene.F90 @@ -13,7 +13,6 @@ subroutine cal_shortwave_rad(ice, partit, mesh) USE g_CONFIG use g_forcing_arrays use g_comm_auto - use i_therm_param IMPLICIT NONE type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit @@ -26,11 +25,13 @@ subroutine cal_shortwave_rad(ice, partit, mesh) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice + real(kind=WP) , pointer :: albw #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" a_ice => ice%data(1)%values(:) + albw => ice%thermo%albw !___________________________________________________________________________ !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(m, n2, n3, k, nzmax, nzmin, swsurf, aux, c, c2, c3, c4, c5, v1, v2, sc1, sc2) diff --git a/src/oce_spp.F90 b/src/oce_spp.F90 index e186193e1..56c099e97 100644 --- a/src/oce_spp.F90 +++ b/src/oce_spp.F90 @@ -16,7 +16,6 @@ subroutine cal_rejected_salt(ice, partit, mesh) USE MOD_PARSUP use g_comm_auto use o_tracers -use i_therm_param, only: rhoice, rhowat, Sice use g_config, only: dt implicit none @@ -26,12 +25,16 @@ subroutine cal_rejected_salt(ice, partit, mesh) type(t_mesh) , intent(in), target :: mesh type(t_partit), intent(in), target :: partit real(kind=WP), dimension(:) , pointer :: thdgr, S_oc_array +real(kind=WP) , pointer :: rhoice, rhowat, Sice #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" thdgr => ice%thermo%thdgr S_oc_array => ice%srfoce_salt +rhoice => ice%thermo%rhoice +rhowat => ice%thermo%rhowat +Sice => ice%thermo%Sice aux=rhoice/rhowat*dt do row=1, myDim_nod2d +eDim_nod2D! myDim is sufficient From 9000e5723b3e3457bd13f68567e4f4b47e1d3e03 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 1 Dec 2021 15:48:49 +0100 Subject: [PATCH 734/909] use the aleph workarounds only on aleph --- env/aleph/shell | 2 ++ src/CMakeLists.txt | 7 +++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/env/aleph/shell b/env/aleph/shell index 20090664f..874bdb086 100644 --- a/env/aleph/shell +++ b/env/aleph/shell @@ -30,4 +30,6 @@ export MPICH_MAX_THREAD_SAFETY=multiple # to also switch to an alternative (prob export MPICH_CRAY_OPT_THREAD_SYNC=0 # the Cray MPICH library falls back to using the pthread_mutex-based thread-synchronization implementation export MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC (but this one brings a huge speed gain on aleph) +export ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS='' + export FC=ftn CC=cc CXX=CC diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index e189544ed..958896cee 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,10 +1,9 @@ cmake_minimum_required(VERSION 3.4) project(fesom C Fortran) -if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) - option(ALEPH_CRAYMPICH_WORKAROUNDS "workaround for performance issues on aleph" ON) # todo: enable this switch only on aleph, not with cray compilers in general -endif() -if(${ALEPH_CRAYMPICH_WORKAROUNDS}) +option(ALEPH_CRAYMPICH_WORKAROUNDS "workaround for performance issues on aleph" OFF) + +if(ALEPH_CRAYMPICH_WORKAROUNDS OR DEFINED ENV{ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS}) # todo: enable these options only for our targets add_compile_options(-craympich-mt) # alternative cray-mpich library, about 5 % faster with cray-mpich/7.7.3 on aleph, not available for modules cray-mpich > 7.7.3; todo: test compiling and performance with cray-mpich > 7.7.3 # make sure to also set these variables in the runtime environment: From 14dab3809a170798cc2f5b7e88e90f54d8451fd7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 1 Dec 2021 16:13:46 +0100 Subject: [PATCH 735/909] always reflect whether we have aleph workarounds enabled via the cmake cache variable, even if it has initially been set from the environment --- src/CMakeLists.txt | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 958896cee..f3eee3f9c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,9 +1,14 @@ cmake_minimum_required(VERSION 3.4) project(fesom C Fortran) -option(ALEPH_CRAYMPICH_WORKAROUNDS "workaround for performance issues on aleph" OFF) -if(ALEPH_CRAYMPICH_WORKAROUNDS OR DEFINED ENV{ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS}) +if(DEFINED ENV{ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS}) # be able to set the initial cache value from our env settings for aleph, not only via cmake command + option(ALEPH_CRAYMPICH_WORKAROUNDS "workaround for performance issues on aleph" ON) +else() + option(ALEPH_CRAYMPICH_WORKAROUNDS "workaround for performance issues on aleph" OFF) +endif() + +if(ALEPH_CRAYMPICH_WORKAROUNDS) # todo: enable these options only for our targets add_compile_options(-craympich-mt) # alternative cray-mpich library, about 5 % faster with cray-mpich/7.7.3 on aleph, not available for modules cray-mpich > 7.7.3; todo: test compiling and performance with cray-mpich > 7.7.3 # make sure to also set these variables in the runtime environment: From 4530c0cf54016950e599718a6482049188a12d2a Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 1 Dec 2021 16:14:38 +0100 Subject: [PATCH 736/909] kickout module i_therm_param from ice_modules.F90 do reading of namelist itherm onyl ice_init --- src/gen_model_setup.F90 | 16 +++---- src/ice_modules.F90 | 98 ++++++++++++++++++++--------------------- 2 files changed, 57 insertions(+), 57 deletions(-) diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index 88d8c3018..4f78d5610 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -3,7 +3,7 @@ subroutine setup_model(partit) USE MOD_PARTIT USE MOD_PARSUP use o_param - use i_therm_param +! use i_therm_param use g_forcing_param use g_config use diagnostics, only: ldiag_solver,lcurt_stress_surf,lcurt_stress_surf, ldiag_energy, & @@ -62,13 +62,13 @@ subroutine setup_model(partit) read (fileunit, NML=land_ice) close (fileunit) - if(use_ice) then - nmlfile ='namelist.ice' ! name of ice namelist file - open (newunit=fileunit, file=nmlfile) -! read (fileunit, NML=ice_dyn) - read (fileunit, NML=ice_therm) - close (fileunit) - endif +! if(use_ice) then +! nmlfile ='namelist.ice' ! name of ice namelist file +! open (newunit=fileunit, file=nmlfile) +! ! read (fileunit, NML=ice_dyn) +! read (fileunit, NML=ice_therm) +! close (fileunit) +! endif nmlfile ='namelist.io' ! name of forcing namelist file open (newunit=fileunit, file=nmlfile) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 391b69a89..1f889ecb9 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -53,54 +53,54 @@ ! ! ! ice_ave_steps ! ! -!===================================================================== -module i_therm_param -USE o_PARAM - implicit none -REAL(kind=WP), parameter :: rhoair= 1.3 ! Air density, LY2004 !1.3 AOMIP -REAL(kind=WP), parameter :: inv_rhoair= 1./1.3 ! Air density, LY2004 !1.3 AOMIP -REAL(kind=WP), parameter :: rhowat= 1025. ! Water density -REAL(kind=WP), parameter :: inv_rhowat= 1./1025. ! Inverse Water density -REAL(kind=WP), parameter :: rhoice= 910. ! Ice density, AOMIP -REAL(kind=WP), parameter :: inv_rhoice= 1./910. ! Ice density, AOMIP -REAL(kind=WP), parameter :: rhosno= 290. ! Snow density, AOMIP -REAL(kind=WP), parameter :: inv_rhosno= 1./290. ! Snow density, AOMIP - -REAL(kind=WP), parameter :: cpair=1005. ! Specific heat of air [J/(kg * K)] -REAL(kind=WP), parameter :: cpice=2106. ! Specific heat of ice [J/(kg * K)] -REAL(kind=WP), parameter :: cpsno=2090. ! Specific heat of snow [J/(kg * K)] -REAL(kind=WP), parameter :: cc=rhowat*4190.0 ! Volumetr. heat cap. of water [J/m**3/K](cc = rhowat*cp_water) -REAL(kind=WP), parameter :: cl=rhoice*3.34e5 ! Volumetr. latent heat of ice fusion [J/m**3](cl=rhoice*Lf) -REAL(kind=WP), parameter :: clhw=2.501e6 ! Specific latent heat [J/kg]: water -> water vapor -REAL(kind=WP), parameter :: clhi=2.835e6 ! sea ice-> water vapor - -REAL(kind=WP), parameter :: tmelt=273.15 ! 0 deg C expressed in K -REAL(kind=WP), parameter :: boltzmann=5.67E-8 ! S. Boltzmann const.*longw. emissivity - -REAL(kind=WP) :: con = 2.1656 ! Thermal conductivities: ice; W/m/K -REAL(kind=WP) :: consn = 0.31 ! snow - -REAL(kind=WP) :: Sice = 4.0 ! Ice salinity 3.2--5.0 ppt. - -integer :: iclasses=7 ! Number of ice thickness gradations for ice growth calcs. -REAL(kind=WP) :: h0=1.0 ! Lead closing parameter [m] ! 0.5 - -REAL(kind=WP) :: hmin= 0.01 ! Cut-off ice thickness !! -REAL(kind=WP) :: Armin=0.01 ! Minimum ice concentration !! - -REAL(kind=WP) :: emiss_ice=0.97 ! Emissivity of Snow/Ice, -REAL(kind=WP) :: emiss_wat=0.97 ! Emissivity of open water - -REAL(kind=WP) :: albsn= 0.81 ! Albedo: frozen snow -REAL(kind=WP) :: albsnm= 0.77 ! melting snow -REAL(kind=WP) :: albi= 0.70 ! frozen ice -REAL(kind=WP) :: albim= 0.68 ! melting ice -REAL(kind=WP) :: albw= 0.066 ! open water, LY2004 - - NAMELIST /ice_therm/ Sice, h0, emiss_ice, & - emiss_wat, albsn, albsnm, albi, albim, albw, con, consn - -end module i_therm_param - +! ! !===================================================================== +! ! module i_therm_param +! ! USE o_PARAM +! ! implicit none +! ! REAL(kind=WP), parameter :: rhoair= 1.3 ! Air density, LY2004 !1.3 AOMIP +! ! REAL(kind=WP), parameter :: inv_rhoair= 1./1.3 ! Air density, LY2004 !1.3 AOMIP +! ! REAL(kind=WP), parameter :: rhowat= 1025. ! Water density +! ! REAL(kind=WP), parameter :: inv_rhowat= 1./1025. ! Inverse Water density +! ! REAL(kind=WP), parameter :: rhoice= 910. ! Ice density, AOMIP +! ! REAL(kind=WP), parameter :: inv_rhoice= 1./910. ! Ice density, AOMIP +! ! REAL(kind=WP), parameter :: rhosno= 290. ! Snow density, AOMIP +! ! REAL(kind=WP), parameter :: inv_rhosno= 1./290. ! Snow density, AOMIP +! ! +! ! REAL(kind=WP), parameter :: cpair=1005. ! Specific heat of air [J/(kg * K)] +! ! REAL(kind=WP), parameter :: cpice=2106. ! Specific heat of ice [J/(kg * K)] +! ! REAL(kind=WP), parameter :: cpsno=2090. ! Specific heat of snow [J/(kg * K)] +! ! REAL(kind=WP), parameter :: cc=rhowat*4190.0 ! Volumetr. heat cap. of water [J/m**3/K](cc = rhowat*cp_water) +! ! REAL(kind=WP), parameter :: cl=rhoice*3.34e5 ! Volumetr. latent heat of ice fusion [J/m**3](cl=rhoice*Lf) +! ! REAL(kind=WP), parameter :: clhw=2.501e6 ! Specific latent heat [J/kg]: water -> water vapor +! ! REAL(kind=WP), parameter :: clhi=2.835e6 ! sea ice-> water vapor +! ! +! ! REAL(kind=WP), parameter :: tmelt=273.15 ! 0 deg C expressed in K +! ! REAL(kind=WP), parameter :: boltzmann=5.67E-8 ! S. Boltzmann const.*longw. emissivity +! ! +! ! REAL(kind=WP) :: con = 2.1656 ! Thermal conductivities: ice; W/m/K +! ! REAL(kind=WP) :: consn = 0.31 ! snow +! ! +! ! REAL(kind=WP) :: Sice = 4.0 ! Ice salinity 3.2--5.0 ppt. +! ! +! ! integer :: iclasses=7 ! Number of ice thickness gradations for ice growth calcs. +! ! REAL(kind=WP) :: h0=1.0 ! Lead closing parameter [m] ! 0.5 +! ! +! ! REAL(kind=WP) :: hmin= 0.01 ! Cut-off ice thickness !! +! ! REAL(kind=WP) :: Armin=0.01 ! Minimum ice concentration !! +! ! +! ! REAL(kind=WP) :: emiss_ice=0.97 ! Emissivity of Snow/Ice, +! ! REAL(kind=WP) :: emiss_wat=0.97 ! Emissivity of open water +! ! +! ! REAL(kind=WP) :: albsn= 0.81 ! Albedo: frozen snow +! ! REAL(kind=WP) :: albsnm= 0.77 ! melting snow +! ! REAL(kind=WP) :: albi= 0.70 ! frozen ice +! ! REAL(kind=WP) :: albim= 0.68 ! melting ice +! ! REAL(kind=WP) :: albw= 0.066 ! open water, LY2004 +! ! +! ! NAMELIST /ice_therm/ Sice, h0, emiss_ice, & +! ! emiss_wat, albsn, albsnm, albi, albim, albw, con, consn +! ! +! ! end module i_therm_param +! ! !============================================================================== From 45c0eed757f037f0f832d319e1c3ce2ff83e6b4c Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 1 Dec 2021 16:19:17 +0100 Subject: [PATCH 737/909] kickout module i_therm_param from ice_thermo_cpl.F90 --- src/ice_thermo_cpl.F90 | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index e77057d35..deae891ee 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -20,7 +20,6 @@ subroutine thermodynamics(ice, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - use i_therm_param use g_config use g_forcing_param use g_forcing_arrays @@ -456,7 +455,7 @@ end subroutine ice_growth - subroutine ice_surftemp(h,hsn,a2ihf,t) + subroutine ice_surftemp(ifthermp, h,hsn,a2ihf,t) ! INPUT: ! a2ihf - Total atmo heat flux to ice ! A - Ice fraction @@ -466,9 +465,8 @@ subroutine ice_surftemp(h,hsn,a2ihf,t) ! INPUT/OUTPUT: ! t - Ice surface temperature - use i_therm_param implicit none - + type(t_ice_thermo), intent(in), target :: ithermp !---- atmospheric heat net flux into to ice (provided by OpenIFS) real(kind=WP) a2ihf !---- ocean variables (provided by FESOM) @@ -487,6 +485,13 @@ subroutine ice_surftemp(h,hsn,a2ihf,t) real(kind=WP), parameter :: dice = 0.10_WP ! Thickness for top ice "layer" !---- freezing temperature of sea-water [K] real(kind=WP) :: TFrezs + + real(kind=WP), pointer :: con, consn, cpsno, rhoice, rhosno + con => ice%thermo%con + consn => ice%thermo%consn + cpsno => ice%thermo%cpsno + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno !---- compute freezing temperature of sea-water from salinity TFrezs = -0.0575_WP*S_oc + 1.7105e-3_WP*sqrt(S_oc**3) - 2.155e-4_WP*(S_oc**2)+273.15 @@ -502,7 +507,7 @@ subroutine ice_surftemp(h,hsn,a2ihf,t) t=min(273.15_WP,t) end subroutine ice_surftemp - subroutine ice_albedo(h,hsn,t,alb,geolat) + subroutine ice_albedo(ifthermp, h,hsn,t,alb,geolat) ! INPUT: ! h - ice thickness [m] ! hsn - snow thickness [m] @@ -511,16 +516,20 @@ subroutine ice_albedo(h,hsn,t,alb,geolat) ! ! OUTPUT: ! alb - selected broadband albedo - use i_therm_param implicit none - + type(t_ice_thermo), intent(in), target :: ithermp real(kind=WP) :: h real(kind=WP) :: hsn real(kind=WP) :: t real(kind=WP) :: alb real(kind=WP) :: geolat real(kind=WP) :: melt_pool_alb_reduction - + real(kind=WP), pointer :: albsn, albi, albsnm, albim + albsn => ice%thermo%albsn + albi => ice%thermo%albi + albsnm => ice%thermo%albsnm + albim => ice%thermo%albim + ! set albedo ! ice and snow, freezing and melting conditions are distinguished if (geolat.gt.0.) then !SH does not have melt ponds From 0ca746a05678a1ce3f9f86d519d8a3e78e75d5a6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 1 Dec 2021 16:19:31 +0100 Subject: [PATCH 738/909] kickout module i_therm_param from ifs_interface/ifs_interface.F90 --- src/ifs_interface/ifs_interface.F90 | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index f63bfe83b..224d6e04f 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -349,7 +349,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & USE fesom_main_storage_module, only: fesom => f !USE o_ARRAYS, ONLY : UV ! tr_arr is now tracers, UV in dynamics derived type !USE i_arrays, ONLY : m_ice, a_ice, m_snow - USE i_therm_param, ONLY : tmelt + !USE i_therm_param, ONLY : tmelt USE g_rotate_grid, only: vector_r2g USE parinter USE scripremap @@ -368,7 +368,8 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & integer, pointer :: myDim_nod2D, eDim_nod2D integer, pointer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D real(kind=wpIFS), dimension(:), pointer :: a_ice, m_ice, m_snow - + real(kind=wpIFS) , pointer :: tmelt + ! Message passing information INTEGER, INTENT(IN) :: mype, npes, icomm ! Number Gaussian grid points @@ -385,19 +386,19 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & !#include "associate_mesh.h" ! associate what is needed only - myDim_nod2D => fesom%partit%myDim_nod2D - eDim_nod2D => fesom%partit%eDim_nod2D + myDim_nod2D => fesom%partit%myDim_nod2D + eDim_nod2D => fesom%partit%eDim_nod2D - myDim_elem2D => fesom%partit%myDim_elem2D - eDim_elem2D => fesom%partit%eDim_elem2D - eXDim_elem2D => fesom%partit%eXDim_elem2D + myDim_elem2D => fesom%partit%myDim_elem2D + eDim_elem2D => fesom%partit%eDim_elem2D + eXDim_elem2D => fesom%partit%eXDim_elem2D coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => fesom%mesh%elem2D_nodes - a_ice => fesom%ice%data(1)%values(:) - m_ice => fesom%ice%data(2)%values(:) - m_snow => fesom%ice%data(3)%values(:) - + a_ice => fesom%ice%data(1)%values(:) + m_ice => fesom%ice%data(2)%values(:) + m_snow => fesom%ice%data(3)%values(:) + tmelt => fesom%ice%thermo%tmelt ! scalar const. ! =================================================================== ! From 1e99f5f34a83cf1587c5066fc5eaa3114bb2e3ee Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 1 Dec 2021 16:40:27 +0100 Subject: [PATCH 739/909] fix issue in ice_thermo_cpl.F90 --- src/ice_modules.F90 | 2 +- src/ice_thermo_cpl.F90 | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 1f889ecb9..d49d7f786 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -74,7 +74,7 @@ ! ! REAL(kind=WP), parameter :: clhw=2.501e6 ! Specific latent heat [J/kg]: water -> water vapor ! ! REAL(kind=WP), parameter :: clhi=2.835e6 ! sea ice-> water vapor ! ! -! ! REAL(kind=WP), parameter :: tmelt=273.15 ! 0 deg C expressed in K +! ! REAL(kind=WP), parameter :: tmelt=273.15 cd ! 0 deg C expressed in K ! ! REAL(kind=WP), parameter :: boltzmann=5.67E-8 ! S. Boltzmann const.*longw. emissivity ! ! ! ! REAL(kind=WP) :: con = 2.1656 ! Thermal conductivities: ice; W/m/K diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index deae891ee..9e02f60b6 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -150,13 +150,13 @@ subroutine thermodynamics(ice, partit, mesh) ! energy fluxes ---! t = ice_temp(inod) if(A>Aimin) then - call ice_surftemp(max(h/(max(A,Aimin)),0.05),hsn/(max(A,Aimin)),a2ihf,t) + call ice_surftemp(ice%thermo, max(h/(max(A,Aimin)),0.05), hsn/(max(A,Aimin)), a2ihf, t) ice_temp(inod) = t else ! Freezing temp of saltwater in K ice_temp(inod) = -0.0575_WP*S_oc_array(inod) + 1.7105e-3_WP*sqrt(S_oc_array(inod)**3) -2.155e-4_WP*(S_oc_array(inod)**2)+273.15_WP endif - call ice_albedo(h,hsn,t,alb,geolat) + call ice_albedo(ice%thermo, h, hsn, t, alb, geolat) ice_alb(inod) = alb #endif call ice_growth @@ -455,7 +455,7 @@ end subroutine ice_growth - subroutine ice_surftemp(ifthermp, h,hsn,a2ihf,t) + subroutine ice_surftemp(ithermp, h,hsn,a2ihf,t) ! INPUT: ! a2ihf - Total atmo heat flux to ice ! A - Ice fraction @@ -507,7 +507,7 @@ subroutine ice_surftemp(ifthermp, h,hsn,a2ihf,t) t=min(273.15_WP,t) end subroutine ice_surftemp - subroutine ice_albedo(ifthermp, h,hsn,t,alb,geolat) + subroutine ice_albedo(ithermp, h, hsn, t, alb, geolat) ! INPUT: ! h - ice thickness [m] ! hsn - snow thickness [m] From f4fd113786a9879bbd0734ae1c9c97ac3197c243 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 1 Dec 2021 16:50:59 +0100 Subject: [PATCH 740/909] fix issue in ice_thermo_cpl.F90 --- src/ice_thermo_cpl.F90 | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 9e02f60b6..929ec1369 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -67,8 +67,9 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: ice_temp, ice_alb, enthalpyoffuse #endif #if defined (__oasis) || defined (__ifsinterface) - real(kind=WP), dimension(:), pointer :: oce_heat_flux, ice_heat_flux + real(kind=WP), dimension(:) , pointer :: oce_heat_flux, ice_heat_flux #endif + real(kind=WP) , pointer :: rhoice, rhosno, rhowat, Sice, cl, cc, cpice, consn, con myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D @@ -95,6 +96,18 @@ subroutine thermodynamics(ice, partit, mesh) oce_heat_flux => ice%atmcoupl%oce_flx_h(:) ice_heat_flux => ice%atmcoupl%ice_flx_h(:) #endif + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + rhowat => ice%thermo%rhowat + Sice => ice%thermo%Sice + cl => ice%thermo%cl + cc => ice%thermo%cc + cpice => ice%thermo%cpice + consn => ice%thermo%consn + con => ice%thermo%con + rhoice => ice%thermo%rhoice + + !_____________________________________________________________________________ rsss = ref_sss From d6c0a18e8ca1b4e0ca5674321912d371feb5b938 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 1 Dec 2021 17:20:37 +0100 Subject: [PATCH 741/909] synchronize all MPI processes for each mesh level in input/output or else aleph is sometimes 30 times slower (especially for real4) --- src/CMakeLists.txt | 1 + src/io_fesom_file.F90 | 8 ++++++++ src/io_meandata.F90 | 8 ++++++++ 3 files changed, 17 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index f3eee3f9c..32554bbe5 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -16,6 +16,7 @@ if(ALEPH_CRAYMPICH_WORKAROUNDS) # MPICH_CRAY_OPT_THREAD_SYNC=0 # the Cray MPICH library falls back to using the pthread_mutex-based thread-synchronization implementation # MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC (but this one brings a huge speed gain on aleph) add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # reading restarts is slow when doing it on parallel on aleph, switch it off for now + add_compile_options(-DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS) endif() #add_compile_options(-DTRANSPOSE_OUTPUT) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index fc20832b1..f816f688c 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -187,6 +187,10 @@ subroutine read_and_scatter_variables(this) end if do lvl=1, nlvl +#ifdef DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS + ! aleph cray-mpich workaround + call MPI_Barrier(this%comm, mpierr) +#endif if(this%is_iorank()) then if(is_2d) then call this%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) @@ -239,6 +243,10 @@ subroutine gather_and_write_variables(this) end if do lvl=1, nlvl +#ifdef DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS + ! aleph cray-mpich workaround + call MPI_Barrier(this%comm, mpierr) +#endif ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI laux = var%local_data_copy(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 4e38135f6..471fdace8 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -722,6 +722,10 @@ subroutine write_mean(entry, entry_index) if(.not. allocated(entry%aux_r8)) allocate(entry%aux_r8(size2)) end if do lev=1, size1 +#ifdef DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS + ! aleph cray-mpich workaround + call MPI_Barrier(this%comm, MPIERR) +#endif if(.not. entry%is_elem_based) then call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm) else @@ -746,6 +750,10 @@ subroutine write_mean(entry, entry_index) if(.not. allocated(entry%aux_r4)) allocate(entry%aux_r4(size2)) end if do lev=1, size1 +#ifdef DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS + ! aleph cray-mpich workaround + call MPI_Barrier(this%comm, MPIERR) +#endif if(.not. entry%is_elem_based) then call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm) else From 400f151954c8c046957eafc97e971e871a867a60 Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Wed, 1 Dec 2021 20:41:21 +0100 Subject: [PATCH 742/909] use reduction... --- src/oce_muscl_adv.F90 | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index ddadb5657..418d4831b 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -57,8 +57,8 @@ subroutine muscl_adv_init(twork, partit, mesh) !___________________________________________________________________________ nn_size=0 -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, k) -!$OMP DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) +!$OMP DO REDUCTION(max: k) do n=1, myDim_nod2D ! get number of neighbouring nodes from sparse stiffness matrix ! stiffnes matrix filled up in subroutine init_stiff_mat_ale @@ -68,17 +68,11 @@ subroutine muscl_adv_init(twork, partit, mesh) ! next value switches to a new row ! --> SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) gives maximum number of ! neighbouring nodes within a single row of the sparse matrix - k=SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) - -!$OMP CRITICAL - if (k > nn_size) then - nn_size=k ! nnum maximum number of neighbouring nodes - end if -!$OMP END CRITICAL - + k=max(k, SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n)) end do !$OMP END DO !$OMP END PARALLEL + nn_size=k !___________________________________________________________________________ allocate(mesh%nn_num(myDim_nod2D), mesh%nn_pos(nn_size,myDim_nod2D)) nn_num(1:myDim_nod2D) => mesh%nn_num From a51508f277f0de9b9a125ae5e3584402a6b7bfe3 Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Wed, 1 Dec 2021 20:55:41 +0100 Subject: [PATCH 743/909] ... --- src/oce_muscl_adv.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index 418d4831b..07ce574ee 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -57,6 +57,7 @@ subroutine muscl_adv_init(twork, partit, mesh) !___________________________________________________________________________ nn_size=0 + k=0 !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) !$OMP DO REDUCTION(max: k) do n=1, myDim_nod2D From 5dc02d023ebb06ca7a39ac1ef1268c291c7cc8ac Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 2 Dec 2021 09:48:17 +0100 Subject: [PATCH 744/909] - fix typo in preprocessor definition - add ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS status to the info module - add missong declaration - fix variable name --- src/info_module.F90 | 5 +++++ src/io_fesom_file.F90 | 6 ++++-- src/io_meandata.F90 | 8 ++++---- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/info_module.F90 b/src/info_module.F90 index a30129b87..c6354dad5 100644 --- a/src/info_module.F90 +++ b/src/info_module.F90 @@ -101,6 +101,11 @@ subroutine print_definitions() print '(g0)', 'TRANSPOSE_OUTPUT is ON' #else print '(g0)', 'TRANSPOSE_OUTPUT is OFF' +#endif +#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS + print '(g0)', 'ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS is ON' +#else + print '(g0)', 'ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS is OFF' #endif end subroutine diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index f816f688c..64b46e78f 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -169,6 +169,7 @@ subroutine read_and_scatter_variables(this) integer last_rec_idx type(var_info), pointer :: var real(kind=8), allocatable :: laux(:) + integer mpierr last_rec_idx = this%rec_count() @@ -187,7 +188,7 @@ subroutine read_and_scatter_variables(this) end if do lvl=1, nlvl -#ifdef DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS +#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS ! aleph cray-mpich workaround call MPI_Barrier(this%comm, mpierr) #endif @@ -225,6 +226,7 @@ subroutine gather_and_write_variables(this) logical is_2d real(kind=8), allocatable :: laux(:) type(var_info), pointer :: var + integer mpierr if(this%is_iorank()) this%rec_cnt = this%rec_count()+1 @@ -243,7 +245,7 @@ subroutine gather_and_write_variables(this) end if do lvl=1, nlvl -#ifdef DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS +#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS ! aleph cray-mpich workaround call MPI_Barrier(this%comm, mpierr) #endif diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 471fdace8..2ec904bbd 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -722,9 +722,9 @@ subroutine write_mean(entry, entry_index) if(.not. allocated(entry%aux_r8)) allocate(entry%aux_r8(size2)) end if do lev=1, size1 -#ifdef DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS +#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS ! aleph cray-mpich workaround - call MPI_Barrier(this%comm, MPIERR) + call MPI_Barrier(entry%comm, MPIERR) #endif if(.not. entry%is_elem_based) then call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm) @@ -750,9 +750,9 @@ subroutine write_mean(entry, entry_index) if(.not. allocated(entry%aux_r4)) allocate(entry%aux_r4(size2)) end if do lev=1, size1 -#ifdef DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS +#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS ! aleph cray-mpich workaround - call MPI_Barrier(this%comm, MPIERR) + call MPI_Barrier(entry%comm, MPIERR) #endif if(.not. entry%is_elem_based) then call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm) From c6f63d058aba84c95731f2353e53e9e55514a780 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 2 Dec 2021 12:39:50 +0100 Subject: [PATCH 745/909] changed GNU compiler options for the sake of reproducibility. with the old tests rewriting the loops led to slightly different results. checks have been updated accordingly! --- setups/test_pi/setup.yml | 17 ++++++----------- src/CMakeLists.txt | 7 ++++--- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/setups/test_pi/setup.yml b/setups/test_pi/setup.yml index e1222bc86..2b9d4a51a 100644 --- a/setups/test_pi/setup.yml +++ b/setups/test_pi/setup.yml @@ -59,17 +59,12 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.2691276598479261 - salt: 23.944024679303666 - temp: 1.701768750033021 - sst: 8.531528640978305 - u: -0.0014072137861434184 - v: 0.00014184602459601167 - - - - - + a_ice: 0.26912765975496816 + salt: 23.944024679315966 + sst: 8.531528641557886 + temp: 1.7017687500626169 + u: -0.0014072137916283753 + v: 0.0001418460244606028 diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index efd831979..29b809b2b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -91,10 +91,11 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) - target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) -# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) +# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) - target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) +# target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + target_compile_options(${PROJECT_NAME} PRIVATE -O2 -g -ffloat-store -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" endif() From 1d6f142200b5a2c6857c33362c9e7a13cab62926 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 2 Dec 2021 13:34:24 +0100 Subject: [PATCH 746/909] bug fix in viscosity option (opt_visc = 6) introduced when solving the OMP deadlock issues in oce_dyn.F90 --- src/oce_dyn.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 015f205a6..bfabff4f5 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -581,7 +581,7 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) call omp_unset_lock(partit%plock(el(1))) call omp_set_lock (partit%plock(el(2))) #endif - UV_rhs(1, nzmin:nzmax-1, el(2))=UV_rhs(2, nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) + UV_rhs(1, nzmin:nzmax-1, el(2))=UV_rhs(1, nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) UV_rhs(2, nzmin:nzmax-1, el(2))=UV_rhs(2, nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1)/elem_area(el(2)) #if defined(_OPENMP) call omp_unset_lock(partit%plock(el(2))) From 60b46bdcbcc17683cd308d674e374b586e74f9a1 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 2 Dec 2021 16:28:12 +0100 Subject: [PATCH 747/909] Thanks Sergey!!! he provided us with a new solver code. It works same fast as PARMS in the channel test cases but is slower on CORE2 mesh (for the moment). Still not OMP parallelized but there is good potential for this.i The preconditioner is the same as in MITgcm. shall be swithced on manually in src/MOD_DYN.F90 (see comments in T_SOLVERINFO) --- src/MOD_DYN.F90 | 11 ++- src/MOD_MESH.F90 | 1 + src/oce_ale.F90 | 10 ++ src/solver.F90 | 234 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 254 insertions(+), 2 deletions(-) create mode 100644 src/solver.F90 diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index 77438f64b..6f2ffa7ef 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -17,7 +17,14 @@ MODULE MOD_DYN integer :: fillin = 3 integer :: lutype = 2 real(kind=WP) :: droptol = 1.e-8 - real(kind=WP) :: soltol = 1e-10 !1.e-10 +!!! PARMS Solver + real(kind=WP) :: soltol = 1e-10 ! default for PARMS + logical :: use_parms = .TRUE. +!!! +!!! Sergey's Solver +! real(kind=WP) :: soltol = 1e-6 ! default for PARMS +! logical :: use_parms = .FALSE. +!!! contains procedure WRITE_T_SOLVERINFO procedure READ_T_SOLVERINFO @@ -268,4 +275,4 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) end subroutine READ_T_DYN -END MODULE MOD_DYN \ No newline at end of file +END MODULE MOD_DYN diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index 8dc1c6414..808de3564 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -16,6 +16,7 @@ MODULE MOD_MESH integer(int32), allocatable, dimension(:) :: rowptr integer(int32), allocatable, dimension(:) :: colind_loc integer(int32), allocatable, dimension(:) :: rowptr_loc + real(kind=WP), allocatable, dimension(:) :: pr_values !preconditioner values END TYPE SPARSE_MATRIX TYPE T_MESH diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index d46eab29a..0c50d009a 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2527,6 +2527,8 @@ subroutine solve_ssh_ale(dynamics, partit, mesh) use g_comm_auto use g_config, only: which_ale use iso_c_binding, only: C_INT, C_DOUBLE + use ssh_solve_preconditioner_interface + use ssh_solve_cg_interface implicit none #include "fparms.h" type(t_dyn) , intent(inout), target :: dynamics @@ -2536,6 +2538,7 @@ subroutine solve_ssh_ale(dynamics, partit, mesh) logical, save :: lfirst=.true. integer(kind=C_INT) :: n3, reuse, new_values integer :: n + !___________________________________________________________________________ ! interface for solver interface @@ -2572,6 +2575,13 @@ end subroutine psolve droptol => dynamics%solverinfo%droptol soltol => dynamics%solverinfo%soltol +if (.not. dynamics%solverinfo%use_parms) then +if (lfirst) call ssh_solve_preconditioner(partit, mesh) +call ssh_solve_cg(dynamics%d_eta, dynamics%ssh_rhs, soltol, maxiter, partit, mesh) +lfirst=.false. +return +end if + !___________________________________________________________________________ if (trim(which_ale)=='linfs') then reuse=0 diff --git a/src/solver.F90 b/src/solver.F90 new file mode 100644 index 000000000..c1d8f1a0f --- /dev/null +++ b/src/solver.F90 @@ -0,0 +1,234 @@ +module ssh_solve_preconditioner_interface + interface + subroutine ssh_solve_preconditioner(partit, mesh) + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + end interface +end module + +module ssh_solve_cg_interface + interface + subroutine ssh_solve_cg(x, rhs, tol, maxiter, partit, mesh) + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + real(kind=WP), intent(inout) :: x(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: rhs(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: tol + integer, intent(in) :: maxiter + end subroutine + end interface +end module +!========================================================================= +subroutine ssh_solve_preconditioner(partit, mesh) + ! Preconditioner follows MITgcm (JGR, 102,5753-5766, 1997) + ! If the row r of the ssh equation is a_r eta_r +\sum a_i\eta_i=rhs_row_r + ! where summation is over all nodes neighboring node r, + ! the inverse of the preconditioner matrix has the coefficients + ! 1/a_r, .... -2*a_i/a_r/(a_r+(a_diag)_i) .... + ! Here (a_diag)_i is the diagonal value in row i of the ssh matrix. + + ! The inverse of preconditioner matrix (M^{-1} in general notation and K in the + ! paper cited) is, in reality, one iteration of the + ! Jacobi method, with symmetrization. We need symmetrization to be able to use + ! the conjugate gradient method. + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + USE g_comm_auto + IMPLICIT NONE + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + integer :: nend, row, node, n, offset + real(kind=WP), allocatable :: diag_values(:) + real(kind=WP), pointer :: pr_values(:) + integer, pointer :: rptr(:), cind(:) + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + nend=ssh_stiff%rowptr(myDim_nod2D+1)-ssh_stiff%rowptr(1) + allocate(mesh%ssh_stiff%pr_values(nend)) ! Will store the values of inverse preconditioner matrix + pr_values=>mesh%ssh_stiff%pr_values + cind =>mesh%ssh_stiff%colind_loc + rptr =>mesh%ssh_stiff%rowptr_loc + allocate(diag_values(myDim_nod2D+eDim_nod2D)) ! Temporary, will be thrown away + + DO row=1, myDim_nod2D + offset=ssh_stiff%rowptr(row)- ssh_stiff%rowptr(1)+1 + diag_values(row)=ssh_stiff%values(offset) + END DO + call exchange_nod(diag_values, partit) ! We have diagonal values + ! ========== + ! Fill in the preconditioner + ! ========== + DO row=1, myDim_nod2D + offset=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1) + nend=ssh_stiff%rowptr(row+1)-ssh_stiff%rowptr(row) + pr_values(offset+1)=1.0_WP/ssh_stiff%values(offset+1) + DO n=2, nend + node=cind(offset+n) ! Will be ssh_stiff$colind(offset+n) + pr_values(n+offset)=-0.5_WP*(ssh_stiff%values(n+offset)/ssh_stiff%values(1+offset))/ & + (ssh_stiff%values(1+offset)+ diag_values(node)) + END DO + END DO + deallocate(diag_values) +end subroutine ssh_solve_preconditioner + +! ======================================================== +subroutine ssh_solve_cg(x,rhs, tol, maxiter, partit, mesh) + ! Conjugate gradient solver + ! Our ssh matrix is symmetric, because we compute divergencethe contributions as + ! integrated over area of scalar control volume. + ! + ! I tried first to follow the MITgcm paper, but I have doubts about + ! their computations of beta. The variant below -- see Wikipedia. + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + USE g_comm_auto + IMPLICIT NONE + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + real(kind=WP), intent(inout) :: x(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: rhs(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: tol + integer, intent(in) :: maxiter + integer :: row, nini, nend, iter + real(kind=WP) :: sprod(2), s_old, s_aux, al, be, rtol + integer :: req + real(kind=WP), allocatable :: rr(:), zz(:), pp(:), App(:) + real(kind=WP), pointer :: pr_values(:) + integer, pointer :: rptr(:), cind(:) + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + pr_values=>mesh%ssh_stiff%pr_values + cind =>mesh%ssh_stiff%colind_loc + rptr =>mesh%ssh_stiff%rowptr_loc + + row=myDim_nod2D+eDim_nod2D + allocate(rr(row), zz(row), pp(row), App(row)) + rr=0.0_WP + zz=0.0_WP + pp=0.0_WP + App=0.0_WP + + ! ============== + ! Initialization. We solve AX=b, r_0=b-AX_0 + ! ============== + ! Define working tolerance: + ! ============== + s_old=sum(rhs(1:myDim_nod2D)*rhs(1:myDim_nod2D)) + call MPI_Iallreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD, req, MPIerr) + call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) + rtol=tol*sqrt(s_old/real(nod2D,WP)) + ! ============== + ! Compute r0 + ! ============== + DO row=1, myDim_nod2D + !nini=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1)+1 + !nend=ssh_stiff%rowptr(row+1)-ssh_stiff%rowptr(1) + !rr(row)=rhs(row)-sum(ssh_stiff%values(nini:nend)*X(cind(nini:nend))) + rr(row)=rhs(row)-sum(ssh_stiff%values(rptr(row):rptr(row+1)-1)* & + X(cind(rptr(row):rptr(row+1)-1))) + END DO + call exchange_nod(rr, partit) + ! ============= + ! z_0=M^{-1} r_0 (M^{-1} is the precondit. matrix) + ! pp is the search direction + ! ============= + DO row=1, myDim_nod2D + zz(row)= sum(pr_values(rptr(row):rptr(row+1)-1)*rr(cind(rptr(row):rptr(row+1)-1))) + pp(row)=zz(row) + END DO + ! =============== + ! Scalar product of r*z + ! =============== + s_old=sum(rr(1:myDim_nod2D)*zz(1:myDim_nod2D)) + call MPI_Iallreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD, req, MPIerr) + call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) + + ! =============== + ! Iterations + ! =============== + Do iter=1, maxiter + ! ============ + ! Compute Ap + ! ============ + call exchange_nod(pp, partit) ! Update before matrix-vector multiplications + + DO row=1, myDim_nod2D + App(row)=sum(ssh_stiff%values(rptr(row):rptr(row+1)-1)*pp(cind(rptr(row):rptr(row+1)-1))) + END DO + ! ============ + ! Scalar products for alpha + ! ============ + + s_aux=sum(pp(1:myDim_nod2D)*App(1:myDim_nod2D)) + + call MPI_Iallreduce(MPI_IN_PLACE, s_aux, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD, req, MPIerr) + call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) + al=s_old/s_aux + ! =========== + ! New X and residual r + ! =========== + DO row=1, myDim_nod2D + X(row)=X(row)+al*pp(row) + rr(row)=rr(row)-al*App(row) + END DO + ! =========== + ! New z + ! =========== + call exchange_nod(rr, partit) ! Update before matrix-vector multiplications + + DO row=1, myDim_nod2D + zz(row)= sum(pr_values(rptr(row):rptr(row+1)-1)*rr(cind(rptr(row):rptr(row+1)-1))) + END DO + + ! =========== + ! Scalar products for beta + ! =========== + sprod(1)=sum(rr(1:myDim_nod2D)*zz(1:myDim_nod2D)) + sprod(2)=sum(rr(1:myDim_nod2D)*rr(1:myDim_nod2D)) + + call MPI_Iallreduce(MPI_IN_PLACE, sprod, 2, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD, req, MPIerr) + call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) + ! =========== + ! Exit if tolerance is achieved + ! =========== + if(sqrt(sprod(2)/nod2D)< rtol) then + !write(*,*) mype, 'exit', iter + exit + endif + be=sprod(1)/s_old + s_old=sprod(1) + ! =========== + ! New p + ! =========== + DO row=1,myDim_nod2D + pp(row)=zz(row)+be*pp(row) + end do + END DO + deallocate(App,zz,pp,rr) + + ! At the end: The result is in X, but it needs a halo exchange. + call exchange_nod(x, partit) +end subroutine ssh_solve_cg + +! =================================================================== + From 324a566e823ee84a9d4f1ae8cec47239f394b250 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 2 Dec 2021 17:30:47 +0100 Subject: [PATCH 748/909] 1. OMP for Sergey's solver 2. some minor fixes in the code --- src/MOD_DYN.F90 | 17 ++++-- src/oce_ale.F90 | 7 +-- src/oce_dyn.F90 | 3 +- src/solver.F90 | 152 ++++++++++++++++++++++++++++++------------------ 4 files changed, 113 insertions(+), 66 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index 6f2ffa7ef..e91298c13 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -18,13 +18,14 @@ MODULE MOD_DYN integer :: lutype = 2 real(kind=WP) :: droptol = 1.e-8 !!! PARMS Solver - real(kind=WP) :: soltol = 1e-10 ! default for PARMS - logical :: use_parms = .TRUE. +! real(kind=WP) :: soltol = 1e-10 ! default for PARMS +! logical :: use_parms = .TRUE. !!! !!! Sergey's Solver -! real(kind=WP) :: soltol = 1e-6 ! default for PARMS -! logical :: use_parms = .FALSE. + real(kind=WP) :: soltol = 1e-5 ! default for PARMS + logical :: use_parms = .FALSE. !!! + real(kind=WP), allocatable :: rr(:), zz(:), pp(:), App(:) contains procedure WRITE_T_SOLVERINFO procedure READ_T_SOLVERINFO @@ -133,6 +134,10 @@ subroutine WRITE_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%lutype write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%droptol write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%soltol + call write_bin_array(tsolverinfo%rr, unit, iostat, iomsg) + call write_bin_array(tsolverinfo%zz, unit, iostat, iomsg) + call write_bin_array(tsolverinfo%pp, unit, iostat, iomsg) + call write_bin_array(tsolverinfo%App, unit, iostat, iomsg) end subroutine WRITE_T_SOLVERINFO subroutine READ_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) @@ -148,6 +153,10 @@ subroutine READ_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%lutype read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%droptol read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%soltol + call read_bin_array(tsolverinfo%rr, unit, iostat, iomsg) + call read_bin_array(tsolverinfo%zz, unit, iostat, iomsg) + call read_bin_array(tsolverinfo%pp, unit, iostat, iomsg) + call read_bin_array(tsolverinfo%App, unit, iostat, iomsg) end subroutine READ_T_SOLVERINFO ! diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 0c50d009a..531afc6ec 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1943,8 +1943,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) integer :: el(2), enodes(2), n, nz, ed, nzmin, nzmax, uln1, uln2, nln1, nln2 real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, dd, dd1, dddt, cflmax ! still to be understood but if you allocate these arrays statically the results will be different: - ! real(kind=WP) :: c1(mesh%nl-1), c2(mesh%nl-1) - real(kind=WP) :: c1(50), c2(50) + real(kind=WP) :: c1(mesh%nl-1), c2(mesh%nl-1) ! --> zlevel with local zstar real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int real(kind=WP), dimension(:), allocatable :: max_dhbar2distr, cumsum_maxdhbar, distrib_dhbar @@ -2576,8 +2575,8 @@ end subroutine psolve soltol => dynamics%solverinfo%soltol if (.not. dynamics%solverinfo%use_parms) then -if (lfirst) call ssh_solve_preconditioner(partit, mesh) -call ssh_solve_cg(dynamics%d_eta, dynamics%ssh_rhs, soltol, maxiter, partit, mesh) +if (lfirst) call ssh_solve_preconditioner(dynamics%solverinfo, partit, mesh) +call ssh_solve_cg(dynamics%d_eta, dynamics%ssh_rhs, dynamics%solverinfo, partit, mesh) lfirst=.false. return end if diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index bfabff4f5..bdc75e204 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -231,8 +231,7 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax ! still to be understood but if you allocate these arrays statically the results will be different: - ! real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) - real(kind=8) :: update_u(50), update_v(50) + real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs diff --git a/src/solver.F90 b/src/solver.F90 index c1d8f1a0f..ada086983 100644 --- a/src/solver.F90 +++ b/src/solver.F90 @@ -1,33 +1,34 @@ module ssh_solve_preconditioner_interface interface - subroutine ssh_solve_preconditioner(partit, mesh) + subroutine ssh_solve_preconditioner(solverinfo, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(inout), target :: mesh + USE MOD_DYN + type(t_solverinfo), intent(inout), target :: solverinfo + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh end subroutine end interface end module module ssh_solve_cg_interface interface - subroutine ssh_solve_cg(x, rhs, tol, maxiter, partit, mesh) + subroutine ssh_solve_cg(x, rhs, solverinfo, partit, mesh) use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP USE MOD_DYN - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(inout), target :: mesh - real(kind=WP), intent(inout) :: x(partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP), intent(in) :: rhs(partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP), intent(in) :: tol - integer, intent(in) :: maxiter + type(t_solverinfo), intent(inout), target :: solverinfo + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + real(kind=WP), intent(inout) :: x(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: rhs(partit%myDim_nod2D+partit%eDim_nod2D) end subroutine end interface end module !========================================================================= -subroutine ssh_solve_preconditioner(partit, mesh) +subroutine ssh_solve_preconditioner(solverinfo, partit, mesh) ! Preconditioner follows MITgcm (JGR, 102,5753-5766, 1997) ! If the row r of the ssh equation is a_r eta_r +\sum a_i\eta_i=rhs_row_r ! where summation is over all nodes neighboring node r, @@ -45,8 +46,9 @@ subroutine ssh_solve_preconditioner(partit, mesh) USE MOD_DYN USE g_comm_auto IMPLICIT NONE - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(inout), target :: mesh + type(t_solverinfo), intent(inout), target :: solverinfo + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh integer :: nend, row, node, n, offset real(kind=WP), allocatable :: diag_values(:) real(kind=WP), pointer :: pr_values(:) @@ -83,10 +85,17 @@ subroutine ssh_solve_preconditioner(partit, mesh) END DO END DO deallocate(diag_values) + + n=myDim_nod2D+eDim_nod2D + allocate(solverinfo%rr(n), solverinfo%zz(n), solverinfo%pp(n), solverinfo%App(n)) + solverinfo%rr =0.0_WP + solverinfo%zz =0.0_WP + solverinfo%pp =0.0_WP + solverinfo%App=0.0_WP end subroutine ssh_solve_preconditioner ! ======================================================== -subroutine ssh_solve_cg(x,rhs, tol, maxiter, partit, mesh) +subroutine ssh_solve_cg(x, rhs, solverinfo, partit, mesh) ! Conjugate gradient solver ! Our ssh matrix is symmetric, because we compute divergencethe contributions as ! integrated over area of scalar control volume. @@ -99,19 +108,18 @@ subroutine ssh_solve_cg(x,rhs, tol, maxiter, partit, mesh) USE MOD_DYN USE g_comm_auto IMPLICIT NONE - type(t_partit),intent(inout), target :: partit - type(t_mesh), intent(inout), target :: mesh - real(kind=WP), intent(inout) :: x(partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP), intent(in) :: rhs(partit%myDim_nod2D+partit%eDim_nod2D) - real(kind=WP), intent(in) :: tol - integer, intent(in) :: maxiter + type(t_solverinfo), intent(inout), target :: solverinfo + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + real(kind=WP), intent(inout) :: x(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: rhs(partit%myDim_nod2D+partit%eDim_nod2D) integer :: row, nini, nend, iter real(kind=WP) :: sprod(2), s_old, s_aux, al, be, rtol integer :: req - real(kind=WP), allocatable :: rr(:), zz(:), pp(:), App(:) - real(kind=WP), pointer :: pr_values(:) + real(kind=WP), pointer :: pr_values(:), rr(:), zz(:), pp(:), App(:) integer, pointer :: rptr(:), cind(:) + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" @@ -121,113 +129,145 @@ subroutine ssh_solve_cg(x,rhs, tol, maxiter, partit, mesh) cind =>mesh%ssh_stiff%colind_loc rptr =>mesh%ssh_stiff%rowptr_loc - row=myDim_nod2D+eDim_nod2D - allocate(rr(row), zz(row), pp(row), App(row)) - rr=0.0_WP - zz=0.0_WP - pp=0.0_WP - App=0.0_WP - + rr =>solverinfo%rr + zz =>solverinfo%zz + pp =>solverinfo%pp + App=>solverinfo%App + ! ============== ! Initialization. We solve AX=b, r_0=b-AX_0 ! ============== ! Define working tolerance: - ! ============== - s_old=sum(rhs(1:myDim_nod2D)*rhs(1:myDim_nod2D)) - call MPI_Iallreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD, req, MPIerr) + ! ============== + s_old=0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_old) +!$OMP DO + DO row=1, myDim_nod2D + s_old=s_old+rhs(row)*rhs(row) + END DO +!$OMP END DO +!$OMP END PARALLEL + + call MPI_Iallreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, req, MPIerr) call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) - rtol=tol*sqrt(s_old/real(nod2D,WP)) + rtol=solverinfo%soltol*sqrt(s_old/real(nod2D,WP)) ! ============== ! Compute r0 ! ============== +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) DO row=1, myDim_nod2D - !nini=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1)+1 - !nend=ssh_stiff%rowptr(row+1)-ssh_stiff%rowptr(1) - !rr(row)=rhs(row)-sum(ssh_stiff%values(nini:nend)*X(cind(nini:nend))) rr(row)=rhs(row)-sum(ssh_stiff%values(rptr(row):rptr(row+1)-1)* & X(cind(rptr(row):rptr(row+1)-1))) END DO +!$OMP END PARALLEL DO call exchange_nod(rr, partit) +!$OMP BARRIER ! ============= ! z_0=M^{-1} r_0 (M^{-1} is the precondit. matrix) ! pp is the search direction ! ============= +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) DO row=1, myDim_nod2D zz(row)= sum(pr_values(rptr(row):rptr(row+1)-1)*rr(cind(rptr(row):rptr(row+1)-1))) pp(row)=zz(row) END DO +!$OMP END PARALLEL DO ! =============== ! Scalar product of r*z ! =============== - s_old=sum(rr(1:myDim_nod2D)*zz(1:myDim_nod2D)) - call MPI_Iallreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD, req, MPIerr) + s_old=0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_old) +!$OMP DO + DO row=1, myDim_nod2D + s_old=s_old+rr(row)*zz(row) + END DO +!$OMP END DO +!$OMP END PARALLEL + call MPI_Iallreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, req, MPIerr) call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) ! =============== ! Iterations ! =============== - Do iter=1, maxiter + Do iter=1, solverinfo%maxiter ! ============ ! Compute Ap ! ============ call exchange_nod(pp, partit) ! Update before matrix-vector multiplications - +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) DO row=1, myDim_nod2D App(row)=sum(ssh_stiff%values(rptr(row):rptr(row+1)-1)*pp(cind(rptr(row):rptr(row+1)-1))) END DO +!$OMP END PARALLEL DO ! ============ ! Scalar products for alpha ! ============ - s_aux=sum(pp(1:myDim_nod2D)*App(1:myDim_nod2D)) - - call MPI_Iallreduce(MPI_IN_PLACE, s_aux, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD, req, MPIerr) + s_aux=0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_aux) +!$OMP DO + DO row=1, myDim_nod2D + s_aux=s_aux+pp(row)*App(row) + END DO +!$OMP END DO +!$OMP END PARALLEL + call MPI_Iallreduce(MPI_IN_PLACE, s_aux, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, req, MPIerr) call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) al=s_old/s_aux ! =========== ! New X and residual r ! =========== +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) DO row=1, myDim_nod2D - X(row)=X(row)+al*pp(row) + X(row) =X(row) +al* pp(row) rr(row)=rr(row)-al*App(row) END DO +!$OMP END PARALLEL DO ! =========== ! New z ! =========== call exchange_nod(rr, partit) ! Update before matrix-vector multiplications - +!$OMP BARRIER +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) DO row=1, myDim_nod2D zz(row)= sum(pr_values(rptr(row):rptr(row+1)-1)*rr(cind(rptr(row):rptr(row+1)-1))) END DO - +!$OMP END PARALLEL DO ! =========== ! Scalar products for beta ! =========== - sprod(1)=sum(rr(1:myDim_nod2D)*zz(1:myDim_nod2D)) - sprod(2)=sum(rr(1:myDim_nod2D)*rr(1:myDim_nod2D)) +sprod(1)=0.0_WP +sprod(2)=0.0_WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:sprod) + DO row=1, myDim_nod2D + sprod(1)=sprod(1)+rr(row)*zz(row) + sprod(2)=sprod(2)+rr(row)*rr(row) + END DO +!$OMP END PARALLEL DO - call MPI_Iallreduce(MPI_IN_PLACE, sprod, 2, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD, req, MPIerr) + call MPI_Iallreduce(MPI_IN_PLACE, sprod, 2, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, req, MPIerr) call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) +!$OMP BARRIER ! =========== ! Exit if tolerance is achieved ! =========== - if(sqrt(sprod(2)/nod2D)< rtol) then - !write(*,*) mype, 'exit', iter - exit + if (sqrt(sprod(2)/nod2D)< rtol) then + exit endif be=sprod(1)/s_old s_old=sprod(1) ! =========== ! New p ! =========== +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) DO row=1,myDim_nod2D - pp(row)=zz(row)+be*pp(row) - end do + pp(row)=zz(row)+be*pp(row) + END DO +!$OMP END PARALLEL DO END DO - deallocate(App,zz,pp,rr) - ! At the end: The result is in X, but it needs a halo exchange. call exchange_nod(x, partit) +!$OMP BARRIER end subroutine ssh_solve_cg ! =================================================================== From 368dc1b51c5d2ef5c38a6a9945ef1422aa580dfd Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 2 Dec 2021 17:31:43 +0100 Subject: [PATCH 749/909] set PARMS to default for reproducibility purpose --- src/MOD_DYN.F90 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index e91298c13..f338ae443 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -18,12 +18,12 @@ MODULE MOD_DYN integer :: lutype = 2 real(kind=WP) :: droptol = 1.e-8 !!! PARMS Solver -! real(kind=WP) :: soltol = 1e-10 ! default for PARMS -! logical :: use_parms = .TRUE. + real(kind=WP) :: soltol = 1e-10 ! default for PARMS + logical :: use_parms = .TRUE. !!! !!! Sergey's Solver - real(kind=WP) :: soltol = 1e-5 ! default for PARMS - logical :: use_parms = .FALSE. +! real(kind=WP) :: soltol = 1e-5 ! default for PARMS +! logical :: use_parms = .FALSE. !!! real(kind=WP), allocatable :: rr(:), zz(:), pp(:), App(:) contains From a5632f362fdb633e236ee64702730d5dbb2a713c Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Fri, 3 Dec 2021 12:27:02 +0100 Subject: [PATCH 750/909] Use of OMP REDUCTION.. --- src/gen_support.F90 | 12 ++++-------- src/oce_ale_pressure_bv.F90 | 12 ++++-------- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index 753ea3e16..97931ad4d 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -316,23 +316,19 @@ subroutine integrate_nod_2D(data, int2D, partit, mesh) real(kind=WP), intent(inout) :: int2D integer :: row - real(kind=WP) :: lval_omp, lval + real(kind=WP) :: lval #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" lval=0.0_WP -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row, lval_omp) - lval_omp=0.0_WP -!$OMP DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) +!$OMP DO REDUCTION (+: lval) do row=1, myDim_nod2D - lval_omp=lval_omp+data(row)*areasvol(ulevels_nod2D(row),row) + lval=lval+data(row)*areasvol(ulevels_nod2D(row),row) end do !$OMP END DO -!$OMP CRITICAL -lval=lval+lval_omp -!$OMP END CRITICAL !$OMP END PARALLEL int2D=0.0_WP call MPI_AllREDUCE(lval, int2D, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index d4d482171..b547480ba 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -217,7 +217,7 @@ subroutine pressure_bv(tracers, partit, mesh) type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers - real(kind=WP) :: dz_inv, bv, a, a_loc, rho_up, rho_dn, t, s + real(kind=WP) :: dz_inv, bv, a, rho_up, rho_dn, t, s integer :: node, nz, nl1, nzmax, nzmin real(kind=WP) :: rhopot(mesh%nl), bulk_0(mesh%nl), bulk_pz(mesh%nl), bulk_pz2(mesh%nl), rho(mesh%nl), dbsfc1(mesh%nl), db_max real(kind=WP) :: bulk_up, bulk_dn, smallvalue, buoyancy_crit, rho_surf, aux_rho, aux_rho1 @@ -236,20 +236,16 @@ subroutine pressure_bv(tracers, partit, mesh) !___________________________________________________________________________ ! Screen salinity a =0.0_WP -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, a_loc) - a_loc=0.0_WP -!$OMP DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax) +!$OMP DO REDUCTION(min: a) do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) do nz=nzmin,nzmax-1 - a_loc=min(a_loc, salt(nz,node)) + a=min(a, salt(nz,node)) enddo enddo !$OMP END DO -!$OMP CRITICAL - a=min(a, a_loc) -!$OMP END CRITICAL !$OMP END PARALLEL !___________________________________________________________________________ From 79d86ea661f6f16240d6795531a0b9c0ae74bfc7 Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Fri, 3 Dec 2021 12:31:12 +0100 Subject: [PATCH 751/909] OMP REDUCTION... --- src/gen_support.F90 | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index 97931ad4d..90c19bc9d 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -349,25 +349,21 @@ subroutine integrate_nod_3D(data, int3D, partit, mesh) real(kind=WP), intent(inout) :: int3D integer :: k, row - real(kind=WP) :: lval_omp, lval + real(kind=WP) :: lval #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" lval=0.0_WP -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row, k, lval_omp) - lval_omp=0.0_WP -!$OMP DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row, k) +!$OMP DO REDUCTION(+: lval) do row=1, myDim_nod2D do k=ulevels_nod2D(row), nlevels_nod2D(row)-1 lval=lval+data(k, row)*areasvol(k,row)*hnode_new(k,row) ! --> TEST_cavity end do end do !$OMP END DO -!$OMP CRITICAL -lval=lval+lval_omp -!$OMP END CRITICAL !$OMP END PARALLEL int3D=0.0_WP call MPI_AllREDUCE(lval, int3D, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & From 191794c38e8068de6cc907379adb98fc92540800 Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Fri, 3 Dec 2021 12:37:21 +0100 Subject: [PATCH 752/909] Implementation of sum in 'omp_min_max_sum2' --- src/gen_support.F90 | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index 90c19bc9d..aba704771 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -578,6 +578,19 @@ FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) end do end do !$OMP END DO +!$OMP END PARALLEL + + CASE ('sum') + if (.not. present(nan)) vmasked=huge(vmasked) !just some crazy number + val=0 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j) +!$OMP DO REDUCTION(+: val) + do i=pos11, pos12 + do j=pos21, pos22 + if (arr(i,j)/=vmasked) val=val+arr(i,j) + end do + end do +!$OMP END DO !$OMP END PARALLEL CASE DEFAULT From 534093c0392badc3ded3f782cf8f67ab19d5feb6 Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Fri, 3 Dec 2021 12:47:41 +0100 Subject: [PATCH 753/909] Remove unused variable --- src/oce_ale_pressure_bv.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index b547480ba..05f951c11 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -264,7 +264,7 @@ subroutine pressure_bv(tracers, partit, mesh) !___________________________________________________________________________ -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(dz_inv, bv, a, a_loc, rho_up, rho_dn, t, s, node, nz, nl1, nzmax, nzmin, & +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(dz_inv, bv, a, rho_up, rho_dn, t, s, node, nz, nl1, nzmax, nzmin, & !$OMP rhopot, bulk_0, bulk_pz, bulk_pz2, rho, dbsfc1, db_max, bulk_up, bulk_dn, & !$OMP rho_surf, aux_rho, aux_rho1, flag1, flag2) !$OMP DO From ade0627716663d38ed1fc3870f8bd1845c08ff67 Mon Sep 17 00:00:00 2001 From: Sebastian Hinck Date: Fri, 3 Dec 2021 13:39:39 +0100 Subject: [PATCH 754/909] Applied some formating to benchmarking results --- src/fesom_module.F90 | 51 +++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index fdeec1c53..54b19e23f 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -285,7 +285,6 @@ subroutine fesom_runloop(current_nsteps) use fesom_main_storage_module integer, intent(in) :: current_nsteps ! EO parameters - integer n !===================== @@ -426,30 +425,38 @@ subroutine fesom_finalize() call MPI_AllREDUCE(MPI_IN_PLACE, min_rtime, 14, MPI_REAL, MPI_MIN, f%MPI_COMM_FESOM, f%MPIerr) if (f%mype==0) then - write(*,*) '___MODEL RUNTIME mean, min, max per task [seconds]________________________' - write(*,*) ' runtime ocean:',mean_rtime(1), min_rtime(1), max_rtime(1) - write(*,*) ' > runtime oce. mix,pres. :',mean_rtime(2), min_rtime(2), max_rtime(2) - write(*,*) ' > runtime oce. dyn. u,v,w:',mean_rtime(3), min_rtime(3), max_rtime(3) - write(*,*) ' > runtime oce. dyn. ssh :',mean_rtime(4), min_rtime(4), max_rtime(4) - write(*,*) ' > runtime oce. solve ssh:',mean_rtime(5), min_rtime(5), max_rtime(5) - write(*,*) ' > runtime oce. GM/Redi :',mean_rtime(6), min_rtime(6), max_rtime(6) - write(*,*) ' > runtime oce. tracer :',mean_rtime(7), min_rtime(7), max_rtime(7) - write(*,*) ' runtime ice :',mean_rtime(10), min_rtime(10), max_rtime(10) - write(*,*) ' > runtime ice step :',mean_rtime(8), min_rtime(8), max_rtime(8) - write(*,*) ' runtime diag: ', mean_rtime(11), min_rtime(11), max_rtime(11) - write(*,*) ' runtime output: ', mean_rtime(12), min_rtime(12), max_rtime(12) - write(*,*) ' runtime restart:', mean_rtime(13), min_rtime(13), max_rtime(13) - write(*,*) ' runtime forcing:', mean_rtime(14), min_rtime(14), max_rtime(14) - write(*,*) ' runtime total (ice+oce):',mean_rtime(9), min_rtime(9), max_rtime(9) + 41 format (a35,a10,2a15) !Format for table heading + 42 format (a30,3f15.4) !Format for table content + + print 41, '___MODEL RUNTIME per task [seconds]','_____mean_','___________min_', '___________max_' + print 42, ' runtime ocean: ', mean_rtime(1), min_rtime(1), max_rtime(1) + print 42, ' > runtime oce. mix,pres. :', mean_rtime(2), min_rtime(2), max_rtime(2) + print 42, ' > runtime oce. dyn. u,v,w:', mean_rtime(3), min_rtime(3), max_rtime(3) + print 42, ' > runtime oce. dyn. ssh :', mean_rtime(4), min_rtime(4), max_rtime(4) + print 42, ' > runtime oce. solve ssh :', mean_rtime(5), min_rtime(5), max_rtime(5) + print 42, ' > runtime oce. GM/Redi :', mean_rtime(6), min_rtime(6), max_rtime(6) + print 42, ' > runtime oce. tracer :', mean_rtime(7), min_rtime(7), max_rtime(7) + print 42, ' runtime ice : ', mean_rtime(10), min_rtime(10), max_rtime(10) + print 42, ' > runtime ice step : ', mean_rtime(8), min_rtime(8), max_rtime(8) + print 42, ' runtime diag: ', mean_rtime(11), min_rtime(11), max_rtime(11) + print 42, ' runtime output: ', mean_rtime(12), min_rtime(12), max_rtime(12) + print 42, ' runtime restart: ', mean_rtime(13), min_rtime(13), max_rtime(13) + print 42, ' runtime forcing: ', mean_rtime(14), min_rtime(14), max_rtime(14) + print 42, ' runtime total (ice+oce): ', mean_rtime(9), min_rtime(9), max_rtime(9) + + 43 format (a33,i15) !Format Ncores + 44 format (a33,i15) !Format OMP threads + 45 format (a33,f15.4,a4) !Format runtime + write(*,*) - write(*,*) '============================================' - write(*,*) '=========== BENCHMARK RUNTIME ==============' - write(*,*) ' Number of cores : ',f%npes + write(*,*) '======================================================' + write(*,*) '================ BENCHMARK RUNTIME ===================' + print 43, ' Number of cores : ',f%npes #if defined(_OPENMP) - write(*,*) ' Max OpenMP threads : ',OMP_GET_MAX_THREADS() + print 44, ' Max OpenMP threads : ',OMP_GET_MAX_THREADS() #endif - write(*,*) ' Runtime for all timesteps : ',f%runtime_alltimesteps,' sec' - write(*,*) '============================================' + print 45, ' Runtime for all timesteps : ',f%runtime_alltimesteps,' sec' + write(*,*) '======================================================' write(*,*) end if ! call clock_finish From d4954146235701b6430ef477b798b9876d4f368d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 6 Dec 2021 11:41:51 +0100 Subject: [PATCH 755/909] with the recent env settings reading restarts in parallel seems to work now on aleph --- src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 32554bbe5..199c55980 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -15,7 +15,7 @@ if(ALEPH_CRAYMPICH_WORKAROUNDS) # MPICH_MAX_THREAD_SAFETY=multiple # allows highest MPI thread level (i.e. MPI_THREAD_MULTIPLE) # MPICH_CRAY_OPT_THREAD_SYNC=0 # the Cray MPICH library falls back to using the pthread_mutex-based thread-synchronization implementation # MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC (but this one brings a huge speed gain on aleph) - add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # reading restarts is slow when doing it on parallel on aleph, switch it off for now + #add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # reading restarts is slow when doing it on parallel on aleph, switch it off for now add_compile_options(-DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS) endif() #add_compile_options(-DTRANSPOSE_OUTPUT) From ed3103e78930caad8a4006ae584fb6e3c43c2976 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 6 Dec 2021 11:42:36 +0100 Subject: [PATCH 756/909] use transposed output as default on aleph, otherwise it is too slow --- src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 199c55980..9a5185b24 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -17,8 +17,8 @@ if(ALEPH_CRAYMPICH_WORKAROUNDS) # MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC (but this one brings a huge speed gain on aleph) #add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # reading restarts is slow when doing it on parallel on aleph, switch it off for now add_compile_options(-DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS) + add_compile_options(-DTRANSPOSE_OUTPUT) endif() -#add_compile_options(-DTRANSPOSE_OUTPUT) option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) From b44e6a91ce123170b1b4044c946168f369419776 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Mon, 6 Dec 2021 14:11:57 +0100 Subject: [PATCH 757/909] update icepack tests --- setups/test_pi_icepack/setup.yml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/setups/test_pi_icepack/setup.yml b/setups/test_pi_icepack/setup.yml index 35da53540..3bc1c4deb 100644 --- a/setups/test_pi_icepack/setup.yml +++ b/setups/test_pi_icepack/setup.yml @@ -71,13 +71,14 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.30599570824298994 - salt: 23.866195774787034 - temp: 1.717206693389919 - sst: 8.725991935766256 - u: -0.0014448974204450153 - v: 0.00018600030457097512 - aicen: 0.06119914164859799 + a_ice: 0.30599544390558286 + aicen: 0.061199088781116566 + salt: 23.866195697592563 + sst: 8.725992728181598 + temp: 1.717206734648259 + u: -0.001444895079544947 + v: 0.00018599946919795504 + From 6cf5dd75edef3c39c23ca52ac04457d2094b52dc Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 7 Dec 2021 00:27:20 +0100 Subject: [PATCH 758/909] update the rest of the tests --- setups/test_pi_floatice/setup.yml | 12 ++++++------ setups/test_pi_linfs/setup.yml | 12 ++++++------ setups/test_pi_partial/setup.yml | 12 ++++++------ setups/test_pi_visc7/setup.yml | 12 ++++++------ setups/test_pi_zstar/setup.yml | 12 ++++++------ test/run_tests.sh | 3 ++- 6 files changed, 32 insertions(+), 31 deletions(-) diff --git a/setups/test_pi_floatice/setup.yml b/setups/test_pi_floatice/setup.yml index 0a23d073f..5a0efdaf6 100644 --- a/setups/test_pi_floatice/setup.yml +++ b/setups/test_pi_floatice/setup.yml @@ -61,12 +61,12 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.26880359680085886 - salt: 23.943630158896298 - temp: 1.7010247885672327 - sst: 8.509590362118958 - u: -0.005721019451264724 - v: 0.00047682952470964814 + a_ice: 0.2688036133133268 + salt: 23.943629925697905 + sst: 8.509603317707892 + temp: 1.7010248191306683 + u: -0.005721009166639396 + v: 0.00047684416150415605 diff --git a/setups/test_pi_linfs/setup.yml b/setups/test_pi_linfs/setup.yml index 88650a51f..aaa6d0999 100644 --- a/setups/test_pi_linfs/setup.yml +++ b/setups/test_pi_linfs/setup.yml @@ -61,12 +61,12 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.2685778327298968 - salt: 23.944511945072648 - temp: 1.7011044195264193 - sst: 8.51781304844356 - u: -0.0013090250570688075 - v: 0.00013164013131872999 + a_ice: 0.268577839695243 + salt: 23.94451194254492 + sst: 8.517818738095748 + temp: 1.701104466912738 + u: -0.001308996954725246 + v: 0.0001316762592120162 diff --git a/setups/test_pi_partial/setup.yml b/setups/test_pi_partial/setup.yml index b3e74290d..33ffd2d71 100644 --- a/setups/test_pi_partial/setup.yml +++ b/setups/test_pi_partial/setup.yml @@ -61,12 +61,12 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.2691270793874835 - salt: 23.944032641762846 - temp: 1.7014629411995628 - sst: 8.531605186060785 - u: -0.0014154276919262456 - v: 0.00013994193864008374 + a_ice: 0.26912709492615366 + salt: 23.944033079753975 + sst: 8.5316133160601 + temp: 1.701462830461885 + u: -0.0014154222939901564 + v: 0.00013995648270483183 diff --git a/setups/test_pi_visc7/setup.yml b/setups/test_pi_visc7/setup.yml index afcee51e4..bed9e3451 100644 --- a/setups/test_pi_visc7/setup.yml +++ b/setups/test_pi_visc7/setup.yml @@ -62,12 +62,12 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.2691276109603212 - salt: 23.944024690144552 - temp: 1.7017686482560304 - sst: 8.531529100200583 - u: -0.0014071010764418097 - v: 0.00014173175700137738 + a_ice: 0.26912762639046617 + salt: 23.9440246569731 + sst: 8.53153464292271 + temp: 1.701768688625486 + u: -0.0014070898494307966 + v: 0.00014174869736213242 diff --git a/setups/test_pi_zstar/setup.yml b/setups/test_pi_zstar/setup.yml index b69816202..3424b4b5a 100644 --- a/setups/test_pi_zstar/setup.yml +++ b/setups/test_pi_zstar/setup.yml @@ -61,12 +61,12 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.2691276443855294 - salt: 23.944024712806094 - temp: 1.701768707848739 - sst: 8.531522995932146 - u: -0.001407225233294229 - v: 0.00014182969591235959 + a_ice: 0.26912765975496816 + salt: 23.944024679315966 + sst: 8.531528641557886 + temp: 1.7017687500626169 + u: -0.0014072137916283753 + v: 0.0001418460244606028 diff --git a/test/run_tests.sh b/test/run_tests.sh index 347bb5002..644ca21b8 100755 --- a/test/run_tests.sh +++ b/test/run_tests.sh @@ -3,7 +3,7 @@ set -e cd ../ machine="docker" -tests="test_pi test_souf test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7 test_pi_zstar" +tests="test_pi test_souf test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7" ./configure.sh ubuntu @@ -15,6 +15,7 @@ echo $test cd work_pi chmod +x job_docker_new ./job_docker_new + echo "This was ${test}" fcheck . cd ../ From 4f20f514643caf7c63e435a28a06d793bf8d2dad Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 7 Dec 2021 00:28:53 +0100 Subject: [PATCH 759/909] hopefully tests when the model did not finished correctly will now show as faild in CI --- work/job_ubuntu | 2 ++ 1 file changed, 2 insertions(+) diff --git a/work/job_ubuntu b/work/job_ubuntu index 5930bf2c6..b1120219e 100755 --- a/work/job_ubuntu +++ b/work/job_ubuntu @@ -1,5 +1,7 @@ #!/bin/bash +set -e + ulimit -s unlimited export OMP_NUM_THREADS=1 From e5d33e3d8318f15406f988402a7237b39d495845 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 7 Dec 2021 00:47:40 +0100 Subject: [PATCH 760/909] fix ln: failed to create symbolic link error for repeated runs --- work/job_ubuntu | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/work/job_ubuntu b/work/job_ubuntu index b1120219e..a5a3466e4 100755 --- a/work/job_ubuntu +++ b/work/job_ubuntu @@ -6,12 +6,12 @@ ulimit -s unlimited export OMP_NUM_THREADS=1 -ln -s ../bin/fesom.x . # cp -n ../bin/fesom.x +ln -sf ../bin/fesom.x . # cp -n ../bin/fesom.x cp -n ../config/namelist.config . cp -n ../config/namelist.forcing . cp -n ../config/namelist.oce . cp -n ../config/namelist.ice . -cp -n ../config/namelist.io . +cp -n ../config/namelist.io . date time mpirun --allow-run-as-root --mca btl_vader_single_copy_mechanism none -n 2 fesom.x From d59baf40fcc9dbba9d99e93de23daf1a041d5e23 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 7 Dec 2021 00:50:13 +0100 Subject: [PATCH 761/909] add check for restart, that somehow disapeared from main tests --- .github/workflows/fesom2.1.yml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/.github/workflows/fesom2.1.yml b/.github/workflows/fesom2.1.yml index b95d274f5..069a5a669 100644 --- a/.github/workflows/fesom2.1.yml +++ b/.github/workflows/fesom2.1.yml @@ -26,6 +26,7 @@ jobs: - name: Compile model (library) run: | bash ./test/ifs_interface/configure_lib.sh -l + - name: Library exists run: | bash ./test/ifs_interface/check_exist.sh @@ -33,6 +34,7 @@ jobs: - name: Create global test run run: | mkrun pi test_pi -m docker + - name: FESOM2 global test run run: | cd work_pi @@ -42,15 +44,22 @@ jobs: run: | cd work_pi fcheck . - + + - name: Check restarts + run: | + cd work_pi + ./job_docker_new + - name: Create channel test run run: | mkrun souf test_souf -m docker + - name: FESOM2 channel test run run: | cd work_souf chmod +x job_docker_new ./job_docker_new + - name: Check channel results run: | cd work_souf @@ -60,6 +69,7 @@ jobs: run: | cd mesh_part bash -l configure.sh ubuntu + - name: Run partitioner run: | cd work_pi From 06b255268c05a5567834290cef666992f864f06f Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 7 Dec 2021 00:51:21 +0100 Subject: [PATCH 762/909] change back compiler options for Intel compiler --- src/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 29b809b2b..4d9c35422 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -91,8 +91,8 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) -# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) - target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) +# target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) # target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) target_compile_options(${PROJECT_NAME} PRIVATE -O2 -g -ffloat-store -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) From be7885182451f8793fb6a4738d0beeab58a2051f Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 7 Dec 2021 13:16:47 +0100 Subject: [PATCH 763/909] we still need some time to integrate icepack into refactoring branch, so turning off the tests for now --- .github/{workflows => inactive_workflows}/fesom2_icepack.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/{workflows => inactive_workflows}/fesom2_icepack.yml (100%) diff --git a/.github/workflows/fesom2_icepack.yml b/.github/inactive_workflows/fesom2_icepack.yml similarity index 100% rename from .github/workflows/fesom2_icepack.yml rename to .github/inactive_workflows/fesom2_icepack.yml From bbeec8d9c03d6e8f73000eba092e8bb8dbe6a81e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 7 Dec 2021 17:49:58 +0100 Subject: [PATCH 764/909] remove explicit initialization of io_gather lists --- src/io_fesom_file.F90 | 2 -- src/io_gather.F90 | 21 ++++++--------------- src/io_meandata.F90 | 2 -- 3 files changed, 6 insertions(+), 19 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 64b46e78f..ce6944920 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -116,8 +116,6 @@ subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to ca integer err integer provided_mpi_thread_support_level - call init_io_gather() - ! get hold of our mesh data for later use (assume the mesh instance will not change) m_nod2d = mesh_nod2d m_elem2d = mesh_elem2d diff --git a/src/io_gather.F90 b/src/io_gather.F90 index 5aa8f68fb..f54484130 100644 --- a/src/io_gather.F90 +++ b/src/io_gather.F90 @@ -1,6 +1,6 @@ module io_gather_module implicit none - public init_io_gather, gather_nod2D, gather_real4_nod2D, gather_elem2D, gather_real4_elem2D + public gather_nod2D, gather_real4_nod2D, gather_elem2D, gather_real4_elem2D private logical, save :: nod2D_lists_initialized = .false. @@ -9,19 +9,10 @@ module io_gather_module logical, save :: elem2D_lists_initialized = .false. integer, save :: rank0Dim_elem2D - integer, save, allocatable, dimension(:) :: rank0List_elem2D - + integer, save, allocatable, dimension(:) :: rank0List_elem2D contains - subroutine init_io_gather() - integer err - - if(.not. nod2D_lists_initialized) call init_nod2D_lists() - if(.not. elem2D_lists_initialized) call init_elem2D_lists() - end subroutine - - subroutine init_nod2D_lists() use g_PARSUP implicit none @@ -101,7 +92,7 @@ subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: request_index integer :: mpi_precision = MPI_DOUBLE_PRECISION - if(.not. nod2D_lists_initialized) stop "io_gather_module has not been initialized" + if(.not. nod2D_lists_initialized) call init_nod2D_lists() include "io_gather_nod.inc" end subroutine @@ -126,7 +117,7 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: request_index integer :: mpi_precision = MPI_REAL - if(.not. nod2D_lists_initialized) stop "io_gather_module has not been initialized" + if(.not. nod2D_lists_initialized) call init_nod2D_lists() include "io_gather_nod.inc" end subroutine @@ -151,7 +142,7 @@ subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: request_index integer :: mpi_precision = MPI_DOUBLE_PRECISION - if(.not. elem2D_lists_initialized) stop "io_gather_module has not been initialized" + if(.not. elem2D_lists_initialized) call init_elem2D_lists() include "io_gather_elem.inc" end subroutine @@ -176,7 +167,7 @@ subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: request_index integer :: mpi_precision = MPI_REAL - if(.not. elem2D_lists_initialized) stop "io_gather_module has not been initialized" + if(.not. elem2D_lists_initialized) call init_elem2D_lists() include "io_gather_elem.inc" end subroutine diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 2ec904bbd..183c9e5de 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -829,11 +829,9 @@ subroutine output(istep, mesh) ctime=timeold+(dayold-1.)*86400 if (lfirst) then call ini_mean_io(mesh) - call init_io_gather() #if defined (__icepack) call init_io_icepack(mesh) #endif - call init_io_gather() end if call update_means From 8d924aea532740f7ecc7e7c90b3bb4860ecccaa7 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 7 Dec 2021 18:23:05 +0100 Subject: [PATCH 765/909] OMP for the ice part has been implemented. Just some small loos are left for tomorrow :) --- src/ice_EVP.F90 | 103 ++++++++++++++++------ src/ice_fct.F90 | 190 +++++++++++++++++++++++++++++++++-------- src/ice_setup_step.F90 | 4 + 3 files changed, 234 insertions(+), 63 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 1784c9eda..edef74b1f 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -61,11 +61,9 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) - real(kind=WP) :: eta, xi, delta, aa - integer :: el, elnodes(3) - real(kind=WP) :: asum, msum, vale, dx(3), dy(3) - real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte - real(kind=WP) :: zeta, delta_inv, d1, d2 + integer :: el + real(kind=WP) :: det1, det2, dte, vale, r1, r2, r3, si1, si2 + real(kind=WP) :: zeta, delta, delta_inv, d1, d2 !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice @@ -90,6 +88,7 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) det1 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) det2 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) !*ellipse**2 +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(el, r1, r2, r3, si1, si2, zeta, delta, delta_inv, d1, d2) do el=1,myDim_elem2D !_______________________________________________________________________ ! if element contains cavity node skip it @@ -161,6 +160,7 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) #endif endif end do +!$OMP END PARALLEL DO end subroutine stress_tensor ! ! @@ -201,11 +201,14 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) !___________________________________________________________________________ val3=1/3.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, el, k) +!$OMP DO DO n=1, myDim_nod2D U_rhs_ice(n)=0.0_WP V_rhs_ice(n)=0.0_WP END DO - +!$OMP END DO +!$OMP DO do el=1,myDim_elem2D ! ===== Skip if ice is absent ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE @@ -215,8 +218,10 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) !_______________________________________________________________________ if (ice_strength(el) > 0._WP) then - !$IVDEP DO k=1,3 +#if defined(_OPENMP) + call omp_set_lock (partit%plock(elem2D_nodes(k,el))) +#endif U_rhs_ice(elem2D_nodes(k,el)) = U_rhs_ice(elem2D_nodes(k,el)) & - elem_area(el) * & (sigma11(el)*gradient_sca(k,el) + sigma12(el)*gradient_sca(k+3,el) & @@ -226,10 +231,14 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) - elem_area(el) * & (sigma12(el)*gradient_sca(k,el) + sigma22(el)*gradient_sca(k+3,el) & -sigma11(el)*val3*metric_factor(el)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(elem2D_nodes(k,el))) +#endif END DO endif end do - +!$OMP END DO +!$OMP DO DO n=1, myDim_nod2D !_______________________________________________________________________ ! if cavity node skip it @@ -243,7 +252,9 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) U_rhs_ice(n) = 0._WP V_rhs_ice(n) = 0._WP endif - END DO + END DO +!$OMP END DO +!$OMP END PARALLEL end subroutine stress2rhs ! ! @@ -279,13 +290,12 @@ subroutine EVPdynamics(ice, partit, mesh) real(kind=WP) :: ice_strength(partit%myDim_elem2D), elevation_elem(3), p_ice(3) integer :: use_pice - real(kind=WP) :: eta, xi, delta + real(kind=WP) :: eta, delta integer :: k real(kind=WP) :: vale, dx(3), dy(3), val3 real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte real(kind=WP) :: zeta, delta_inv, d1, d2 INTEGER :: elem - REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 !_______________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: u_ice, v_ice @@ -346,10 +356,16 @@ subroutine EVPdynamics(ice, partit, mesh) !___________________________________________________________________________ ! Precompute values that are never changed during the iteration - inv_areamass =0.0_WP - inv_mass =0.0_WP - rhs_a =0.0_WP - rhs_m =0.0_WP +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + inv_areamass(n) =0.0_WP + inv_mass(n) =0.0_WP + rhs_a(n) =0.0_WP + rhs_m(n) =0.0_WP + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n) do n=1,myDim_nod2D !_______________________________________________________________________ ! if cavity node skip it @@ -373,13 +389,15 @@ subroutine EVPdynamics(ice, partit, mesh) rhs_a(n)=0.0_WP ! these are used as temporal storage here rhs_m(n)=0.0_WP ! for the contribution due to ssh enddo - +!$OMP END PARALLEL DO + !___________________________________________________________________________ use_pice=0 if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 if ( .not. trim(which_ALE)=='linfs') then ! for full free surface include pressure from ice mass ice_strength=0.0_WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(el, elnodes, msum, asum, aa, p_ice, elevation_elem, elevation_dx, elevation_dy) do el = 1,myDim_elem2D elnodes = elem2D_nodes(:,el) @@ -431,10 +449,12 @@ subroutine EVPdynamics(ice, partit, mesh) rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy end if enddo +!$OMP END PARALLEL DO else ! for linear free surface - ice_strength=0.0_WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(el, elnodes, msum, asum, aa, elevation_elem, elevation_dx, elevation_dy) do el = 1,myDim_elem2D + ice_strength(el)=0.0_WP elnodes = elem2D_nodes(:,el) !___________________________________________________________________ ! if element has any cavity node skip it @@ -468,15 +488,16 @@ subroutine EVPdynamics(ice, partit, mesh) rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy end if enddo +!$OMP END PARALLEL DO endif ! --> if ( .not. trim(which_ALE)=='linfs') then - +!$OMP PARALLEL DO !___________________________________________________________________________ do n=1,myDim_nod2D if (ulevels_nod2d(n)>1) cycle rhs_a(n) = rhs_a(n)/area(1,n) rhs_m(n) = rhs_m(n)/area(1,n) enddo - +!$OMP END PARALLEL DO !___________________________________________________________________________ ! End of Precomputing --> And the ice stepping starts #if defined (__icepack) @@ -484,12 +505,23 @@ subroutine EVPdynamics(ice, partit, mesh) rdg_shear_elem(:) = 0.0_WP #endif do shortstep=1, ice%evp_rheol_steps +!write(*,*) partit%mype, shortstep, 'CP1' !_______________________________________________________________________ call stress_tensor(ice_strength, ice, partit, mesh) +!call MPI_Barrier(partit%MPI_COMM_FESOM, partit%MPIerr) +!write(*,*) partit%mype, shortstep, 'CP2' call stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) +!call MPI_Barrier(partit%MPI_COMM_FESOM, partit%MPIerr) +!write(*,*) partit%mype, shortstep, 'CP3' !_______________________________________________________________________ - U_ice_old = U_ice !PS - V_ice_old = V_ice !PS +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, ed, umod, drag, rhsu, rhsv, r_a, r_b, det) +!$OMP DO + do n=1,myDim_nod2D+eDim_nod2D + U_ice_old(n) = U_ice(n) !PS + V_ice_old(n) = V_ice(n) !PS + end do +!$OMP END DO +!$OMP DO do n=1,myDim_nod2D !___________________________________________________________________ ! if cavity node skip it @@ -515,9 +547,10 @@ subroutine EVPdynamics(ice, partit, mesh) V_ice(n) = 0.0_WP end if end do - +!$OMP END DO !_______________________________________________________________________ - ! apply sea ice velocity boundary condition + ! apply sea ice velocity boundary condition +!$OMP DO DO ed=1,myDim_edge2D !___________________________________________________________________ ! apply coastal sea ice velocity boundary conditions @@ -530,13 +563,29 @@ subroutine EVPdynamics(ice, partit, mesh) ! apply sea ice velocity boundary conditions at cavity-ocean edge if (use_cavity) then if ( (ulevels(edge_tri(1,ed))>1) .or. & - ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then - U_ice(edges(1:2,ed))=0.0_WP - V_ice(edges(1:2,ed))=0.0_WP + ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then +#if defined(_OPENMP) + call omp_set_lock (partit%plock(edges(1,ed))) +#endif + U_ice(edges(1,ed))=0.0_WP + V_ice(edges(1,ed))=0.0_WP +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(edges(1,ed))) + call omp_set_lock (partit%plock(edges(2,ed))) +#endif + U_ice(edges(2,ed))=0.0_WP + V_ice(edges(2,ed))=0.0_WP +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(edges(2,ed))) +#endif end if end if end do +!$OMP END DO +!$OMP END PARALLEL +!write(*,*) partit%mype, shortstep, 'CP4' !_______________________________________________________________________ call exchange_nod(U_ice,V_ice,partit) +!$OMP BARRIER END DO !--> do shortstep=1, ice%evp_rheol_steps -end subroutine EVPdynamics \ No newline at end of file +end subroutine EVPdynamics diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 4a84a4298..11290a83c 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -128,6 +128,8 @@ subroutine ice_TG_rhs(ice, partit, mesh) #endif !___________________________________________________________________________ ! Taylor-Galerkin (Lax-Wendroff) rhs +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, q, row, elem, elnodes, diff, entries, um, vm, vol, dx, dy) +!$OMP DO DO row=1, myDim_nod2D rhs_m(row)=0._WP rhs_a(row)=0._WP @@ -136,8 +138,9 @@ subroutine ice_TG_rhs(ice, partit, mesh) rhs_temp(row)=0._WP #endif /* (__oifs) */ END DO - +!$OMP END DO ! Velocities at nodes +!$OMP DO do elem=1,myDim_elem2D !assembling rhs over elements elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ @@ -174,6 +177,8 @@ subroutine ice_TG_rhs(ice, partit, mesh) #endif /* (__oifs) */ END DO end do +!$OMP END DO +!$OMP END PARALLEL end subroutine ice_TG_rhs ! ! @@ -260,7 +265,8 @@ subroutine ice_solve_low_order(ice, partit, mesh) #endif !___________________________________________________________________________ gamma=ice%ice_gamma_fct ! Added diffusivity parameter - ! Adjust it to ensure posivity of solution + ! Adjust it to ensure posivity of solution +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row, clo, clo2, cn, location) do row=1,myDim_nod2D !_______________________________________________________________________ ! if there is cavity no ice fxt low order @@ -286,12 +292,13 @@ subroutine ice_solve_low_order(ice, partit, mesh) (1.0_WP-gamma)*ice_temp(row) #endif /* (__oifs) */ end do - +!$OMP END PARALLEL DO ! Low-order solution must be known to neighbours call exchange_nod(m_icel,a_icel,m_snowl, partit) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(m_templ, partit) #endif /* (__oifs) */ +!$OMP BARRIER end subroutine ice_solve_low_order ! ! @@ -309,7 +316,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - integer :: n,i,clo,clo2,cn,location(100),row + integer :: n,clo,clo2,cn,location(100),row real(kind=WP) :: rhs_new integer :: num_iter_solve=3 !___________________________________________________________________________ @@ -344,6 +351,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) ! Does Taylor-Galerkin solution ! !the first approximation +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) do row=1,myDim_nod2D ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle @@ -355,14 +363,17 @@ subroutine ice_solve_high_order(ice, partit, mesh) dm_temp(row)=rhs_temp(row)/area(1,row) #endif /* (__oifs) */ end do - +!$OMP END PARALLEL DO call exchange_nod(dm_ice, da_ice, dm_snow, partit) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ +!$OMP BARRIER !___________________________________________________________________________ - !iterate + !iterate do n=1,num_iter_solve-1 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, clo, clo2, cn, location, row, rhs_new) +!$OMP DO do row=1,myDim_nod2D ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle @@ -383,7 +394,9 @@ subroutine ice_solve_high_order(ice, partit, mesh) m_templ(row)= dm_temp(row)+rhs_new/area(1,row) #endif /* (__oifs) */ end do +!$OMP END DO !_______________________________________________________________________ +!$OMP DO do row=1,myDim_nod2D ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle @@ -394,12 +407,14 @@ subroutine ice_solve_high_order(ice, partit, mesh) dm_temp(row)=m_templ(row) #endif /* (__oifs) */ end do - +!$OMP END DO +!$OMP END PARALLEL !_______________________________________________________________________ call exchange_nod(dm_ice, da_ice, dm_snow, partit) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ +!$OMP BARRIER end do end subroutine ice_solve_high_order ! @@ -423,7 +438,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ integer :: tr_array_id - integer :: icoef(3,3),n,q, elem,elnodes(3),row + integer :: icoef(3,3), n, q, elem, elnodes(3), row real(kind=WP) :: vol, flux, ae, gamma !___________________________________________________________________________ ! pointer on necessary derived types @@ -477,7 +492,9 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) ! Cycle over rows row=elnodes(n) icoef(n,n)=-2 end do - + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, q, elem, elnodes, row, vol, flux, ae) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ elnodes=elem2D_nodes(:,elem) @@ -518,7 +535,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) end if #endif /* (__oifs) */ end do - +!$OMP END DO !___________________________________________________________________________ ! Screening the low-order solution ! TO BE ADDED IF FOUND NECESSARY @@ -529,6 +546,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) !___________________________________________________________________________ ! Cluster min/max if (tr_array_id==1) then +!$OMP DO do row=1, myDim_nod2D if (ulevels_nod2d(row)>1) cycle n=nn_num(row) @@ -538,9 +556,11 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) tmax(row)=tmax(row)-m_icel(row) tmin(row)=tmin(row)-m_icel(row) end do +!$OMP END DO end if if (tr_array_id==2) then +!$OMP DO do row=1, myDim_nod2D if (ulevels_nod2d(row)>1) cycle n=nn_num(row) @@ -550,9 +570,11 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) tmax(row)=tmax(row)-a_icel(row) tmin(row)=tmin(row)-a_icel(row) end do +!$OMP END DO end if if (tr_array_id==3) then +!$OMP DO do row=1, myDim_nod2D if (ulevels_nod2d(row)>1) cycle n=nn_num(row) @@ -562,10 +584,12 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) tmax(row)=tmax(row)-m_snowl(row) tmin(row)=tmin(row)-m_snowl(row) end do +!$OMP END DO end if #if defined (__oifs) || defined (__ifsinterface) if (tr_array_id==4) then +!$OMP DO do row=1, myDim_nod2D if (ulevels_nod2d(row)>1) cycle n=nn_num(row) @@ -575,13 +599,19 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) tmax(row)=tmax(row)-m_templ(row) tmin(row)=tmin(row)-m_templ(row) end do +!$OMP END DO end if #endif /* (__oifs) */ !___________________________________________________________________________ ! Sums of positive/negative fluxes to node row - icepplus=0._WP - icepminus=0._WP +!$OMP DO + do n=1, myDim_nod2D+eDim_nod2D + icepplus (n)=0._WP + icepminus(n)=0._WP + end do +!$OMP END DO +!$OMP DO do elem=1, myDim_elem2D ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 @@ -591,16 +621,23 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) do q=1,3 n=elnodes(q) flux=icefluxes(elem,q) +#if defined(_OPENMP) + call omp_set_lock (partit%plock(n)) +#endif if (flux>0) then icepplus(n)=icepplus(n)+flux else - icepminus(n)=icepminus(n)+flux + icepminus(n)=icepminus(n)+flux end if +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(n)) +#endif end do end do - +!$OMP END DO !___________________________________________________________________________ ! The least upper bound for the correction factors +!$OMP DO do n=1,myDim_nod2D ! if cavity cycle over if(ulevels_nod2D(n)>1) cycle !LK89140 @@ -619,11 +656,15 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) icepminus(n)=0._WP end if end do +!$OMP END DO ! pminus and pplus are to be known to neighbouting PE +!$OMP MASTER call exchange_nod(icepminus, icepplus, partit) - +!$OMP END MASTER +!$OMP BARRIER !___________________________________________________________________________ ! Limiting +!$OMP DO do elem=1, myDim_elem2D ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 @@ -639,14 +680,17 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) end do icefluxes(elem,:)=ae*icefluxes(elem,:) end do - +!$OMP END DO !___________________________________________________________________________ ! Update the solution if(tr_array_id==1) then +!$OMP DO do n=1,myDim_nod2D if(ulevels_nod2D(n)>1) cycle !LK89140 m_ice(n)=m_icel(n) - end do + end do +!$OMP END DO +!$OMP DO do elem=1, myDim_elem2D ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 @@ -654,33 +698,53 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) +#if defined(_OPENMP) + call omp_set_lock (partit%plock(n)) +#endif m_ice(n)=m_ice(n)+icefluxes(elem,q) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(n)) +#endif end do - end do + end do +!$OMP END DO end if if(tr_array_id==2) then +!$OMP DO do n=1,myDim_nod2D if(ulevels_nod2D(n)>1) cycle !LK89140 a_ice(n)=a_icel(n) - end do + end do +!$OMP END DO +!$OMP DO do elem=1, myDim_elem2D ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 elnodes=elem2D_nodes(:,elem) do q=1,3 - n=elnodes(q) + n=elnodes(q) +#if defined(_OPENMP) + call omp_set_lock (partit%plock(n)) +#endif a_ice(n)=a_ice(n)+icefluxes(elem,q) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(n)) +#endif end do - end do + end do +!$OMP END DO end if if(tr_array_id==3) then +!$OMP DO do n=1,myDim_nod2D if(ulevels_nod2D(n)>1) cycle !LK89140 m_snow(n)=m_snowl(n) - end do + end do +!$OMP END DO +!$OMP DO do elem=1, myDim_elem2D ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 @@ -688,17 +752,27 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) +#if defined(_OPENMP) + call omp_set_lock (partit%plock(n)) +#endif m_snow(n)=m_snow(n)+icefluxes(elem,q) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(n)) +#endif end do - end do + end do +!$OMP END DO end if #if defined (__oifs) || defined (__ifsinterface) if(tr_array_id==4) then +!$OMP DO do n=1,myDim_nod2D if(ulevels_nod2D(n)>1) cycle !LK89140 ice_temp(n)=m_templ(n) end do +!$OMP END DO +!$OMP DO do elem=1, myDim_elem2D ! if cavity cycle over if(ulevels(elem)>1) cycle !LK89140 @@ -706,16 +780,24 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) +#if defined(_OPENMP) + call omp_set_lock (partit%plock(n)) +#endif ice_temp(n)=ice_temp(n)+icefluxes(elem,q) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(n)) +#endif end do end do +!$OMP END DO end if #endif /* (__oifs) */ || defined (__ifsinterface) - +!$OMP END PARALLEL call exchange_nod(m_ice, a_ice, m_snow, partit) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(ice_temp, partit) #endif /* (__oifs) */ +!$OMP BARRIER end subroutine ice_fem_fct ! ! @@ -731,11 +813,11 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - integer :: n, n1, n2, row + integer :: n, row integer :: elem, elnodes(3), q, offset, col, ipos integer, allocatable :: col_pos(:) real(kind=WP) :: aa - integer :: flag=0,iflag=0 + integer :: flag=0, iflag=0 !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: mass_matrix @@ -747,7 +829,8 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) ! ! a) allocate(col_pos(myDim_nod2D+eDim_nod2D)) - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, row, elem, elnodes, q, offset, col, ipos, aa) +!$OMP DO DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -775,8 +858,9 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) END DO end do END DO - +!$OMP END DO ! TEST: area==sum of row entries in mass_matrix: +!$OMP DO DO q=1,myDim_nod2D ! if cavity cycle over if(ulevels_nod2d(q)>1) cycle @@ -787,14 +871,25 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) aa=sum(mass_matrix(offset:n)) !!PS if(abs(area(1,q)-aa)>.1_WP) then if(abs(area(ulevels_nod2d(q),q)-aa)>.1_WP) then +!$OMP CRITICAL iflag=q flag=1 +!$OMP END CRITICAL endif END DO +!$OMP END DO +!$OMP END PARALLEL if(flag>0) then offset=ssh_stiff%rowptr(iflag)-ssh_stiff%rowptr(1)+1 n=ssh_stiff%rowptr(iflag+1)-ssh_stiff%rowptr(1) - aa=sum(mass_matrix(offset:n)) + aa=0 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:aa) +!$OMP DO + do row=offset, n + aa=aa+mass_matrix(row) + end do +!$OMP END DO +!$OMP END PARALLEL write(*,*) '#### MASS MATRIX PROBLEM', mype, iflag, aa, area(1,iflag), ulevels_nod2D(iflag) endif deallocate(col_pos) @@ -865,7 +960,8 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) rhs_tempdiv(row)=0.0_WP #endif /* (__oifs) */ end do - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(diff, entries, um, vm, vol, dx, dy, n, q, row, elem, elnodes, c1, c2, c3, c4, cx1, cx2, cx3, cx4, entries2) +!$OMP DO do elem=1,myDim_elem2D !assembling rhs over elements elnodes=elem2D_nodes(:,elem) @@ -906,6 +1002,9 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) #endif /* (__oifs) */ !___________________________________________________________________ +#if defined(_OPENMP) + call omp_set_lock (partit%plock(row)) +#endif rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes))+cx1 rhs_a(row)=rhs_a(row)+sum(entries*a_ice(elnodes))+cx2 rhs_ms(row)=rhs_ms(row)+sum(entries*m_snow(elnodes))+cx3 @@ -920,8 +1019,13 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) rhs_tempdiv(row)=rhs_tempdiv(row)-cx4 #endif /* (__oifs) */ +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(row)) +#endif end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine ice_TG_rhs_div ! ! @@ -939,7 +1043,7 @@ subroutine ice_update_for_div(ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - integer :: n,i,clo,clo2,cn,location(100),row + integer :: n,clo,clo2,cn,location(100),row real(kind=WP) :: rhs_new integer :: num_iter_solve=3 !___________________________________________________________________________ @@ -978,6 +1082,7 @@ subroutine ice_update_for_div(ice, partit, mesh) !___________________________________________________________________________ ! Does Taylor-Galerkin solution ! the first approximation +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) do row=1,myDim_nod2D !! row=myList_nod2D(m) ! if cavity node skip it @@ -990,16 +1095,19 @@ subroutine ice_update_for_div(ice, partit, mesh) dm_temp(row)=rhs_tempdiv(row)/area(1,row) #endif /* (__oifs) */ end do +!$OMP END PARALLEL DO call exchange_nod(dm_ice, partit) call exchange_nod(da_ice, partit) call exchange_nod(dm_snow, partit) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ - +!$OMP BARRIER !___________________________________________________________________________ !iterate do n=1,num_iter_solve-1 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row, n, clo, clo2, cn, location, rhs_new) +!$OMP DO do row=1,myDim_nod2D ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle @@ -1022,6 +1130,8 @@ subroutine ice_update_for_div(ice, partit, mesh) m_templ(row)= dm_temp(row)+rhs_new/area(1,row) #endif /* (__oifs) */ end do +!$OMP END DO +!$OMP DO do row=1,myDim_nod2D ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle @@ -1032,18 +1142,26 @@ subroutine ice_update_for_div(ice, partit, mesh) dm_temp(row) = m_templ(row) #endif /* (__oifs) */ end do +!$OMP END DO +!$OMP END PARALLEL call exchange_nod(dm_ice, partit) call exchange_nod(da_ice, partit) call exchange_nod(dm_snow, partit) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ +!$OMP BARRIER end do - m_ice = m_ice+dm_ice - a_ice = a_ice+da_ice - m_snow = m_snow+dm_snow + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) + do row=1, myDim_nod2D+eDim_nod2D + m_ice(row) = m_ice (row)+dm_ice (row) + a_ice(row) = a_ice (row)+da_ice (row) + m_snow(row) = m_snow(row)+dm_snow(row) #if defined (__oifs) || defined (__ifsinterface) - ice_temp= ice_temp+dm_temp + ice_temp(row)= ice_temp(row)+dm_temp(row) #endif /* (__oifs) */ + end do +!$OMP END PARALLEL DO end subroutine ice_update_for_div ! ============================================================= diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index 8d16d6b9c..b3368bd03 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -170,9 +170,11 @@ subroutine ice_timestep(step, ice, partit, mesh) ! call cut_off ! new FCT routines from Sergey Danilov 08.05.2018 #if defined (__oifs) || defined (__ifsinterface) +!$OMP PARALLEL DO do i=1,myDim_nod2D+eDim_nod2D ice_temp(i) = ice_temp(i)*a_ice(i) end do +!$OMP END PARALLEL DO #endif /* (__oifs) */ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_TG_rhs_div...'//achar(27)//'[0m' call ice_TG_rhs_div (ice, partit, mesh) @@ -184,9 +186,11 @@ subroutine ice_timestep(step, ice, partit, mesh) call ice_update_for_div(ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) +!$OMP PARALLEL DO do i=1,myDim_nod2D+eDim_nod2D if (a_ice(i)>0.0_WP) ice_temp(i) = ice_temp(i)/a_ice(i) end do +!$OMP END PARALLEL DO #endif /* (__oifs) */ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call cut_off...'//achar(27)//'[0m' From 23c6d651d6450b8fb8c4fd08441ad288105dcc6d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 8 Dec 2021 13:06:07 +0100 Subject: [PATCH 766/909] add a default frequency for raw restarts to the example namelists --- config/namelist.config | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/config/namelist.config b/config/namelist.config index b283fdd8d..0d17f6843 100644 --- a/config/namelist.config +++ b/config/namelist.config @@ -24,7 +24,9 @@ ResultPath='../result_tmp/' &restart_log restart_length=1 !only required for d,h,s cases, y, m take 1 -restart_length_unit='y' !output period: y, d, h, s +restart_length_unit='y' !output period: y, d, h, s, off +raw_restart_length=1 +raw_restart_length_unit='y' ! e.g. y, d, h, s, off logfile_outfreq=960 !in logfile info. output frequency, # steps / From b0ebbf540e44ee8675dd24e2f7302753f3810820 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 9 Dec 2021 10:43:25 +0100 Subject: [PATCH 767/909] remove wrong description of restart subroutine (merged from 0686d) --- src/fesom_module.F90 | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 3618ae130..894669e73 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -206,12 +206,6 @@ subroutine fesom_init(fesom_total_nsteps) call clock_newyear ! check if it is a new year if (f%mype==0) f%t6=MPI_Wtime() !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ - ! The interface to the restart module is made via call restart ! - ! The inputs are: istep, l_write, l_create - ! if istep is not zero it will be decided whether restart shall be written - ! if l_write is TRUE the restart will be forced - ! if l_read the restart will be read - ! as an example, for reading restart one does: call restart(0, .false., .false., .true., tracers, partit, mesh) call restart(0, .false., r_restart, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read if (f%mype==0) f%t7=MPI_Wtime() ! store grid information into netcdf file From 8e74f6a7a693a21ba7e3b59ce08ae63a28a1149d Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 9 Dec 2021 10:51:26 +0100 Subject: [PATCH 768/909] remove unused parameter in restart procedure (merged from 5581d) --- src/fesom_module.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 894669e73..7cae64cc6 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -206,7 +206,7 @@ subroutine fesom_init(fesom_total_nsteps) call clock_newyear ! check if it is a new year if (f%mype==0) f%t6=MPI_Wtime() !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ - call restart(0, .false., r_restart, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) ! istep, l_write, l_read + call restart(0, r_restart, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) if (f%mype==0) f%t7=MPI_Wtime() ! store grid information into netcdf file if (.not. r_restart) call write_mesh_info(f%partit, f%mesh) @@ -375,7 +375,7 @@ subroutine fesom_runloop(current_nsteps) call output (n, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) f%t5 = MPI_Wtime() - call restart(n, .false., .false., f%ice, f%dynamics, f%tracers, f%partit, f%mesh) + call restart(n, .false., f%ice, f%dynamics, f%tracers, f%partit, f%mesh) f%t6 = MPI_Wtime() f%rtime_fullice = f%rtime_fullice + f%t2 - f%t1 From c2a45022b1b0cde322caa5e30ab8b0c337d1b2bf Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 9 Dec 2021 11:04:19 +0100 Subject: [PATCH 769/909] add missing derived type argument --- src/io_gather.F90 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/io_gather.F90 b/src/io_gather.F90 index 6d961b51b..19822d5f7 100644 --- a/src/io_gather.F90 +++ b/src/io_gather.F90 @@ -109,7 +109,7 @@ subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) #include "associate_part_def.h" #include "associate_part_ass.h" - if(.not. nod2D_lists_initialized) call init_nod2D_lists() + if(.not. nod2D_lists_initialized) call init_nod2D_lists(partit) include "io_gather_nod.inc" end subroutine @@ -138,7 +138,7 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, part #include "associate_part_def.h" #include "associate_part_ass.h" - if(.not. nod2D_lists_initialized) call init_nod2D_lists() + if(.not. nod2D_lists_initialized) call init_nod2D_lists(partit) include "io_gather_nod.inc" end subroutine @@ -167,7 +167,7 @@ subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) #include "associate_part_def.h" #include "associate_part_ass.h" - if(.not. elem2D_lists_initialized) call init_elem2D_lists() + if(.not. elem2D_lists_initialized) call init_elem2D_lists(partit) include "io_gather_elem.inc" end subroutine @@ -196,7 +196,7 @@ subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, par #include "associate_part_def.h" #include "associate_part_ass.h" - if(.not. elem2D_lists_initialized) call init_elem2D_lists() + if(.not. elem2D_lists_initialized) call init_elem2D_lists(partit) include "io_gather_elem.inc" end subroutine From a12429d3ac8df26684cdc437f1a2dd91dafcf4fc Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 9 Dec 2021 12:13:49 +0100 Subject: [PATCH 770/909] fixed most of OMP bugs in sea ice part --- src/ice_EVP.F90 | 27 ++++++++++++++++++--------- src/ice_fct.F90 | 48 +++++++++++++++++++++++++++--------------------- src/oce_ale.F90 | 11 +++++++---- 3 files changed, 52 insertions(+), 34 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index edef74b1f..afea85ede 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -286,8 +286,7 @@ subroutine EVPdynamics(ice, partit, mesh) integer :: n, ed, ednodes(2), el, elnodes(3) real(kind=WP) :: ax, ay, aa, elevation_dx, elevation_dy - real(kind=WP) :: inv_areamass(partit%myDim_nod2D), inv_mass(partit%myDim_nod2D) - real(kind=WP) :: ice_strength(partit%myDim_elem2D), elevation_elem(3), p_ice(3) + real(kind=WP) :: elevation_elem(3), p_ice(3) integer :: use_pice real(kind=WP) :: eta, delta @@ -304,6 +303,7 @@ subroutine EVPdynamics(ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m real(kind=WP), dimension(:), pointer :: u_w, v_w, elevation real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=WP), dimension(:), pointer :: inv_areamass, inv_mass, ice_strength #if defined (__icepack) real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old #endif @@ -337,6 +337,10 @@ subroutine EVPdynamics(ice, partit, mesh) rhoice => ice%thermo%rhoice inv_rhowat => ice%thermo%inv_rhowat + inv_areamass => ice%work%inv_areamass(:) + inv_mass => ice%work%inv_mass(:) + ice_strength => ice%work%ice_strength(:) + !___________________________________________________________________________ ! If Icepack is used, always update the tracers #if defined (__icepack) @@ -396,10 +400,14 @@ subroutine EVPdynamics(ice, partit, mesh) if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 if ( .not. trim(which_ALE)=='linfs') then ! for full free surface include pressure from ice mass - ice_strength=0.0_WP -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(el, elnodes, msum, asum, aa, p_ice, elevation_elem, elevation_dx, elevation_dy) - do el = 1,myDim_elem2D - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(el, elnodes, msum, asum, aa, p_ice, elevation_elem, elevation_dx, elevation_dy) +!$OMP DO + do el = 1, myDim_elem2D + eDim_elem2D + ice_strength(el)=0.0_WP + end do +!$OMP END DO +!$OMP DO + do el = 1,myDim_elem2D elnodes = elem2D_nodes(:,el) !___________________________________________________________________ ! if element has any cavity node skip it @@ -449,7 +457,8 @@ subroutine EVPdynamics(ice, partit, mesh) rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy end if enddo -!$OMP END PARALLEL DO +!$OMP END DO +!$OMP END PARALLEL else ! for linear free surface !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(el, elnodes, msum, asum, aa, elevation_elem, elevation_dx, elevation_dy) @@ -507,10 +516,10 @@ subroutine EVPdynamics(ice, partit, mesh) do shortstep=1, ice%evp_rheol_steps !write(*,*) partit%mype, shortstep, 'CP1' !_______________________________________________________________________ - call stress_tensor(ice_strength, ice, partit, mesh) + call stress_tensor(ice_strength(1:myDim_nod2D), ice, partit, mesh) !call MPI_Barrier(partit%MPI_COMM_FESOM, partit%MPIerr) !write(*,*) partit%mype, shortstep, 'CP2' - call stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) + call stress2rhs(inv_areamass(1:myDim_nod2D), ice_strength(1:myDim_elem2D), ice, partit, mesh) !call MPI_Barrier(partit%MPI_COMM_FESOM, partit%MPIerr) !write(*,*) partit%mype, shortstep, 'CP3' !_______________________________________________________________________ diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 11290a83c..99c1430ea 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -813,9 +813,8 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - integer :: n, row - integer :: elem, elnodes(3), q, offset, col, ipos - integer, allocatable :: col_pos(:) + integer :: n, k, row + integer :: elem, elnodes(3), q, offset, ipos real(kind=WP) :: aa integer :: flag=0, iflag=0 !___________________________________________________________________________ @@ -828,8 +827,7 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) mass_matrix => ice%work%fct_massmatrix(:) ! ! a) - allocate(col_pos(myDim_nod2D+eDim_nod2D)) -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, row, elem, elnodes, q, offset, col, ipos, aa) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, k, row, elem, elnodes, q, offset, ipos, aa) !$OMP DO DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) @@ -840,22 +838,31 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) if(row>myDim_nod2D) cycle !___________________________________________________________________ ! Global-to-local neighbourhood correspondence - DO q=1,nn_num(row) - col_pos(nn_pos(q,row))=q - END DO + ! we have to modify col_pos construction for OMP compatibility. The MPI version might become a bit slower :( + ! loop over number of neghbouring nodes of node-row offset=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1) - DO q=1,3 - col=elnodes(q) - !_______________________________________________________________ - ! if element is cavity cycle over - if(ulevels(elem)>1) cycle - - ipos=offset+col_pos(col) - mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP - if(q==n) then - mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP - end if - END DO + do q=1, 3 + !_______________________________________________________________ + ! if element is cavity cycle over + if(ulevels(elem)>1) cycle + do k=1, nn_num(row) + if (nn_pos(k,row)==elnodes(q)) then + ipos=offset+k + exit + end if + if (k==nn_num(row)) write(*,*) 'FATAL ERROR' + end do +#if defined(_OPENMP) + call omp_set_lock (partit%plock(row)) ! it shall be sufficient to block writing into the same row of SSH_stiff +#endif + mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP + if(q==n) then + mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP + end if +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(row)) +#endif + END DO end do END DO !$OMP END DO @@ -892,7 +899,6 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) !$OMP END PARALLEL write(*,*) '#### MASS MATRIX PROBLEM', mype, iflag, aa, area(1,iflag), ulevels_nod2D(iflag) endif - deallocate(col_pos) END SUBROUTINE ice_mass_matrix_fill ! ! diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 97f79668c..4b6b95b31 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1598,11 +1598,11 @@ subroutine update_stiff_mat_ale(partit, mesh) ! on the rhs. So the sign is changed in the expression below. ! npos... sparse matrix indices position of node points elnodes #if defined(_OPENMP) -! call omp_set_lock(row) ! it shall be sufficient to block writing into the same row of SSH_stiff + call omp_set_lock (partit%plock(row)) ! it shall be sufficient to block writing into the same row of SSH_stiff #endif SSH_stiff%values(npos)=SSH_stiff%values(npos) + fy*factor #if defined(_OPENMP) -! call omp_unset_lock(row) + call omp_unset_lock(partit%plock(row)) #endif end do ! --> do i=1,2 end do ! --> do j=1,2 @@ -1983,7 +1983,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) END DO !$OMP END PARALLEL DO -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(ed, enodes, el, deltaX1, deltaY1, nz, nzmin, nzmax, deltaX2, deltaY2, c1, c2) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(ed, enodes, el, deltaX1, deltaY1, nz, nzmin, nzmax, deltaX2, deltaY2, c1, c2) +!$OMP DO do ed=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,ed) @@ -2065,7 +2066,8 @@ subroutine vert_vel_ale(dynamics, partit, mesh) #endif end if end do ! --> do ed=1, myDim_edge2D -!$OMP END PARALLEL DO +!$OMP END DO +!$OMP END PARALLEL ! | ! | ! +--> until here Wvel contains the thickness divergence div(u) @@ -2512,6 +2514,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end do !$OMP END PARALLEL DO end subroutine vert_vel_ale + ! ! !=============================================================================== From 8ba13a1361bff5d782b9e06c76f926dd42d54ab1 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 9 Dec 2021 12:22:21 +0100 Subject: [PATCH 771/909] make use of t_ice:ice for arrays inv_areamass, inv_mass, ice_strength in ice_EVP.F90 -> EVPdynamics --- src/ice_EVP.F90 | 65 +++++++++++++++++++++---------------------------- 1 file changed, 28 insertions(+), 37 deletions(-) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index afea85ede..c3685177b 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -1,6 +1,6 @@ module ice_EVP_interfaces interface - subroutine stress_tensor(ice_strength, ice, partit, mesh) + subroutine stress_tensor(ice, partit, mesh) USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -8,10 +8,9 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP) , intent(in) :: ice_strength(partit%mydim_elem2D) end subroutine - subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) + subroutine stress2rhs(ice, partit, mesh) USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -19,7 +18,6 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) type(t_ice) , intent(inout), target :: ice type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh - real(kind=WP) , intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) end subroutine end interface end module @@ -45,7 +43,7 @@ subroutine EVPdynamics(ice, partit, mesh) ! EVP rheology. The routine computes stress tensor components based on ice ! velocity field. They are stored as elemental arrays (sigma11, sigma22 and ! sigma12). The ocean velocity is at nodal locations. -subroutine stress_tensor(ice_strength, ice, partit, mesh) +subroutine stress_tensor(ice, partit, mesh) USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -60,7 +58,6 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) type(t_ice) , intent(inout), target :: ice type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - real(kind=WP), intent(in) :: ice_strength(partit%mydim_elem2D) integer :: el real(kind=WP) :: det1, det2, dte, vale, r1, r2, r3, si1, si2 real(kind=WP) :: zeta, delta, delta_inv, d1, d2 @@ -69,19 +66,20 @@ subroutine stress_tensor(ice_strength, ice, partit, mesh) real(kind=WP), dimension(:), pointer :: u_ice, v_ice real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: ice_strength #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - u_ice => ice%uice(:) - v_ice => ice%vice(:) - eps11 => ice%work%eps11(:) - eps12 => ice%work%eps12(:) - eps22 => ice%work%eps22(:) - sigma11 => ice%work%sigma11(:) - sigma12 => ice%work%sigma12(:) - sigma22 => ice%work%sigma22(:) - + u_ice => ice%uice(:) + v_ice => ice%vice(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + ice_strength=> ice%work%ice_strength(:) !___________________________________________________________________________ vale = 1.0_WP/(ice%ellipse**2) dte = ice%ice_dt/(1.0_WP*ice%evp_rheol_steps) @@ -168,7 +166,7 @@ end subroutine stress_tensor ! EVP implementation: ! Computes the divergence of stress tensor and puts the result into the ! rhs vectors -subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) +subroutine stress2rhs(ice, partit, mesh) USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP @@ -179,24 +177,26 @@ subroutine stress2rhs(inv_areamass, ice_strength, ice, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - REAL(kind=WP), intent(in) :: inv_areamass(partit%myDim_nod2D), ice_strength(partit%mydim_elem2D) INTEGER :: n, el, k REAL(kind=WP) :: val3 !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m + real(kind=WP), dimension(:), pointer :: inv_areamass, ice_strength #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - sigma11 => ice%work%sigma11(:) - sigma12 => ice%work%sigma12(:) - sigma22 => ice%work%sigma22(:) - u_rhs_ice => ice%uice_rhs(:) - v_rhs_ice => ice%vice_rhs(:) - rhs_a => ice%data(1)%values_rhs(:) - rhs_m => ice%data(2)%values_rhs(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + inv_areamass => ice%work%inv_areamass(:) + ice_strength => ice%work%ice_strength(:) !___________________________________________________________________________ val3=1/3.0_WP @@ -402,13 +402,9 @@ subroutine EVPdynamics(ice, partit, mesh) ! for full free surface include pressure from ice mass !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(el, elnodes, msum, asum, aa, p_ice, elevation_elem, elevation_dx, elevation_dy) !$OMP DO - do el = 1, myDim_elem2D + eDim_elem2D - ice_strength(el)=0.0_WP - end do -!$OMP END DO -!$OMP DO - do el = 1,myDim_elem2D + do el = 1,myDim_elem2D elnodes = elem2D_nodes(:,el) + ice_strength(el)=0.0_WP !___________________________________________________________________ ! if element has any cavity node skip it if (ulevels(el) > 1) cycle @@ -514,14 +510,9 @@ subroutine EVPdynamics(ice, partit, mesh) rdg_shear_elem(:) = 0.0_WP #endif do shortstep=1, ice%evp_rheol_steps -!write(*,*) partit%mype, shortstep, 'CP1' !_______________________________________________________________________ - call stress_tensor(ice_strength(1:myDim_nod2D), ice, partit, mesh) -!call MPI_Barrier(partit%MPI_COMM_FESOM, partit%MPIerr) -!write(*,*) partit%mype, shortstep, 'CP2' - call stress2rhs(inv_areamass(1:myDim_nod2D), ice_strength(1:myDim_elem2D), ice, partit, mesh) -!call MPI_Barrier(partit%MPI_COMM_FESOM, partit%MPIerr) -!write(*,*) partit%mype, shortstep, 'CP3' + call stress_tensor(ice, partit, mesh) + call stress2rhs(ice, partit, mesh) !_______________________________________________________________________ !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, ed, umod, drag, rhsu, rhsv, r_a, r_b, det) !$OMP DO From 6d7308c8da0f09acc5ec4d19788c0b6774318edc Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 9 Dec 2021 12:50:23 +0100 Subject: [PATCH 772/909] OMP for the ice thermodynamics (OCEAN ONLY MODE) --- src/ice_setup_step.F90 | 2 ++ src/ice_thermo_oce.F90 | 63 +++++++++++++++++++++++------------------- 2 files changed, 37 insertions(+), 28 deletions(-) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index b3368bd03..ebcbdbc17 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -206,6 +206,7 @@ subroutine ice_timestep(step, ice, partit, mesh) #endif /* (__icepack) */ !___________________________________________________________________________ +!$OMP PARALLEL DO do i=1,myDim_nod2D+eDim_nod2D if ( ( U_ice(i)/=0.0_WP .and. mesh%ulevels_nod2d(i)>1) .or. (V_ice(i)/=0.0_WP .and. mesh%ulevels_nod2d(i)>1) ) then write(*,*) " --> found cavity velocity /= 0.0_WP , ", mype @@ -215,6 +216,7 @@ subroutine ice_timestep(step, ice, partit, mesh) write(*,*) end if end do +!$OMP END PARALLEL DO t3=MPI_Wtime() rtime_ice = rtime_ice + (t3-t0) rtime_tot = rtime_tot + (t3-t0) diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index 5ae98e19a..e2aa318e4 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -74,7 +74,8 @@ subroutine cut_off(ice, partit, mesh) implicit none type(t_mesh), intent(in), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_ice), intent(inout), target :: ice + type(t_ice), intent(inout), target :: ice + integer :: n !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow @@ -94,38 +95,39 @@ subroutine cut_off(ice, partit, mesh) !___________________________________________________________________________ ! lower cutoff: a_ice - where(a_ice>1.0_WP) a_ice=1.0_WP - +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n) +DO n=1, myDim_nod2D+eDim_nod2D + if (a_ice(n) > 1.0_WP) a_ice(n)=1.0_WP ! upper cutoff: a_ice - where(a_ice<0.1e-8_WP) - a_ice=0.0_WP + if (a_ice(n) < .1e-8_WP) then + a_ice(n)=0.0_WP #if defined (__oifs) || defined (__ifsinterface) - m_ice=0.0_WP - m_snow=0.0_WP - ice_temp=273.15_WP + m_ice(n) =0.0_WP + m_snow(n) =0.0_WP + ice_temp(n)=273.15_WP #endif /* (__oifs) */ - end where - + end if !___________________________________________________________________________ ! lower cutoff: m_ice - where(m_ice<0.1e-8_WP) - m_ice=0.0_WP + if (m_ice(n) < .1e-8_WP) then + m_ice(n)=0.0_WP #if defined (__oifs) || defined (__ifsinterface) - m_snow=0.0_WP - a_ice=0.0_WP - ice_temp=273.15_WP + m_snow(n) =0.0_WP + a_ice(n) =0.0_WP + ice_temp(n)=273.15_WP #endif /* (__oifs) */ - end where + end if !___________________________________________________________________________ #if defined (__oifs) || defined (__ifsinterface) - where(ice_temp>273.15_WP) ice_temp=273.15_WP + if (ice_temp(n) > 273.15_WP) ice_temp(n)=273.15_WP #endif /* (__oifs) */ #if defined (__oifs) || defined (__ifsinterface) - where(ice_temp < 173.15_WP .and. a_ice >= 0.1e-8_WP) ice_temp=271.35_WP + if (ice_temp(n) < 173.15_WP .and. a_ice(n) >= 0.1e-8_WP) ice_temp(n)=271.35_WP #endif /* (__oifs) */ - +END DO +!$OMP END PARALLEL DO end subroutine cut_off #if !defined (__oasis) && !defined (__ifsinterface) @@ -213,16 +215,24 @@ subroutine thermodynamics(ice, partit, mesh) ! u_wind and v_wind are always at nodes !___________________________________________________________________________ ! Friction velocity - ustar_aux=0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, elem, h, hsn, A, fsh, flo, Ta, qa, rain, snow, runo, rsss, rsf, evap_in, ug, ustar, T_oc, S_oc, & +!$OMP h_ml, t, ch, ce, ch_i, ce_i, fw, ehf, evap, ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout, & +!$OMP subli, lid_clo, lat) +!$OMP DO do i=1, myDim_nod2D ustar=0.0_WP if(ulevels_nod2d(i)>1) cycle ustar=((u_ice(i)-u_w(i))**2 + (v_ice(i)-v_w(i))**2) ustar_aux(i)=sqrt(ustar*ice%cd_oce_ice) end do +!$OMP END DO +!$OMP MASTER call exchange_nod(ustar_aux, partit) - +!$OMP END MASTER +!$OMP BARRIER + !___________________________________________________________________________ +!$OMP DO do i=1, myDim_nod2d+eDim_nod2D !_______________________________________________________________________ ! if there is a cavity no sea ice thermodynamics is apllied @@ -246,7 +256,6 @@ subroutine thermodynamics(ice, partit, mesh) snow=prec_rain(i) endif evap_in=evaporation(i) !evap_in: positive up - !!PS evap_in=0.0_WP else rain = prec_rain(i) snow = prec_snow(i) @@ -263,14 +272,11 @@ subroutine thermodynamics(ice, partit, mesh) ce = Ce_atm_oce_arr(i) ch_i = Ch_atm_ice ce_i = Ce_atm_ice - !!PS h_ml = 10.0_WP ! 10.0 or 30. used previously - !!PS h_ml = 5.0_WP ! 10.0 or 30. used previously h_ml = 2.5_WP ! 10.0 or 30. used previously - !!PS h_ml = 1.25_WP ! 10.0 or 30. used previously fw = 0.0_WP ehf = 0.0_WP lid_Clo=ice%thermo%h0 - if (geo_coord_nod2D(2,i)>0) then !TODO 2 separate pars for each hemisphere + if (geo_coord_nod2D(2, i)>0) then !TODO 2 separate pars for each hemisphere lid_clo=0.5_WP else lid_clo=0.5_WP @@ -316,9 +322,10 @@ subroutine thermodynamics(ice, partit, mesh) if (.not. l_snow) then prec_rain(i) = rain prec_snow(i) = snow - end if - + end if end do +!$OMP END DO +!$OMP END PARALLEL end subroutine thermodynamics ! ! From de4522519d83fd68973f4e0d57ef57beb039ef09 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 9 Dec 2021 17:12:41 +0100 Subject: [PATCH 773/909] use the derived types in files from parallel_restart to be able to compile --- src/io_fesom_file.F90 | 24 +++--- src/io_restart.F90 | 158 ++++++++++++++++++++-------------- src/io_restart_file_group.F90 | 46 ++++++---- src/io_scatter.F90 | 36 ++++---- 4 files changed, 154 insertions(+), 110 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index ce6944920..39186ab39 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -2,6 +2,7 @@ module io_fesom_file_module use io_netcdf_file_module use async_threads_module + use MOD_PARTIT implicit none public fesom_file_type private @@ -36,6 +37,8 @@ module io_fesom_file_module type(thread_type) thread logical :: thread_running = .false. integer :: comm + integer rank + type(t_partit), pointer :: partit logical gather_and_write contains procedure, public :: async_read_and_scatter_variables, async_gather_and_write_variables, join, init, is_iorank, rec_count, time_varindex, time_dimindex @@ -64,10 +67,9 @@ module io_fesom_file_module function is_iorank(this) result(x) - use g_PARSUP class(fesom_file_type), intent(in) :: this logical x - x = (mype == this%iorank) + x = (this%rank == this%iorank) end function @@ -102,14 +104,15 @@ function time_dimindex(this) result(x) end function - subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures - use g_PARSUP + subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl, partit) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures use io_netcdf_workaround_module use io_gather_module + use MOD_PARTIT class(fesom_file_type), target, intent(inout) :: this integer mesh_nod2d integer mesh_elem2d integer mesh_nl + type(t_partit), target, intent(in) :: partit ! EO parameters type(fesom_file_type_ptr), allocatable :: tmparr(:) logical async_netcdf_allowed @@ -141,11 +144,12 @@ subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl) ! todo: would like to ca all_fesom_files(size(all_fesom_files))%ptr => this this%fesom_file_index = size(all_fesom_files) + this%partit => partit ! set up async output - this%iorank = next_io_rank(MPI_COMM_FESOM, async_netcdf_allowed) + this%iorank = next_io_rank(partit%MPI_COMM_FESOM, async_netcdf_allowed, partit) - call MPI_Comm_dup(MPI_COMM_FESOM, this%comm, err) + call MPI_Comm_dup(partit%MPI_COMM_FESOM, this%comm, err) call this%thread%initialize(async_worker, this%fesom_file_index) if(.not. async_netcdf_allowed) call this%thread%disable_async() @@ -204,9 +208,9 @@ subroutine read_and_scatter_variables(this) end if if(var%is_elem_based) then - call scatter_elem2D(var%global_level_data, laux, this%iorank, this%comm) + call scatter_elem2D(var%global_level_data, laux, this%iorank, this%comm, this%partit) else - call scatter_nod2D(var%global_level_data, laux, this%iorank, this%comm) + call scatter_nod2D(var%global_level_data, laux, this%iorank, this%comm, this%partit) end if ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI var%external_local_data_ptr(lvl,:) = laux ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) @@ -251,9 +255,9 @@ subroutine gather_and_write_variables(this) laux = var%local_data_copy(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) if(var%is_elem_based) then - call gather_elem2D(laux, var%global_level_data, this%iorank, 42, this%comm) + call gather_elem2D(laux, var%global_level_data, this%iorank, 42, this%comm, this%partit) else - call gather_nod2D (laux, var%global_level_data, this%iorank, 42, this%comm) + call gather_nod2D (laux, var%global_level_data, this%iorank, 42, this%comm, this%partit) end if if(this%is_iorank()) then diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 1ae1087c1..7b6c4b6ac 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -2,8 +2,9 @@ MODULE io_RESTART use restart_file_group_module use g_clock use o_arrays - use i_arrays use g_cvmix_tke + use MOD_TRACER + use MOD_ICE implicit none public :: restart, finalize_restart private @@ -27,13 +28,16 @@ MODULE io_RESTART !-------------------------------------------------------------------------------------------- ! ini_ocean_io initializes ocean_file datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ocean_io(year, mesh) +subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) integer, intent(in) :: year integer :: j character(500) :: longname character(500) :: trname, units character(4) :: cyear - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), target :: tracers + type(t_dyn), target :: dynamics logical, save :: has_been_called = .false. write(cyear,'(i4)') year @@ -47,32 +51,32 @@ subroutine ini_ocean_io(year, mesh) !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call oce_files%def_node_var('ssh', 'sea surface elevation', 'm', eta_n, mesh) + call oce_files%def_node_var('ssh', 'sea surface elevation', 'm', dynamics%eta_n, mesh, partit) !___ALE related fields______________________________________________________ - call oce_files%def_node_var('hbar', 'ALE surface elevation', 'm', hbar, mesh) -!!PS call oce_files%def_node_var('ssh_rhs', 'RHS for the elevation', '?', ssh_rhs, mesh) - call oce_files%def_node_var('ssh_rhs_old', 'RHS for the elevation', '?', ssh_rhs_old, mesh) - call oce_files%def_node_var('hnode', 'nodal layer thickness', 'm', hnode, mesh) + call oce_files%def_node_var('hbar', 'ALE surface elevation', 'm', mesh%hbar, mesh, partit) +!!PS call oce_files%def_node_var('ssh_rhs', 'RHS for the elevation', '?', ssh_rhs, mesh, partit) + call oce_files%def_node_var('ssh_rhs_old', 'RHS for the elevation', '?', dynamics%ssh_rhs_old, mesh, partit) + call oce_files%def_node_var('hnode', 'nodal layer thickness', 'm', mesh%hnode, mesh, partit) !___Define the netCDF variables for 3D fields_______________________________ - call oce_files%def_elem_var('u', 'zonal velocity', 'm/s', UV(1,:,:), mesh) - call oce_files%def_elem_var('v', 'meridional velocity', 'm/s', UV(2,:,:), mesh) - call oce_files%def_elem_var('urhs_AB', 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:), mesh) - call oce_files%def_elem_var('vrhs_AB', 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:), mesh) + call oce_files%def_elem_var('u', 'zonal velocity', 'm/s', dynamics%uv(1,:,:), mesh, partit) + call oce_files%def_elem_var('v', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), mesh, partit) + call oce_files%def_elem_var('urhs_AB', 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:), mesh, partit) + call oce_files%def_elem_var('vrhs_AB', 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:), mesh, partit) !___Save restart variables for TKE and IDEMIX_________________________________ if (trim(mix_scheme)=='cvmix_TKE' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then - call oce_files%def_node_var('tke', 'Turbulent Kinetic Energy', 'm2/s2', tke(:,:), mesh) + call oce_files%def_node_var('tke', 'Turbulent Kinetic Energy', 'm2/s2', tke(:,:), mesh, partit) endif if (trim(mix_scheme)=='cvmix_IDEMIX' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then - call oce_files%def_node_var('iwe', 'Internal Wave eneryy', 'm2/s2', tke(:,:), mesh) + call oce_files%def_node_var('iwe', 'Internal Wave eneryy', 'm2/s2', tke(:,:), mesh, partit) endif - if (visc_option==8) then - call oce_files%def_elem_var('uke', 'unresolved kinetic energy', 'm2/s2', uke(:,:), mesh) - call oce_files%def_elem_var('uke_rhs', 'unresolved kinetic energy rhs', 'm2/s2', uke_rhs(:,:), mesh) + if (dynamics%opt_visc==8) then + call oce_files%def_elem_var('uke', 'unresolved kinetic energy', 'm2/s2', uke(:,:), mesh, partit) + call oce_files%def_elem_var('uke_rhs', 'unresolved kinetic energy rhs', 'm2/s2', uke_rhs(:,:), mesh, partit) endif - do j=1,num_tracers + do j=1,tracers%num_tracers SELECT CASE (j) CASE(1) trname='temp' @@ -87,22 +91,24 @@ subroutine ini_ocean_io(year, mesh) write(longname,'(A15,i1)') 'passive tracer ', j units='none' END SELECT - call oce_files%def_node_var(trim(trname), trim(longname), trim(units), tr_arr(:,:,j), mesh) + call oce_files%def_node_var(trim(trname), trim(longname), trim(units), tracers%data(j)%values(:,:), mesh, partit) longname=trim(longname)//', Adams–Bashforth' - call oce_files%def_node_var(trim(trname)//'_AB', trim(longname), trim(units), tr_arr_old(:,:,j), mesh) + call oce_files%def_node_var(trim(trname)//'_AB', trim(longname), trim(units), tracers%data(j)%valuesAB(:,:), mesh, partit) end do - call oce_files%def_node_var('w', 'vertical velocity', 'm/s', Wvel, mesh) - call oce_files%def_node_var('w_expl', 'vertical velocity', 'm/s', Wvel_e, mesh) - call oce_files%def_node_var('w_impl', 'vertical velocity', 'm/s', Wvel_i, mesh) + call oce_files%def_node_var('w', 'vertical velocity', 'm/s', dynamics%w, mesh, partit) + call oce_files%def_node_var('w_expl', 'vertical velocity', 'm/s', dynamics%w_e, mesh, partit) + call oce_files%def_node_var('w_impl', 'vertical velocity', 'm/s', dynamics%w_i, mesh, partit) end subroutine ini_ocean_io ! !-------------------------------------------------------------------------------------------- ! ini_ice_io initializes ice_file datatype which contains information of all variables need to be written into ! the ice restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ice_io(year, mesh) +subroutine ini_ice_io(year, ice, partit, mesh) integer, intent(in) :: year character(4) :: cyear type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice), target :: ice logical, save :: has_been_called = .false. write(cyear,'(i4)') year @@ -115,21 +121,21 @@ subroutine ini_ice_io(year, mesh) !===================== Definition part ===================================== !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ - call ice_files%def_node_var('area', 'ice concentration [0 to 1]', '%', a_ice, mesh) - call ice_files%def_node_var('hice', 'effective ice thickness', 'm', m_ice, mesh) - call ice_files%def_node_var('hsnow', 'effective snow thickness', 'm', m_snow, mesh) - call ice_files%def_node_var('uice', 'zonal velocity', 'm/s', u_ice, mesh) - call ice_files%def_node_var('vice', 'meridional velocity', 'm', v_ice, mesh) + call ice_files%def_node_var('area', 'ice concentration [0 to 1]', '%', ice%data(1)%values(:), mesh, partit) + call ice_files%def_node_var('hice', 'effective ice thickness', 'm', ice%data(2)%values(:), mesh, partit) + call ice_files%def_node_var('hsnow', 'effective snow thickness', 'm', ice%data(3)%values(:), mesh, partit) + call ice_files%def_node_var('uice', 'zonal velocity', 'm/s', ice%uice, mesh, partit) + call ice_files%def_node_var('vice', 'meridional velocity', 'm', ice%vice, mesh, partit) #if defined (__oifs) - call ice_files%def_node_var_optional('ice_albedo', 'ice albedo', '-', ice_alb, mesh) - call ice_files%def_node_var_optional('ice_temp', 'ice surface temperature', 'K', ice_temp, mesh) + call ice_files%def_node_var_optional('ice_albedo', 'ice albedo', '-', ice%atmcoupl%ice_alb, mesh, partit) + call ice_files%def_node_var_optional('ice_temp', 'ice surface temperature', 'K', ice%data(4)%values, mesh, partit) #endif /* (__oifs) */ end subroutine ini_ice_io ! !-------------------------------------------------------------------------------------------- ! -subroutine restart(istep, l_read, mesh) +subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) #if defined(__icepack) icepack restart not merged here ! produce a compiler error if USE_ICEPACK=ON; todo: merge icepack restart from 68d8b8b @@ -144,48 +150,53 @@ subroutine restart(istep, l_read, mesh) logical :: l_read logical :: is_portable_restart_write, is_raw_restart_write type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice logical dumpfiles_exist logical, save :: initialized = .false. integer cstat, estat character(500) cmsg ! there seems to be no documentation about the max size of this text + integer mpierr if(.not. initialized) then initialized = .true. - raw_restart_dirpath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(npes) - raw_restart_infopath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(npes)//".info" + raw_restart_dirpath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(partit%npes) + raw_restart_infopath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(partit%npes)//".info" if(raw_restart_length_unit /= "off") then - if(mype == RAW_RESTART_METADATA_RANK) then + if(partit%mype == RAW_RESTART_METADATA_RANK) then ! inquire does not work for directories, the directory might already exist call execute_command_line("mkdir -p "//raw_restart_dirpath, exitstat=estat, cmdstat=cstat, cmdmsg=cmsg) ! sometimes does not work on aleph if(cstat /= 0) print *,"creating raw restart directory ERROR ", trim(cmsg) end if - call MPI_Barrier(MPI_COMM_FESOM, mpierr) ! make sure the dir has been created before we continue... + call MPI_Barrier(partit%MPI_COMM_FESOM, mpierr) ! make sure the dir has been created before we continue... end if end if ctime=timeold+(dayold-1.)*86400 if (.not. l_read) then - call ini_ocean_io(yearnew, mesh) - if (use_ice) call ini_ice_io (yearnew, mesh) + call ini_ocean_io(yearnew, dynamics, tracers, partit, mesh) + if (use_ice) call ini_ice_io (yearnew, ice, partit, mesh) else - call ini_ocean_io(yearold, mesh) - if (use_ice) call ini_ice_io (yearold, mesh) + call ini_ocean_io(yearold, dynamics, tracers, partit, mesh) + if (use_ice) call ini_ice_io (yearold, ice, partit, mesh) end if if (l_read) then ! determine if we can load raw restart dump files - if(mype == RAW_RESTART_METADATA_RANK) then + if(partit%mype == RAW_RESTART_METADATA_RANK) then inquire(file=raw_restart_infopath, exist=dumpfiles_exist) end if - call MPI_Bcast(dumpfiles_exist, 1, MPI_LOGICAL, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) + call MPI_Bcast(dumpfiles_exist, 1, MPI_LOGICAL, RAW_RESTART_METADATA_RANK, partit%MPI_COMM_FESOM, mpierr) if(dumpfiles_exist) then - call read_all_raw_restarts() + call read_all_raw_restarts(partit%MPI_COMM_FESOM, partit%mype) else - call read_restart(oce_path, oce_files) - if (use_ice) call read_restart(ice_path, ice_files) + call read_restart(oce_path, oce_files, partit%MPI_COMM_FESOM, partit%mype) + if (use_ice) call read_restart(ice_path, ice_files, partit%MPI_COMM_FESOM, partit%mype) ! immediately create a raw restart if(raw_restart_length_unit /= "off") then - call write_all_raw_restarts(istep) + call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) end if end if end if @@ -202,17 +213,17 @@ subroutine restart(istep, l_read, mesh) if(is_portable_restart_write) then ! write restart - if(mype==0) write(*,*)'Do output (netCDF, restart) ...' + if(partit%mype==0) write(*,*)'Do output (netCDF, restart) ...' call write_restart(oce_path, oce_files, istep) if(use_ice) call write_restart(ice_path, ice_files, istep) end if if(is_raw_restart_write) then - call write_all_raw_restarts(istep) + call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) end if ! actualize clock file to latest restart point - if (mype==0) then + if (partit%mype==0) then if(is_portable_restart_write .or. is_raw_restart_write) then write(*,*) ' --> actualize clock file to latest restart point' call clock_finish @@ -273,13 +284,15 @@ subroutine write_restart(path, filegroup, istep) end subroutine -subroutine write_all_raw_restarts(istep) - integer, intent(in):: istep +subroutine write_all_raw_restarts(istep, mpicomm, mype) + integer, intent(in) :: istep + integer, intent(in) :: mpicomm + integer, intent(in) :: mype ! EO parameters integer cstep integer fileunit - open(newunit = fileunit, file = raw_restart_dirpath//'/'//mpirank_to_txt()//'.dump', form = 'unformatted') + open(newunit = fileunit, file = raw_restart_dirpath//'/'//mpirank_to_txt(mpicomm)//'.dump', form = 'unformatted') call write_raw_restart_group(oce_files, fileunit) if(use_ice) call write_raw_restart_group(ice_files, fileunit) close(fileunit) @@ -311,11 +324,16 @@ subroutine write_raw_restart_group(filegroup, fileunit) end subroutine -subroutine read_all_raw_restarts() +subroutine read_all_raw_restarts(mpicomm, mype) + integer, intent(in) :: mpicomm + integer, intent(in) :: mype + ! EO parameters integer rstep real(kind=WP) rtime integer fileunit integer status + integer mpierr + include 'mpif.h' if(mype == RAW_RESTART_METADATA_RANK) then ! read metadata info for the raw restart @@ -338,15 +356,15 @@ subroutine read_all_raw_restarts() print *,"reading raw restart from "//raw_restart_dirpath end if ! sync globalstep with the other processes to let all processes writing portable restart files know the globalstep - call MPI_Bcast(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, MPI_COMM_FESOM, MPIerr) + call MPI_Bcast(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, mpicomm, mpierr) - open(newunit = fileunit, status = 'old', iostat = status, file = raw_restart_dirpath//'/'//mpirank_to_txt()//'.dump', form = 'unformatted') + open(newunit = fileunit, status = 'old', iostat = status, file = raw_restart_dirpath//'/'//mpirank_to_txt(mpicomm)//'.dump', form = 'unformatted') if(status == 0) then call read_raw_restart_group(oce_files, fileunit) if(use_ice) call read_raw_restart_group(ice_files, fileunit) close(fileunit) else - print *,"can not open ",raw_restart_dirpath//'/'//mpirank_to_txt()//'.dump' + print *,"can not open ",raw_restart_dirpath//'/'//mpirank_to_txt(mpicomm)//'.dump' stop 1 end if end subroutine @@ -389,10 +407,11 @@ subroutine finalize_restart() end subroutine -subroutine read_restart(path, filegroup) - use g_PARSUP +subroutine read_restart(path, filegroup, mpicomm, mype) character(len=*), intent(in) :: path type(restart_file_group), intent(inout) :: filegroup + integer, intent(in) :: mpicomm + integer, intent(in) :: mype ! EO parameters real(kind=WP) rtime integer i @@ -402,6 +421,8 @@ subroutine read_restart(path, filegroup) logical, allocatable :: skip_file(:) integer current_iorank_snd, current_iorank_rcv integer max_globalstep + integer mpierr + include 'mpif.h' allocate(skip_file(filegroup%nfiles)) skip_file = .false. @@ -442,8 +463,8 @@ subroutine read_restart(path, filegroup) ! iorank already knows if we skip the file, tell the others if(.not. filegroup%files(i)%must_exist_on_read) then - call MPI_Allreduce(current_iorank_snd, current_iorank_rcv, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_Bcast(skip_file(i), 1, MPI_LOGICAL, current_iorank_rcv, MPI_COMM_FESOM, MPIerr) + call MPI_Allreduce(current_iorank_snd, current_iorank_rcv, 1, MPI_INTEGER, MPI_SUM, mpicomm, mpierr) + call MPI_Bcast(skip_file(i), 1, MPI_LOGICAL, current_iorank_rcv, mpicomm, mpierr) end if if(.not. skip_file(i)) call filegroup%files(i)%async_read_and_scatter_variables() @@ -476,7 +497,7 @@ subroutine read_restart(path, filegroup) ! sync globalstep with processes which may have skipped a restart upon reading and thus need to know the globalstep when writing their restart if( any(skip_file .eqv. .true.) ) then - call MPI_Allreduce(globalstep, max_globalstep, 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) + call MPI_Allreduce(globalstep, max_globalstep, 1, MPI_INTEGER, MPI_MAX, mpicomm, mpierr) globalstep = max_globalstep end if @@ -484,9 +505,9 @@ subroutine read_restart(path, filegroup) if(filegroup%nfiles >= 1) then ! use the first restart I/O process to send the globalstep if( filegroup%files(1)%is_iorank() .and. (mype .ne. RAW_RESTART_METADATA_RANK)) then - call MPI_Send(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, 42, MPI_COMM_FESOM, MPIerr) + call MPI_Send(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, 42, mpicomm, mpierr) else if((mype == RAW_RESTART_METADATA_RANK) .and. (.not. filegroup%files(1)%is_iorank())) then - call MPI_Recv(globalstep, 1, MPI_INTEGER, MPI_ANY_SOURCE, 42, MPI_COMM_FESOM, mpistatus, MPIerr) + call MPI_Recv(globalstep, 1, MPI_INTEGER, MPI_ANY_SOURCE, 42, mpicomm, mpistatus, mpierr) end if end if end subroutine @@ -521,11 +542,18 @@ function is_due(unit, frequency, istep) result(d) end function - function mpirank_to_txt() result(txt) - use g_PARSUP + function mpirank_to_txt(mpicomm) result(txt) use fortran_utils + integer, intent(in) :: mpicomm character(:), allocatable :: txt ! EO parameters + integer mype + integer npes + integer mpierr + include 'mpif.h' + + call MPI_Comm_Rank(mpicomm, mype, mpierr) + call MPI_Comm_Size(mpicomm, npes, mpierr) txt = int_to_txt_pad(mype,int(log10(real(npes)))+1) ! pad to the width of the number of processes end function diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 index 244dea065..0acbd78bb 100644 --- a/src/io_restart_file_group.F90 +++ b/src/io_restart_file_group.F90 @@ -1,6 +1,7 @@ ! helper module to treat split restart files similar as the previous single-file ones module restart_file_group_module use io_fesom_file_module + use MOD_PARTIT implicit none public restart_file_group private @@ -33,67 +34,72 @@ module restart_file_group_module contains - subroutine def_node_var_2d(this, name, longname, units, local_data, mesh) + subroutine def_node_var_2d(this, name, longname, units, local_data, mesh, partit) use mod_mesh class(restart_file_group), target, intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit ! EO parameters - call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl, partit) call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) end subroutine - subroutine def_node_var_3d(this, name, longname, units, local_data, mesh) + subroutine def_node_var_3d(this, name, longname, units, local_data, mesh, partit) use mod_mesh class(restart_file_group), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit ! EO parameters - call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl, partit) call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) end subroutine - subroutine def_elem_var_2d(this, name, longname, units, local_data, mesh) + subroutine def_elem_var_2d(this, name, longname, units, local_data, mesh, partit) use mod_mesh class(restart_file_group), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit ! EO parameters - call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl, partit) call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) end subroutine - subroutine def_elem_var_3d(this, name, longname, units, local_data, mesh) + subroutine def_elem_var_3d(this, name, longname, units, local_data, mesh, partit) use mod_mesh class(restart_file_group), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit ! EO parameters - call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl, partit) call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) end subroutine - subroutine add_file(g, name, must_exist_on_read, mesh_nod2d, mesh_elem2d, mesh_nl) + subroutine add_file(g, name, must_exist_on_read, mesh_nod2d, mesh_elem2d, mesh_nl, partit) class(restart_file_group), target, intent(inout) :: g character(len=*), intent(in) :: name logical must_exist_on_read integer mesh_nod2d, mesh_elem2d, mesh_nl + type(t_partit), intent(in) :: partit ! EO parameters type(restart_file_type), pointer :: f @@ -104,64 +110,68 @@ subroutine add_file(g, name, must_exist_on_read, mesh_nod2d, mesh_elem2d, mesh_n f%path = "" f%varname = name f%must_exist_on_read = must_exist_on_read - call f%fesom_file_type%init(mesh_nod2d, mesh_elem2d, mesh_nl) + call f%fesom_file_type%init(mesh_nod2d, mesh_elem2d, mesh_nl, partit) ! this is specific for a restart file f%iter_varindex = f%add_var_int('iter', [f%time_dimindex()]) end subroutine - subroutine def_node_var_2d_optional(this, name, longname, units, local_data, mesh) + subroutine def_node_var_2d_optional(this, name, longname, units, local_data, mesh, partit) use mod_mesh class(restart_file_group), target, intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit ! EO parameters - call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl, partit) call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) end subroutine - subroutine def_node_var_3d_optional(this, name, longname, units, local_data, mesh) + subroutine def_node_var_3d_optional(this, name, longname, units, local_data, mesh, partit) use mod_mesh class(restart_file_group), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit ! EO parameters - call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl, partit) call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) end subroutine - subroutine def_elem_var_2d_optional(this, name, longname, units, local_data, mesh) + subroutine def_elem_var_2d_optional(this, name, longname, units, local_data, mesh, partit) use mod_mesh class(restart_file_group), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit ! EO parameters - call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl, partit) call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) end subroutine - subroutine def_elem_var_3d_optional(this, name, longname, units, local_data, mesh) + subroutine def_elem_var_3d_optional(this, name, longname, units, local_data, mesh, partit) use mod_mesh class(restart_file_group), intent(inout) :: this character(len=*), intent(in) :: name character(len=*), intent(in) :: units, longname real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit ! EO parameters - call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl) + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl, partit) call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) end subroutine diff --git a/src/io_scatter.F90 b/src/io_scatter.F90 index e7e54e235..27e8714ce 100644 --- a/src/io_scatter.F90 +++ b/src/io_scatter.F90 @@ -7,14 +7,14 @@ module io_scatter_module ! thread-safe procedure - subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) - use g_PARSUP - use o_mesh + subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm, partit) + use MOD_PARTIT use, intrinsic :: iso_fortran_env, only: real64 real(real64), intent(in) :: arr2D_global(:) real(real64), intent(out) :: arr2D_local(:) integer, intent(in) :: root_rank ! rank of sending process integer, intent(in) :: comm + type(t_partit), intent(in) :: partit ! EO args integer :: tag = 0 integer :: mpi_precision = MPI_DOUBLE_PRECISION @@ -23,12 +23,13 @@ subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) integer, allocatable :: remote_list_nod2d(:) real(real64), allocatable :: sendbuf(:) integer node_size + integer mpierr - call assert(size(arr2D_local) == size(mylist_nod2d), __LINE__) ! == mydim_nod2d+edim_nod2d, i.e. partition nodes + halo nodes + call assert(size(arr2D_local) == size(partit%mylist_nod2d), __LINE__) ! == mydim_nod2d+edim_nod2d, i.e. partition nodes + halo nodes - if(mype == root_rank) then - arr2D_local = arr2D_global(mylist_nod2d) - do remote_rank = 0, npes-1 + if(partit%mype == root_rank) then + arr2D_local = arr2D_global(partit%mylist_nod2d) + do remote_rank = 0, partit%npes-1 if(remote_rank == root_rank) cycle ! receive remote partition 2D size @@ -47,9 +48,9 @@ subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) end do else - node_size = size(mylist_nod2d) + node_size = size(partit%mylist_nod2d) call mpi_send(node_size, 1, mpi_integer, root_rank, tag+0, comm, mpierr) - call mpi_send(mylist_nod2d(1), node_size, mpi_integer, root_rank, tag+1, comm, mpierr) + call mpi_send(partit%mylist_nod2d(1), node_size, mpi_integer, root_rank, tag+1, comm, mpierr) call mpi_recv(arr2D_local(1), node_size, mpi_precision, root_rank, tag+2, comm, status, mpierr) ! aleph blocks here end if @@ -61,14 +62,14 @@ subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm) ! thread-safe procedure - subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) - use g_PARSUP - use o_mesh + subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm, partit) + use MOD_PARTIT use, intrinsic :: iso_fortran_env, only: real64 real(real64), intent(in) :: arr2D_global(:) real(real64), intent(out) :: arr2D_local(:) integer, intent(in) :: root_rank ! rank of sending process integer, intent(in) :: comm + type(t_partit), intent(in) :: partit ! EO args integer :: tag = 0 integer :: mpi_precision = MPI_DOUBLE_PRECISION @@ -77,13 +78,14 @@ subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) integer, allocatable :: remote_list_elem2d(:) real(real64), allocatable :: sendbuf(:) integer elem_size + integer mpierr elem_size = size(arr2D_local) - call assert(elem_size == mydim_elem2d+edim_elem2d, __LINE__) ! mylist_elem2d is larger and can not be used for comparison here + call assert(elem_size == partit%mydim_elem2d+partit%edim_elem2d, __LINE__) ! mylist_elem2d is larger and can not be used for comparison here - if(mype == root_rank) then - arr2D_local = arr2D_global(myList_elem2D(1:elem_size)) - do remote_rank = 0, npes-1 + if(partit%mype == root_rank) then + arr2D_local = arr2D_global(partit%myList_elem2D(1:elem_size)) + do remote_rank = 0, partit%npes-1 if(remote_rank == root_rank) cycle ! receive remote partition 2D size @@ -103,7 +105,7 @@ subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm) else call mpi_send(elem_size, 1, mpi_integer, root_rank, tag+0, comm, mpierr) - call mpi_send(mylist_elem2d(1), elem_size, mpi_integer, root_rank, tag+1, comm, mpierr) + call mpi_send(partit%mylist_elem2d(1), elem_size, mpi_integer, root_rank, tag+1, comm, mpierr) call mpi_recv(arr2D_local(1), elem_size, mpi_precision, root_rank, tag+2, comm, status, mpierr) end if From 2e0f27dd986d1c9b1188626c6c19a79fc0371df7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 9 Dec 2021 17:30:46 +0100 Subject: [PATCH 774/909] fix uninitialized variable --- src/io_fesom_file.F90 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 39186ab39..04360ed0e 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -37,7 +37,6 @@ module io_fesom_file_module type(thread_type) thread logical :: thread_running = .false. integer :: comm - integer rank type(t_partit), pointer :: partit logical gather_and_write contains @@ -69,7 +68,7 @@ module io_fesom_file_module function is_iorank(this) result(x) class(fesom_file_type), intent(in) :: this logical x - x = (this%rank == this%iorank) + x = (this%partit%mype == this%iorank) end function From efcc054cb774a2122caa114a9af5bec63541cbef Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 9 Dec 2021 17:54:36 +0100 Subject: [PATCH 775/909] remove option to not transpose output, as the refactoring branch is already transposed (see 0f17f32e) these changes inadvertently got in during the merge of parallel_restart --- src/CMakeLists.txt | 1 - src/info_module.F90 | 5 ----- src/io_meandata.F90 | 13 ------------- 3 files changed, 19 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1f980db45..257100f67 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -17,7 +17,6 @@ if(ALEPH_CRAYMPICH_WORKAROUNDS) # MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC (but this one brings a huge speed gain on aleph) #add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # reading restarts is slow when doing it on parallel on aleph, switch it off for now add_compile_options(-DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS) - add_compile_options(-DTRANSPOSE_OUTPUT) endif() option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) diff --git a/src/info_module.F90 b/src/info_module.F90 index c6354dad5..21316f88e 100644 --- a/src/info_module.F90 +++ b/src/info_module.F90 @@ -97,11 +97,6 @@ subroutine print_definitions() #else print '(g0)', 'DISABLE_PARALLEL_RESTART_READ is OFF' #endif -#ifdef TRANSPOSE_OUTPUT - print '(g0)', 'TRANSPOSE_OUTPUT is ON' -#else - print '(g0)', 'TRANSPOSE_OUTPUT is OFF' -#endif #ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS print '(g0)', 'ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS is ON' #else diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index d476677de..193863981 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -643,12 +643,7 @@ subroutine create_new_file(entry, ice, dynamics, partit, mesh) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'axis', len_trim('T'), trim('T')), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'stored_direction', len_trim('increasing'), trim('increasing')), __LINE__) -#ifndef TRANSPOSE_OUTPUT - call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) -#else call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(entry%ndim:1:-1), entry%recID/), entry%varID), __LINE__) -#endif - call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'description', len_trim(entry%description), entry%description), __LINE__) @@ -760,11 +755,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__) elseif (entry%ndim==2) then -#ifndef TRANSPOSE_OUTPUT - call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r8, 1), __LINE__) -#else call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r8, 1), __LINE__) -#endif end if end if end do @@ -788,11 +779,7 @@ subroutine write_mean(entry, entry_index) if (entry%ndim==1) then call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__) elseif (entry%ndim==2) then -#ifndef TRANSPOSE_OUTPUT - call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r4, 1), __LINE__) -#else call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__) -#endif end if end if end do From 3e5ddba3463ffd49c480c012db5cdc40045cf4d0 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 10 Dec 2021 10:38:29 +0100 Subject: [PATCH 776/909] remove switch to read split and untransposed restarts --- src/info_module.F90 | 5 ----- src/io_fesom_file.F90 | 20 +------------------- 2 files changed, 1 insertion(+), 24 deletions(-) diff --git a/src/info_module.F90 b/src/info_module.F90 index 21316f88e..e3c96a5cb 100644 --- a/src/info_module.F90 +++ b/src/info_module.F90 @@ -87,11 +87,6 @@ subroutine print_definitions() #else print '(g0)', 'VERBOSE is OFF' #endif -#ifdef UNTRANSPOSE_RESTART - print '(g0)', 'UNTRANSPOSE_RESTART is ON' -#else - print '(g0)', 'UNTRANSPOSE_RESTART is OFF' -#endif #ifdef DISABLE_PARALLEL_RESTART_READ print '(g0)', 'DISABLE_PARALLEL_RESTART_READ is ON' #else diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 04360ed0e..5477277f1 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -197,12 +197,7 @@ subroutine read_and_scatter_variables(this) if(is_2d) then call this%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) else - ! z,nod,time -#ifdef UNTRANSPOSE_RESTART - call this%read_var(var%var_index, [lvl,1,last_rec_idx], [1,size(var%global_level_data),1], var%global_level_data) ! untransposed -#else call this%read_var(var%var_index, [1,lvl,last_rec_idx], [size(var%global_level_data),1,1], var%global_level_data) -#endif end if end if @@ -263,12 +258,7 @@ subroutine gather_and_write_variables(this) if(is_2d) then call this%write_var(var%var_index, [1,this%rec_cnt], [size(var%global_level_data),1], var%global_level_data) else - ! z,nod,time -#ifdef UNTRANSPOSE_RESTART - call this%write_var(var%var_index, [lvl,1,this%rec_cnt], [1,size(var%global_level_data),1], var%global_level_data) ! untransposed -#else call this%write_var(var%var_index, [1,lvl,this%rec_cnt], [size(var%global_level_data),1,1], var%global_level_data) -#endif end if end if end do @@ -396,11 +386,7 @@ subroutine specify_node_var_3d(this, name, longname, units, local_data) level_diminfo = obtain_diminfo(this, m_nod2d) depth_diminfo = obtain_diminfo(this, size(local_data, dim=1)) -#ifdef UNTRANSPOSE_RESTART - call specify_variable(this, name, [depth_diminfo%idx, level_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .false., longname, units) ! untransposed -#else - call specify_variable(this, name, [level_diminfo%idx, depth_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .false., longname, units) -#endif + call specify_variable(this, name, [level_diminfo%idx, depth_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .false., longname, units) end subroutine @@ -433,11 +419,7 @@ subroutine specify_elem_var_3d(this, name, longname, units, local_data) level_diminfo = obtain_diminfo(this, m_elem2d) depth_diminfo = obtain_diminfo(this, size(local_data, dim=1)) -#ifdef UNTRANSPOSE_RESTART - call specify_variable(this, name, [depth_diminfo%idx, level_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .true., longname, units) ! untransposed -#else call specify_variable(this, name, [level_diminfo%idx, depth_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .true., longname, units) -#endif end subroutine From 0f26b576c026b9e00f74c872282c6cc294ecddbd Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 17 Dec 2021 17:12:13 +0100 Subject: [PATCH 777/909] make topo provided elements instead of node --- src/fvom_init.F90 | 152 +++++++++++++++++++++++++++++-------- src/gen_modules_config.F90 | 7 +- src/oce_mesh.F90 | 144 +++++++++++++++++++++++++++++------ 3 files changed, 247 insertions(+), 56 deletions(-) diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 93944692e..6a6e8da7e 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -233,7 +233,11 @@ subroutine read_mesh_cavity(mesh) !___________________________________________________________________________ ! read depth of cavity-ocean boundary - fname = trim(meshpath)//'cavity_depth.out' + if (use_cavityonelem) then + fname = trim(meshpath)//'cavity_depth@elem.out' + else + fname = trim(meshpath)//'cavity_depth@node.out' + end if file_exist=.False. inquire(file=trim(fname),exist=file_exist) if (file_exist) then @@ -652,6 +656,7 @@ subroutine find_levels(mesh) integer :: elem1, j, n, nneighb, q, node, i, nz, auxi integer :: count_iter, count_neighb_open, exit_flag, fileID=111 real(kind=WP) :: x, dmean + logical :: file_exist integer :: max_iter=1000 character(MAX_PATH) :: file_name type(t_mesh), intent(inout), target :: mesh @@ -670,31 +675,118 @@ subroutine find_levels(mesh) end if depth => mesh%depth !required after the allocation, otherwise the pointer remains undefined + !______________________________________________________________________________ + ! read depth from aux3d.out + if (trim(use_depthfile)=='aux3d') then + ! check if aux3d.out file does exist + file_exist=.False. + file_name=trim(meshpath)//'aux3d.out' + inquire(file=trim(file_name),exist=file_exist) + !_______________________________________________________________________ + if (file_exist) then + write(*," (A, A)") ' read file:',trim(file_name) + !___________________________________________________________________ + ! load fesom2.0 aux3d.out file + open(fileID, file=file_name) + + ! read the number of levels + read(fileID,*) nl + allocate(mesh%zbar(nl)) ! their standard depths + + ! read full depth levels + zbar => mesh%zbar !required after the allocation, otherwise the pointer remains undefined + read(fileID,*) zbar + if(zbar(2)>0) zbar=-zbar ! zbar is negative + + ! compute mid depth levels + allocate(mesh%Z(nl-1)) + Z => mesh%Z !required after the allocation, otherwise the pointer remains undefined + Z=zbar(1:nl-1)+zbar(2:nl) ! mid-depths of cells + Z=0.5_WP*Z + else + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use aux3d.out file to define your depth, but ' + write(*,*) ' the file seems not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" ' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + stop + end if !___________________________________________________________________________ - ! load fesom2.0 aux3d.out file - file_name=trim(meshpath)//'aux3d.out' - open(fileID, file=file_name) - - ! read the number of levels - read(fileID,*) nl - allocate(mesh%zbar(nl)) ! their standard depths - - ! read full depth levels - zbar => mesh%zbar !required after the allocation, otherwise the pointer remains undefined - read(fileID,*) zbar - if(zbar(2)>0) zbar=-zbar ! zbar is negative - - ! compute mid depth levels - allocate(mesh%Z(nl-1)) - Z => mesh%Z !required after the allocation, otherwise the pointer remains undefined - Z=zbar(1:nl-1)+zbar(2:nl) ! mid-depths of cells - Z=0.5_WP*Z + ! read depth from depth@node.out or depth@elem.out + elseif (trim(use_depthfile)=='depth@') then + !_______________________________________________________________________ + ! load file depth_zlev.out --> contains number of model levels and full depth + ! levels + file_exist=.False. + file_name=trim(meshpath)//'depth_zlev.out' + inquire(file=trim(file_name),exist=file_exist) + if (file_exist) then + write(*," (A, A)") ' read file:',trim(file_name) + !___________________________________________________________________ + ! load fesom2.0 aux3d.out file + open(fileID, file=file_name) + + ! read the number of levels + read(fileID,*) nl + allocate(mesh%zbar(nl)) ! their standard depths + + ! read full depth levels + zbar => mesh%zbar !required after the allocation, otherwise the pointer remains undefined + read(fileID,*) zbar + if(zbar(2)>0) zbar=-zbar ! zbar is negative + + ! compute mid depth levels + allocate(mesh%Z(nl-1)) + Z => mesh%Z !required after the allocation, otherwise the pointer remains undefined + Z=zbar(1:nl-1)+zbar(2:nl) ! mid-depths of cells + Z=0.5_WP*Z + + close(fileID) + else + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use depth@elem.out or depth@node.out file, therefore' + write(*,*) ' you also need the file depth_zlev.out which contains the model ' + write(*,*) ' number of layers and the depth of model levels. This file seems ' + write(*,*) ' not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" and your meshfolder' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + stop + endif + + !_______________________________________________________________________ + ! load file depth@elem.out or depth@node.out contains topography either at + ! nodes or elements + if (use_depthonelem) then + file_name=trim(meshpath)//'depth@elem.out' + else + file_name=trim(meshpath)//'depth@node.out' + end if + inquire(file=trim(file_name),exist=file_exist) + if (file_exist) then + write(*," (A, A)") ' read file:',trim(file_name) + open(fileID, file=file_name) + else + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use depth@elem.out or depth@node.out file to ' + write(*,*) ' define your depth, but the file seems not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" and your meshfolder ' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + stop + end if + end if + !___________________________________________________________________________ ! read topography from file auxi = nod2d if (use_depthonelem) auxi = elem2d - write(*,*) ' use_depthonelem = ',use_depthonelem - write(*,*) ' auxi =',auxi +! write(*,*) ' use_depthonelem = ',use_depthonelem +! write(*,*) ' auxi =',auxi DO n = 1, auxi read(fileID,*) x if (x>0) x=-x @@ -702,7 +794,6 @@ subroutine find_levels(mesh) depth(n)=x END DO close(fileID) - if(depth(2)>0) depth=-depth ! depth is negative !___________________________________________________________________________ @@ -760,6 +851,7 @@ subroutine find_levels(mesh) write(fileID,*) nlevels(n) end do close(fileID) + !___________________________________________________________________________ ! check for isolated cells (cells with at least two boundary faces or three ! boundary vertices) and eliminate them --> FESOM2.0 doesn't like these kind @@ -850,14 +942,13 @@ subroutine find_levels(mesh) !___________________________________________________________________________ ! write vertical level indices into file - !_______________________________________________________________________ file_name=trim(meshpath)//'elvls.out' open(fileID, file=file_name) do n=1,elem2D write(fileID,*) nlevels(n) end do close(fileID) - !_______________________________________________________________________ + file_name=trim(meshpath)//'nlvls.out' open(fileID, file=file_name) do n=1,nod2D @@ -893,11 +984,9 @@ subroutine find_levels_cavity(mesh) type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" !___________________________________________________________________________ - if (mype==0) then - print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;1m' //' -->: compute elem, vertice cavity depth index '//achar(27)//'[0m' - end if - + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute elem, vertice cavity depth index '//achar(27)//'[0m' + !___________________________________________________________________________ allocate(mesh%ulevels(elem2D)) ulevels => mesh%ulevels @@ -1081,7 +1170,7 @@ subroutine find_levels_cavity(mesh) do node=1,nod2D !___________________________________________________________________ if (ulevels_nod2D(node)>=nlevels_nod2D(node)) then - write(*,*) ' -[check]->: vertice cavity depth deeper or equal bottom depth, node=', node + write(*,*) ' -[check]->: vertice cavity depth deeper or equal bottom depth, node=', node exit_flag2 = 0 end if @@ -1094,7 +1183,7 @@ subroutine find_levels_cavity(mesh) do elem=1,elem2D if (ulevels(elem)< maxval(ulevels_nod2D(elem2D_nodes(:,elem))) ) then - write(*,*) ' -[check]->: found elem cavity shallower than its valid maximum cavity vertice depths, elem=', elem2d + write(*,*) ' -[check]->: found elem cavity shallower than its valid maximum cavity vertice depths, elem=', elem2d exit_flag2 = 0 end if end do ! --> do elem=1,elem2D @@ -1198,6 +1287,7 @@ subroutine find_levels_cavity(mesh) write(20,*) ulevels(elem) enddo close(20) + ! write out vertice cavity-ocean boundary level + yes/no cavity flag file_name=trim(meshpath)//'cavity_nlvls.out' open(20, file=file_name) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index 373524b5a..041da23e3 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -81,13 +81,16 @@ module g_config ! Set to zeros to work with ! geographical coordinates integer :: thers_zbar_lev=5 ! minimum number of levels to be - character(len=5) :: which_depth_n2e='mean' + character(len=5) :: which_depth_n2e='mean' + logical :: use_depthonelem =.false. + character(len=10) :: use_depthfile='aux3d' ! 'aux3d', 'depth@' logical :: use_cavityonelem=.false. + namelist /geometry/ cartesian, fplane, & cyclic_length, rotated_grid, force_rotation, & alphaEuler, betaEuler, gammaEuler, & - which_depth_n2e, use_depthonelem, use_cavityonelem + which_depth_n2e, use_depthonelem, use_cavityonelem, use_depthfile !_____________________________________________________________________________ ! *** fleap_year *** diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 6a1955bee..e865ff14d 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -199,7 +199,8 @@ SUBROUTINE read_mesh(partit, mesh) character(len=MAX_PATH) :: file_name character(len=MAX_PATH) :: dist_mesh_dir integer :: flag_wrongaux3d=0 - integer :: ierror ! return error code + integer :: ierror ! return error code + logical :: file_exist integer, allocatable, dimension(:) :: mapping integer, allocatable, dimension(:,:) :: ibuff real(kind=WP), allocatable, dimension(:,:) :: rbuff @@ -516,24 +517,117 @@ SUBROUTINE read_mesh(partit, mesh) ! read depth data !============================== ! 0 proc reads header of aux3d.out and broadcasts it between procs - if (mype==0) then !open the file for reading on 0 proc + ! + ! + !______________________________________________________________________________ + ! read depth from aux3d.out + if (trim(use_depthfile)=='aux3d') then + ! check if aux3d.out file does exist + file_exist=.False. file_name=trim(meshpath)//'aux3d.out' - open(fileID, file=file_name) - read(fileID,*) mesh%nl ! the number of levels - end if - call MPI_BCast(mesh%nl, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (mesh%nl < 3) then - write(*,*) '!!!Number of levels is less than 3, model will stop!!!' - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - stop - end if - allocate(mesh%zbar(mesh%nl)) ! allocate the array for storing the standard depths - if (mype==0) read(fileID,*) mesh%zbar - call MPI_BCast(mesh%zbar, mesh%nl, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - if(mesh%zbar(2)>0) mesh%zbar=-mesh%zbar ! zbar is negative - allocate(mesh%Z(mesh%nl-1)) - mesh%Z=mesh%zbar(1:mesh%nl-1)+mesh%zbar(2:mesh%nl) ! mid-depths of cells - mesh%Z=0.5_WP*mesh%Z + inquire(file=trim(file_name),exist=file_exist) + !___________________________________________________________________________ + if (file_exist) then + if (mype==0) then !open the file for reading on 0 proc + open(fileID, file=file_name) + read(fileID,*) mesh%nl ! the number of levels + end if + call MPI_BCast(mesh%nl, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + if (mesh%nl < 3) then + write(*,*) '!!!Number of levels is less than 3, model will stop!!!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + allocate(mesh%zbar(mesh%nl)) ! allocate the array for storing the standard depths + if (mype==0) read(fileID,*) mesh%zbar + call MPI_BCast(mesh%zbar, mesh%nl, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + if(mesh%zbar(2)>0) mesh%zbar=-mesh%zbar ! zbar is negative + allocate(mesh%Z(mesh%nl-1)) + mesh%Z=mesh%zbar(1:mesh%nl-1)+mesh%zbar(2:mesh%nl) ! mid-depths of cells + mesh%Z=0.5_WP*mesh%Z + !___________________________________________________________________________ + else + if (mype==0) then + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use aux3d.out file to define your depth, but ' + write(*,*) ' the file seems not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" ' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + end if + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) + end if + + !______________________________________________________________________________ + ! read depth from depth@node.out or depth@elem.out + elseif (trim(use_depthfile)=='depth@') then + !___________________________________________________________________________ + ! load file depth_zlev.out --> contains number of model levels and full depth + ! levels + file_exist=.False. + file_name=trim(meshpath)//'depth_zlev.out' + inquire(file=trim(file_name),exist=file_exist) + if (file_exist) then + if (mype==0) then !open the file for reading on 0 proc + open(fileID, file=file_name) + read(fileID,*) mesh%nl ! the number of levels + end if + call MPI_BCast(mesh%nl, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + if (mesh%nl < 3) then + write(*,*) '!!!Number of levels is less than 3, model will stop!!!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + allocate(mesh%zbar(mesh%nl)) ! allocate the array for storing the standard depths + if (mype==0) read(fileID,*) mesh%zbar + call MPI_BCast(mesh%zbar, mesh%nl, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + if(mesh%zbar(2)>0) mesh%zbar=-mesh%zbar ! zbar is negative + allocate(mesh%Z(mesh%nl-1)) + mesh%Z=mesh%zbar(1:mesh%nl-1)+mesh%zbar(2:mesh%nl) ! mid-depths of cells + mesh%Z=0.5_WP*mesh%Z + if (mype==0) close(fileID) + !___________________________________________________________________________ + else + if (mype==0) then + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use depth@elem.out or depth@node.out file, therefore' + write(*,*) ' you also need the file depth_zlev.out which contains the model ' + write(*,*) ' number of layers and the depth of model levels. This file seems ' + write(*,*) ' not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" and your meshfolder' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) + end if + end if + + !___________________________________________________________________________ + ! load file depth@elem.out or depth@node.out contains topography either at + ! nodes or elements + file_exist=.False. + if (use_depthonelem) then + file_name=trim(meshpath)//'depth@elem.out' + else + file_name=trim(meshpath)//'depth@node.out' + end if + inquire(file=trim(file_name),exist=file_exist) + if (file_exist) then + if (mype==0) open(fileID, file=file_name) + else + if (mype==0) then + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use depth@elem.out or depth@node.out file to ' + write(*,*) ' define your depth, but the file seems not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" and your meshfolder ' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) + end if + end if + end if ! 0 proc reads the data in chunks and distributes it between other procs !______________________________________________________________________________ @@ -554,7 +648,7 @@ SUBROUTINE read_mesh(partit, mesh) mapping(iofs)=n end if end do - + k=min(chunk_size, mesh%elem2D-nchunk*chunk_size) if (mype==0) then do n=1, k @@ -566,7 +660,7 @@ SUBROUTINE read_mesh(partit, mesh) if ( flag_wrongaux3d==0 .and. any(abs(rbuff(1:k,1))>11000.0_WP) ) flag_wrongaux3d=1 end if call MPI_BCast(rbuff(1:k,1), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - + do n=1, k x=rbuff(n,1) if (x>0) x=-x !deps must be negative! @@ -607,7 +701,7 @@ SUBROUTINE read_mesh(partit, mesh) mapping(iofs)=n end if end do - + k=min(chunk_size, mesh%nod2D-nchunk*chunk_size) if (mype==0) then do n=1, k @@ -619,7 +713,7 @@ SUBROUTINE read_mesh(partit, mesh) if ( flag_wrongaux3d==0 .and. any(abs(rbuff(1:k,1))>11000.0_WP) ) flag_wrongaux3d=1 end if call MPI_BCast(rbuff(1:k,1), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - + do n=1, k x=rbuff(n,1) if (x>0) x=-x !deps must be negative! @@ -1296,7 +1390,11 @@ subroutine find_levels_cavity(partit, mesh) !___________________________________________________________________________ ! Part IV: reading cavity depth at nodes if (mype==0) then - file_name=trim(meshpath)//'cavity_depth.out' + if (use_cavityonelem) then + file_name = trim(meshpath)//'cavity_depth@elem.out' + else + file_name = trim(meshpath)//'cavity_depth@node.out' + end if file_exist=.False. inquire(file=trim(file_name),exist=file_exist) if (file_exist) then From 785a7d7b85c991f36a934dee51e7878096ff8947 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 17 Dec 2021 17:12:54 +0100 Subject: [PATCH 778/909] comment some screen output --- src/gen_comm.F90 | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/gen_comm.F90 b/src/gen_comm.F90 index 26cc0c613..26d318008 100755 --- a/src/gen_comm.F90 +++ b/src/gen_comm.F90 @@ -23,11 +23,11 @@ subroutine communication_nodn(partit, mesh) ! Assume we have 2D partitioning vector in part. Find communication rules ! Reduce allocation: find all neighboring PE nd_count = count(part(1:nod2d) == mype) -write(*,*) nod2d -write(*,*) MAX_LAENDERECK -write(*,*) nd_count -write(*,*) allocated(partit%myList_nod2D) -write(*,*) partit%mype +! write(*,*) nod2d +! write(*,*) MAX_LAENDERECK +! write(*,*) nd_count +! write(*,*) allocated(partit%myList_nod2D) +! write(*,*) partit%mype allocate(recv_from_pe(nod2d), send_to_pes(MAX_LAENDERECK,nd_count), & partit%myList_nod2D(nd_count), STAT=IERR) if (IERR /= 0) then From 664552c8f93f8af4bd5271d69a594606ee938892 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 22 Dec 2021 11:04:45 +0100 Subject: [PATCH 779/909] exchange lists have to be initialized from the main thread, this error has been introduced while merging to refactoring branch (see also commits bbeec8d9 and 90d77f79 in the parallel_restart branch) --- src/io_fesom_file.F90 | 2 ++ src/io_gather.F90 | 21 +++++++++++++++------ src/io_meandata.F90 | 1 + 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 5477277f1..25cc211d9 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -118,6 +118,8 @@ subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl, partit) ! todo: would li integer err integer provided_mpi_thread_support_level + call init_io_gather() + ! get hold of our mesh data for later use (assume the mesh instance will not change) m_nod2d = mesh_nod2d m_elem2d = mesh_elem2d diff --git a/src/io_gather.F90 b/src/io_gather.F90 index 19822d5f7..46079bf92 100644 --- a/src/io_gather.F90 +++ b/src/io_gather.F90 @@ -2,7 +2,7 @@ module io_gather_module USE MOD_PARTIT USE MOD_PARSUP implicit none - public gather_nod2D, gather_real4_nod2D, gather_elem2D, gather_real4_elem2D + public init_io_gather, gather_nod2D, gather_real4_nod2D, gather_elem2D, gather_real4_elem2D private logical, save :: nod2D_lists_initialized = .false. @@ -11,11 +11,20 @@ module io_gather_module logical, save :: elem2D_lists_initialized = .false. integer, save :: rank0Dim_elem2D - integer, save, allocatable, dimension(:) :: rank0List_elem2D + integer, save, allocatable, dimension(:) :: rank0List_elem2D contains + subroutine init_io_gather(partit) + type(t_partit), intent(inout), target :: partit + ! EO parameters + + if(.not. nod2D_lists_initialized) call init_nod2D_lists(partit) + if(.not. elem2D_lists_initialized) call init_elem2D_lists(partit) + end subroutine + + subroutine init_nod2D_lists(partit) implicit none type(t_partit), intent(inout), target :: partit @@ -109,7 +118,7 @@ subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) #include "associate_part_def.h" #include "associate_part_ass.h" - if(.not. nod2D_lists_initialized) call init_nod2D_lists(partit) + if(.not. nod2D_lists_initialized) stop "io_gather_module has not been initialized" include "io_gather_nod.inc" end subroutine @@ -138,7 +147,7 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, part #include "associate_part_def.h" #include "associate_part_ass.h" - if(.not. nod2D_lists_initialized) call init_nod2D_lists(partit) + if(.not. nod2D_lists_initialized) stop "io_gather_module has not been initialized" include "io_gather_nod.inc" end subroutine @@ -167,7 +176,7 @@ subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) #include "associate_part_def.h" #include "associate_part_ass.h" - if(.not. elem2D_lists_initialized) call init_elem2D_lists(partit) + if(.not. elem2D_lists_initialized) stop "io_gather_module has not been initialized" include "io_gather_elem.inc" end subroutine @@ -196,7 +205,7 @@ subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, par #include "associate_part_def.h" #include "associate_part_ass.h" - if(.not. elem2D_lists_initialized) call init_elem2D_lists(partit) + if(.not. elem2D_lists_initialized) stop "io_gather_module has not been initialized" include "io_gather_elem.inc" end subroutine diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 193863981..4b7ce30f3 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -872,6 +872,7 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh) ctime=timeold+(dayold-1.)*86400 if (lfirst) then call ini_mean_io(ice, dynamics, tracers, partit, mesh) + call init_io_gather(partit) #if defined (__icepack) call init_io_icepack(mesh) !icapack has its copy of p_partit => partit #endif From c8a1526e0a12fdd253564a8a9bbdfb93e17c8b34 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 3 Jan 2022 18:24:06 +0100 Subject: [PATCH 780/909] add missing changes from commit 664552 --- src/io_fesom_file.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 25cc211d9..88e36fa80 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -111,14 +111,14 @@ subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl, partit) ! todo: would li integer mesh_nod2d integer mesh_elem2d integer mesh_nl - type(t_partit), target, intent(in) :: partit + type(t_partit), target :: partit ! EO parameters type(fesom_file_type_ptr), allocatable :: tmparr(:) logical async_netcdf_allowed integer err integer provided_mpi_thread_support_level - call init_io_gather() + call init_io_gather(partit) ! get hold of our mesh data for later use (assume the mesh instance will not change) m_nod2d = mesh_nod2d From 0bbb80c76f393dc5245285197a915ca380660a44 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 5 Jan 2022 12:22:01 +0100 Subject: [PATCH 781/909] avoid error with pgi compiler complaining NOPASS was not declared for 'reset_state' procedure --- src/mpi_topology_module.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/mpi_topology_module.F90 b/src/mpi_topology_module.F90 index 8bd876566..ea69ee737 100644 --- a/src/mpi_topology_module.F90 +++ b/src/mpi_topology_module.F90 @@ -25,7 +25,9 @@ module mpi_topology_module type :: mpi_topology_type contains - procedure, nopass :: next_host_head_rank, set_hostname_strategy, reset_state + procedure, nopass :: next_host_head_rank + procedure, nopass :: set_hostname_strategy + procedure, nopass :: reset_state end type type(mpi_topology_type) mpi_topology From 8a7a47d9ef6e2c5045dcd5eadbcd0fdd9f2d6f24 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 5 Jan 2022 14:15:48 +0100 Subject: [PATCH 782/909] some fixes to compule with fortran PGI --- src/MOD_DYN.F90 | 3 +++ src/MOD_MESH.F90 | 3 ++- src/MOD_PARTIT.F90 | 2 ++ src/MOD_TRACER.F90 | 3 +++ src/io_netcdf_file_module.F90 | 40 +++++++++++++++-------------------- 5 files changed, 27 insertions(+), 24 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index f338ae443..cc3fe80c9 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -27,6 +27,7 @@ MODULE MOD_DYN !!! real(kind=WP), allocatable :: rr(:), zz(:), pp(:), App(:) contains + private procedure WRITE_T_SOLVERINFO procedure READ_T_SOLVERINFO generic :: write(unformatted) => WRITE_T_SOLVERINFO @@ -42,6 +43,7 @@ MODULE MOD_DYN ! easy backscatter contribution real(kind=WP), allocatable, dimension(:,:) :: u_b, v_b contains + private procedure WRITE_T_DYN_WORK procedure READ_T_DYN_WORK generic :: write(unformatted) => WRITE_T_DYN_WORK @@ -108,6 +110,7 @@ MODULE MOD_DYN !___________________________________________________________________________ contains + private procedure WRITE_T_DYN procedure READ_T_DYN generic :: write(unformatted) => WRITE_T_DYN diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index 808de3564..f2cdc1e9f 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -131,7 +131,8 @@ MODULE MOD_MESH character(:), allocatable :: representative_checksum contains - procedure write_t_mesh + private + procedure write_t_mesh procedure read_t_mesh generic :: write(unformatted) => write_t_mesh generic :: read(unformatted) => read_t_mesh diff --git a/src/MOD_PARTIT.F90 b/src/MOD_PARTIT.F90 index 5d6b917c3..5c16ffcd5 100644 --- a/src/MOD_PARTIT.F90 +++ b/src/MOD_PARTIT.F90 @@ -28,6 +28,7 @@ module MOD_PARTIT integer :: nreq ! number of requests for MPI_Wait ! (to combine halo exchange of several fields) contains + private procedure WRITE_T_COM_STRUCT procedure READ_T_COM_STRUCT generic :: write(unformatted) => WRITE_T_COM_STRUCT @@ -82,6 +83,7 @@ module MOD_PARTIT integer(omp_lock_kind), allocatable :: plock(:) #endif contains + private procedure WRITE_T_PARTIT procedure READ_T_PARTIT generic :: write(unformatted) => WRITE_T_PARTIT diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index 242ee483d..66f1d812e 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -18,6 +18,7 @@ MODULE MOD_TRACER integer :: ID contains + private procedure WRITE_T_TRACER_DATA procedure READ_T_TRACER_DATA generic :: write(unformatted) => WRITE_T_TRACER_DATA @@ -45,6 +46,7 @@ MODULE MOD_TRACER real(kind=WP),allocatable,dimension(:,:,:) :: edge_up_dn_grad contains + private procedure WRITE_T_TRACER_WORK procedure READ_T_TRACER_WORK generic :: write(unformatted) => WRITE_T_TRACER_WORK @@ -75,6 +77,7 @@ MODULE MOD_TRACER logical :: i_vert_diff = .true. contains +private procedure WRITE_T_TRACER procedure READ_T_TRACER generic :: write(unformatted) => WRITE_T_TRACER diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 2ee3936cb..2ac8111f3 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -4,6 +4,23 @@ module io_netcdf_file_module public netcdf_file_type private + type dim_type + character(:), allocatable :: name + integer len + integer ncid + end type + + type att_type_wrapper ! work around Fortran not being able to have polymorphic types in the same array + class(att_type), allocatable :: it + end type + + type var_type ! todo: use variable type from io_netcdf_module here + character(:), allocatable :: name + integer, allocatable :: dim_indices(:) + integer datatype + type(att_type_wrapper), allocatable :: atts(:) + integer ncid + end type type netcdf_file_type private @@ -27,29 +44,6 @@ module io_netcdf_file_module procedure, private :: add_global_att_text, add_global_att_int end type - - type dim_type - character(:), allocatable :: name - integer len - - integer ncid - end type - - - type var_type ! todo: use variable type from io_netcdf_module here - character(:), allocatable :: name - integer, allocatable :: dim_indices(:) - integer datatype - type(att_type_wrapper), allocatable :: atts(:) - - integer ncid - end type - - - type att_type_wrapper ! work around Fortran not being able to have polymorphic types in the same array - class(att_type), allocatable :: it - end type - contains From 11583a2a75ab50dd399bb40b11851ee8768373ae Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 6 Jan 2022 18:25:21 +0100 Subject: [PATCH 783/909] use a newer Nvidia HPC SDK (NVHPC) to be able to compile assumed shape and assumed rank parameters --- env/juwels/shell | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/env/juwels/shell b/env/juwels/shell index 89cc14b62..7be33060d 100644 --- a/env/juwels/shell +++ b/env/juwels/shell @@ -1,15 +1,14 @@ ########## module --force purge module use /gpfs/software/juwels/otherstages -module load Stages/2020 -module load Intel/2020.2.254-GCC-9.3.0 -module load ParaStationMPI/5.4.7-1 -module load CMake/3.18.0 -module load imkl/2020.2.254 +module load Stages/2022 +module load NVHPC/21.11 # older versions of pgf90/nvfortran can not compile mixed assumed shape and assumed rank parameters +module load ParaStationMPI/5.5.0-1 +module load CMake/3.21.1 +#module load imkl/2021.2.0 module load netCDF-Fortran/4.5.3 -module load netCDF/4.7.4 -module load Perl/5.32.0 -module load netCDF +module load netCDF/4.8.1 +module load Perl/5.34.0 export LC_ALL=en_US.UTF-8 export TMPDIR=/tmp From acf25dfbb1300672d04e25c1543582ced1c461c8 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 11 Jan 2022 10:24:32 +0100 Subject: [PATCH 784/909] making FESOM compile with NVFORTRAN 22.1 --- src/io_netcdf_file_module.F90 | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 2ac8111f3..ceb15e111 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -156,7 +156,13 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) call move_alloc(tmparr, this%vars) varindex = size(this%vars) - this%vars(varindex) = var_type(name=name, dim_indices=dim_indices, datatype=netcdf_datatype, atts=empty_atts, ncid=-1) +! this%vars(varindex) = var_type(name=name, dim_indices=dim_indices, datatype=netcdf_datatype, atts=empty_atts, ncid=-1) +! NVIDIA 22.1 compiler didnt like the line above, hence we unfold it unelegantly: + this%vars(varindex)%name = name + this%vars(varindex)%dim_indices= dim_indices + this%vars(varindex)%datatype = netcdf_datatype + this%vars(varindex)%atts = empty_atts + this%vars(varindex)%ncid = -1 end function From 314d9ada69b0ab614fa0f6120154dcb3060ef6b0 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 11 Jan 2022 10:42:58 +0100 Subject: [PATCH 785/909] making NVFORTRAAN 22.1 happy following advices from ESiWACE project --- src/associate_mesh_ass.h | 59 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/src/associate_mesh_ass.h b/src/associate_mesh_ass.h index db5b26d39..72a4d1246 100644 --- a/src/associate_mesh_ass.h +++ b/src/associate_mesh_ass.h @@ -6,6 +6,64 @@ ocean_area => mesh%ocean_area nl => mesh%nl nn_size => mesh%nn_size ocean_areawithcav => mesh%ocean_areawithcav +#ifdef __PGI +coord_nod2D => mesh%coord_nod2D (1:2,1:myDim_nod2D+eDim_nod2D) +geo_coord_nod2D => mesh%geo_coord_nod2D (1:2,1:myDim_nod2D+eDim_nod2D) +elem2D_nodes => mesh%elem2D_nodes (1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) +edges => mesh%edges (1:2,1:myDim_edge2D+eDim_edge2D) +edge_tri => mesh%edge_tri (1:2,1:myDim_edge2D+eDim_edge2D) +elem_edges => mesh%elem_edges (1:3,1:myDim_elem2D) +elem_area => mesh%elem_area (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) +edge_dxdy => mesh%edge_dxdy (1:2,1:myDim_edge2D+eDim_edge2D) +edge_cross_dxdy => mesh%edge_cross_dxdy (1:4,1:myDim_edge2D+eDim_edge2D) +elem_cos => mesh%elem_cos (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) +metric_factor => mesh%metric_factor (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) +elem_neighbors => mesh%elem_neighbors (1:3,1:myDim_elem2D) +nod_in_elem2D => mesh%nod_in_elem2D !(maxval(rmax),myDim_nod2D+eDim_nod2D) +x_corners => mesh%x_corners !(myDim_nod2D, maxval(rmax)) +y_corners => mesh%y_corners !(myDim_nod2D, maxval(rmax)) +nod_in_elem2D_num => mesh%nod_in_elem2D_num (1:myDim_nod2D+eDim_nod2D) +depth => mesh%depth (1:myDim_nod2D+eDim_nod2D) +gradient_vec => mesh%gradient_vec (1:6,1:myDim_elem2D) +gradient_sca => mesh%gradient_sca (1:6,1:myDim_elem2D) +bc_index_nod2D => mesh%bc_index_nod2D (1:myDim_nod2D+eDim_nod2D) +zbar => mesh%zbar (1:mesh%nl) +Z => mesh%Z (1:mesh%nl-1) +elem_depth => mesh%elem_depth! never used, not even allocated +nlevels => mesh%nlevels (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) +nlevels_nod2D => mesh%nlevels_nod2D (1:myDim_nod2D+eDim_nod2D) +nlevels_nod2D_min => mesh%nlevels_nod2D_min (1:myDim_nod2D+eDim_nod2D) +area => mesh%area (1:mesh%nl,1:myDim_nod2d+eDim_nod2D) +areasvol => mesh%areasvol (1:mesh%nl,1:myDim_nod2d+eDim_nod2D) +area_inv => mesh%area_inv (1:mesh%nl,1:myDim_nod2d+eDim_nod2D) +areasvol_inv => mesh%areasvol_inv (1:mesh%nl,1:myDim_nod2d+eDim_nod2D) +mesh_resolution => mesh%mesh_resolution (1:myDim_nod2d+eDim_nod2D) +ssh_stiff => mesh%ssh_stiff +lump2d_north => mesh%lump2d_north (1:myDim_nod2d) +lump2d_south => mesh%lump2d_south (1:myDim_nod2d) +cavity_flag_n => mesh%cavity_flag_n (1:myDim_nod2D+eDim_nod2D) +cavity_flag_e => mesh%cavity_flag_e (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) +cavity_depth => mesh%cavity_depth (1:myDim_nod2D+eDim_nod2D) +ulevels => mesh%ulevels (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) +ulevels_nod2D => mesh%ulevels_nod2D (1:myDim_nod2D+eDim_nod2D) +ulevels_nod2D_max => mesh%ulevels_nod2D_max (1:myDim_nod2D+eDim_nod2D) +nn_num => mesh%nn_num (1:myDim_nod2D) +nn_pos => mesh%nn_pos (1:mesh%nn_size, 1:myDim_nod2D) +hnode => mesh%hnode (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) +hnode_new => mesh%hnode_new (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) +zbar_3d_n => mesh%zbar_3d_n (1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) +Z_3d_n => mesh%Z_3d_n (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) +helem => mesh%helem (1:mesh%nl-1, 1:myDim_elem2D) +bottom_elem_thickness => mesh%bottom_elem_thickness (1:myDim_elem2D) +bottom_node_thickness => mesh%bottom_node_thickness (1:myDim_nod2D+eDim_nod2D) +dhe => mesh%dhe (1:myDim_elem2D) +hbar => mesh%hbar (1:myDim_nod2D+eDim_nod2D) +hbar_old => mesh%hbar_old (1:myDim_nod2D+eDim_nod2D) +zbar_n_bot => mesh%zbar_n_bot (1:myDim_nod2D+eDim_nod2D) +zbar_e_bot => mesh%zbar_e_bot (1:myDim_elem2D+eDim_elem2D) +zbar_n_srf => mesh%zbar_n_srf (1:myDim_nod2D+eDim_nod2D) +zbar_e_srf => mesh%zbar_e_srf (1:myDim_elem2D+eDim_elem2D) +#else coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes @@ -66,5 +124,6 @@ zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf +#endif From 5625a1dcfe49320e745d2749d211168d0e88218e Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 11 Jan 2022 11:08:41 +0100 Subject: [PATCH 786/909] making NVFORTRAN 22.1 happy again --- src/associate_part_ass.h | 52 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 47 insertions(+), 5 deletions(-) diff --git a/src/associate_part_ass.h b/src/associate_part_ass.h index 7ded6750f..da8b96b06 100644 --- a/src/associate_part_ass.h +++ b/src/associate_part_ass.h @@ -15,14 +15,57 @@ MPIERR => partit%MPIERR npes => partit%npes mype => partit%mype maxPEnum => partit%maxPEnum +part => partit%part +lb=lbound(partit%s_mpitype_elem3D, 2) +ub=ubound(partit%s_mpitype_elem3D, 2) + +#ifdef __PGI +myList_nod2D => partit%myList_nod2D(1:myDim_nod2D +eDim_nod2D) +myList_elem2D => partit%myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) +myList_edge2D => partit%myList_edge2D(1:myDim_edge2D+eDim_edge2D) + +if (allocated(partit%remPtr_nod2D)) then + remPtr_nod2D => partit%remPtr_nod2D(1:npes) + remList_nod2D => partit%remList_nod2D(1:remPtr_nod2D(npes)) +end if + +if (allocated(partit%remPtr_elem2D)) then + remPtr_elem2D => partit%remPtr_elem2D(1:npes) + remList_elem2D => partit%remList_elem2D(1:remPtr_elem2D(npes)) +end if + +s_mpitype_elem2D => partit%s_mpitype_elem2D(1:com_elem2D%sPEnum, 1:4) +r_mpitype_elem2D => partit%r_mpitype_elem2D(1:com_elem2D%rPEnum, 1:4) + +s_mpitype_elem2D_full_i => partit%s_mpitype_elem2D_full_i(1:com_elem2D_full%sPEnum) +r_mpitype_elem2D_full_i => partit%r_mpitype_elem2D_full_i(1:com_elem2D_full%rPEnum) + +s_mpitype_elem2D_full => partit%s_mpitype_elem2D_full(1:com_elem2D_full%sPEnum, 1:4) +r_mpitype_elem2D_full => partit%r_mpitype_elem2D_full(1:com_elem2D_full%rPEnum, 1:4) + +s_mpitype_elem3D => partit%s_mpitype_elem3D(1:com_elem2D%sPEnum, lb:ub, 1:4) +r_mpitype_elem3D => partit%r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) + +s_mpitype_elem3D_full => partit%s_mpitype_elem3D_full(1:com_elem2D_full%sPEnum, lb:ub, 1:4) +r_mpitype_elem3D_full => partit%r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) + +r_mpitype_elem3D => partit%r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) +r_mpitype_elem3D_full => partit%r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) + +s_mpitype_nod2D => partit%s_mpitype_nod2D(1:com_nod2D%sPEnum) +r_mpitype_nod2D => partit%r_mpitype_nod2D(1:com_nod2D%rPEnum) + +s_mpitype_nod2D_i => partit%s_mpitype_nod2D_i(1:com_nod2D%sPEnum) +r_mpitype_nod2D_i => partit%r_mpitype_nod2D_i(1:com_nod2D%rPEnum) + +s_mpitype_nod3D => partit%s_mpitype_nod3D(1:com_nod2D%sPEnum, lb:ub, 1:3) +r_mpitype_nod3D => partit%r_mpitype_nod3D(1:com_nod2D%rPEnum, lb:ub, 1:3) +#else myList_nod2D (1:myDim_nod2D +eDim_nod2D) => partit%myList_nod2D myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => partit%myList_elem2D myList_edge2D(1:myDim_edge2D+eDim_edge2D) => partit%myList_edge2D -lb=lbound(partit%s_mpitype_elem3D, 2) -ub=ubound(partit%s_mpitype_elem3D, 2) - if (allocated(partit%remPtr_nod2D)) then remPtr_nod2D (1:npes) => partit%remPtr_nod2D remList_nod2D (1:remPtr_nod2D(npes)) => partit%remList_nod2D @@ -59,5 +102,4 @@ r_mpitype_nod2D_i(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D_i s_mpitype_nod3D(1:com_nod2D%sPEnum, lb:ub, 1:3) => partit%s_mpitype_nod3D r_mpitype_nod3D(1:com_nod2D%rPEnum, lb:ub, 1:3) => partit%r_mpitype_nod3D - -part => partit%part +#endif From 56179e26ca6d77987fec61ca5e4c5631d0462a74 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 11 Jan 2022 11:45:39 +0100 Subject: [PATCH 787/909] FESOM compiles with NVIDIA 22.1. Some pieces of code had to be simplified unelegantly :( --- env/juwels/shell | 2 +- src/MOD_ICE.F90 | 5 +++++ src/ice_thermo_oce.F90 | 7 ++++++- src/io_meandata.F90 | 8 ++++++-- src/oce_ale.F90 | 20 ++++++++++++++++++-- src/oce_muscl_adv.F90 | 5 +++++ 6 files changed, 41 insertions(+), 6 deletions(-) diff --git a/env/juwels/shell b/env/juwels/shell index 7be33060d..f5e8a951f 100644 --- a/env/juwels/shell +++ b/env/juwels/shell @@ -2,7 +2,7 @@ module --force purge module use /gpfs/software/juwels/otherstages module load Stages/2022 -module load NVHPC/21.11 # older versions of pgf90/nvfortran can not compile mixed assumed shape and assumed rank parameters +module load NVHPC/22.1 # older versions of pgf90/nvfortran can not compile mixed assumed shape and assumed rank parameters module load ParaStationMPI/5.5.0-1 module load CMake/3.21.1 #module load imkl/2021.2.0 diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index f2364da8b..d1b8d5ef9 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -18,6 +18,7 @@ MODULE MOD_ICE integer :: ID !___________________________________________________________________________ contains + private procedure WRITE_T_ICE_DATA procedure READ_T_ICE_DATA generic :: write(unformatted) => WRITE_T_ICE_DATA @@ -38,6 +39,7 @@ MODULE MOD_ICE real(kind=WP), allocatable, dimension(:) :: ice_strength, inv_areamass, inv_mass !___________________________________________________________________________ contains + private procedure WRITE_T_ICE_WORK procedure READ_T_ICE_WORK generic :: write(unformatted) => WRITE_T_ICE_WORK @@ -82,6 +84,7 @@ MODULE MOD_ICE real(kind=WP) :: albim = 0.68 ! melting ice real(kind=WP) :: albw = 0.066 ! open water, LY2004 contains + private procedure WRITE_T_ICE_THERMO procedure READ_T_ICE_THERMO generic :: write(unformatted) => WRITE_T_ICE_THERMO @@ -104,6 +107,7 @@ MODULE MOD_ICE #endif /* (__oifs) */ !___________________________________________________________________________ contains + private procedure WRITE_T_ICE_ATMCOUPL procedure READ_T_ICE_ATMCOUPL generic :: write(unformatted) => WRITE_T_ICE_ATMCOUPL @@ -190,6 +194,7 @@ MODULE MOD_ICE logical :: ice_update = .true. !___________________________________________________________________________ contains + private procedure WRITE_T_ICE procedure READ_T_ICE generic :: write(unformatted) => WRITE_T_ICE diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index e2aa318e4..cdd8ef455 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -185,8 +185,13 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux myDim_nod2d => partit%myDim_nod2D eDim_nod2D => partit%eDim_nod2D +#ifdef __PGI + ulevels_nod2D => mesh%ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) + geo_coord_nod2D => mesh%geo_coord_nod2D(1:2, 1:myDim_nod2D+eDim_nod2D) +#else ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D - geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D + geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D +#endif u_ice => ice%uice(:) v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 4b7ce30f3..17a49071e 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -1018,7 +1018,8 @@ subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, fr type(t_mesh), intent(in), target :: mesh logical, optional, intent(in) :: flip_array integer i - + +#if !defined(__PGI) do i = 1, rank(data) if ((ubound(data, dim = i)<=0)) then if (partit%mype==0) then @@ -1028,6 +1029,7 @@ subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, fr return end if end do +#endif if (partit%mype==0) then write(*,*) 'adding I/O stream 3D for ', trim(name) @@ -1083,7 +1085,8 @@ subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, fr type(t_mesh), intent(in) :: mesh type(t_partit), intent(inout) :: partit integer i - + +#if !defined(__PGI) do i = 1, rank(data) if ((ubound(data, dim = i)<=0)) then if (partit%mype==0) then @@ -1093,6 +1096,7 @@ subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, fr return end if end do +#endif if (partit%mype==0) then write(*,*) 'adding I/O stream 2D for ', trim(name) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 4b6b95b31..fe5f69554 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -222,13 +222,29 @@ subroutine init_ale(dynamics, partit, mesh) allocate(mesh%zbar_e_srf(myDim_elem2D+eDim_elem2D)) ! also change bottom thickness at nodes due to partial cell --> bottom - ! thickness at nodes is the volume weighted mean of sorounding elemental + ! thickness at nodes is the volume weighted mean of sorounding elemental ! thicknesses allocate(mesh%bottom_node_thickness(myDim_nod2D+eDim_nod2D)) allocate(mesh%zbar_n_bot(myDim_nod2D+eDim_nod2D)) allocate(mesh%zbar_n_srf(myDim_nod2D+eDim_nod2D)) ! reassociate after the allocation (no pointer exists before) +#ifdef __PGI + hnode => mesh%hnode (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) + hnode_new => mesh%hnode_new (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) + zbar_3d_n => mesh%zbar_3d_n (1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) + Z_3d_n => mesh%Z_3d_n (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) + helem => mesh%helem (1:mesh%nl-1, 1:myDim_elem2D) + bottom_elem_thickness => mesh%bottom_elem_thickness (1:myDim_elem2D) + bottom_node_thickness => mesh%bottom_node_thickness (1:myDim_nod2D+eDim_nod2D) + dhe => mesh%dhe (1:myDim_elem2D) + hbar => mesh%hbar (1:myDim_nod2D+eDim_nod2D) + hbar_old => mesh%hbar_old (1:myDim_nod2D+eDim_nod2D) + zbar_n_bot => mesh%zbar_n_bot (1:myDim_nod2D+eDim_nod2D) + zbar_e_bot => mesh%zbar_e_bot (1:myDim_elem2D+eDim_elem2D) + zbar_n_srf => mesh%zbar_n_srf (1:myDim_nod2D+eDim_nod2D) + zbar_e_srf => mesh%zbar_e_srf (1:myDim_elem2D+eDim_elem2D) +#else hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n @@ -243,7 +259,7 @@ subroutine init_ale(dynamics, partit, mesh) zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf - +#endif !___initialize______________________________________________________________ hbar = 0.0_WP hbar_old = 0.0_WP diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index 07ce574ee..a12cbafc2 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -76,8 +76,13 @@ subroutine muscl_adv_init(twork, partit, mesh) nn_size=k !___________________________________________________________________________ allocate(mesh%nn_num(myDim_nod2D), mesh%nn_pos(nn_size,myDim_nod2D)) +#ifdef __PGI + nn_num => mesh%nn_num(1:myDim_nod2D) + nn_pos => mesh%nn_pos(1:nn_size, 1:myDim_nod2D) +#else nn_num(1:myDim_nod2D) => mesh%nn_num nn_pos(1:nn_size, 1:myDim_nod2D) => mesh%nn_pos +#endif ! These are the same arrays that we also use in quadratic reconstruction !MOVE IT TO SOMEWHERE ELSE !$OMP PARALLEL DO From d588ee2560a17eb0eb59e69893972da83b9c6f60 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Sun, 16 Jan 2022 11:57:56 +0100 Subject: [PATCH 788/909] turn off debug flag --- config/namelist.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/namelist.config b/config/namelist.config index 941827f1a..b950ace24 100644 --- a/config/namelist.config +++ b/config/namelist.config @@ -56,7 +56,7 @@ use_cavity=.false. ! use_cavity_partial_cell=.false. use_floatice = .false. use_sw_pene=.true. -flag_debug=.true. +flag_debug=.false. / &machine From 35ba589d29100f3e94165382436baa8ef87798de Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Sun, 16 Jan 2022 12:16:53 +0100 Subject: [PATCH 789/909] update iloveclock, to make it simplier and hopefully more precise --- view/scripts/iloveclock.py | 33 +++++++++------------------------ 1 file changed, 9 insertions(+), 24 deletions(-) diff --git a/view/scripts/iloveclock.py b/view/scripts/iloveclock.py index f52d632c9..45da4c3f3 100755 --- a/view/scripts/iloveclock.py +++ b/view/scripts/iloveclock.py @@ -16,12 +16,10 @@ Valid calendars 'standard', 'gregorian', 'proleptic_gregorian' 'noleap', '365_day', '360_day', 'julian', 'all_leap', '366_day' -Copyright (c) 2018, FESOM Development Team. +Copyright (c) 2018, 2022 FESOM Development Team. """ from netCDF4 import Dataset, num2date -from datetime import timedelta -from datetime import datetime import sys filename = sys.argv[1] @@ -31,32 +29,19 @@ calendar = '365_day' f = Dataset(filename) -# a = num2date(f.variables['time'][:], f.variables['time'].units, '365_day') print(20*'*') print('CALENDAR: ' + calendar) print(20*'*') for nstamp in range(f.variables['time'].shape[0]): - sstamp = num2date(f.variables['time'][nstamp], f.variables['time'].units, calendar) - delta = (60 - sstamp.minute)*60 - estamp = num2date(f.variables['time'][:][nstamp] + delta, f.variables['time'].units, calendar) - seconds_in_day_s = sstamp.hour*3600+sstamp.minute*60 - seconds_in_day_e = estamp.hour*3600+estamp.minute*60 - - if calendar in ['noleap', '365_day', '360_day', '366_day']: - print(sstamp) - print("{:5d} {:10d} {:10d}".format(seconds_in_day_s, sstamp.dayofyr, sstamp.year)) - print("{:5d} {:10d} {:10d}".format(seconds_in_day_e, estamp.dayofyr, estamp.year)) - print(20*'*') - else: - print(sstamp) - print("{:5d} {:10d} {:10d}".format(seconds_in_day_s, sstamp.timetuple().tm_yday, sstamp.year)) - print("{:5d} {:10d} {:10d}".format(seconds_in_day_e, estamp.timetuple().tm_yday, estamp.year)) - print(20*'*') - -print(20*'*') -print('CALENDAR: ' + calendar) -print(20*'*') + estamp = num2date(f.variables['time'][:][nstamp], f.variables['time'].units, calendar) + sstamp = int(f.variables['time'][nstamp]) + day = (sstamp//86400)+1 + seconds = sstamp%86400 + print(sstamp) + print("{:5d} {:10d} {:10d}".format(seconds, day, estamp.year)) + print("{:5d} {:10d} {:10d}".format(86400, day, estamp.year)) + print(20*'*') f.close() \ No newline at end of file From 9765c8d427c46afe09bc39c01e04628f4d4bf7db Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 20 Jan 2022 16:14:24 +0100 Subject: [PATCH 790/909] add custom implementation for mkdir --- src/fortran_utils.F90 | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/fortran_utils.F90 b/src/fortran_utils.F90 index 6eecdb89d..1296cfe72 100644 --- a/src/fortran_utils.F90 +++ b/src/fortran_utils.F90 @@ -39,5 +39,30 @@ function int_to_txt_pad(val, width) result(txt) allocate(character(w) :: txt) write(txt,'(i0.'//widthtxt//')') val end function + + + ! using EXECUTE_COMMAND_LINE to call mkdir sometimes fail (EXECUTE_COMMAND_LINE is forked as an new process, which may be the problem) + ! try to use the C mkdir as an alternative + subroutine mkdir(path) + use iso_c_binding + character(len=*), intent(in) :: path + ! EO parameters + integer result + character(:,kind=C_CHAR), allocatable :: pathcopy + + interface + function mkdir_c(path, mode) bind(c,name="mkdir") + use iso_c_binding + integer(c_int) mkdir_c + character(kind=c_char,len=1) path(*) + integer(c_int), value :: mode + end function + end interface + + pathcopy = path ! we need to pass an array of c_char to the C funcktion (this is not a correct type conversion, but Fortran characters seem to be of the same kind as c_char) + ! result is 0 if the dir has been created from this call, otherwise -1 + ! the mode will not exactly be what we pass here, as it is subtracted by the umask bits (and possibly more) + result = mkdir_c(pathcopy//C_NULL_CHAR, int(o'777', c_int)) + end subroutine end module From 369459f79e53d710ee3f575bc148ed662cb19b26 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 20 Jan 2022 16:14:50 +0100 Subject: [PATCH 791/909] execute_command_line sometimes fails, use a custom implementation around mkdir from C instead --- src/io_restart.F90 | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 7b6c4b6ac..be5ce5004 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -156,8 +156,6 @@ subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) type(t_ice) , intent(in) , target :: ice logical dumpfiles_exist logical, save :: initialized = .false. - integer cstat, estat - character(500) cmsg ! there seems to be no documentation about the max size of this text integer mpierr if(.not. initialized) then @@ -166,9 +164,9 @@ subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) raw_restart_infopath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(partit%npes)//".info" if(raw_restart_length_unit /= "off") then if(partit%mype == RAW_RESTART_METADATA_RANK) then - ! inquire does not work for directories, the directory might already exist - call execute_command_line("mkdir -p "//raw_restart_dirpath, exitstat=estat, cmdstat=cstat, cmdmsg=cmsg) ! sometimes does not work on aleph - if(cstat /= 0) print *,"creating raw restart directory ERROR ", trim(cmsg) + ! execute_command_line with mkdir sometimes fails, use a custom implementation around mkdir from C instead + call mkdir(trim(ResultPath)//"/fesom_raw_restart") ! we have no mkdir -p, create the intermediate dirs separately + call mkdir(raw_restart_dirpath) end if call MPI_Barrier(partit%MPI_COMM_FESOM, mpierr) ! make sure the dir has been created before we continue... end if @@ -234,6 +232,7 @@ end subroutine restart subroutine write_restart(path, filegroup, istep) + use fortran_utils character(len=*), intent(in) :: path type(restart_file_group), intent(inout) :: filegroup integer, intent(in) :: istep @@ -265,7 +264,8 @@ subroutine write_restart(path, filegroup, istep) end if end if if(filegroup%files(i)%path .ne. filepath) then - call execute_command_line("mkdir -p "//dirpath) + ! execute_command_line with mkdir sometimes fails, use a custom implementation around mkdir from C instead + call mkdir(dirpath) filegroup%files(i)%path = filepath call filegroup%files(i)%open_write_create(filegroup%files(i)%path) else @@ -285,7 +285,7 @@ subroutine write_restart(path, filegroup, istep) subroutine write_all_raw_restarts(istep, mpicomm, mype) - integer, intent(in) :: istep + integer, intent(in):: istep integer, intent(in) :: mpicomm integer, intent(in) :: mype ! EO parameters From cfbb144bdb4b27275b4230ddbc8112c5cedd5f2e Mon Sep 17 00:00:00 2001 From: dsidoren Date: Sun, 23 Jan 2022 19:11:06 +0100 Subject: [PATCH 792/909] visc_gamma0, visc_gamma1 & visc_gamma2 were used instead gamma0_tra, gamma1_tra & gamma2_tra in diff_part_bh visc_gamma0, visc_gamma1 & visc_gamma2 were used instead gamma0_tra, gamma1_tra & gamma2_tra in diff_part_bh --- src/oce_ale_tracer.F90 | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index fdba81109..dde2d0f7e 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1189,9 +1189,9 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 tt=ttf(nz,en(1))-ttf(nz,en(2)) - vi=sqrt(max(dynamics%visc_gamma0, & - max(dynamics%visc_gamma1*sqrt(vi), & - dynamics%visc_gamma2*vi) & + vi=sqrt(max(tracers%data(tr_num)%gamma0_tra, & + max(tracers%data(tr_num)%gamma1_tra*sqrt(vi), & + tracers%data(tr_num)%gamma2_tra*vi) & )*len) !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 10m^2/s for 10 km (0.04 h/50) !vi=sqrt(10.*le) @@ -1219,10 +1219,10 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 tt=temporary_ttf(nz,en(1))-temporary_ttf(nz,en(2)) - vi=sqrt(max(dynamics%visc_gamma0, & - max(dynamics%visc_gamma1*sqrt(vi), & - dynamics%visc_gamma2*vi) & - )*len) + vi=sqrt(max(tracers%data(tr_num)%gamma0_tra, & + max(tracers%data(tr_num)%gamma1_tra*sqrt(vi), & + tracers%data(tr_num)%gamma2_tra*vi) & + )*len) !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 10m^2/s for 10 km (0.04 h/50) !vi=sqrt(10.*le) tt=-tt*vi*dt From b74251e1582dfe1d9c22d0272d89c58126beb64e Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Sun, 23 Jan 2022 21:15:53 +0100 Subject: [PATCH 793/909] fixed bug regarding settings for i_impl_diff & bh_diff_tracer which were inconsistontly (diplicated) defined in different "subtypes" of type T_TRACER --- src/MOD_TRACER.F90 | 32 +++++++++++++++++--------------- src/oce_ale_tracer.F90 | 7 +++---- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index 242ee483d..752e64f1c 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -68,11 +68,13 @@ MODULE MOD_TRACER type(t_tracer_work) :: work ! general options for all tracers (can be moved to T_TRACER is needed) ! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). -logical :: smooth_bh_tra = .false. -real(kind=WP) :: gamma0_tra = 0.0005 -real(kind=WP) :: gamma1_tra = 0.0125 -real(kind=WP) :: gamma2_tra = 0. -logical :: i_vert_diff = .true. +! we keep these tracer characteristics for each tracer individually (contained in T_TRACER_DATA), although in +! the namelist.tra they are define unique for all tracers. +!logical :: smooth_bh_tra = .false. +!real(kind=WP) :: gamma0_tra = 0.0005 +!real(kind=WP) :: gamma1_tra = 0.0125 +!real(kind=WP) :: gamma2_tra = 0. +!logical :: i_vert_diff = .true. contains procedure WRITE_T_TRACER @@ -193,11 +195,11 @@ subroutine WRITE_T_TRACER(tracer, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) end do write(unit, iostat=iostat, iomsg=iomsg) tracer%work - write(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra - write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra - write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra - write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra - write(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff +! write(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra +! write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra +! write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra +! write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra +! write(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff end subroutine WRITE_T_TRACER ! Unformatted reading for T_TRACER @@ -217,11 +219,11 @@ subroutine READ_T_TRACER(tracer, unit, iostat, iomsg) ! write(*,*) 'tracer info:', tracer%data(i)%ID, TRIM(tracer%data(i)%tra_adv_hor), TRIM(tracer%data(i)%tra_adv_ver), TRIM(tracer%data(i)%tra_adv_lim) end do read(unit, iostat=iostat, iomsg=iomsg) tracer%work - read(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra - read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra - read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra - read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra - read(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff +! read(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra +! read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra +! read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra +! read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra +! read(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff end subroutine READ_T_TRACER end module MOD_TRACER !========================================================== diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index dde2d0f7e..fdbcce0b4 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -319,7 +319,7 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! do vertical diffusion: explicit - if (.not. tracers%i_vert_diff) call diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) + if (.not. tracers%data(tr_num)%i_vert_diff) call diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) ! A projection of horizontal Redi diffussivity onto vertical. This par contains horizontal ! derivatives and has to be computed explicitly! if (Redi) call diff_ver_part_redi_expl(tracers, partit, mesh) @@ -342,14 +342,13 @@ subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) end do !$OMP END PARALLEL DO !___________________________________________________________________________ - if (tracers%i_vert_diff) then + if (tracers%data(tr_num)%i_vert_diff) then ! do vertical diffusion: implicite call diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) - end if !We DO not set del_ttf to zero because it will not be used in this timestep anymore !init_tracers_AB will set it to zero for the next timestep - if (tracers%smooth_bh_tra) then + if (tracers%data(tr_num)%smooth_bh_tra) then call diff_part_bh(tr_num, dynamics, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) end if end subroutine diff_tracers_ale From e0fe1de95d43a0aa555fab9f7c26a181bf403976 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Mon, 24 Jan 2022 11:35:07 +0100 Subject: [PATCH 794/909] tracer dependent computation of biharmonic diffusion in diff_part_bh --- src/oce_ale_tracer.F90 | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index fdbcce0b4..8b10f5a2e 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1156,6 +1156,7 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit integer :: n, nz, ed, el(2), en(2), k, elem, nl1, ul1 + integer :: elnodes1(3), elnodes2(3) real(kind=WP) :: u1, v1, len, vi, tt, ww real(kind=WP), pointer :: temporary_ttf(:,:) real(kind=WP), pointer :: UV(:,:,:) @@ -1187,6 +1188,11 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) u1=UV(1, nz,el(1))-UV(1, nz,el(2)) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 + elnodes1=elem2d_nodes(:,el(1)) + elnodes2=elem2d_nodes(:,el(2)) + u1=maxval(ttf(nz, elnodes1))-minval(ttf(nz, elnodes2)) + v1=minval(ttf(nz, elnodes1))-maxval(ttf(nz, elnodes2)) + vi=u1*u1+v1*v1 tt=ttf(nz,en(1))-ttf(nz,en(2)) vi=sqrt(max(tracers%data(tr_num)%gamma0_tra, & max(tracers%data(tr_num)%gamma1_tra*sqrt(vi), & @@ -1214,8 +1220,10 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) ul1=minval(ulevels_nod2D_max(en)) nl1=maxval(nlevels_nod2D_min(en))-1 DO nz=ul1,nl1 - u1=UV(1, nz,el(1))-UV(1, nz,el(2)) - v1=UV(2, nz,el(1))-UV(2, nz,el(2)) + elnodes1=elem2d_nodes(:,el(1)) + elnodes2=elem2d_nodes(:,el(2)) + u1=maxval(ttf(nz, elnodes1))-minval(ttf(nz, elnodes2)) + v1=minval(ttf(nz, elnodes1))-maxval(ttf(nz, elnodes2)) vi=u1*u1+v1*v1 tt=temporary_ttf(nz,en(1))-temporary_ttf(nz,en(2)) vi=sqrt(max(tracers%data(tr_num)%gamma0_tra, & From 7a366e01c282cd93fac8e222feeb7a074caf59f4 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 25 Jan 2022 14:36:52 +0100 Subject: [PATCH 795/909] segmentaition fault fix for diff_part_bh --- src/oce_ale_tracer.F90 | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 8b10f5a2e..e90cfd9ec 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1182,14 +1182,16 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) el=edge_tri(:,ed) en=edges(:,ed) len=sqrt(sum(elem_area(el))) - ul1=minval(ulevels_nod2D_max(en)) - nl1=maxval(nlevels_nod2D_min(en))-1 + ul1 = minval(nlevels(el)) + nl1 = maxval(ulevels(el)) +! ul1=minval(ulevels_nod2D_max(en)) +! nl1=maxval(nlevels_nod2D_min(en))-1 + elnodes1=elem2d_nodes(:,el(1)) + elnodes2=elem2d_nodes(:,el(2)) DO nz=ul1,nl1 u1=UV(1, nz,el(1))-UV(1, nz,el(2)) v1=UV(2, nz,el(1))-UV(2, nz,el(2)) vi=u1*u1+v1*v1 - elnodes1=elem2d_nodes(:,el(1)) - elnodes2=elem2d_nodes(:,el(2)) u1=maxval(ttf(nz, elnodes1))-minval(ttf(nz, elnodes2)) v1=minval(ttf(nz, elnodes1))-maxval(ttf(nz, elnodes2)) vi=u1*u1+v1*v1 @@ -1217,11 +1219,13 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) el=edge_tri(:,ed) en=edges(:,ed) len=sqrt(sum(elem_area(el))) - ul1=minval(ulevels_nod2D_max(en)) - nl1=maxval(nlevels_nod2D_min(en))-1 + ul1 = minval(nlevels(el)) + nl1 = maxval(ulevels(el)) +! ul1=minval(ulevels_nod2D_max(en)) +! nl1=maxval(nlevels_nod2D_min(en))-1 + elnodes1=elem2d_nodes(:,el(1)) + elnodes2=elem2d_nodes(:,el(2)) DO nz=ul1,nl1 - elnodes1=elem2d_nodes(:,el(1)) - elnodes2=elem2d_nodes(:,el(2)) u1=maxval(ttf(nz, elnodes1))-minval(ttf(nz, elnodes2)) v1=minval(ttf(nz, elnodes1))-maxval(ttf(nz, elnodes2)) vi=u1*u1+v1*v1 From 8d6f3994d23f431458b68c46b6a9ff54cf471f7a Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 26 Jan 2022 14:59:30 +0100 Subject: [PATCH 796/909] a lot of things fixed in diff_part_bh --- src/oce_ale_tracer.F90 | 81 +++++++++++++++++++++++++----------------- 1 file changed, 49 insertions(+), 32 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index e90cfd9ec..f3b4f4f0c 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1155,9 +1155,9 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) type(t_tracer), intent(inout), target :: tracers type(t_mesh) , intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit - integer :: n, nz, ed, el(2), en(2), k, elem, nl1, ul1 + integer :: n, nz, ed, el(2), en(2), k, elem, nzmin, nzmax integer :: elnodes1(3), elnodes2(3) - real(kind=WP) :: u1, v1, len, vi, tt, ww + real(kind=WP) :: u1, v1, len, vi, ww, tt(mesh%nl-1) real(kind=WP), pointer :: temporary_ttf(:,:) real(kind=WP), pointer :: UV(:,:,:) real(kind=WP), pointer :: ttf(:,:) @@ -1165,6 +1165,8 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" + + UV => dynamics%uv(:,:,:) ttf => tracers%data(tr_num)%values temporary_ttf => tracers%work%del_ttf !use already allocated working array. could be fct_LO instead etc. @@ -1175,74 +1177,89 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) end do !$OMP END PARALLEL DO -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, ed, el, en, k, elem, nl1, ul1, u1, v1, len, vi, tt, ww) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, ed, el, en, k, elem, nzmin, nzmax, u1, v1, len, vi, tt, ww, & +!$OMP elnodes1, elnodes2) !$OMP DO - DO ed=1, myDim_edge2D+eDim_edge2D - if (myList_edge2D(ed)>edge2D_in) cycle + DO ed=1, myDim_edge2D!+eDim_edge2D + if (myList_edge2D(ed) > edge2D_in) cycle el=edge_tri(:,ed) en=edges(:,ed) len=sqrt(sum(elem_area(el))) - ul1 = minval(nlevels(el)) - nl1 = maxval(ulevels(el)) -! ul1=minval(ulevels_nod2D_max(en)) -! nl1=maxval(nlevels_nod2D_min(en))-1 + nzmax = minval(nlevels(el)) + nzmin = maxval(ulevels(el)) elnodes1=elem2d_nodes(:,el(1)) elnodes2=elem2d_nodes(:,el(2)) - DO nz=ul1,nl1 - u1=UV(1, nz,el(1))-UV(1, nz,el(2)) - v1=UV(2, nz,el(1))-UV(2, nz,el(2)) - vi=u1*u1+v1*v1 + DO nz=nzmin, nzmax-1 u1=maxval(ttf(nz, elnodes1))-minval(ttf(nz, elnodes2)) v1=minval(ttf(nz, elnodes1))-maxval(ttf(nz, elnodes2)) vi=u1*u1+v1*v1 - tt=ttf(nz,en(1))-ttf(nz,en(2)) + tt(nz)=ttf(nz,en(1))-ttf(nz,en(2)) vi=sqrt(max(tracers%data(tr_num)%gamma0_tra, & max(tracers%data(tr_num)%gamma1_tra*sqrt(vi), & tracers%data(tr_num)%gamma2_tra*vi) & )*len) - !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 10m^2/s for 10 km (0.04 h/50) - !vi=sqrt(10.*le) - tt=tt*vi - temporary_ttf(nz,en(1))=temporary_ttf(nz,en(1))-tt - temporary_ttf(nz,en(2))=temporary_ttf(nz,en(2))+tt - END DO + tt(nz)=tt(nz)*vi + END DO +#if defined(_OPENMP) + call omp_set_lock (partit%plock(en(1))) +#endif + temporary_ttf(nzmin:nzmax-1,en(1))=temporary_ttf(nzmin:nzmax-1,en(1))-tt(nzmin:nzmax-1) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(en(1))) + call omp_set_lock (partit%plock(en(2))) +#endif + temporary_ttf(nzmin:nzmax-1,en(2))=temporary_ttf(nzmin:nzmax-1,en(2))+tt(nzmin:nzmax-1) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(en(2))) +#endif END DO !$OMP END DO +!$OMP MASTER call exchange_nod(temporary_ttf, partit) +!$OMP END MASTER !$OMP BARRIER ! =========== ! Second round: ! =========== !$OMP DO - DO ed=1, myDim_edge2D+eDim_edge2D + DO ed=1, myDim_edge2D!+eDim_edge2D if (myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) en=edges(:,ed) len=sqrt(sum(elem_area(el))) - ul1 = minval(nlevels(el)) - nl1 = maxval(ulevels(el)) -! ul1=minval(ulevels_nod2D_max(en)) -! nl1=maxval(nlevels_nod2D_min(en))-1 + nzmax = minval(nlevels(el)) + nzmin = maxval(ulevels(el)) elnodes1=elem2d_nodes(:,el(1)) elnodes2=elem2d_nodes(:,el(2)) - DO nz=ul1,nl1 + DO nz=nzmin, nzmax-1 u1=maxval(ttf(nz, elnodes1))-minval(ttf(nz, elnodes2)) v1=minval(ttf(nz, elnodes1))-maxval(ttf(nz, elnodes2)) vi=u1*u1+v1*v1 - tt=temporary_ttf(nz,en(1))-temporary_ttf(nz,en(2)) + tt(nz)=temporary_ttf(nz,en(1))-temporary_ttf(nz,en(2)) vi=sqrt(max(tracers%data(tr_num)%gamma0_tra, & max(tracers%data(tr_num)%gamma1_tra*sqrt(vi), & tracers%data(tr_num)%gamma2_tra*vi) & )*len) - !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 10m^2/s for 10 km (0.04 h/50) - !vi=sqrt(10.*le) - tt=-tt*vi*dt - ttf(nz,en(1))=ttf(nz,en(1))-tt/area(nz,en(1)) - ttf(nz,en(2))=ttf(nz,en(2))+tt/area(nz,en(2)) + tt(nz)=-tt(nz)*vi*dt END DO +#if defined(_OPENMP) + call omp_set_lock (partit%plock(en(1))) +#endif + ttf(nzmin:nzmax-1,en(1))=ttf(nzmin:nzmax-1,en(1))-tt/area(nzmin:nzmax-1,en(1)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(en(1))) + call omp_set_lock (partit%plock(en(2))) +#endif + ttf(nzmin:nzmax-1,en(2))=ttf(nzmin:nzmax-1,en(2))+tt/area(nzmin:nzmax-1,en(2)) +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(en(2))) +#endif END DO !$OMP END DO !$OMP END PARALLEL + +call exchange_nod(ttf, partit) +!$OMP BARRIER end subroutine diff_part_bh ! ! From 690b210687462e10beb725980e8ef4f7050f3eb0 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Wed, 26 Jan 2022 18:31:13 +0100 Subject: [PATCH 797/909] last (hopefully) problems fixed in diff_part_bh --- src/oce_ale_tracer.F90 | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index f3b4f4f0c..bc7a02122 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1196,8 +1196,8 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) tt(nz)=ttf(nz,en(1))-ttf(nz,en(2)) vi=sqrt(max(tracers%data(tr_num)%gamma0_tra, & max(tracers%data(tr_num)%gamma1_tra*sqrt(vi), & - tracers%data(tr_num)%gamma2_tra*vi) & - )*len) + tracers%data(tr_num)%gamma2_tra* vi) & + )*len) tt(nz)=tt(nz)*vi END DO #if defined(_OPENMP) @@ -1238,26 +1238,25 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) tt(nz)=temporary_ttf(nz,en(1))-temporary_ttf(nz,en(2)) vi=sqrt(max(tracers%data(tr_num)%gamma0_tra, & max(tracers%data(tr_num)%gamma1_tra*sqrt(vi), & - tracers%data(tr_num)%gamma2_tra*vi) & - )*len) + tracers%data(tr_num)%gamma2_tra* vi) & + )*len) tt(nz)=-tt(nz)*vi*dt END DO #if defined(_OPENMP) call omp_set_lock (partit%plock(en(1))) #endif - ttf(nzmin:nzmax-1,en(1))=ttf(nzmin:nzmax-1,en(1))-tt/area(nzmin:nzmax-1,en(1)) + ttf(nzmin:nzmax-1,en(1))=ttf(nzmin:nzmax-1,en(1))-tt(nzmin:nzmax-1)/area(nzmin:nzmax-1,en(1)) #if defined(_OPENMP) call omp_unset_lock(partit%plock(en(1))) call omp_set_lock (partit%plock(en(2))) #endif - ttf(nzmin:nzmax-1,en(2))=ttf(nzmin:nzmax-1,en(2))+tt/area(nzmin:nzmax-1,en(2)) + ttf(nzmin:nzmax-1,en(2))=ttf(nzmin:nzmax-1,en(2))+tt(nzmin:nzmax-1)/area(nzmin:nzmax-1,en(2)) #if defined(_OPENMP) call omp_unset_lock(partit%plock(en(2))) #endif END DO !$OMP END DO !$OMP END PARALLEL - call exchange_nod(ttf, partit) !$OMP BARRIER end subroutine diff_part_bh From c1e494a84640700b72609ff519e1a85a9aa3bdeb Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 1 Feb 2022 15:51:21 +0100 Subject: [PATCH 798/909] Brunt-Vaisala frequency is smoothed spatially before it is given to the mixing schemes. This damps some mode which is expressed as a grid noise in the modelled fields (mainly at the equator). --- src/oce_ale_pressure_bv.F90 | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 9881804d7..16c306cfe 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -208,6 +208,7 @@ subroutine pressure_bv(tracers, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE o_ARRAYS + USE g_support USE o_mixing_KPP_mod, only: dbsfc USE diagnostics, only: ldiag_dMOC use densityJM_components_interface @@ -216,7 +217,7 @@ subroutine pressure_bv(tracers, partit, mesh) type(t_mesh), intent(in) , target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(in), target :: tracers - real(kind=WP) :: dz_inv, bv, a, rho_up, rho_dn, t, s + real(kind=WP) :: zmean, dz_inv, bv, a, rho_up, rho_dn, t, s integer :: node, nz, nl1, nzmax, nzmin real(kind=WP) :: rhopot(mesh%nl), bulk_0(mesh%nl), bulk_pz(mesh%nl), bulk_pz2(mesh%nl), rho(mesh%nl), dbsfc1(mesh%nl), db_max real(kind=WP) :: bulk_up, bulk_dn, smallvalue, buoyancy_crit, rho_surf, aux_rho, aux_rho1 @@ -263,7 +264,7 @@ subroutine pressure_bv(tracers, partit, mesh) !___________________________________________________________________________ -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(dz_inv, bv, a, rho_up, rho_dn, t, s, node, nz, nl1, nzmax, nzmin, & +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(zmean, dz_inv, bv, a, rho_up, rho_dn, t, s, node, nz, nl1, nzmax, nzmin, & !$OMP rhopot, bulk_0, bulk_pz, bulk_pz2, rho, dbsfc1, db_max, bulk_up, bulk_dn, & !$OMP rho_surf, aux_rho, aux_rho1, flag1, flag2) !$OMP DO @@ -423,24 +424,21 @@ subroutine pressure_bv(tracers, partit, mesh) flag1=.true. flag2=.true. do nz=nzmin+1,nzmax-1 - bulk_up = bulk_0(nz-1) + zbar_3d_n(nz,node)*(bulk_pz(nz-1) + zbar_3d_n(nz,node)*bulk_pz2(nz-1)) - bulk_dn = bulk_0(nz) + zbar_3d_n(nz,node)*(bulk_pz(nz) + zbar_3d_n(nz,node)*bulk_pz2(nz)) - rho_up = bulk_up*rhopot(nz-1) / (bulk_up + 0.1_WP*zbar_3d_n(nz,node)*real(state_equation)) - rho_dn = bulk_dn*rhopot(nz) / (bulk_dn + 0.1_WP*zbar_3d_n(nz,node)*real(state_equation)) - dz_inv=1.0_WP/(Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) - + zmean = 0.5_WP*sum(Z_3d_n(nz-1:nz, node), node)) + bulk_up = bulk_0(nz-1) + zmean*(bulk_pz(nz-1) + zmean*bulk_pz2(nz-1)) + bulk_dn = bulk_0(nz) + zmean*(bulk_pz(nz) + zmean*bulk_pz2(nz)) + rho_up = bulk_up*rhopot(nz-1) / (bulk_up + 0.1_WP*zmean*real(state_equation)) + rho_dn = bulk_dn*rhopot(nz) / (bulk_dn + 0.1_WP*zmean*real(state_equation)) + dz_inv = 1.0_WP/(Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) !_______________________________________________________________ ! squared brunt väisälä frequence N^2 --> N^2>0 stratification is ! stable, vertical elongated parcel is accelaratedtowards ! initial point --> does oscillation with frequency N. ! N^2<0 stratification is unstable vertical elongated parcel is ! accelerated away from initial point - bvfreq(nz,node) = -g*dz_inv*(rho_up-rho_dn)/density_0 -!!PS bvfreq(nz,node) = -g*dz_inv*(rho_up-rho_dn)/density_ref(nz,node) - + bvfreq(nz,node) = -g*dz_inv*(rho_up-rho_dn)/density_0 !!PS !--> Why not like this ? - !!PS bvfreq(nz,node) = -g*dz_inv*(rho_up-rho_dn)/(rho_dn) - + !!PS bvfreq(nz,node) = -g*dz_inv*(rho_up-rho_dn)/(rho_dn) !_______________________________________________________________ ! define MLD following Large et al. 1997 ! MLD is the shallowest depth where the local buoyancy gradient matches the maximum buoyancy gradient @@ -474,6 +472,8 @@ subroutine pressure_bv(tracers, partit, mesh) end do !$OMP END DO !$OMP END PARALLEL +call smooth_nod (bvfreq, 1, partit, mesh) +!$OMP BARRIER end subroutine pressure_bv ! ! From bde32fb02fe92036e68b05d6f52a2e96c88eac25 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 1 Feb 2022 16:00:31 +0100 Subject: [PATCH 799/909] a missing parenthesis added --- src/oce_ale_pressure_bv.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 16c306cfe..a8c5ca7ec 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -424,7 +424,7 @@ subroutine pressure_bv(tracers, partit, mesh) flag1=.true. flag2=.true. do nz=nzmin+1,nzmax-1 - zmean = 0.5_WP*sum(Z_3d_n(nz-1:nz, node), node)) + zmean = 0.5_WP*sum(Z_3d_n(nz-1:nz, node), node) bulk_up = bulk_0(nz-1) + zmean*(bulk_pz(nz-1) + zmean*bulk_pz2(nz-1)) bulk_dn = bulk_0(nz) + zmean*(bulk_pz(nz) + zmean*bulk_pz2(nz)) rho_up = bulk_up*rhopot(nz-1) / (bulk_up + 0.1_WP*zmean*real(state_equation)) From d64eb64015c9ad9500f1bd36fe51f225459839d3 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 3 Feb 2022 10:33:07 +0100 Subject: [PATCH 800/909] stop if accessing r_mpitype_elem3D beyond its bounds (this is not a fix yet, only prevents invalid memory access) --- src/gen_modules_partitioning.F90 | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 658573fd3..2e3be2e33 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -219,6 +219,12 @@ subroutine init_mpi_types(partit, mesh) blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + ! r_mpitype_elem3D shape is e.g. 7,2,4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS + ! the second dimension of r_mpitype_elem3D is probably always 2 (from nl-1 to nl) + if(.not. all(shape(r_mpitype_elem3D) .ge. [n,nl1,n_val]) ) then + print *,"out of bounds error ",shape(r_mpitype_elem3D), "vs", n,nl1,n_val, "in line ",__FILE__, __FILE__ + stop 1 + end if call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & r_mpitype_elem3D(n,nl1,n_val), MPIerr) @@ -299,6 +305,12 @@ subroutine init_mpi_types(partit, mesh) blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + ! r_mpitype_elem3D shape is e.g. 7,2,4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS + ! the second dimension of r_mpitype_elem3D is probably always 2 (from nl-1 to nl) + if(.not. all(shape(r_mpitype_elem3D) .ge. [n,nl1,n_val]) ) then + print *,"out of bounds error ",shape(r_mpitype_elem3D), "vs", n,nl1,n_val, "in line ",__FILE__, __FILE__ + stop 1 + end if call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & r_mpitype_elem3D_full(n,nl1,n_val), MPIerr) From 83d4bb5c298c85781c32c3efe5e255bad3cb3c1c Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 3 Feb 2022 13:40:55 +0100 Subject: [PATCH 801/909] change the bounds test to use the actual array bounds, compilers seem to behave differently when setting the bounds for an array pointer (e.g. gfortran and nvfortran) --- src/gen_modules_partitioning.F90 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 2e3be2e33..69f995c2f 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -219,10 +219,10 @@ subroutine init_mpi_types(partit, mesh) blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 - ! r_mpitype_elem3D shape is e.g. 7,2,4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS + ! r_mpitype_elem3D shape is e.g. 7,2,4 and its bounds 1:7,1:2,1:4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS ! the second dimension of r_mpitype_elem3D is probably always 2 (from nl-1 to nl) - if(.not. all(shape(r_mpitype_elem3D) .ge. [n,nl1,n_val]) ) then - print *,"out of bounds error ",shape(r_mpitype_elem3D), "vs", n,nl1,n_val, "in line ",__FILE__, __FILE__ + if(.not. (all(lbound(r_mpitype_elem3D) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D) .ge. [n,nl1,n_val])) ) then + print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D), __FILE__,__LINE__ stop 1 end if call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & @@ -305,10 +305,10 @@ subroutine init_mpi_types(partit, mesh) blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 - ! r_mpitype_elem3D shape is e.g. 7,2,4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS + ! r_mpitype_elem3D shape is e.g. 7,2,4 and its bounds 1:7,1:2,1:4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS ! the second dimension of r_mpitype_elem3D is probably always 2 (from nl-1 to nl) - if(.not. all(shape(r_mpitype_elem3D) .ge. [n,nl1,n_val]) ) then - print *,"out of bounds error ",shape(r_mpitype_elem3D), "vs", n,nl1,n_val, "in line ",__FILE__, __FILE__ + if(.not. (all(lbound(r_mpitype_elem3D) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D) .ge. [n,nl1,n_val])) ) then + print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D), __FILE__,__LINE__ stop 1 end if call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & From f45c9c8728006c437353c3ebae18486cd8472b2d Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 3 Feb 2022 15:49:40 +0100 Subject: [PATCH 802/909] add namelist parameter to save restart files as derived type binaries --- config/namelist.config | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/config/namelist.config b/config/namelist.config index 941827f1a..4bbee8905 100644 --- a/config/namelist.config +++ b/config/namelist.config @@ -23,10 +23,12 @@ ResultPath='../result_tmp/' / &restart_log -restart_length=1 !only required for d,h,s cases, y, m take 1 -restart_length_unit='y' !output period: y, d, h, s, off -raw_restart_length=1 +restart_length=1 ! --> do netcdf restart ( only required for d,h,s cases, y, m take 1) +restart_length_unit='y' !output period: y, d, h, s, off +raw_restart_length=1 ! --> do core dump restart raw_restart_length_unit='y' ! e.g. y, d, h, s, off +bin_restart_length=1 ! --> do derived type binary restart +bin_restart_length_unit='y' ! e.g. y, d, h, s, off logfile_outfreq=960 !in logfile info. output frequency, # steps / From b419a501a9305f7bb7d143e9864e1de1db1cf189 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 3 Feb 2022 15:53:31 +0100 Subject: [PATCH 803/909] just small in output that is written to log file --- src/MOD_DYN.F90 | 82 +++++++++++++++++----------------- src/MOD_READ_BINARY_ARRAYS.F90 | 14 +++--- src/MOD_TRACER.F90 | 2 +- src/gen_ic3d.F90 | 4 +- 4 files changed, 51 insertions(+), 51 deletions(-) diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index f338ae443..f349d81f0 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -200,28 +200,6 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) integer, intent(out) :: iostat character(*), intent(inout) :: iomsg - !___________________________________________________________________________ - call write_bin_array(dynamics%uv , unit, iostat, iomsg) - call write_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) - call write_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) - call write_bin_array(dynamics%uvnode , unit, iostat, iomsg) - - call write_bin_array(dynamics%w , unit, iostat, iomsg) - call write_bin_array(dynamics%w_e , unit, iostat, iomsg) - call write_bin_array(dynamics%w_i , unit, iostat, iomsg) - call write_bin_array(dynamics%cfl_z , unit, iostat, iomsg) - - if (Fer_GM) then - call write_bin_array(dynamics%fer_w , unit, iostat, iomsg) - call write_bin_array(dynamics%fer_uv, unit, iostat, iomsg) - end if - - !___________________________________________________________________________ - write(unit, iostat=iostat, iomsg=iomsg) dynamics%work - - !___________________________________________________________________________ - write(unit, iostat=iostat, iomsg=iomsg) dynamics%solverinfo - !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) dynamics%opt_visc write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma0 @@ -238,6 +216,27 @@ subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_wsplit write(unit, iostat=iostat, iomsg=iomsg) dynamics%wsplit_maxcfl + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%solverinfo + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%work + + !___________________________________________________________________________ + call write_bin_array(dynamics%uv , unit, iostat, iomsg) + call write_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) + call write_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) + call write_bin_array(dynamics%uvnode , unit, iostat, iomsg) + call write_bin_array(dynamics%w , unit, iostat, iomsg) + call write_bin_array(dynamics%w_e , unit, iostat, iomsg) + call write_bin_array(dynamics%w_i , unit, iostat, iomsg) + call write_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + if (Fer_GM) then + call write_bin_array(dynamics%fer_w , unit, iostat, iomsg) + call write_bin_array(dynamics%fer_uv, unit, iostat, iomsg) + end if + + end subroutine WRITE_T_DYN subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) @@ -247,25 +246,6 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) integer, intent(out) :: iostat character(*), intent(inout) :: iomsg - !___________________________________________________________________________ - call read_bin_array(dynamics%uv , unit, iostat, iomsg) - call read_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) - call read_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) - call read_bin_array(dynamics%uvnode , unit, iostat, iomsg) - - call read_bin_array(dynamics%w , unit, iostat, iomsg) - call read_bin_array(dynamics%w_e , unit, iostat, iomsg) - call read_bin_array(dynamics%w_i , unit, iostat, iomsg) - call read_bin_array(dynamics%cfl_z , unit, iostat, iomsg) - - if (Fer_GM) then - call read_bin_array(dynamics%fer_w , unit, iostat, iomsg) - call read_bin_array(dynamics%fer_uv , unit, iostat, iomsg) - end if - - !___________________________________________________________________________ - read(unit, iostat=iostat, iomsg=iomsg) dynamics%work - !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) dynamics%opt_visc read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma0 @@ -282,6 +262,26 @@ subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_wsplit read(unit, iostat=iostat, iomsg=iomsg) dynamics%wsplit_maxcfl + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%solverinfo + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%work + + !___________________________________________________________________________ + call read_bin_array(dynamics%uv , unit, iostat, iomsg) + call read_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) + call read_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) + call read_bin_array(dynamics%uvnode , unit, iostat, iomsg) + call read_bin_array(dynamics%w , unit, iostat, iomsg) + call read_bin_array(dynamics%w_e , unit, iostat, iomsg) + call read_bin_array(dynamics%w_i , unit, iostat, iomsg) + call read_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + if (Fer_GM) then + call read_bin_array(dynamics%fer_w , unit, iostat, iomsg) + call read_bin_array(dynamics%fer_uv , unit, iostat, iomsg) + end if + end subroutine READ_T_DYN END MODULE MOD_DYN diff --git a/src/MOD_READ_BINARY_ARRAYS.F90 b/src/MOD_READ_BINARY_ARRAYS.F90 index 87f0b2389..84b883c43 100644 --- a/src/MOD_READ_BINARY_ARRAYS.F90 +++ b/src/MOD_READ_BINARY_ARRAYS.F90 @@ -19,7 +19,7 @@ subroutine read1d_real(arr, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) s1 if (s1==0) return - allocate(arr(s1)) + if (.not. allocated(arr)) allocate(arr(s1)) read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) end subroutine read1d_real @@ -32,7 +32,7 @@ subroutine read1d_int(arr, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) s1 if (s1==0) return - allocate(arr(s1)) + if (.not. allocated(arr)) allocate(arr(s1)) read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) end subroutine read1d_int @@ -45,7 +45,7 @@ subroutine read1d_char(arr, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) s1 if (s1==0) return - allocate(arr(s1)) + if (.not. allocated(arr)) allocate(arr(s1)) read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) end subroutine read1d_char @@ -71,7 +71,7 @@ subroutine read2d_real(arr, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) s1, s2 if ((s1==0) .or. (s2==0)) return - allocate(arr(s1, s2)) + if (.not. allocated(arr)) allocate(arr(s1, s2)) read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) end subroutine read2d_real @@ -84,7 +84,7 @@ subroutine read2d_int(arr, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) s1, s2 if ((s1==0) .or. (s2==0)) return - allocate(arr(s1, s2)) + if (.not. allocated(arr)) allocate(arr(s1, s2)) read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) end subroutine read2d_int @@ -97,7 +97,7 @@ subroutine read3d_real(arr, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 if ((s1==0) .or. (s2==0) .or. (s3==0)) return - allocate(arr(s1,s2,s3)) + if (.not. allocated(arr)) allocate(arr(s1,s2,s3)) read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) end subroutine read3d_real @@ -110,7 +110,7 @@ subroutine read3d_int(arr, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 if ((s1==0) .or. (s2==0) .or. (s3==0)) return - allocate(arr(s1,s2,s3)) + if (.not. allocated(arr)) allocate(arr(s1,s2,s3)) read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) end subroutine read3d_int end module MOD_READ_BINARY_ARRAYS diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index 242ee483d..dd3841675 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -211,7 +211,7 @@ subroutine READ_T_TRACER(tracer, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) tracer%num_tracers ! write(*,*) 'number of tracers to read: ', tracer%num_tracers - allocate(tracer%data(tracer%num_tracers)) + if (.not. allocated(tracer%data)) allocate(tracer%data(tracer%num_tracers)) do i=1, tracer%num_tracers read(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) ! write(*,*) 'tracer info:', tracer%data(i)%ID, TRIM(tracer%data(i)%tra_adv_hor), TRIM(tracer%data(i)%tra_adv_ver), TRIM(tracer%data(i)%tra_adv_lim) diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 6bd4da6e4..f3fa32d34 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -351,9 +351,9 @@ SUBROUTINE getcoeffld(tracers, partit, mesh) iost = nf_inq_varid(ncid, varname, id_data) iost = nf_inq_var_fill(ncid, id_data, NO_FILL, FILL_VALUE) ! FillValue defined? if (NO_FILL==1) then - print *, 'No _FillValue is set in ', filename, ', trying dummy =', dummy, FILL_VALUE + print *, 'No _FillValue is set in ', trim(filename), ', trying dummy =', dummy, FILL_VALUE else - print *, 'The FillValue in ', filename, ' is set to ', FILL_VALUE ! should set dummy accordingly + print *, 'The FillValue in ', trim(filename), ' is set to ', FILL_VALUE ! should set dummy accordingly end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) From bd2e8f604d3d4d96adbcfed537448aa7ae0af828 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 3 Feb 2022 15:54:13 +0100 Subject: [PATCH 804/909] just small change in output that is written to log file --- src/fesom_module.F90 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 7cae64cc6..853d94995 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -206,6 +206,7 @@ subroutine fesom_init(fesom_total_nsteps) call clock_newyear ! check if it is a new year if (f%mype==0) f%t6=MPI_Wtime() !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call restart'//achar(27)//'[0m' call restart(0, r_restart, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) if (f%mype==0) f%t7=MPI_Wtime() ! store grid information into netcdf file @@ -214,6 +215,7 @@ subroutine fesom_init(fesom_total_nsteps) !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ if (r_restart) then + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call restart_thickness_ale'//achar(27)//'[0m' call restart_thickness_ale(f%partit, f%mesh) end if if (f%mype==0) then From 7c347ae0f4a94051e58ff272a97a621ab53576a4 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 3 Feb 2022 15:56:26 +0100 Subject: [PATCH 805/909] just small change in output that is written to log file --- src/gen_modules_clock.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gen_modules_clock.F90 b/src/gen_modules_clock.F90 index 8443e2633..40f9abc31 100755 --- a/src/gen_modules_clock.F90 +++ b/src/gen_modules_clock.F90 @@ -130,13 +130,13 @@ subroutine clock_init(partit) write(*,*) print *, achar(27)//'[31m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[5;7;31m'//' --> THIS IS A RESTART RUN !!! '//achar(27)//'[0m' - write(*,"(A, F5.2, I4, I5)") ' > clock restarted at time:', timenew, daynew, yearnew + write(*,"(A, F8.2, I4, I5)") ' > clock restarted at time:', timenew, daynew, yearnew write(*,*) else write(*,*) print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[7;32m'//' --> THIS IS A INITIALISATION RUN !!! '//achar(27)//'[0m' - write(*,"(A, F5.2, I4, I5)")' > clock initialized at time:', timenew, daynew, yearnew + write(*,"(A, F8.2, I4, I5)")' > clock initialized at time:', timenew, daynew, yearnew write(*,*) end if end if From 4014dcc893ad675ebfaf5335316a08511ea5235e Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 3 Feb 2022 15:57:21 +0100 Subject: [PATCH 806/909] add namelist parameter to save restart files as derived type binaries --- src/gen_modules_config.F90 | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index 8cb06e1f4..c26d360e6 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -34,12 +34,16 @@ module g_config ! *** restart_log *** integer :: logfile_outfreq=1 ! logfile info. outp. freq., # steps integer :: restart_length=1 - character(3) :: restart_length_unit='m' + character(3) :: restart_length_unit='m' integer :: raw_restart_length=1 - character(3) :: raw_restart_length_unit='m' + character(3) :: raw_restart_length_unit='m' + integer :: bin_restart_length=1 + character(3) :: bin_restart_length_unit='m' - namelist /restart_log/ restart_length, restart_length_unit, raw_restart_length, raw_restart_length_unit, logfile_outfreq - + namelist /restart_log/ restart_length , restart_length_unit, & + raw_restart_length, raw_restart_length_unit, & + bin_restart_length, bin_restart_length_unit, & + logfile_outfreq !_____________________________________________________________________________ ! *** ale_def *** ! Which ALE case to use : 'linfs', 'zlevel', 'zstar' From 09869327440bc51d553e8adf9a43cad066f13e6e Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 3 Feb 2022 15:58:00 +0100 Subject: [PATCH 807/909] fix small bug in writing the blowup file --- src/io_blowup.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index ef96a6465..60832dd37 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -147,7 +147,7 @@ subroutine ini_blowup_io(year, ice, dynamics, tracers, partit, mesh) call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w); call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_e); call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_i); - call def_variable(bid, 'cfl_z' , (/nl-1, nod2D/) , 'vertical CFL criteria', '', dynamics%cfl_z); + call def_variable(bid, 'cfl_z' , (/nl, nod2D/) , 'vertical CFL criteria', '', dynamics%cfl_z); !_____________________________________________________________________________ ! write snapshot ice variables to blowup file From 6395b5979faafd086e908f5a4aed92337dc38621 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 3 Feb 2022 15:58:41 +0100 Subject: [PATCH 808/909] fix small bug in writing the checkup data into the log file --- src/write_step_info.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 97d3bdc89..926e0ecf4 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -165,7 +165,7 @@ subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) call MPI_AllREDUCE(loc , max_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc=omp_min_max_sum2(tracers%data(1)%values, 1, nl-1, 1, myDim_nod2D, 'max', partit, 0.0_WP) call MPI_AllREDUCE(loc , max_temp , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc=omp_min_max_sum2(tracers%data(2)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0_WP) + loc=omp_min_max_sum2(tracers%data(2)%values, 1, nl-1, 1, myDim_nod2D, 'max', partit, 0.0_WP) call MPI_AllREDUCE(loc , max_salt , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) loc=omp_min_max_sum1(Wvel(1,:), 1, myDim_nod2D, 'max', partit) call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) From edb885265904b40a018ee05f314d1cd8cb07242f Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 3 Feb 2022 15:59:59 +0100 Subject: [PATCH 809/909] add writing/reading derived type restart files for t_mesh, t_tracer, t_dynamics, t_ice --- src/io_restart.F90 | 366 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 312 insertions(+), 54 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 7b6c4b6ac..f625cf460 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -5,6 +5,10 @@ MODULE io_RESTART use g_cvmix_tke use MOD_TRACER use MOD_ICE +! use MOD_DYN +! use MOD_MESH +! USE MOD_PARTIT +! USE MOD_PARSUP implicit none public :: restart, finalize_restart private @@ -12,14 +16,15 @@ MODULE io_RESTART integer, save :: globalstep=0 ! todo: remove this from module scope as it will mess things up if we use async read/write from the same process real(kind=WP) :: ctime !current time in seconds from the beginning of the year - type(restart_file_group), save :: oce_files - type(restart_file_group), save :: ice_files + type(restart_file_group) , save :: oce_files + type(restart_file_group) , save :: ice_files character(:), allocatable, save :: oce_path character(:), allocatable, save :: ice_path character(:), allocatable, save :: raw_restart_dirpath character(:), allocatable, save :: raw_restart_infopath - + character(:), allocatable, save :: bin_restart_dirpath + character(:), allocatable, save :: bin_restart_infopath integer, parameter :: RAW_RESTART_METADATA_RANK = 0 @@ -148,33 +153,58 @@ subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) integer :: istep logical :: l_read - logical :: is_portable_restart_write, is_raw_restart_write - type(t_mesh), intent(in) , target :: mesh + logical :: is_portable_restart_write, is_raw_restart_write, is_bin_restart_write + type(t_mesh) , intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics - type(t_ice) , intent(in) , target :: ice - logical dumpfiles_exist - logical, save :: initialized = .false. + type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics + type(t_ice) , intent(inout), target :: ice + logical rawfiles_exist, binfiles_exist + logical, save :: initialized_raw = .false. + logical, save :: initialized_bin = .false. integer cstat, estat character(500) cmsg ! there seems to be no documentation about the max size of this text integer mpierr - if(.not. initialized) then - initialized = .true. - raw_restart_dirpath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(partit%npes) + !_____________________________________________________________________________ + ! initialize directory for core dump restart + if(.not. initialized_raw) then + initialized_raw = .true. + raw_restart_dirpath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(partit%npes) raw_restart_infopath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(partit%npes)//".info" if(raw_restart_length_unit /= "off") then - if(partit%mype == RAW_RESTART_METADATA_RANK) then - ! inquire does not work for directories, the directory might already exist - call execute_command_line("mkdir -p "//raw_restart_dirpath, exitstat=estat, cmdstat=cstat, cmdmsg=cmsg) ! sometimes does not work on aleph - if(cstat /= 0) print *,"creating raw restart directory ERROR ", trim(cmsg) - end if - call MPI_Barrier(partit%MPI_COMM_FESOM, mpierr) ! make sure the dir has been created before we continue... + if(partit%mype == RAW_RESTART_METADATA_RANK) then + ! inquire does not work for directories, the directory might already exist + call execute_command_line("mkdir -p "//raw_restart_dirpath, exitstat=estat, cmdstat=cstat, cmdmsg=cmsg) ! sometimes does not work on aleph + if(cstat /= 0) print *,"creating raw restart directory ERROR ", trim(cmsg) + end if + call MPI_Barrier(partit%MPI_COMM_FESOM, mpierr) ! make sure the dir has been created before we continue... end if end if + !_____________________________________________________________________________ + ! initialize directory for derived type binary restart + if(.not. initialized_bin) then + initialized_bin = .true. + bin_restart_dirpath = trim(ResultPath)//"/fesom_bin_restart/np"//int_to_txt(partit%npes) + bin_restart_infopath = trim(ResultPath)//"/fesom_bin_restart/np"//int_to_txt(partit%npes)//".info" + if(bin_restart_length_unit /= "off") then + if(partit%mype == RAW_RESTART_METADATA_RANK) then + ! inquire does not work for directories, the directory might already exist + call execute_command_line("mkdir -p "//bin_restart_dirpath, exitstat=estat, cmdstat=cstat, cmdmsg=cmsg) ! sometimes does not work on aleph + if(cstat /= 0) print *,"creating derived type binary restart directory ERROR ", trim(cmsg) + end if + call MPI_Barrier(partit%MPI_COMM_FESOM, mpierr) ! make sure the dir has been created before we continue... + end if + end if + + !_____________________________________________________________________________ + ! compute current time based on what is written in fesom.clock file ctime=timeold+(dayold-1.)*86400 + + !_____________________________________________________________________________ + ! initialise files for netcdf restart if l_read==TRUE --> the restart file + ! will be read if (.not. l_read) then call ini_ocean_io(yearnew, dynamics, tracers, partit, mesh) if (use_ice) call ini_ice_io (yearnew, ice, partit, mesh) @@ -183,56 +213,101 @@ subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) if (use_ice) call ini_ice_io (yearold, ice, partit, mesh) end if + !___READING OF RESTART________________________________________________________ + ! should restart files be readed --> see r_restart in gen_modules_clock.F90 if (l_read) then - ! determine if we can load raw restart dump files + ! determine if we can load raw restart dump files --> check if *.info file for + ! raw restarts exist --> if info file exist also the rest must exist --> so + ! core dump restart is readable + if(partit%mype == RAW_RESTART_METADATA_RANK) then + inquire(file=raw_restart_infopath, exist=rawfiles_exist) + end if + call MPI_Bcast(rawfiles_exist, 1, MPI_LOGICAL, RAW_RESTART_METADATA_RANK, partit%MPI_COMM_FESOM, mpierr) + + ! check if folder for derived type binary restarts exist if(partit%mype == RAW_RESTART_METADATA_RANK) then - inquire(file=raw_restart_infopath, exist=dumpfiles_exist) + inquire(file=bin_restart_infopath, exist=binfiles_exist) end if - call MPI_Bcast(dumpfiles_exist, 1, MPI_LOGICAL, RAW_RESTART_METADATA_RANK, partit%MPI_COMM_FESOM, mpierr) - if(dumpfiles_exist) then - call read_all_raw_restarts(partit%MPI_COMM_FESOM, partit%mype) + call MPI_Bcast(binfiles_exist, 1, MPI_LOGICAL, RAW_RESTART_METADATA_RANK, partit%MPI_COMM_FESOM, mpierr) + + !___________________________________________________________________________ + ! read core dump file restart + if(rawfiles_exist) then + call read_all_raw_restarts(partit%MPI_COMM_FESOM, partit%mype) + + !___________________________________________________________________________ + ! read derived type binary file restart + elseif(binfiles_exist) then + call read_all_bin_restarts(ice, dynamics, tracers, partit, mesh) + + !___________________________________________________________________________ + ! read netcdf file restart else - call read_restart(oce_path, oce_files, partit%MPI_COMM_FESOM, partit%mype) - if (use_ice) call read_restart(ice_path, ice_files, partit%MPI_COMM_FESOM, partit%mype) - ! immediately create a raw restart - if(raw_restart_length_unit /= "off") then - call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) - end if + call read_restart(oce_path, oce_files, partit%MPI_COMM_FESOM, partit%mype) + if (use_ice) call read_restart(ice_path, ice_files, partit%MPI_COMM_FESOM, partit%mype) + ! immediately create a raw core dump restart +! if(raw_restart_length_unit /= "off") then +! call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) +! end if +! ! immediately create a derived type binary restart +! if(bin_restart_length_unit /= "off") then +! call write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) +! end if end if end if if (istep==0) return - - !check whether restart will be written + + !___WRITING OF RESTART________________________________________________________ + ! check whether restart will be written + ! --> should write netcdf restart: True/False is_portable_restart_write = is_due(trim(restart_length_unit), restart_length, istep) + + ! --> should write core dump restart: True/False if(is_portable_restart_write .and. (raw_restart_length_unit /= "off")) then is_raw_restart_write = .true. ! always write a raw restart together with the portable restart (unless raw restarts are off) else is_raw_restart_write = is_due(trim(raw_restart_length_unit), raw_restart_length, istep) end if + + ! --> should write derived type binary restart: True/False + if(is_portable_restart_write .and. (bin_restart_length_unit /= "off")) then + is_bin_restart_write = .true. ! always write a binary restart together with the portable restart (unless raw restarts are off) + else + is_bin_restart_write = is_due(trim(bin_restart_length_unit), bin_restart_length, istep) + end if + !_____________________________________________________________________________ + ! finally write restart for netcdf, core dump and derived type binary + ! write netcdf restart if(is_portable_restart_write) then - ! write restart if(partit%mype==0) write(*,*)'Do output (netCDF, restart) ...' call write_restart(oce_path, oce_files, istep) if(use_ice) call write_restart(ice_path, ice_files, istep) end if + ! write core dump if(is_raw_restart_write) then call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) end if + ! write derived type binary + if(is_bin_restart_write) then + call write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) + end if + ! actualize clock file to latest restart point if (partit%mype==0) then - if(is_portable_restart_write .or. is_raw_restart_write) then - write(*,*) ' --> actualize clock file to latest restart point' - call clock_finish - end if + if(is_portable_restart_write .or. is_raw_restart_write .or. is_bin_restart_write) then + write(*,*) ' --> actualize clock file to latest restart point' + call clock_finish + end if end if end subroutine restart - - +! +! +!_______________________________________________________________________________ subroutine write_restart(path, filegroup, istep) character(len=*), intent(in) :: path type(restart_file_group), intent(inout) :: filegroup @@ -282,8 +357,9 @@ subroutine write_restart(path, filegroup, istep) end do end subroutine - - +! +! +!_______________________________________________________________________________ subroutine write_all_raw_restarts(istep, mpicomm, mype) integer, intent(in) :: istep integer, intent(in) :: mpicomm @@ -310,8 +386,9 @@ subroutine write_all_raw_restarts(istep, mpicomm, mype) close(fileunit) end if end subroutine - - +! +! +!_______________________________________________________________________________ subroutine write_raw_restart_group(filegroup, fileunit) type(restart_file_group), intent(inout) :: filegroup integer, intent(in) :: fileunit @@ -322,8 +399,184 @@ subroutine write_raw_restart_group(filegroup, fileunit) call filegroup%files(i)%write_variables_raw(fileunit) end do end subroutine +! +! +!_______________________________________________________________________________ +subroutine write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) + integer, intent(in) :: istep + type(t_ice) , target, intent(in) :: ice + type(t_dyn) , target, intent(in) :: dynamics + type(t_tracer), target, intent(in) :: tracers + type(t_partit), target, intent(in) :: partit + type(t_mesh) , target, intent(in) :: mesh + + ! EO parameters + integer cstep + integer fileunit, fileunit_i + + !___________________________________________________________________________ + ! write info file + if(partit%mype == RAW_RESTART_METADATA_RANK) then + print *, achar(27)//'[1;33m'//' --> writing derived type binary restarts to '//bin_restart_dirpath//achar(27)//'[0m' + ! store metadata about the raw restart + cstep = globalstep+istep + fileunit_i = 299 + open(newunit = fileunit_i, file = bin_restart_infopath) + write(fileunit_i, '(g0)') cstep + write(fileunit_i, '(g0)') ctime + write(fileunit_i, '(2(g0))') "! year: ",yearnew + end if + + !___________________________________________________________________________ + ! mesh derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = bin_restart_dirpath//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) mesh + close(fileunit) + if(partit%mype == RAW_RESTART_METADATA_RANK) then + write(fileunit_i, '(1(g0))') "! t_mesh" + print *, achar(27)//'[33m'//' > write derived type t_mesh'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! partit derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = bin_restart_dirpath//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) partit + close(fileunit) + if(partit%mype == RAW_RESTART_METADATA_RANK) then + write(fileunit_i, '(1(g0))') "! t_partit" + print *, achar(27)//'[33m'//' > write derived type t_partit'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! tracer derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = bin_restart_dirpath//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) tracers + close(fileunit) + if(partit%mype == RAW_RESTART_METADATA_RANK) then + write(fileunit_i, '(1(g0))') "! t_tracer" + print *, achar(27)//'[33m'//' > write derived type t_tracer'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! dynamics derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = bin_restart_dirpath//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) dynamics + close(fileunit) + if(partit%mype == RAW_RESTART_METADATA_RANK) then + write(fileunit_i, '(1(g0))') "! t_dynamics" + print *, achar(27)//'[33m'//' > write derived type t_dynamics'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! ice derived type + if (use_ice) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = bin_restart_dirpath//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) ice + close(fileunit) + if(partit%mype == RAW_RESTART_METADATA_RANK) then + write(fileunit_i, '(1(g0))') "! t_ice" + print *, achar(27)//'[33m'//' > write derived type t_ice'//achar(27)//'[0m' + end if + end if + + !___________________________________________________________________________ + if(partit%mype == RAW_RESTART_METADATA_RANK) close(fileunit_i) - +end subroutine +! +! +!_______________________________________________________________________________ +subroutine read_all_bin_restarts(ice, dynamics, tracers, partit, mesh) + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + integer fileunit + + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read restarts from derived type binary'//achar(27)//'[0m' + + !___________________________________________________________________________ + ! mesh derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = bin_restart_dirpath//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) mesh + close(fileunit) + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_mesh'//achar(27)//'[0m' + + !___________________________________________________________________________ + ! partit derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = bin_restart_dirpath//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) partit + close(fileunit) + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_partit'//achar(27)//'[0m' + + !___________________________________________________________________________ + ! tracer derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = bin_restart_dirpath//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) tracers + close(fileunit) + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_tracer'//achar(27)//'[0m' + + !___________________________________________________________________________ + ! dynamics derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = bin_restart_dirpath//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) dynamics + close(fileunit) + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_dynamics'//achar(27)//'[0m' + + !___________________________________________________________________________ + ! ice derived type + if (use_ice) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = bin_restart_dirpath//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) ice + close(fileunit) + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_ice'//achar(27)//'[0m' + end if +end subroutine +! +! +!_______________________________________________________________________________ subroutine read_all_raw_restarts(mpicomm, mype) integer, intent(in) :: mpicomm integer, intent(in) :: mype @@ -368,8 +621,9 @@ subroutine read_all_raw_restarts(mpicomm, mype) stop 1 end if end subroutine - - +! +! +!_______________________________________________________________________________ subroutine read_raw_restart_group(filegroup, fileunit) type(restart_file_group), intent(inout) :: filegroup integer, intent(in) :: fileunit @@ -380,8 +634,9 @@ subroutine read_raw_restart_group(filegroup, fileunit) call filegroup%files(i)%read_variables_raw(fileunit) end do end subroutine - - +! +! +!_______________________________________________________________________________ ! join remaining threads and close all open files subroutine finalize_restart() integer i @@ -405,8 +660,9 @@ subroutine finalize_restart() end do end if end subroutine - - +! +! +!_______________________________________________________________________________ subroutine read_restart(path, filegroup, mpicomm, mype) character(len=*), intent(in) :: path type(restart_file_group), intent(inout) :: filegroup @@ -511,8 +767,9 @@ subroutine read_restart(path, filegroup, mpicomm, mype) end if end if end subroutine - - +! +! +!_______________________________________________________________________________ function is_due(unit, frequency, istep) result(d) character(len=*), intent(in) :: unit integer, intent(in) :: frequency @@ -540,8 +797,9 @@ function is_due(unit, frequency, istep) result(d) stop end if end function - - +! +! +!_______________________________________________________________________________ function mpirank_to_txt(mpicomm) result(txt) use fortran_utils integer, intent(in) :: mpicomm From 049d7bd6ab0b2d1e336d8337d5992f31a829a9fd Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 3 Feb 2022 16:05:21 +0100 Subject: [PATCH 810/909] fix shaped pointer assignment for nvfortran --- src/associate_part_ass.h | 52 ++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/src/associate_part_ass.h b/src/associate_part_ass.h index da8b96b06..cd23348c7 100644 --- a/src/associate_part_ass.h +++ b/src/associate_part_ass.h @@ -20,7 +20,7 @@ part => partit%part lb=lbound(partit%s_mpitype_elem3D, 2) ub=ubound(partit%s_mpitype_elem3D, 2) -#ifdef __PGI +#ifdef __PGIxx myList_nod2D => partit%myList_nod2D(1:myDim_nod2D +eDim_nod2D) myList_elem2D => partit%myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) myList_edge2D => partit%myList_edge2D(1:myDim_edge2D+eDim_edge2D) @@ -62,44 +62,44 @@ r_mpitype_nod2D_i => partit%r_mpitype_nod2D_i(1:com_nod2D%rPEnum) s_mpitype_nod3D => partit%s_mpitype_nod3D(1:com_nod2D%sPEnum, lb:ub, 1:3) r_mpitype_nod3D => partit%r_mpitype_nod3D(1:com_nod2D%rPEnum, lb:ub, 1:3) #else -myList_nod2D (1:myDim_nod2D +eDim_nod2D) => partit%myList_nod2D -myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => partit%myList_elem2D -myList_edge2D(1:myDim_edge2D+eDim_edge2D) => partit%myList_edge2D +myList_nod2D (1:myDim_nod2D +eDim_nod2D) => partit%myList_nod2D(:) +myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => partit%myList_elem2D(:) +myList_edge2D(1:myDim_edge2D+eDim_edge2D) => partit%myList_edge2D(:) if (allocated(partit%remPtr_nod2D)) then - remPtr_nod2D (1:npes) => partit%remPtr_nod2D - remList_nod2D (1:remPtr_nod2D(npes)) => partit%remList_nod2D + remPtr_nod2D (1:npes) => partit%remPtr_nod2D(:) + remList_nod2D (1:remPtr_nod2D(npes)) => partit%remList_nod2D(:) end if if (allocated(partit%remPtr_elem2D)) then -remPtr_elem2D (1:npes) => partit%remPtr_elem2D -remList_elem2D(1:remPtr_elem2D(npes)) => partit%remList_elem2D +remPtr_elem2D (1:npes) => partit%remPtr_elem2D(:) +remList_elem2D(1:remPtr_elem2D(npes)) => partit%remList_elem2D(:) end if -s_mpitype_elem2D(1:com_elem2D%sPEnum, 1:4) => partit%s_mpitype_elem2D -r_mpitype_elem2D(1:com_elem2D%rPEnum, 1:4) => partit%r_mpitype_elem2D +s_mpitype_elem2D(1:com_elem2D%sPEnum, 1:4) => partit%s_mpitype_elem2D(:,:) +r_mpitype_elem2D(1:com_elem2D%rPEnum, 1:4) => partit%r_mpitype_elem2D(:,:) -s_mpitype_elem2D_full_i(1:com_elem2D_full%sPEnum) => partit%s_mpitype_elem2D_full_i -r_mpitype_elem2D_full_i(1:com_elem2D_full%rPEnum) => partit%r_mpitype_elem2D_full_i +s_mpitype_elem2D_full_i(1:com_elem2D_full%sPEnum) => partit%s_mpitype_elem2D_full_i(:) +r_mpitype_elem2D_full_i(1:com_elem2D_full%rPEnum) => partit%r_mpitype_elem2D_full_i(:) -s_mpitype_elem2D_full(1:com_elem2D_full%sPEnum, 1:4) => partit%s_mpitype_elem2D_full -r_mpitype_elem2D_full(1:com_elem2D_full%rPEnum, 1:4) => partit%r_mpitype_elem2D_full +s_mpitype_elem2D_full(1:com_elem2D_full%sPEnum, 1:4) => partit%s_mpitype_elem2D_full(:,:) +r_mpitype_elem2D_full(1:com_elem2D_full%rPEnum, 1:4) => partit%r_mpitype_elem2D_full(:,:) -s_mpitype_elem3D(1:com_elem2D%sPEnum, lb:ub, 1:4) => partit%s_mpitype_elem3D -r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D +s_mpitype_elem3D(1:com_elem2D%sPEnum, lb:ub, 1:4) => partit%s_mpitype_elem3D(:,:,:) +r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D(:,:,:) -s_mpitype_elem3D_full(1:com_elem2D_full%sPEnum, lb:ub, 1:4) => partit%s_mpitype_elem3D_full -r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D_full +s_mpitype_elem3D_full(1:com_elem2D_full%sPEnum, lb:ub, 1:4) => partit%s_mpitype_elem3D_full(:,:,:) +r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D_full(:,:,:) -r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D -r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D_full +r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D(:,:,:) +r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D_full(:,:,:) -s_mpitype_nod2D(1:com_nod2D%sPEnum) => partit%s_mpitype_nod2D -r_mpitype_nod2D(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D +s_mpitype_nod2D(1:com_nod2D%sPEnum) => partit%s_mpitype_nod2D(:) +r_mpitype_nod2D(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D(:) -s_mpitype_nod2D_i(1:com_nod2D%sPEnum) => partit%s_mpitype_nod2D_i -r_mpitype_nod2D_i(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D_i +s_mpitype_nod2D_i(1:com_nod2D%sPEnum) => partit%s_mpitype_nod2D_i(:) +r_mpitype_nod2D_i(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D_i(:) -s_mpitype_nod3D(1:com_nod2D%sPEnum, lb:ub, 1:3) => partit%s_mpitype_nod3D -r_mpitype_nod3D(1:com_nod2D%rPEnum, lb:ub, 1:3) => partit%r_mpitype_nod3D +s_mpitype_nod3D(1:com_nod2D%sPEnum, lb:ub, 1:3) => partit%s_mpitype_nod3D(:,:,:) +r_mpitype_nod3D(1:com_nod2D%rPEnum, lb:ub, 1:3) => partit%r_mpitype_nod3D(:,:,:) #endif From 0cc322abdb7abe012720c70aeac623399510b097 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 3 Feb 2022 16:53:25 +0100 Subject: [PATCH 811/909] change condition when raw and bin restart is used --- src/CMakeLists.txt | 4 ++-- src/io_restart.F90 | 21 ++++++++++++--------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 257100f67..d7c15fcdd 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -108,8 +108,8 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) - target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) -# target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) +# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) # target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) target_compile_options(${PROJECT_NAME} PRIVATE -O2 -g -ffloat-store -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index d898f2043..d7f081a1a 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -235,7 +235,7 @@ subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! read derived type binary file restart - elseif(binfiles_exist) then + elseif(binfiles_exist .and. bin_restart_length_unit /= "off") then call read_all_bin_restarts(ice, dynamics, tracers, partit, mesh) !___________________________________________________________________________ @@ -244,13 +244,13 @@ subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) call read_restart(oce_path, oce_files, partit%MPI_COMM_FESOM, partit%mype) if (use_ice) call read_restart(ice_path, ice_files, partit%MPI_COMM_FESOM, partit%mype) ! immediately create a raw core dump restart - if(raw_restart_length_unit /= "off") then - call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) - end if - ! immediately create a derived type binary restart - if(bin_restart_length_unit /= "off") then - call write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) - end if +! if(raw_restart_length_unit /= "off") then +! call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) +! end if +! ! immediately create a derived type binary restart +! if(bin_restart_length_unit /= "off") then +! call write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) +! end if end if end if @@ -279,7 +279,8 @@ subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) ! finally write restart for netcdf, core dump and derived type binary ! write netcdf restart if(is_portable_restart_write) then - if(partit%mype==0) write(*,*)'Do output (netCDF, restart) ...' +! if(partit%mype==0) write(*,*)'Do output (netCDF, restart) ...' + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> write traditional restarts to netcdf file'//achar(27)//'[0m' call write_restart(oce_path, oce_files, istep) if(use_ice) call write_restart(ice_path, ice_files, istep) end if @@ -683,6 +684,8 @@ subroutine read_restart(path, filegroup, mpicomm, mype) allocate(skip_file(filegroup%nfiles)) skip_file = .false. + if (mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read traditional restarts from netcdf file'//achar(27)//'[0m' + do i=1, filegroup%nfiles current_iorank_snd = 0 current_iorank_rcv = 0 From 19be4f24ca7c5f06d7e9d942180047c1930322ed Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 3 Feb 2022 19:55:08 +0100 Subject: [PATCH 812/909] runs with PGI finally --- src/associate_part_ass.h | 43 -------------------------------- src/gen_modules_partitioning.F90 | 16 ++++++------ 2 files changed, 8 insertions(+), 51 deletions(-) diff --git a/src/associate_part_ass.h b/src/associate_part_ass.h index cd23348c7..af53de8d2 100644 --- a/src/associate_part_ass.h +++ b/src/associate_part_ass.h @@ -20,48 +20,6 @@ part => partit%part lb=lbound(partit%s_mpitype_elem3D, 2) ub=ubound(partit%s_mpitype_elem3D, 2) -#ifdef __PGIxx -myList_nod2D => partit%myList_nod2D(1:myDim_nod2D +eDim_nod2D) -myList_elem2D => partit%myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) -myList_edge2D => partit%myList_edge2D(1:myDim_edge2D+eDim_edge2D) - -if (allocated(partit%remPtr_nod2D)) then - remPtr_nod2D => partit%remPtr_nod2D(1:npes) - remList_nod2D => partit%remList_nod2D(1:remPtr_nod2D(npes)) -end if - -if (allocated(partit%remPtr_elem2D)) then - remPtr_elem2D => partit%remPtr_elem2D(1:npes) - remList_elem2D => partit%remList_elem2D(1:remPtr_elem2D(npes)) -end if - -s_mpitype_elem2D => partit%s_mpitype_elem2D(1:com_elem2D%sPEnum, 1:4) -r_mpitype_elem2D => partit%r_mpitype_elem2D(1:com_elem2D%rPEnum, 1:4) - -s_mpitype_elem2D_full_i => partit%s_mpitype_elem2D_full_i(1:com_elem2D_full%sPEnum) -r_mpitype_elem2D_full_i => partit%r_mpitype_elem2D_full_i(1:com_elem2D_full%rPEnum) - -s_mpitype_elem2D_full => partit%s_mpitype_elem2D_full(1:com_elem2D_full%sPEnum, 1:4) -r_mpitype_elem2D_full => partit%r_mpitype_elem2D_full(1:com_elem2D_full%rPEnum, 1:4) - -s_mpitype_elem3D => partit%s_mpitype_elem3D(1:com_elem2D%sPEnum, lb:ub, 1:4) -r_mpitype_elem3D => partit%r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) - -s_mpitype_elem3D_full => partit%s_mpitype_elem3D_full(1:com_elem2D_full%sPEnum, lb:ub, 1:4) -r_mpitype_elem3D_full => partit%r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) - -r_mpitype_elem3D => partit%r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) -r_mpitype_elem3D_full => partit%r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) - -s_mpitype_nod2D => partit%s_mpitype_nod2D(1:com_nod2D%sPEnum) -r_mpitype_nod2D => partit%r_mpitype_nod2D(1:com_nod2D%rPEnum) - -s_mpitype_nod2D_i => partit%s_mpitype_nod2D_i(1:com_nod2D%sPEnum) -r_mpitype_nod2D_i => partit%r_mpitype_nod2D_i(1:com_nod2D%rPEnum) - -s_mpitype_nod3D => partit%s_mpitype_nod3D(1:com_nod2D%sPEnum, lb:ub, 1:3) -r_mpitype_nod3D => partit%r_mpitype_nod3D(1:com_nod2D%rPEnum, lb:ub, 1:3) -#else myList_nod2D (1:myDim_nod2D +eDim_nod2D) => partit%myList_nod2D(:) myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => partit%myList_elem2D(:) myList_edge2D(1:myDim_edge2D+eDim_edge2D) => partit%myList_edge2D(:) @@ -102,4 +60,3 @@ r_mpitype_nod2D_i(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D_i(:) s_mpitype_nod3D(1:com_nod2D%sPEnum, lb:ub, 1:3) => partit%s_mpitype_nod3D(:,:,:) r_mpitype_nod3D(1:com_nod2D%rPEnum, lb:ub, 1:3) => partit%r_mpitype_nod3D(:,:,:) -#endif diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index 69f995c2f..a9d6f6600 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -221,10 +221,10 @@ subroutine init_mpi_types(partit, mesh) ! r_mpitype_elem3D shape is e.g. 7,2,4 and its bounds 1:7,1:2,1:4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS ! the second dimension of r_mpitype_elem3D is probably always 2 (from nl-1 to nl) - if(.not. (all(lbound(r_mpitype_elem3D) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D) .ge. [n,nl1,n_val])) ) then - print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D), __FILE__,__LINE__ - stop 1 - end if +! if(.not. (all(lbound(r_mpitype_elem3D) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D) .ge. [n,nl1,n_val])) ) then +! print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D), __FILE__,__LINE__ +! stop 1 +! end if call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & r_mpitype_elem3D(n,nl1,n_val), MPIerr) @@ -307,10 +307,10 @@ subroutine init_mpi_types(partit, mesh) ! r_mpitype_elem3D shape is e.g. 7,2,4 and its bounds 1:7,1:2,1:4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS ! the second dimension of r_mpitype_elem3D is probably always 2 (from nl-1 to nl) - if(.not. (all(lbound(r_mpitype_elem3D) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D) .ge. [n,nl1,n_val])) ) then - print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D), __FILE__,__LINE__ - stop 1 - end if +! if(.not. (all(lbound(r_mpitype_elem3D) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D) .ge. [n,nl1,n_val])) ) then +! print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D), __FILE__,__LINE__ +! stop 1 +! end if call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & r_mpitype_elem3D_full(n,nl1,n_val), MPIerr) From f9db7add1f183da1bdd88f61ece3c2e75f3c8832 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Feb 2022 10:18:49 +0100 Subject: [PATCH 813/909] re-enable bounds check and fix variable name in second check --- src/gen_modules_partitioning.F90 | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index a9d6f6600..ae1cf3eab 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -221,10 +221,10 @@ subroutine init_mpi_types(partit, mesh) ! r_mpitype_elem3D shape is e.g. 7,2,4 and its bounds 1:7,1:2,1:4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS ! the second dimension of r_mpitype_elem3D is probably always 2 (from nl-1 to nl) -! if(.not. (all(lbound(r_mpitype_elem3D) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D) .ge. [n,nl1,n_val])) ) then -! print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D), __FILE__,__LINE__ -! stop 1 -! end if + if(.not. (all(lbound(r_mpitype_elem3D) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D) .ge. [n,nl1,n_val])) ) then + print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D), __FILE__,__LINE__ + stop 1 + end if call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & r_mpitype_elem3D(n,nl1,n_val), MPIerr) @@ -307,10 +307,10 @@ subroutine init_mpi_types(partit, mesh) ! r_mpitype_elem3D shape is e.g. 7,2,4 and its bounds 1:7,1:2,1:4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS ! the second dimension of r_mpitype_elem3D is probably always 2 (from nl-1 to nl) -! if(.not. (all(lbound(r_mpitype_elem3D) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D) .ge. [n,nl1,n_val])) ) then -! print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D), __FILE__,__LINE__ -! stop 1 -! end if + if(.not. (all(lbound(r_mpitype_elem3D_full) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D_full) .ge. [n,nl1,n_val])) ) then + print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D_full), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D_full), __FILE__,__LINE__ + stop 1 + end if call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & r_mpitype_elem3D_full(n,nl1,n_val), MPIerr) From 3b5172bc6da59d1c54ba774bcacacce27b166e19 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Feb 2022 10:54:43 +0100 Subject: [PATCH 814/909] fix indentation --- src/CMakeLists.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 257100f67..aafecf763 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -117,11 +117,11 @@ elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) -if(${ENABLE_OPENMP}) - target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -N 1023 -homp) -else() - target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -N 1023 -hnoomp) -endif() + if(${ENABLE_OPENMP}) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -N 1023 -homp) + else() + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -N 1023 -hnoomp) + endif() endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) From b62c3a441b2b8ab4095488ea5a77ab0c7dd8c8e7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Feb 2022 11:00:36 +0100 Subject: [PATCH 815/909] add compiler settings for nvfortran, see https://github.com/ESiWACE-S1/fesom2/blob/b11c9d95959bb3713a7e8a53f9cd3cbf3c0391d8/src/CMakeLists.txt#L87 --- src/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index aafecf763..9dab28cf9 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -122,6 +122,8 @@ elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) else() target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -N 1023 -hnoomp) endif() +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL NVHPC ) + target_compile_options(${PROJECT_NAME} PRIVATE -fast -fastsse -Mipa=fast,inline -O3 -Mallocatable=95 -Mr8 -pgf90libs) endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) From 6cda461db6a46294ce3b61e2aec5dcdbd3c1742e Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Feb 2022 11:15:57 +0100 Subject: [PATCH 816/909] add OpenACC compiler settings for nvfortran, see https://github.com/ESiWACE-S1/fesom2/blob/b11c9d95959bb3713a7e8a53f9cd3cbf3c0391d8/src/CMakeLists.txt#L84 --- src/CMakeLists.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9dab28cf9..8e6cf07e7 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -20,6 +20,8 @@ if(ALEPH_CRAYMPICH_WORKAROUNDS) endif() option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) +option(ENABLE_OPENACC "compile with OpenACC support" OFF) +set(NV_GPU_ARCH "cc80" CACHE STRING "GPU arch for nvfortran compiler (cc35,cc50,cc60,cc70,cc80,...)") option(ENABLE_OPENMP "build FESOM with OpenMP" OFF) if(${ENABLE_OPENMP}) @@ -124,6 +126,11 @@ elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL NVHPC ) target_compile_options(${PROJECT_NAME} PRIVATE -fast -fastsse -Mipa=fast,inline -O3 -Mallocatable=95 -Mr8 -pgf90libs) + if(${ENABLE_OPENACC}) + # additional compiler settings + target_compile_options(${PROJECT_NAME} PRIVATE -acc -ta=tesla:${NV_GPU_ARCH} -Minfo=accel) + set(CMAKE_EXE_LINKER_FLAGS "-acc -ta=tesla:${NV_GPU_ARCH}") + endif() endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) From 72643dda11b2d9ec103d179b62b6a1e540e81850 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Feb 2022 14:16:11 +0100 Subject: [PATCH 817/909] nvfortran does not compile with both OpenMP (-mp) and -Mipa=inline enabled --- src/CMakeLists.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 8e6cf07e7..1e292f61c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -125,12 +125,17 @@ elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -N 1023 -hnoomp) endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL NVHPC ) - target_compile_options(${PROJECT_NAME} PRIVATE -fast -fastsse -Mipa=fast,inline -O3 -Mallocatable=95 -Mr8 -pgf90libs) + target_compile_options(${PROJECT_NAME} PRIVATE -fast -fastsse -O3 -Mallocatable=95 -Mr8 -pgf90libs) if(${ENABLE_OPENACC}) # additional compiler settings target_compile_options(${PROJECT_NAME} PRIVATE -acc -ta=tesla:${NV_GPU_ARCH} -Minfo=accel) set(CMAKE_EXE_LINKER_FLAGS "-acc -ta=tesla:${NV_GPU_ARCH}") endif() + if(${ENABLE_OPENMP}) + target_compile_options(${PROJECT_NAME} PRIVATE -Mipa=fast) + else() + target_compile_options(${PROJECT_NAME} PRIVATE -Mipa=fast,inline) + endif() endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) From 6a76b37cd2a8ed92a026b4cb4913e1b587a57274 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Feb 2022 14:19:14 +0100 Subject: [PATCH 818/909] enable OpenMP for all compilers if cmake variable ENABLE_OPENMP=ON --- src/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1e292f61c..b8bf8c1d2 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -145,6 +145,7 @@ target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} target_link_libraries(${PROJECT_NAME} async_threads_cpp) set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) if(${ENABLE_OPENMP} AND NOT ${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray) + target_compile_options(${PROJECT_NAME} PRIVATE ${OpenMP_Fortran_FLAGS}) # currently we only have OpenMP in the Fortran part target_link_libraries(${PROJECT_NAME} OpenMP::OpenMP_Fortran) endif() From 6c46f3392a162b87b392e9e09aeec46511422210 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Fri, 4 Feb 2022 14:23:33 +0100 Subject: [PATCH 819/909] when calling FESOM with the --info option, print whether FESOM has been compiled with OpenMP support --- src/command_line_options.F90 | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/command_line_options.F90 b/src/command_line_options.F90 index 369150964..7b66ddae1 100644 --- a/src/command_line_options.F90 +++ b/src/command_line_options.F90 @@ -29,6 +29,12 @@ subroutine parse() case('--info') print '(g0)', '# Definitions' call info%print_definitions() + print '(g0)', '# compiled with OpenMP?' +#ifdef _OPENMP + print '(g0)', '_OPENMP is ON' +#else + print '(g0)', '_OPENMP is OFF' +#endif case default print *, 'unknown option: ', arg error stop From d133c60a015b68c5ac3d4b13c7d565ff2335dade Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Feb 2022 16:24:54 +0100 Subject: [PATCH 820/909] finalize restart writing before exiting FESOM --- src/fesom_module.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 7cae64cc6..b320908f6 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -395,6 +395,7 @@ subroutine fesom_finalize() real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) call finalize_output() + call finalize_restart() !___FINISH MODEL RUN________________________________________________________ From fc74cd53177152780fb4f396e52f538d5db7e507 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Mon, 7 Feb 2022 16:24:54 +0100 Subject: [PATCH 821/909] finalize restart writing before exiting FESOM --- src/fesom_module.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 7cae64cc6..b320908f6 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -395,6 +395,7 @@ subroutine fesom_finalize() real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) call finalize_output() + call finalize_restart() !___FINISH MODEL RUN________________________________________________________ From 8b2e474ce6ea2e826c85185bc983599c3abb694c Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 8 Feb 2022 13:50:44 +0100 Subject: [PATCH 822/909] switch off debug mode --- src/CMakeLists.txt | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d7c15fcdd..03e05b8f6 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -108,14 +108,14 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) -# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) - target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) +# target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) -# target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) - target_compile_options(${PROJECT_NAME} PRIVATE -O2 -g -ffloat-store -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) - if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) - target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" - endif() +# target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + target_compile_options(${PROJECT_NAME} PRIVATE -O2 -g -ffloat-store -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) + target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) if(${ENABLE_OPENMP}) target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -N 1023 -homp) From e3f0524f7118409a127f8bc41c0dc4e871f8d90f Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 8 Feb 2022 13:52:01 +0100 Subject: [PATCH 823/909] clean up code --- src/oce_ale.F90 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 4b6b95b31..c1e7e34a0 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -2578,12 +2578,12 @@ end subroutine psolve droptol => dynamics%solverinfo%droptol soltol => dynamics%solverinfo%soltol -if (.not. dynamics%solverinfo%use_parms) then -if (lfirst) call ssh_solve_preconditioner(dynamics%solverinfo, partit, mesh) -call ssh_solve_cg(dynamics%d_eta, dynamics%ssh_rhs, dynamics%solverinfo, partit, mesh) -lfirst=.false. -return -end if + if (.not. dynamics%solverinfo%use_parms) then + if (lfirst) call ssh_solve_preconditioner(dynamics%solverinfo, partit, mesh) + call ssh_solve_cg(dynamics%d_eta, dynamics%ssh_rhs, dynamics%solverinfo, partit, mesh) + lfirst=.false. + return + end if !___________________________________________________________________________ if (trim(which_ale)=='linfs') then From 3b02a6c7d700a695c5c42efd08fe804290f56cad Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 8 Feb 2022 13:52:50 +0100 Subject: [PATCH 824/909] add some information to screen output --- src/io_restart.F90 | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index d7f081a1a..92989710a 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -140,7 +140,7 @@ end subroutine ini_ice_io ! !-------------------------------------------------------------------------------------------- ! -subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) +subroutine restart(istep, l_read, which_readr, ice, dynamics, tracers, partit, mesh) #if defined(__icepack) icepack restart not merged here ! produce a compiler error if USE_ICEPACK=ON; todo: merge icepack restart from 68d8b8b @@ -164,6 +164,11 @@ subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) logical, save :: initialized_bin = .false. integer mpierr + !which_readr = ... + ! 0 ... read netcdf restart + ! 1 ... read dump file restart (binary) + ! 2 ... read derived type restart (binary) + integer , intent(out):: which_readr !_____________________________________________________________________________ ! initialize directory for core dump restart if(.not. initialized_raw) then @@ -231,26 +236,35 @@ subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! read core dump file restart if(rawfiles_exist) then + which_readr = 1 call read_all_raw_restarts(partit%MPI_COMM_FESOM, partit%mype) !___________________________________________________________________________ ! read derived type binary file restart elseif(binfiles_exist .and. bin_restart_length_unit /= "off") then + which_readr = 2 call read_all_bin_restarts(ice, dynamics, tracers, partit, mesh) !___________________________________________________________________________ ! read netcdf file restart else + which_readr = 0 + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read restarts from netcdf file: ocean'//achar(27)//'[0m' call read_restart(oce_path, oce_files, partit%MPI_COMM_FESOM, partit%mype) - if (use_ice) call read_restart(ice_path, ice_files, partit%MPI_COMM_FESOM, partit%mype) + if (use_ice) then + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read restarts from netcdf file: ice'//achar(27)//'[0m' + call read_restart(ice_path, ice_files, partit%MPI_COMM_FESOM, partit%mype) + end if + ! immediately create a raw core dump restart -! if(raw_restart_length_unit /= "off") then -! call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) -! end if -! ! immediately create a derived type binary restart -! if(bin_restart_length_unit /= "off") then -! call write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) -! end if + if(raw_restart_length_unit /= "off") then + call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) + end if + + ! immediately create a derived type binary restart + if(bin_restart_length_unit /= "off") then + call write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) + end if end if end if @@ -280,9 +294,12 @@ subroutine restart(istep, l_read, ice, dynamics, tracers, partit, mesh) ! write netcdf restart if(is_portable_restart_write) then ! if(partit%mype==0) write(*,*)'Do output (netCDF, restart) ...' - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> write traditional restarts to netcdf file'//achar(27)//'[0m' + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> write restarts to netcdf file: ocean'//achar(27)//'[0m' call write_restart(oce_path, oce_files, istep) - if(use_ice) call write_restart(ice_path, ice_files, istep) + if(use_ice) then + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> write restarts to netcdf file: ice'//achar(27)//'[0m' + call write_restart(ice_path, ice_files, istep) + end if end if ! write core dump @@ -684,8 +701,6 @@ subroutine read_restart(path, filegroup, mpicomm, mype) allocate(skip_file(filegroup%nfiles)) skip_file = .false. - if (mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read traditional restarts from netcdf file'//achar(27)//'[0m' - do i=1, filegroup%nfiles current_iorank_snd = 0 current_iorank_rcv = 0 From 80bd90014a16e0b8fd266a6335723d603a9219c1 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 8 Feb 2022 13:56:27 +0100 Subject: [PATCH 825/909] need to add another output flag from call restart(...) about which restart file is read (either netcdf, core dump or derived type binary) --- src/fesom_module.F90 | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 853d94995..53efbef37 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -43,6 +43,7 @@ module fesom_main_storage_module type :: fesom_main_storage_type integer :: n, from_nstep, offset, row, i, provided + integer :: which_readr ! read which restart files (0=netcdf, 1=core dump,2=dtype) integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing @@ -206,16 +207,14 @@ subroutine fesom_init(fesom_total_nsteps) call clock_newyear ! check if it is a new year if (f%mype==0) f%t6=MPI_Wtime() !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ - if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call restart'//achar(27)//'[0m' - call restart(0, r_restart, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) + call restart(0, r_restart, f%which_readr, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) if (f%mype==0) f%t7=MPI_Wtime() ! store grid information into netcdf file if (.not. r_restart) call write_mesh_info(f%partit, f%mesh) !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ - if (r_restart) then - if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call restart_thickness_ale'//achar(27)//'[0m' + if (r_restart .and. .not. f%which_readr==2) then call restart_thickness_ale(f%partit, f%mesh) end if if (f%mype==0) then @@ -377,7 +376,7 @@ subroutine fesom_runloop(current_nsteps) call output (n, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) f%t5 = MPI_Wtime() - call restart(n, .false., f%ice, f%dynamics, f%tracers, f%partit, f%mesh) + call restart(n, .false., f%which_readr, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) f%t6 = MPI_Wtime() f%rtime_fullice = f%rtime_fullice + f%t2 - f%t1 @@ -397,6 +396,7 @@ subroutine fesom_finalize() real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) call finalize_output() + call finalize_restart() !___FINISH MODEL RUN________________________________________________________ From f4d8371e59e3bb0dd11a8717a3b40abdbd33ae39 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Tue, 8 Feb 2022 17:46:38 +0100 Subject: [PATCH 826/909] update main tests --- setups/test_pi/setup.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/setups/test_pi/setup.yml b/setups/test_pi/setup.yml index 2b9d4a51a..8856bcb4f 100644 --- a/setups/test_pi/setup.yml +++ b/setups/test_pi/setup.yml @@ -59,12 +59,12 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.26912765975496816 - salt: 23.944024679315966 - sst: 8.531528641557886 - temp: 1.7017687500626169 - u: -0.0014072137916283753 - v: 0.0001418460244606028 + a_ice: 0.2692498167543513 + salt: 23.944089812055452 + sst: 8.526792796340805 + temp: 1.7018189804276316 + u: -0.0014310701355284717 + v: 0.00014314237674481877 From f7dd86fdc94a449221c385cfbf0d025bd4d6c2c5 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 9 Feb 2022 12:39:45 +0100 Subject: [PATCH 827/909] make variable inout for subroutine read_all_bin_restarts(...) so its also usable when doing dwarfs --- src/io_restart.F90 | 108 +++++++++++++++++++++++++++------------------ 1 file changed, 66 insertions(+), 42 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 92989710a..a29574c1c 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -243,8 +243,20 @@ subroutine restart(istep, l_read, which_readr, ice, dynamics, tracers, partit, m ! read derived type binary file restart elseif(binfiles_exist .and. bin_restart_length_unit /= "off") then which_readr = 2 - call read_all_bin_restarts(ice, dynamics, tracers, partit, mesh) - + if (use_ice) then + call read_all_bin_restarts(bin_restart_dirpath, & + ice = ice, & + dynamics = dynamics, & + tracers = tracers, & + partit = partit, & + mesh = mesh) + else + call read_all_bin_restarts(bin_restart_dirpath, & + dynamics = dynamics, & + tracers = tracers, & + partit = partit, & + mesh = mesh) + end if !___________________________________________________________________________ ! read netcdf file restart else @@ -524,67 +536,79 @@ subroutine write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) ! ! !_______________________________________________________________________________ -subroutine read_all_bin_restarts(ice, dynamics, tracers, partit, mesh) +subroutine read_all_bin_restarts(path_in, ice, dynamics, tracers, partit, mesh) implicit none - type(t_ice) , intent(inout), target :: ice - type(t_dyn) , intent(inout), target :: dynamics - type(t_tracer), intent(inout), target :: tracers - type(t_partit), intent(inout), target :: partit - type(t_mesh) , intent(inout), target :: mesh + + ! do optional here for the usage with dwarfs, since there only specific derived + ! types will be needed + character(len=*), intent(in) :: path_in + type(t_ice) , intent(inout), target, optional :: ice + type(t_dyn) , intent(inout), target, optional :: dynamics + type(t_tracer), intent(inout), target, optional :: tracers + type(t_partit), intent(inout), target, optional :: partit + type(t_mesh) , intent(inout), target, optional :: mesh integer fileunit if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read restarts from derived type binary'//achar(27)//'[0m' !___________________________________________________________________________ ! mesh derived type - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = bin_restart_dirpath//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) mesh - close(fileunit) - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_mesh'//achar(27)//'[0m' + if (present(mesh)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) mesh + close(fileunit) + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_mesh'//achar(27)//'[0m' + end if !___________________________________________________________________________ ! partit derived type - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = bin_restart_dirpath//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) partit - close(fileunit) - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_partit'//achar(27)//'[0m' + if (present(partit)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) partit + close(fileunit) + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_partit'//achar(27)//'[0m' + end if !___________________________________________________________________________ ! tracer derived type - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = bin_restart_dirpath//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) tracers - close(fileunit) - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_tracer'//achar(27)//'[0m' + if (present(tracers)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) tracers + close(fileunit) + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_tracer'//achar(27)//'[0m' + end if !___________________________________________________________________________ ! dynamics derived type - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = bin_restart_dirpath//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) dynamics - close(fileunit) - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_dynamics'//achar(27)//'[0m' + if (present(dynamics)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) dynamics + close(fileunit) + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_dynamics'//achar(27)//'[0m' + end if !___________________________________________________________________________ ! ice derived type - if (use_ice) then + if (present(ice)) then fileunit = partit%mype+300 open(newunit = fileunit, & - file = bin_restart_dirpath//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + file = trim(path_in)//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & status = 'old', & form = 'unformatted') read(fileunit) ice From 81f9173dd740afb975cd1d1484f7eee305a20e5c Mon Sep 17 00:00:00 2001 From: dsidoren Date: Thu, 10 Feb 2022 15:22:06 +0100 Subject: [PATCH 828/909] Update MOD_ICE.F90 small fix for reading in t_ice --- src/MOD_ICE.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index f2364da8b..71d15135d 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -458,6 +458,7 @@ subroutine READ_T_ICE(ice, unit, iostat, iomsg) end if !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) ice%num_itracers + if (.not. allocated(ice%data)) allocate(ice%data(ice%num_itracers)) do i=1, ice%num_itracers read(unit, iostat=iostat, iomsg=iomsg) ice%data(i) end do From da4b013eacf94b590cbb0cc7932239c5fb3747c6 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 11 Feb 2022 10:03:18 +0100 Subject: [PATCH 829/909] move routines for reading writing derived types into own modules so they can be used separately in dwarfs --- src/io_restart.F90 | 444 ++++++++++++++++++++++++--------------------- 1 file changed, 233 insertions(+), 211 deletions(-) diff --git a/src/io_restart.F90 b/src/io_restart.F90 index a29574c1c..d29c934ea 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -1,14 +1,17 @@ MODULE io_RESTART use restart_file_group_module + use restart_derivedtype_module use g_clock use o_arrays use g_cvmix_tke use MOD_TRACER use MOD_ICE -! use MOD_DYN -! use MOD_MESH -! USE MOD_PARTIT -! USE MOD_PARSUP + use MOD_DYN + use MOD_MESH + use MOD_PARTIT + use MOD_PARSUP + use fortran_utils + implicit none public :: restart, finalize_restart private @@ -168,7 +171,9 @@ subroutine restart(istep, l_read, which_readr, ice, dynamics, tracers, partit, m ! 0 ... read netcdf restart ! 1 ... read dump file restart (binary) ! 2 ... read derived type restart (binary) - integer , intent(out):: which_readr + integer, intent(out):: which_readr + + integer :: cstep !_____________________________________________________________________________ ! initialize directory for core dump restart if(.not. initialized_raw) then @@ -275,7 +280,15 @@ subroutine restart(istep, l_read, which_readr, ice, dynamics, tracers, partit, m ! immediately create a derived type binary restart if(bin_restart_length_unit /= "off") then - call write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) + ! current (total) model step --> cstep = globalstep+istep + call write_all_bin_restarts((/globalstep+istep, int(ctime), yearnew/), & + bin_restart_dirpath, & + bin_restart_infopath, & + ice, & + dynamics, & + tracers, & + partit, & + mesh) end if end if end if @@ -321,7 +334,15 @@ subroutine restart(istep, l_read, which_readr, ice, dynamics, tracers, partit, m ! write derived type binary if(is_bin_restart_write) then - call write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) + ! current (total) model step --> cstep = globalstep+istep + call write_all_bin_restarts((/globalstep+istep, int(ctime), yearnew/), & + bin_restart_dirpath, & + bin_restart_infopath, & + ice, & + dynamics, & + tracers, & + partit, & + mesh) end if ! actualize clock file to latest restart point @@ -429,193 +450,193 @@ subroutine write_raw_restart_group(filegroup, fileunit) call filegroup%files(i)%write_variables_raw(fileunit) end do end subroutine -! -! -!_______________________________________________________________________________ -subroutine write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) - integer, intent(in) :: istep - type(t_ice) , target, intent(in) :: ice - type(t_dyn) , target, intent(in) :: dynamics - type(t_tracer), target, intent(in) :: tracers - type(t_partit), target, intent(in) :: partit - type(t_mesh) , target, intent(in) :: mesh - - ! EO parameters - integer cstep - integer fileunit, fileunit_i - - !___________________________________________________________________________ - ! write info file - if(partit%mype == RAW_RESTART_METADATA_RANK) then - print *, achar(27)//'[1;33m'//' --> writing derived type binary restarts to '//bin_restart_dirpath//achar(27)//'[0m' - ! store metadata about the raw restart - cstep = globalstep+istep - fileunit_i = 299 - open(newunit = fileunit_i, file = bin_restart_infopath) - write(fileunit_i, '(g0)') cstep - write(fileunit_i, '(g0)') ctime - write(fileunit_i, '(2(g0))') "! year: ",yearnew - end if - - !___________________________________________________________________________ - ! mesh derived type - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = bin_restart_dirpath//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'replace', & - form = 'unformatted') - write(fileunit) mesh - close(fileunit) - if(partit%mype == RAW_RESTART_METADATA_RANK) then - write(fileunit_i, '(1(g0))') "! t_mesh" - print *, achar(27)//'[33m'//' > write derived type t_mesh'//achar(27)//'[0m' - end if - - !___________________________________________________________________________ - ! partit derived type - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = bin_restart_dirpath//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'replace', & - form = 'unformatted') - write(fileunit) partit - close(fileunit) - if(partit%mype == RAW_RESTART_METADATA_RANK) then - write(fileunit_i, '(1(g0))') "! t_partit" - print *, achar(27)//'[33m'//' > write derived type t_partit'//achar(27)//'[0m' - end if - - !___________________________________________________________________________ - ! tracer derived type - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = bin_restart_dirpath//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'replace', & - form = 'unformatted') - write(fileunit) tracers - close(fileunit) - if(partit%mype == RAW_RESTART_METADATA_RANK) then - write(fileunit_i, '(1(g0))') "! t_tracer" - print *, achar(27)//'[33m'//' > write derived type t_tracer'//achar(27)//'[0m' - end if - - !___________________________________________________________________________ - ! dynamics derived type - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = bin_restart_dirpath//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'replace', & - form = 'unformatted') - write(fileunit) dynamics - close(fileunit) - if(partit%mype == RAW_RESTART_METADATA_RANK) then - write(fileunit_i, '(1(g0))') "! t_dynamics" - print *, achar(27)//'[33m'//' > write derived type t_dynamics'//achar(27)//'[0m' - end if - - !___________________________________________________________________________ - ! ice derived type - if (use_ice) then - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = bin_restart_dirpath//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'replace', & - form = 'unformatted') - write(fileunit) ice - close(fileunit) - if(partit%mype == RAW_RESTART_METADATA_RANK) then - write(fileunit_i, '(1(g0))') "! t_ice" - print *, achar(27)//'[33m'//' > write derived type t_ice'//achar(27)//'[0m' - end if - end if - - !___________________________________________________________________________ - if(partit%mype == RAW_RESTART_METADATA_RANK) close(fileunit_i) - -end subroutine -! -! -!_______________________________________________________________________________ -subroutine read_all_bin_restarts(path_in, ice, dynamics, tracers, partit, mesh) - implicit none - - ! do optional here for the usage with dwarfs, since there only specific derived - ! types will be needed - character(len=*), intent(in) :: path_in - type(t_ice) , intent(inout), target, optional :: ice - type(t_dyn) , intent(inout), target, optional :: dynamics - type(t_tracer), intent(inout), target, optional :: tracers - type(t_partit), intent(inout), target, optional :: partit - type(t_mesh) , intent(inout), target, optional :: mesh - integer fileunit - - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read restarts from derived type binary'//achar(27)//'[0m' - - !___________________________________________________________________________ - ! mesh derived type - if (present(mesh)) then - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = trim(path_in)//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) mesh - close(fileunit) - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_mesh'//achar(27)//'[0m' - end if - - !___________________________________________________________________________ - ! partit derived type - if (present(partit)) then - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = trim(path_in)//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) partit - close(fileunit) - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_partit'//achar(27)//'[0m' - end if - - !___________________________________________________________________________ - ! tracer derived type - if (present(tracers)) then - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = trim(path_in)//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) tracers - close(fileunit) - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_tracer'//achar(27)//'[0m' - end if - - !___________________________________________________________________________ - ! dynamics derived type - if (present(dynamics)) then - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = trim(path_in)//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) dynamics - close(fileunit) - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_dynamics'//achar(27)//'[0m' - end if - - !___________________________________________________________________________ - ! ice derived type - if (present(ice)) then - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = trim(path_in)//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) ice - close(fileunit) - if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_ice'//achar(27)//'[0m' - end if -end subroutine +! ! ! +! ! ! +! ! !_______________________________________________________________________________ +! ! subroutine write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) +! ! integer, intent(in) :: istep +! ! type(t_ice) , target, intent(in) :: ice +! ! type(t_dyn) , target, intent(in) :: dynamics +! ! type(t_tracer), target, intent(in) :: tracers +! ! type(t_partit), target, intent(in) :: partit +! ! type(t_mesh) , target, intent(in) :: mesh +! ! +! ! ! EO parameters +! ! integer cstep +! ! integer fileunit, fileunit_i +! ! +! ! !___________________________________________________________________________ +! ! ! write info file +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! print *, achar(27)//'[1;33m'//' --> writing derived type binary restarts to '//bin_restart_dirpath//achar(27)//'[0m' +! ! ! store metadata about the raw restart +! ! cstep = globalstep+istep +! ! fileunit_i = 299 +! ! open(newunit = fileunit_i, file = bin_restart_infopath) +! ! write(fileunit_i, '(g0)') cstep +! ! write(fileunit_i, '(g0)') ctime +! ! write(fileunit_i, '(2(g0))') "! year: ",yearnew +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! mesh derived type +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = bin_restart_dirpath//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'replace', & +! ! form = 'unformatted') +! ! write(fileunit) mesh +! ! close(fileunit) +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! write(fileunit_i, '(1(g0))') "! t_mesh" +! ! print *, achar(27)//'[33m'//' > write derived type t_mesh'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! partit derived type +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = bin_restart_dirpath//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'replace', & +! ! form = 'unformatted') +! ! write(fileunit) partit +! ! close(fileunit) +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! write(fileunit_i, '(1(g0))') "! t_partit" +! ! print *, achar(27)//'[33m'//' > write derived type t_partit'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! tracer derived type +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = bin_restart_dirpath//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'replace', & +! ! form = 'unformatted') +! ! write(fileunit) tracers +! ! close(fileunit) +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! write(fileunit_i, '(1(g0))') "! t_tracer" +! ! print *, achar(27)//'[33m'//' > write derived type t_tracer'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! dynamics derived type +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = bin_restart_dirpath//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'replace', & +! ! form = 'unformatted') +! ! write(fileunit) dynamics +! ! close(fileunit) +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! write(fileunit_i, '(1(g0))') "! t_dynamics" +! ! print *, achar(27)//'[33m'//' > write derived type t_dynamics'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! ice derived type +! ! if (use_ice) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = bin_restart_dirpath//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'replace', & +! ! form = 'unformatted') +! ! write(fileunit) ice +! ! close(fileunit) +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! write(fileunit_i, '(1(g0))') "! t_ice" +! ! print *, achar(27)//'[33m'//' > write derived type t_ice'//achar(27)//'[0m' +! ! end if +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) close(fileunit_i) +! ! +! ! end subroutine +! ! ! +! ! ! +! ! !_______________________________________________________________________________ +! ! subroutine read_all_bin_restarts(path_in, ice, dynamics, tracers, partit, mesh) +! ! implicit none +! ! +! ! ! do optional here for the usage with dwarfs, since there only specific derived +! ! ! types will be needed +! ! character(len=*), intent(in) :: path_in +! ! type(t_ice) , intent(inout), target, optional :: ice +! ! type(t_dyn) , intent(inout), target, optional :: dynamics +! ! type(t_tracer), intent(inout), target, optional :: tracers +! ! type(t_partit), intent(inout), target, optional :: partit +! ! type(t_mesh) , intent(inout), target, optional :: mesh +! ! integer fileunit +! ! +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read restarts from derived type binary'//achar(27)//'[0m' +! ! +! ! !___________________________________________________________________________ +! ! ! mesh derived type +! ! if (present(mesh)) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = trim(path_in)//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'old', & +! ! form = 'unformatted') +! ! read(fileunit) mesh +! ! close(fileunit) +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_mesh'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! partit derived type +! ! if (present(partit)) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = trim(path_in)//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'old', & +! ! form = 'unformatted') +! ! read(fileunit) partit +! ! close(fileunit) +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_partit'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! tracer derived type +! ! if (present(tracers)) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = trim(path_in)//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'old', & +! ! form = 'unformatted') +! ! read(fileunit) tracers +! ! close(fileunit) +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_tracer'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! dynamics derived type +! ! if (present(dynamics)) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = trim(path_in)//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'old', & +! ! form = 'unformatted') +! ! read(fileunit) dynamics +! ! close(fileunit) +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_dynamics'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! ice derived type +! ! if (present(ice)) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = trim(path_in)//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'old', & +! ! form = 'unformatted') +! ! read(fileunit) ice +! ! close(fileunit) +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_ice'//achar(27)//'[0m' +! ! end if +! ! end subroutine ! ! !_______________________________________________________________________________ @@ -839,22 +860,23 @@ function is_due(unit, frequency, istep) result(d) stop end if end function -! -! -!_______________________________________________________________________________ - function mpirank_to_txt(mpicomm) result(txt) - use fortran_utils - integer, intent(in) :: mpicomm - character(:), allocatable :: txt - ! EO parameters - integer mype - integer npes - integer mpierr - include 'mpif.h' - - call MPI_Comm_Rank(mpicomm, mype, mpierr) - call MPI_Comm_Size(mpicomm, npes, mpierr) - txt = int_to_txt_pad(mype,int(log10(real(npes)))+1) ! pad to the width of the number of processes - end function +! ! +! ! +! !_______________________________________________________________________________ +! function mpirank_to_txt(mpicomm) result(txt) +! use fortran_utils +! integer, intent(in) :: mpicomm +! character(:), allocatable :: txt +! ! EO parameters +! integer mype +! integer npes +! integer mpierr +! include 'mpif.h' +! +! call MPI_Comm_Rank(mpicomm, mype, mpierr) +! call MPI_Comm_Size(mpicomm, npes, mpierr) +! txt = int_to_txt_pad(mype,int(log10(real(npes)))+1) ! pad to the width of the number of processes +! end function +!!PS --> move this function also to fortran_utils.F90 end module From 64029aa9a1a2fef52eca5a34090d5baf52f71821 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 11 Feb 2022 10:04:53 +0100 Subject: [PATCH 830/909] move function mpirank_to_txt(...) to src/fortran_utils.F90 --- src/fortran_utils.F90 | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/fortran_utils.F90 b/src/fortran_utils.F90 index 1296cfe72..1ebd62323 100644 --- a/src/fortran_utils.F90 +++ b/src/fortran_utils.F90 @@ -41,6 +41,21 @@ function int_to_txt_pad(val, width) result(txt) end function + function mpirank_to_txt(mpicomm) result(txt) + integer, intent(in) :: mpicomm + character(:), allocatable :: txt + ! EO parameters + integer mype + integer npes + integer mpierr + include 'mpif.h' + + call MPI_Comm_Rank(mpicomm, mype, mpierr) + call MPI_Comm_Size(mpicomm, npes, mpierr) + txt = int_to_txt_pad(mype,int(log10(real(npes)))+1) ! pad to the width of the number of processes + end function + + ! using EXECUTE_COMMAND_LINE to call mkdir sometimes fail (EXECUTE_COMMAND_LINE is forked as an new process, which may be the problem) ! try to use the C mkdir as an alternative subroutine mkdir(path) From c5345850e9bba0ff15031579049de7ecf587da19 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 11 Feb 2022 10:06:13 +0100 Subject: [PATCH 831/909] make own module for reading /writing derived type restarts --- src/io_restart_derivedtype.F90 | 242 +++++++++++++++++++++++++++++++++ 1 file changed, 242 insertions(+) create mode 100644 src/io_restart_derivedtype.F90 diff --git a/src/io_restart_derivedtype.F90 b/src/io_restart_derivedtype.F90 new file mode 100644 index 000000000..c884ba555 --- /dev/null +++ b/src/io_restart_derivedtype.F90 @@ -0,0 +1,242 @@ +module restart_derivedtype_module + interface + subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, ice, dynamics, tracers, partit, mesh) + use MOD_ICE + use MOD_DYN + use MOD_TRACER + use MOD_PARTIT + use MOD_MESH + integer, dimension(3) , intent(in) :: ctarr + character(len=*), intent(in) :: path_in + character(len=*), intent(in) :: pathi_in + type(t_ice) , intent(inout), target, optional :: ice + type(t_dyn) , intent(inout), target, optional :: dynamics + type(t_tracer), intent(inout), target, optional :: tracers + type(t_partit), intent(inout), target, optional :: partit + type(t_mesh) , intent(inout), target, optional :: mesh + end subroutine + + subroutine read_all_bin_restarts(path_in, ice, dynamics, tracers, partit, mesh) + use MOD_ICE + use MOD_DYN + use MOD_TRACER + use MOD_PARTIT + use MOD_MESH + character(len=*), intent(in) :: path_in + type(t_ice) , intent(inout), target, optional :: ice + type(t_dyn) , intent(inout), target, optional :: dynamics + type(t_tracer), intent(inout), target, optional :: tracers + type(t_partit), intent(inout), target, optional :: partit + type(t_mesh) , intent(inout), target, optional :: mesh + end subroutine + end interface +end module +! +! +!_______________________________________________________________________________ +subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, ice, dynamics, tracers, partit, mesh) + use MOD_ICE + use MOD_DYN + use MOD_TRACER + use MOD_PARTIT + use MOD_MESH + use fortran_utils + implicit none + + integer, dimension(3) , intent(in) :: ctarr ! //cstep,ctime,cyear// + character(len=*) , intent(in) :: path_in + character(len=*) , intent(in) :: pathi_in + type(t_ice) , target, intent(in), optional :: ice + type(t_dyn) , target, intent(in), optional :: dynamics + type(t_tracer), target, intent(in), optional :: tracers + type(t_partit), target, intent(in) :: partit + type(t_mesh) , target, intent(in) :: mesh + + ! EO parameters + integer fileunit, fileunit_i + + !___________________________________________________________________________ + ! write info file + if(partit%mype == 0) then + print *, achar(27)//'[1;33m'//' --> writing derived type binary restarts to '//trim(path_in)//achar(27)//'[0m' + ! store metadata about the raw restart + fileunit_i = 299 + open(newunit = fileunit_i, file = trim(pathi_in)) + write(fileunit_i, '(g0)') ctarr(1) + write(fileunit_i, '(g0)') ctarr(2) + write(fileunit_i, '(2(g0))') "! year: ",ctarr(3) + end if + + !___________________________________________________________________________ + ! mesh derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) mesh + close(fileunit) + if(partit%mype == 0) then + write(fileunit_i, '(1(g0))') "! t_mesh" + print *, achar(27)//'[33m'//' > write derived type t_mesh'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! partit derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) partit + close(fileunit) + if(partit%mype == 0) then + write(fileunit_i, '(1(g0))') "! t_partit" + print *, achar(27)//'[33m'//' > write derived type t_partit'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! tracer derived type + if (present(tracers)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) tracers + close(fileunit) + if(partit%mype == 0) then + write(fileunit_i, '(1(g0))') "! t_tracer" + print *, achar(27)//'[33m'//' > write derived type t_tracer'//achar(27)//'[0m' + end if + end if + + !___________________________________________________________________________ + ! dynamics derived type + if (present(dynamics)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) dynamics + close(fileunit) + if(partit%mype == 0) then + write(fileunit_i, '(1(g0))') "! t_dynamics" + print *, achar(27)//'[33m'//' > write derived type t_dynamics'//achar(27)//'[0m' + end if + end if + + !___________________________________________________________________________ + ! ice derived type + if (present(ice)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) ice + close(fileunit) + if(partit%mype == 0) then + write(fileunit_i, '(1(g0))') "! t_ice" + print *, achar(27)//'[33m'//' > write derived type t_ice'//achar(27)//'[0m' + end if + end if + + !___________________________________________________________________________ + if(partit%mype == 0) close(fileunit_i) + +end subroutine +! +! +!_______________________________________________________________________________ +! read derived type binary restart files, depending on input (see optional) not +! all derived type binaries are read --> functionalitiy for dwarfs ! +subroutine read_all_bin_restarts(path_in, ice, dynamics, tracers, partit, mesh) + use MOD_ICE + use MOD_DYN + use MOD_TRACER + use MOD_PARTIT + use MOD_MESH + use fortran_utils + implicit none + + ! do optional here for the usage with dwarfs, since there only specific derived + ! types will be needed + character(len=*), intent(in) :: path_in + type(t_ice) , intent(inout), target, optional :: ice + type(t_dyn) , intent(inout), target, optional :: dynamics + type(t_tracer), intent(inout), target, optional :: tracers + type(t_partit), intent(inout), target, optional :: partit + type(t_mesh) , intent(inout), target, optional :: mesh + integer fileunit + + !___________________________________________________________________________ + if (partit%mype==0) print *, achar(27)//'[1;33m'//' --> read restarts from derived type binary'//achar(27)//'[0m' + + !___________________________________________________________________________ + ! mesh derived type + if (present(mesh)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) mesh + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_mesh'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! partit derived type + if (present(partit)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) partit + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_partit'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! tracer derived type + if (present(tracers)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) tracers + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_tracer'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! dynamics derived type + if (present(dynamics)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) dynamics + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_dynamics'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! ice derived type + if (present(ice)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) ice + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_ice'//achar(27)//'[0m' + end if +end subroutine + From f88530dec9c83356c1b8f0d0bacfbaeba87631fd Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 14 Feb 2022 16:11:11 +0100 Subject: [PATCH 832/909] add variable coriols(elem) and coriolis_node(node) to mesh derived type --- src/MOD_MESH.F90 | 5 +++++ src/gen_modules_backscatter.F90 | 8 ++++---- src/gen_modules_cvmix_idemix.F90 | 2 +- src/gen_modules_cvmix_kpp.F90 | 2 +- src/ice_EVP.F90 | 2 +- src/ice_maEVP.F90 | 8 ++++---- src/oce_ale_mixing_kpp.F90 | 2 +- src/oce_ale_vel_rhs.F90 | 4 ++-- src/oce_fer_gm.F90 | 4 ++-- src/oce_mesh.F90 | 10 +++++----- src/oce_modules.F90 | 2 +- src/toy_channel_soufflet.F90 | 8 ++++---- 12 files changed, 31 insertions(+), 26 deletions(-) diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index 808de3564..df5857562 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -84,6 +84,11 @@ MODULE MOD_MESH real(kind=WP), allocatable, dimension(:,:) :: cavity_nrst_cavlpnt_xyz +! +! +!___coriolis force______________________________________________________________ +real(kind=WP), allocatable, dimension(:) :: coriolis_node, coriolis + ! ! !___Elevation stiffness matrix__________________________________________________ diff --git a/src/gen_modules_backscatter.F90 b/src/gen_modules_backscatter.F90 index f602c39c0..1a3c620cd 100644 --- a/src/gen_modules_backscatter.F90 +++ b/src/gen_modules_backscatter.F90 @@ -7,7 +7,7 @@ module g_backscatter USE MOD_DYN !___________________________________________________________________________ - USE o_ARRAYS, only: bvfreq, coriolis_node + USE o_ARRAYS, only: bvfreq !___________________________________________________________________________ USE o_param @@ -350,7 +350,7 @@ subroutine uke_update(dynamics, partit, mesh) END DO c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min !Cutoff K_GM depending on (Resolution/Rossby radius) ratio - rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) + rosb=rosb+min(c1/max(abs(mesh%coriolis_node(elnodes(kk))), f_min), r_max) END DO rosb=rosb/3._WP scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) @@ -374,11 +374,11 @@ subroutine uke_update(dynamics, partit, mesh) len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 - rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) + rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(mesh%coriolis_node(elnodes(:)))), f_min) !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._WP/(1._WP+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) else - rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) + rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(mesh%coriolis_node(elnodes(:)))), f_min) !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) uke_dis(nz,ed)=scaling*1._WP/(1._WP+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) end if diff --git a/src/gen_modules_cvmix_idemix.F90 b/src/gen_modules_cvmix_idemix.F90 index a49b77585..828826706 100644 --- a/src/gen_modules_cvmix_idemix.F90 +++ b/src/gen_modules_cvmix_idemix.F90 @@ -322,7 +322,7 @@ subroutine calc_cvmix_idemix(partit, mesh) nlev = nln-uln+1, & max_nlev = nl-1, & dtime = dt, & - coriolis = coriolis_node(node), & + coriolis = mesh%coriolis_node(node), & ! essentials iwe_new = iwe(uln:nln+1,node), & ! out iwe_old = iwe_old(uln:nln+1), & ! in diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 044fd27fb..7883158d9 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -730,7 +730,7 @@ subroutine calc_cvmix_kpp(ice, dynamics, tracers, partit, mesh) zt_cntr = Z_3d_n( nun:nln ,node), & ! (in) Height of cell centers (m) dim=(ke) surf_fric = aux_ustar, & ! (in) Turbulent friction velocity at surface (m/s) dim=1 surf_buoy = aux_surfbuoyflx_nl(1), & ! (in) Buoyancy flux at surface (m2/s3) dim=1 - Coriolis = coriolis_node(node), & ! (in) Coriolis parameter (1/s) dim=1 + Coriolis = mesh%coriolis_node(node), & ! (in) Coriolis parameter (1/s) dim=1 OBL_depth = kpp_obldepth(node), & ! (out) OBL depth (m) dim=1 kOBL_depth = kpp_nzobldepth(node) & ! (out) level (+fraction) of OBL extent dim=1 ) diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index c3685177b..0dbcb3cac 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -538,7 +538,7 @@ subroutine EVPdynamics(ice, partit, mesh) inv_mass(n)*stress_atmice_y(n) + V_rhs_ice(n)) r_a = 1._WP + ax*drag*rdt - r_b = rdt*(coriolis_node(n) + ay*drag) + r_b = rdt*(mesh%coriolis_node(n) + ay*drag) det = 1.0_WP/(r_a*r_a + r_b*r_b) U_ice(n) = det*(r_a*rhsu +r_b*rhsv) V_ice(n) = det*(r_a*rhsv -r_b*rhsu) diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 467d54464..c33ab6a4e 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -696,10 +696,10 @@ subroutine EVPdynamics_m(ice, partit, mesh) rhsv = v_ice(i)+drag*v_w(i)+rdt*(inv_thickness(i)*stress_atmice_y(i)+v_rhs_ice(i)) + ice%beta_evp*v_ice_aux(i) !solve (Coriolis and water stress are treated implicitly) - det = bc_index_nod2D(i) / ((1.0_WP+ice%beta_evp+drag)**2 + (rdt*coriolis_node(i))**2) + det = bc_index_nod2D(i) / ((1.0_WP+ice%beta_evp+drag)**2 + (rdt*mesh%coriolis_node(i))**2) - u_ice_aux(i) = det*((1.0_WP+ice%beta_evp+drag)*rhsu +rdt*coriolis_node(i)*rhsv) - v_ice_aux(i) = det*((1.0_WP+ice%beta_evp+drag)*rhsv -rdt*coriolis_node(i)*rhsu) + u_ice_aux(i) = det*((1.0_WP+ice%beta_evp+drag)*rhsu +rdt*mesh%coriolis_node(i)*rhsv) + v_ice_aux(i) = det*((1.0_WP+ice%beta_evp+drag)*rhsv -rdt*mesh%coriolis_node(i)*rhsu) end if end do ! --> do i=1, myDim_nod2d @@ -1052,7 +1052,7 @@ subroutine EVPdynamics_a(ice, partit, mesh) rhsu=beta_evp_array(i)*u_ice_aux(i)+rhsu rhsv=beta_evp_array(i)*v_ice_aux(i)+rhsv !solve (Coriolis and water stress are treated implicitly) - fc=rdt*coriolis_node(i) + fc=rdt*mesh%coriolis_node(i) det=(1.0_WP+beta_evp_array(i)+drag)**2+fc**2 det=bc_index_nod2D(i)/det u_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsu+fc*rhsv) diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index 3f8a7aad4..22f93f4ca 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -582,7 +582,7 @@ SUBROUTINE bldepth(partit, mesh) !!PS IF (bfsfc(node) > 0.0_WP) THEN IF (bfsfc(node) > 0.0_WP .and. nzmin==1) THEN !-> no ekman or monin-obukov when there is cavity - hekman = cekman * ustar(node) / MAX( ABS (coriolis_node(node) ), epsln) + hekman = cekman * ustar(node) / MAX( ABS (mesh%coriolis_node(node) ), epsln) hmonob = cmonob * ustar(node) * ustar(node) * ustar(node) & /vonk / (bfsfc(node) + epsln) hlimit = stable(node) * AMIN1( hekman, hmonob ) diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index 8f27ace17..a926559c2 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -38,7 +38,7 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH - use o_ARRAYS, only: coriolis, ssh_gp, pgf_x, pgf_y + use o_ARRAYS, only: ssh_gp, pgf_x, pgf_y use o_PARAM use g_CONFIG use g_forcing_param, only: use_virt_salt @@ -103,7 +103,7 @@ subroutine compute_vel_rhs(ice, dynamics, partit, mesh) ! p_eta=g*eta_n(elnodes)*(1-theta) !! this place needs update (1-theta)!!! p_eta = g*eta_n(elnodes) - ff = coriolis(elem)*elem_area(elem) + ff = mesh%coriolis(elem)*elem_area(elem) !mm=metric_factor(elem)*gg !___________________________________________________________________________ diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index 222912f21..f450be553 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -215,7 +215,7 @@ end subroutine fer_gamma2vel subroutine init_Redi_GM(partit, mesh) !fer_compute_C_K_Redi USE MOD_MESH USE o_PARAM - USE o_ARRAYS, ONLY: fer_c, fer_k, fer_scal, Ki, bvfreq, MLD1_ind, neutral_slope, coriolis_node + USE o_ARRAYS, ONLY: fer_c, fer_k, fer_scal, Ki, bvfreq, MLD1_ind, neutral_slope USE MOD_PARTIT USE MOD_PARSUP USE g_CONFIG @@ -257,7 +257,7 @@ subroutine init_Redi_GM(partit, mesh) !fer_compute_C_K_Redi !___________________________________________________________________ ! Cutoff K_GM depending on (Resolution/Rossby radius) ratio if (scaling_Rossby) then - rosb=min(c1/max(abs(coriolis_node(n)), f_min), r_max) + rosb=min(c1/max(abs(mesh%coriolis_node(n)), f_min), r_max) rr_ratio=min(reso/rosb, 5._WP) scaling=1._WP/(1._WP+exp(-(rr_ratio-x0)/sigma)) end if diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 0bfebf092..f30bb3758 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -2195,8 +2195,8 @@ SUBROUTINE mesh_auxiliary_arrays(partit, mesh) allocate(mesh%gradient_vec(6,myDim_elem2D)) allocate(mesh%metric_factor(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) allocate(mesh%elem_cos(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) - allocate(coriolis(myDim_elem2D)) - allocate(coriolis_node(myDim_nod2D+eDim_nod2D)) + allocate(mesh%coriolis(myDim_elem2D)) + allocate(mesh%coriolis_node(myDim_nod2D+eDim_nod2D)) allocate(mesh%geo_coord_nod2D(2,myDim_nod2D+eDim_nod2D)) allocate(center_x(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) allocate(center_y(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) @@ -2207,7 +2207,7 @@ SUBROUTINE mesh_auxiliary_arrays(partit, mesh) ! ============ DO n=1,myDim_nod2D+eDim_nod2D call r2g(lon, lat, mesh%coord_nod2D(1,n), mesh%coord_nod2D(2,n)) - coriolis_node(n)=2*omega*sin(lat) + mesh%coriolis_node(n)=2*omega*sin(lat) END DO DO n=1,myDim_nod2D+eDim_nod2D @@ -2223,11 +2223,11 @@ SUBROUTINE mesh_auxiliary_arrays(partit, mesh) DO n=1,myDim_elem2D call elem_center(n, ax, ay, mesh) call r2g(lon, lat, ax, ay) - coriolis(n)=2*omega*sin(lat) + mesh%coriolis(n)=2*omega*sin(lat) END DO if(fplane) then - coriolis=2*omega*0.71_WP + mesh%coriolis=2*omega*0.71_WP end if ! ============ diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 22790d9c9..6c36aa257 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -190,7 +190,7 @@ MODULE o_ARRAYS real(kind=WP), allocatable :: Tsurf_t(:,:), Ssurf_t(:,:) real(kind=WP), allocatable :: tau_x_t(:,:), tau_y_t(:,:) real(kind=WP), allocatable :: heat_flux_t(:,:), heat_rel_t(:,:), heat_rel(:) -real(kind=WP), allocatable :: coriolis(:), coriolis_node(:) +!!PS real(kind=WP), allocatable :: coriolis(:), coriolis_node(:) real(kind=WP), allocatable :: relax2clim(:) real(kind=WP), allocatable :: MLD1(:), MLD2(:) integer, allocatable :: MLD1_ind(:), MLD2_ind(:) diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index 53052ccc5..164f015d4 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -247,7 +247,7 @@ end subroutine compute_zonal_mean subroutine initial_state_soufflet(dynamics, tracers, partit, mesh) ! Profiles Soufflet 2016 (OM) implicit none - type(t_mesh) , intent(in) , target :: mesh + type(t_mesh) , intent(inout), target :: mesh type(t_partit), intent(inout), target :: partit type(t_tracer), intent(inout), target :: tracers type(t_dyn) , intent(inout), target :: dynamics @@ -342,14 +342,14 @@ subroutine initial_state_soufflet(dynamics, tracers, partit, mesh) DO n=1,myDim_elem2D elnodes=elem2D_nodes(:,n) dst=(sum(coord_nod2D(2, elnodes))/3.0-lat0)*r_earth-ysize/2 - coriolis(n)=1.0e-4+dst*1.6e-11 + mesh%coriolis(n)=1.0e-4+dst*1.6e-11 END DO - write(*,*) mype, 'COR', maxval(coriolis*10000.0), minval(coriolis*10000.0) + write(*,*) mype, 'COR', maxval(mesh%coriolis*10000.0), minval(mesh%coriolis*10000.0) DO n=1,myDim_elem2D elnodes=elem2D_nodes(:,n) ! Thermal wind \partial_z UV(1,:,:)=(g/rho_0/f)\partial_y rho DO nz=1,nlevels(n)-1 - d_No(nz)=(-(0.00025_WP*density_0)*g/density_0/coriolis(n))*sum(gradient_sca(4:6,n)*Tclim(nz, elnodes)) + d_No(nz)=(-(0.00025_WP*density_0)*g/density_0/mesh%coriolis(n))*sum(gradient_sca(4:6,n)*Tclim(nz, elnodes)) ! d_N is used here as a placeholder ! -(sw_alpha*density_0) here is from the equation of state d\rho=-(sw_alpha*density_0) dT END DO From f7bb758d819830f8abe2155e3726f7b9c448daea Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 14 Feb 2022 16:22:24 +0100 Subject: [PATCH 833/909] forgot to write/read coriolis force to derived type binary files --- src/MOD_MESH.F90 | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index df5857562..4d4586a96 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -235,6 +235,9 @@ subroutine write_t_mesh(mesh, unit, iostat, iomsg) call write_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) call write_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg) ! call write_bin_array(mesh%representative_checksum, unit, iostat, iomsg) + call write_bin_array(mesh%coriolis, unit, iostat, iomsg) + call write_bin_array(mesh%coriolis_node, unit, iostat, iomsg) + end subroutine write_t_mesh ! Unformatted reading for t_mesh @@ -329,6 +332,9 @@ subroutine read_t_mesh(mesh, unit, iostat, iomsg) call read_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) call read_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg) ! call read_bin_array(mesh%representative_checksum, unit, iostat, iomsg) + call write_bin_array(mesh%coriolis, unit, iostat, iomsg) + call write_bin_array(mesh%coriolis_node, unit, iostat, iomsg) + end subroutine read_t_mesh end module MOD_MESH !========================================================== From 2c9b0b596678a94a9443543fd9703ce640bfc311 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 14 Feb 2022 16:36:06 +0100 Subject: [PATCH 834/909] correct small bug --- src/MOD_MESH.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index 4d4586a96..8574cef3d 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -332,8 +332,8 @@ subroutine read_t_mesh(mesh, unit, iostat, iomsg) call read_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) call read_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg) ! call read_bin_array(mesh%representative_checksum, unit, iostat, iomsg) - call write_bin_array(mesh%coriolis, unit, iostat, iomsg) - call write_bin_array(mesh%coriolis_node, unit, iostat, iomsg) + call read_bin_array(mesh%coriolis, unit, iostat, iomsg) + call read_bin_array(mesh%coriolis_node, unit, iostat, iomsg) end subroutine read_t_mesh end module MOD_MESH From aefcb6fda61ea0f02134742cd09618b1ea7be39a Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 15 Feb 2022 12:55:15 +0100 Subject: [PATCH 835/909] clean up MOD_ICE.F90 --- src/MOD_ICE.F90 | 78 ++++++++++++++++++++++++------------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 71d15135d..2339afc40 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -363,28 +363,28 @@ subroutine WRITE_T_ICE(ice, unit, iostat, iomsg) character(*), intent(inout) :: iomsg integer :: i !___________________________________________________________________________ - call write_bin_array(ice%uice, unit, iostat, iomsg) - call write_bin_array(ice%uice_rhs, unit, iostat, iomsg) - call write_bin_array(ice%uice_old, unit, iostat, iomsg) - if (ice%whichEVP /= 0) call write_bin_array(ice%uice_aux, unit, iostat, iomsg) - call write_bin_array(ice%vice, unit, iostat, iomsg) - call write_bin_array(ice%vice_rhs, unit, iostat, iomsg) - call write_bin_array(ice%vice_old, unit, iostat, iomsg) - if (ice%whichEVP /= 0) call write_bin_array(ice%vice_aux, unit, iostat, iomsg) + call write_bin_array(ice%uice , unit, iostat, iomsg) + call write_bin_array(ice%uice_rhs , unit, iostat, iomsg) + call write_bin_array(ice%uice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call write_bin_array(ice%uice_aux , unit, iostat, iomsg) + call write_bin_array(ice%vice , unit, iostat, iomsg) + call write_bin_array(ice%vice_rhs , unit, iostat, iomsg) + call write_bin_array(ice%vice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call write_bin_array(ice%vice_aux , unit, iostat, iomsg) call write_bin_array(ice%stress_atmice_x, unit, iostat, iomsg) call write_bin_array(ice%stress_iceoce_x, unit, iostat, iomsg) call write_bin_array(ice%stress_atmice_y, unit, iostat, iomsg) call write_bin_array(ice%stress_iceoce_y, unit, iostat, iomsg) - call write_bin_array(ice%srfoce_u, unit, iostat, iomsg) - call write_bin_array(ice%srfoce_v, unit, iostat, iomsg) - call write_bin_array(ice%srfoce_temp, unit, iostat, iomsg) - call write_bin_array(ice%srfoce_salt, unit, iostat, iomsg) - call write_bin_array(ice%srfoce_ssh, unit, iostat, iomsg) - call write_bin_array(ice%flx_fw, unit, iostat, iomsg) - call write_bin_array(ice%flx_h, unit, iostat, iomsg) - if (ice%whichEVP > 0) then - call write_bin_array(ice%alpha_evp_array, unit, iostat, iomsg) - call write_bin_array(ice%beta_evp_array, unit, iostat, iomsg) + call write_bin_array(ice%srfoce_u , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_v , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_temp , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_salt , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_ssh , unit, iostat, iomsg) + call write_bin_array(ice%flx_fw , unit, iostat, iomsg) + call write_bin_array(ice%flx_h , unit, iostat, iomsg) + if (ice%whichEVP > 0) then + call write_bin_array(ice%alpha_evp_array , unit, iostat, iomsg) + call write_bin_array(ice%beta_evp_array , unit, iostat, iomsg) end if !___________________________________________________________________________ @@ -433,28 +433,28 @@ subroutine READ_T_ICE(ice, unit, iostat, iomsg) character(*), intent(inout) :: iomsg integer :: i !___________________________________________________________________________ - call read_bin_array(ice%uice, unit, iostat, iomsg) - call read_bin_array(ice%uice_rhs, unit, iostat, iomsg) - call read_bin_array(ice%uice_old, unit, iostat, iomsg) - if (ice%whichEVP /= 0) call read_bin_array(ice%uice_aux, unit, iostat, iomsg) - call read_bin_array(ice%vice, unit, iostat, iomsg) - call read_bin_array(ice%vice_rhs, unit, iostat, iomsg) - call read_bin_array(ice%vice_old, unit, iostat, iomsg) - if (ice%whichEVP /= 0) call read_bin_array(ice%vice_aux, unit, iostat, iomsg) - call read_bin_array(ice%stress_atmice_x, unit, iostat, iomsg) - call read_bin_array(ice%stress_iceoce_x, unit, iostat, iomsg) - call read_bin_array(ice%stress_atmice_y, unit, iostat, iomsg) - call read_bin_array(ice%stress_iceoce_y, unit, iostat, iomsg) - call read_bin_array(ice%srfoce_u, unit, iostat, iomsg) - call read_bin_array(ice%srfoce_v, unit, iostat, iomsg) - call read_bin_array(ice%srfoce_temp, unit, iostat, iomsg) - call read_bin_array(ice%srfoce_salt, unit, iostat, iomsg) - call read_bin_array(ice%srfoce_ssh, unit, iostat, iomsg) - call read_bin_array(ice%flx_fw, unit, iostat, iomsg) - call read_bin_array(ice%flx_h, unit, iostat, iomsg) + call read_bin_array(ice%uice , unit, iostat, iomsg) + call read_bin_array(ice%uice_rhs , unit, iostat, iomsg) + call read_bin_array(ice%uice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call read_bin_array(ice%uice_aux , unit, iostat, iomsg) + call read_bin_array(ice%vice , unit, iostat, iomsg) + call read_bin_array(ice%vice_rhs , unit, iostat, iomsg) + call read_bin_array(ice%vice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call read_bin_array(ice%vice_aux , unit, iostat, iomsg) + call read_bin_array(ice%stress_atmice_x , unit, iostat, iomsg) + call read_bin_array(ice%stress_iceoce_x , unit, iostat, iomsg) + call read_bin_array(ice%stress_atmice_y , unit, iostat, iomsg) + call read_bin_array(ice%stress_iceoce_y , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_u , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_v , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_temp , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_salt , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_ssh , unit, iostat, iomsg) + call read_bin_array(ice%flx_fw , unit, iostat, iomsg) + call read_bin_array(ice%flx_h , unit, iostat, iomsg) if (ice%whichEVP > 0) then - call read_bin_array(ice%alpha_evp_array, unit, iostat, iomsg) - call read_bin_array(ice%beta_evp_array, unit, iostat, iomsg) + call read_bin_array(ice%alpha_evp_array , unit, iostat, iomsg) + call read_bin_array(ice%beta_evp_array , unit, iostat, iomsg) end if !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) ice%num_itracers From 20d1f4ca433750ebfcd3ce1b54ce5d6dc662e25a Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 15 Feb 2022 13:21:40 +0100 Subject: [PATCH 836/909] Automatic rotation for vector fields in io_MEANDATA -> io_r2g; vec_autorotate is set in namelist.io -> &nml_general; io_r2g checks whether both components of a vector are both a subject to IO and does rotation if vec_autorotate flas if .TRUE. --- config/namelist.io | 11 +++--- src/io_meandata.F90 | 88 ++++++++++++++++++++++++++++++++++++++------- 2 files changed, 82 insertions(+), 17 deletions(-) diff --git a/config/namelist.io b/config/namelist.io index 0a3270c4a..4ad3a8b96 100644 --- a/config/namelist.io +++ b/config/namelist.io @@ -9,8 +9,9 @@ ldiag_DVD =.false. ldiag_forc =.false. / -&nml_listsize -io_listsize=100 !number of streams to allocate. shallbe large or equal to the number of streams in &nml_list +&nml_general +io_listsize =100 !number of streams to allocate. shallbe large or equal to the number of streams in &nml_list +vec_autorotate =.false. / ! for sea ice related variables use_ice should be true, otherewise there will be no output @@ -22,8 +23,8 @@ io_listsize=100 !number of streams to allocate. shallbe large or equal to the nu io_list = 'sst ',1, 'm', 4, 'sss ',1, 'm', 4, 'ssh ',1, 'm', 4, - 'uice ',1, 'm', 4, - 'vice ',1, 'm', 4, + 'uice ',1, 'd', 4, + 'vice ',1, 'd', 4, 'a_ice ',1, 'm', 4, 'm_ice ',1, 'm', 4, 'm_snow ',1, 'm', 4, @@ -37,6 +38,8 @@ io_list = 'sst ',1, 'm', 4, 'Kv ',1, 'y', 4, 'u ',1, 'y', 4, 'v ',1, 'y', 4, + 'unod ',1, 'y', 4, + 'vnod ',1, 'y', 4, 'w ',1, 'y', 4, 'Av ',1, 'y', 4, 'bolus_u ',1, 'y', 4, diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 4b7ce30f3..4231314ee 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -62,7 +62,8 @@ module io_MEANDATA ! !-------------------------------------------------------------------------------------------- ! - integer, save :: io_listsize=0 + integer, save :: io_listsize =0 + logical, save :: vec_autorotate=.FALSE. type io_entry CHARACTER(len=15) :: id ='unknown ' INTEGER :: freq =0 @@ -117,7 +118,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) type(t_tracer), intent(in) , target :: tracers type(t_dyn) , intent(in) , target :: dynamics type(t_ice) , intent(in) , target :: ice - namelist /nml_listsize/ io_listsize + namelist /nml_general / io_listsize, vec_autorotate namelist /nml_list / io_list #include "associate_part_def.h" @@ -134,7 +135,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif - READ(nm_io_unit, nml=nml_listsize, iostat=iost ) + READ(nm_io_unit, nml=nml_general, iostat=iost ) allocate(io_list(io_listsize)) READ(nm_io_unit, nml=nml_list, iostat=iost ) close(nm_io_unit ) @@ -316,15 +317,19 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) CASE ('N2 ') call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'N2', 'brunt väisälä', '1/s2', bvfreq(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Kv ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', dynamics%uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', dynamics%uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('v ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics%uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics%uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) +CASE ('unod ') + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/),'unod', 'horizontal velocity at nodes', 'm/s', dynamics%uvnode(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) +CASE ('vnod ') + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/),'vnod', 'meridional velocity at nodes', 'm/s', dynamics%uvnode(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('w ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', dynamics%w(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', dynamics%w(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Av ') - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u_dis_tend') if(dynamics%opt_visc==8) then call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_dis_tend', 'horizontal velocity viscosity tendency', 'm/s', UV_dis_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) @@ -429,9 +434,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dvdz', 'int(Av * dv/dz)', 'm3/s2', av_dvdz(:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz_sq', 'Av * (du/dz)^2', 'm^2/s^3', av_dudz_sq(:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'Vertical mixing A', 'm2/s', Av(:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at nodes', 'm/s', dynamics%uvnode(1,:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', dynamics%uvnode(2,:,:), 1, 'm', i_real8, partit, mesh) - + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh) call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', dynamics%w(:,:), 1, 'm', i_real8, partit, mesh) @@ -908,7 +911,7 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh) endif if (do_output) then - + if (vec_autorotate) call io_r2g(n, partit, mesh) ! automatically detect if a vector field and rotate if makes sense! if(entry%thread_running) call entry%thread%join() entry%thread_running = .false. @@ -1245,5 +1248,64 @@ subroutine assert(val, line) end if end subroutine -end module + subroutine io_r2g(n, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE g_rotate_grid + implicit none + integer, intent(in) :: n + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + integer :: I, J + type(Meandata), pointer :: entry_x, entry_y + real(kind=WP) :: temp_x, temp_y + logical :: do_rotation + + if (n==io_NSTREAMS) RETURN + entry_x=>io_stream(n) + entry_y=>io_stream(n+1) + IF (.NOT. (entry_x%freq_unit==entry_y%freq_unit) .and. ((entry_x%freq==entry_y%freq))) RETURN + IF (entry_x%accuracy /= entry_y%accuracy) RETURN + do_rotation=.FALSE. +! we need to improve the logistic here in order to use this routinely. a new argument in def_stream +! will be needed. + IF ((trim(entry_x%name)=='u' ) .AND. ((trim(entry_y%name)=='v' ))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='uice' ) .AND. ((trim(entry_y%name)=='vice' ))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='unod' ) .AND. ((trim(entry_y%name)=='vnod' ))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='tau_x' ) .AND. ((trim(entry_y%name)=='tau_y '))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='atmice_x') .AND. ((trim(entry_y%name)=='atmice_y'))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='atmoce_x') .AND. ((trim(entry_y%name)=='atmoce_y'))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='iceoce_x') .AND. ((trim(entry_y%name)=='iceoce_y'))) do_rotation=.TRUE. + + IF (.NOT. (do_rotation)) RETURN + + IF (partit%mype==0) THEN + write(*,*) trim(entry_x%name)//' and '//trim(entry_y%name)//' will be rotated before output!' + END IF + + IF ((entry_x%accuracy == i_real8) .AND. (entry_y%accuracy == i_real8)) THEN +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J) + DO J=1, size(entry_x%local_values_r8,dim=2) + DO I=1, size(entry_x%local_values_r8,dim=1) + call vector_r2g(entry_x%local_values_r8(I,J), entry_y%local_values_r8(I,J), mesh%coord_nod2D(1, n), mesh%coord_nod2D(2, n), 0) + END DO + END DO +!$OMP END PARALLEL DO + END IF + + IF ((entry_x%accuracy == i_real4) .AND. (entry_y%accuracy == i_real4)) THEN +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J, temp_x, temp_y) + DO J=1, size(entry_x%local_values_r4,dim=2) + DO I=1, size(entry_x%local_values_r4,dim=1) + temp_x=real(entry_x%local_values_r4(I,J), real64) + temp_y=real(entry_y%local_values_r4(I,J), real64) + call vector_r2g(temp_x, temp_y, mesh%coord_nod2D(1, n), mesh%coord_nod2D(2, n), 0) + entry_x%local_values_r4(I,J)=real(temp_x, real32) + entry_y%local_values_r4(I,J)=real(temp_y, real32) + END DO + END DO +!$OMP END PARALLEL DO + END IF + end subroutine +end module From 301cf7cd8cff8854b2eecb5e0c094b3c2f45c137 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 15 Feb 2022 17:29:40 +0100 Subject: [PATCH 837/909] change how the binary ice derived type are read/write, now first read/write first all the constants than read/write the arrays --- src/MOD_ICE.F90 | 102 +++++++++++++++++++++++++----------------------- 1 file changed, 53 insertions(+), 49 deletions(-) diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 2339afc40..d38f80c63 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -362,31 +362,6 @@ subroutine WRITE_T_ICE(ice, unit, iostat, iomsg) integer, intent(out) :: iostat character(*), intent(inout) :: iomsg integer :: i - !___________________________________________________________________________ - call write_bin_array(ice%uice , unit, iostat, iomsg) - call write_bin_array(ice%uice_rhs , unit, iostat, iomsg) - call write_bin_array(ice%uice_old , unit, iostat, iomsg) - if (ice%whichEVP /= 0) call write_bin_array(ice%uice_aux , unit, iostat, iomsg) - call write_bin_array(ice%vice , unit, iostat, iomsg) - call write_bin_array(ice%vice_rhs , unit, iostat, iomsg) - call write_bin_array(ice%vice_old , unit, iostat, iomsg) - if (ice%whichEVP /= 0) call write_bin_array(ice%vice_aux , unit, iostat, iomsg) - call write_bin_array(ice%stress_atmice_x, unit, iostat, iomsg) - call write_bin_array(ice%stress_iceoce_x, unit, iostat, iomsg) - call write_bin_array(ice%stress_atmice_y, unit, iostat, iomsg) - call write_bin_array(ice%stress_iceoce_y, unit, iostat, iomsg) - call write_bin_array(ice%srfoce_u , unit, iostat, iomsg) - call write_bin_array(ice%srfoce_v , unit, iostat, iomsg) - call write_bin_array(ice%srfoce_temp , unit, iostat, iomsg) - call write_bin_array(ice%srfoce_salt , unit, iostat, iomsg) - call write_bin_array(ice%srfoce_ssh , unit, iostat, iomsg) - call write_bin_array(ice%flx_fw , unit, iostat, iomsg) - call write_bin_array(ice%flx_h , unit, iostat, iomsg) - if (ice%whichEVP > 0) then - call write_bin_array(ice%alpha_evp_array , unit, iostat, iomsg) - call write_bin_array(ice%beta_evp_array , unit, iostat, iomsg) - end if - !___________________________________________________________________________ write(unit, iostat=iostat, iomsg=iomsg) ice%num_itracers do i=1, ice%num_itracers @@ -422,6 +397,32 @@ subroutine WRITE_T_ICE(ice, unit, iostat, iomsg) write(unit, iostat=iostat, iomsg=iomsg) ice%Tevp_inv write(unit, iostat=iostat, iomsg=iomsg) ice%ice_steps_since_upd write(unit, iostat=iostat, iomsg=iomsg) ice%ice_update + + !___________________________________________________________________________ + call write_bin_array(ice%uice , unit, iostat, iomsg) + call write_bin_array(ice%uice_rhs , unit, iostat, iomsg) + call write_bin_array(ice%uice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call write_bin_array(ice%uice_aux , unit, iostat, iomsg) + call write_bin_array(ice%vice , unit, iostat, iomsg) + call write_bin_array(ice%vice_rhs , unit, iostat, iomsg) + call write_bin_array(ice%vice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call write_bin_array(ice%vice_aux , unit, iostat, iomsg) + call write_bin_array(ice%stress_atmice_x, unit, iostat, iomsg) + call write_bin_array(ice%stress_iceoce_x, unit, iostat, iomsg) + call write_bin_array(ice%stress_atmice_y, unit, iostat, iomsg) + call write_bin_array(ice%stress_iceoce_y, unit, iostat, iomsg) + call write_bin_array(ice%srfoce_u , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_v , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_temp , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_salt , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_ssh , unit, iostat, iomsg) + call write_bin_array(ice%flx_fw , unit, iostat, iomsg) + call write_bin_array(ice%flx_h , unit, iostat, iomsg) + if (ice%whichEVP > 0) then + call write_bin_array(ice%alpha_evp_array , unit, iostat, iomsg) + call write_bin_array(ice%beta_evp_array , unit, iostat, iomsg) + end if + end subroutine WRITE_T_ICE ! Unformatted reading for T_ICE @@ -432,30 +433,7 @@ subroutine READ_T_ICE(ice, unit, iostat, iomsg) integer, intent(out) :: iostat character(*), intent(inout) :: iomsg integer :: i - !___________________________________________________________________________ - call read_bin_array(ice%uice , unit, iostat, iomsg) - call read_bin_array(ice%uice_rhs , unit, iostat, iomsg) - call read_bin_array(ice%uice_old , unit, iostat, iomsg) - if (ice%whichEVP /= 0) call read_bin_array(ice%uice_aux , unit, iostat, iomsg) - call read_bin_array(ice%vice , unit, iostat, iomsg) - call read_bin_array(ice%vice_rhs , unit, iostat, iomsg) - call read_bin_array(ice%vice_old , unit, iostat, iomsg) - if (ice%whichEVP /= 0) call read_bin_array(ice%vice_aux , unit, iostat, iomsg) - call read_bin_array(ice%stress_atmice_x , unit, iostat, iomsg) - call read_bin_array(ice%stress_iceoce_x , unit, iostat, iomsg) - call read_bin_array(ice%stress_atmice_y , unit, iostat, iomsg) - call read_bin_array(ice%stress_iceoce_y , unit, iostat, iomsg) - call read_bin_array(ice%srfoce_u , unit, iostat, iomsg) - call read_bin_array(ice%srfoce_v , unit, iostat, iomsg) - call read_bin_array(ice%srfoce_temp , unit, iostat, iomsg) - call read_bin_array(ice%srfoce_salt , unit, iostat, iomsg) - call read_bin_array(ice%srfoce_ssh , unit, iostat, iomsg) - call read_bin_array(ice%flx_fw , unit, iostat, iomsg) - call read_bin_array(ice%flx_h , unit, iostat, iomsg) - if (ice%whichEVP > 0) then - call read_bin_array(ice%alpha_evp_array , unit, iostat, iomsg) - call read_bin_array(ice%beta_evp_array , unit, iostat, iomsg) - end if + !___________________________________________________________________________ read(unit, iostat=iostat, iomsg=iomsg) ice%num_itracers if (.not. allocated(ice%data)) allocate(ice%data(ice%num_itracers)) @@ -492,6 +470,32 @@ subroutine READ_T_ICE(ice, unit, iostat, iomsg) read(unit, iostat=iostat, iomsg=iomsg) ice%Tevp_inv read(unit, iostat=iostat, iomsg=iomsg) ice%ice_steps_since_upd read(unit, iostat=iostat, iomsg=iomsg) ice%ice_update + + !___________________________________________________________________________ + call read_bin_array(ice%uice , unit, iostat, iomsg) + call read_bin_array(ice%uice_rhs , unit, iostat, iomsg) + call read_bin_array(ice%uice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call read_bin_array(ice%uice_aux , unit, iostat, iomsg) + call read_bin_array(ice%vice , unit, iostat, iomsg) + call read_bin_array(ice%vice_rhs , unit, iostat, iomsg) + call read_bin_array(ice%vice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call read_bin_array(ice%vice_aux , unit, iostat, iomsg) + call read_bin_array(ice%stress_atmice_x , unit, iostat, iomsg) + call read_bin_array(ice%stress_iceoce_x , unit, iostat, iomsg) + call read_bin_array(ice%stress_atmice_y , unit, iostat, iomsg) + call read_bin_array(ice%stress_iceoce_y , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_u , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_v , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_temp , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_salt , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_ssh , unit, iostat, iomsg) + call read_bin_array(ice%flx_fw , unit, iostat, iomsg) + call read_bin_array(ice%flx_h , unit, iostat, iomsg) + if (ice%whichEVP > 0) then + call read_bin_array(ice%alpha_evp_array , unit, iostat, iomsg) + call read_bin_array(ice%beta_evp_array , unit, iostat, iomsg) + end if + end subroutine READ_T_ICE END MODULE MOD_ICE ! From dd3bafc27fbbf62bd3311b842bca80b3bdc0ec7d Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 15 Feb 2022 17:33:51 +0100 Subject: [PATCH 838/909] add directory for dwarfs, submit first files for ice dwarf --- dwarf/dwarf_ice/CMakeLists.txt | 17 +++ dwarf/dwarf_ice/README.md | 8 ++ dwarf/dwarf_ice/bin/.gitignore | 4 + dwarf/dwarf_ice/configure.sh | 9 ++ dwarf/dwarf_ice/dwarf_ini/CMakeLists.txt | 98 +++++++++++++++++ dwarf/dwarf_ice/dwarf_ini/fesom.F90 | 104 +++++++++++++++++++ dwarf/dwarf_ice/dwarf_linkfiles.sh | 52 ++++++++++ dwarf/dwarf_ice/env.sh | 72 +++++++++++++ dwarf/dwarf_ice/env/blogin.hlrn.de/shell | 15 +++ dwarf/dwarf_ice/env/bsc/shell | 7 ++ dwarf/dwarf_ice/env/ecaccess.ecmwf.int/shell | 11 ++ dwarf/dwarf_ice/env/hazelhen.hww.de/shell | 24 +++++ dwarf/dwarf_ice/env/hlogin.hlrn.de/shell | 15 +++ dwarf/dwarf_ice/env/jureca/shell | 9 ++ dwarf/dwarf_ice/env/juwels/shell | 22 ++++ dwarf/dwarf_ice/env/mistral.dkrz.de/shell | 67 ++++++++++++ dwarf/dwarf_ice/env/mistral.dkrz.de/shell~ | 40 +++++++ dwarf/dwarf_ice/env/ollie/shell | 11 ++ dwarf/dwarf_ice/env/ollie/shell_cray | 6 ++ dwarf/dwarf_ice/env/ubuntu/shell | 5 + dwarf/dwarf_ice/work/job_ollie | 36 +++++++ 21 files changed, 632 insertions(+) create mode 100644 dwarf/dwarf_ice/CMakeLists.txt create mode 100644 dwarf/dwarf_ice/README.md create mode 100644 dwarf/dwarf_ice/bin/.gitignore create mode 100755 dwarf/dwarf_ice/configure.sh create mode 100644 dwarf/dwarf_ice/dwarf_ini/CMakeLists.txt create mode 100644 dwarf/dwarf_ice/dwarf_ini/fesom.F90 create mode 100755 dwarf/dwarf_ice/dwarf_linkfiles.sh create mode 100755 dwarf/dwarf_ice/env.sh create mode 100755 dwarf/dwarf_ice/env/blogin.hlrn.de/shell create mode 100644 dwarf/dwarf_ice/env/bsc/shell create mode 100644 dwarf/dwarf_ice/env/ecaccess.ecmwf.int/shell create mode 100755 dwarf/dwarf_ice/env/hazelhen.hww.de/shell create mode 100755 dwarf/dwarf_ice/env/hlogin.hlrn.de/shell create mode 100644 dwarf/dwarf_ice/env/jureca/shell create mode 100644 dwarf/dwarf_ice/env/juwels/shell create mode 100755 dwarf/dwarf_ice/env/mistral.dkrz.de/shell create mode 100644 dwarf/dwarf_ice/env/mistral.dkrz.de/shell~ create mode 100644 dwarf/dwarf_ice/env/ollie/shell create mode 100644 dwarf/dwarf_ice/env/ollie/shell_cray create mode 100644 dwarf/dwarf_ice/env/ubuntu/shell create mode 100755 dwarf/dwarf_ice/work/job_ollie diff --git a/dwarf/dwarf_ice/CMakeLists.txt b/dwarf/dwarf_ice/CMakeLists.txt new file mode 100644 index 000000000..95b7e7b78 --- /dev/null +++ b/dwarf/dwarf_ice/CMakeLists.txt @@ -0,0 +1,17 @@ +cmake_minimum_required(VERSION 3.4) + +# set default build type cache entry (do so before project(...) is called, which would create this cache entry on its own) +if(NOT CMAKE_BUILD_TYPE) + message(STATUS "setting default build type: Release") + set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.") +endif() + +project(FESOM2.0) +set(TOPLEVEL_DIR ${CMAKE_CURRENT_LIST_DIR}) +set(FESOM_COUPLED OFF CACHE BOOL "compile fesom standalone or with oasis support (i.e. coupled)") +set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. (Also needs FESOM_COUPLED to work)") +set(CRAY OFF CACHE BOOL "compile with cray ftn") +set(USE_ICEPACK OFF CACHE BOOL "compile fesom with the Iceapck modules for sea ice column physics.") +#set(VERBOSE OFF CACHE BOOL "toggle debug output") +#add_subdirectory(oasis3-mct/lib/psmile) +add_subdirectory(src) diff --git a/dwarf/dwarf_ice/README.md b/dwarf/dwarf_ice/README.md new file mode 100644 index 000000000..4dcde298b --- /dev/null +++ b/dwarf/dwarf_ice/README.md @@ -0,0 +1,8 @@ +1st.: link necessary files for dwarf + ./dwarf_linkfiles.sh + +2nd.: compile dwarf + ./configure.sh + +3rd.: run dwarf model from work/ + \ No newline at end of file diff --git a/dwarf/dwarf_ice/bin/.gitignore b/dwarf/dwarf_ice/bin/.gitignore new file mode 100644 index 000000000..0fa27a178 --- /dev/null +++ b/dwarf/dwarf_ice/bin/.gitignore @@ -0,0 +1,4 @@ +gnore everything in this directory +* +# Except this file +!.gitignore diff --git a/dwarf/dwarf_ice/configure.sh b/dwarf/dwarf_ice/configure.sh new file mode 100755 index 000000000..b4b6e27b8 --- /dev/null +++ b/dwarf/dwarf_ice/configure.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +set -e + +source env.sh # source this from your run script too +mkdir build || true # make sure not to commit this to svn or git +cd build +cmake .. # not required when re-compiling +make install -j`nproc --all` diff --git a/dwarf/dwarf_ice/dwarf_ini/CMakeLists.txt b/dwarf/dwarf_ice/dwarf_ini/CMakeLists.txt new file mode 100644 index 000000000..c7cedc905 --- /dev/null +++ b/dwarf/dwarf_ice/dwarf_ini/CMakeLists.txt @@ -0,0 +1,98 @@ +cmake_minimum_required(VERSION 3.4) + +project(fesom Fortran) + +option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) + +# get our source files +set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt +#if(${USE_ICEPACK}) +# file(GLOB sources_Fortran ${src_home}/*.F90 +# ${src_home}/icepack_drivers/*.F90 +# ${src_home}/icepack_drivers/Icepack/columnphysics/*.F90) +#else() +file(GLOB sources_Fortran ${src_home}/*.F90) +#endif() +#list(REMOVE_ITEM sources_Fortran ${src_home}/fesom_partition_init.F90) +#file(GLOB sources_C ${src_home}/*.c) + +# generate a custom file from fesom_version_info.F90 which includes the current git SHA +#set(FESOM_ORIGINAL_VERSION_FILE ${src_home}/fesom_version_info.F90) +#set(FESOM_GENERATED_VERSION_FILE ${CMAKE_CURRENT_BINARY_DIR}/fesom_version_info-generated.F90) +#list(REMOVE_ITEM sources_Fortran ${FESOM_ORIGINAL_VERSION_FILE}) # we want to compile the generated file instead +#list(APPEND sources_Fortran ${FESOM_GENERATED_VERSION_FILE}) +#add_custom_command(OUTPUT 5303B6F4_E4F4_45B2_A6E5_8E2B9FB5CDC4 ${FESOM_GENERATED_VERSION_FILE} # the first arg to OUTPUT is a name for a file we never create to make sure this command will run on every re-build (let our file be the second arg, as the first file is inadvertently removed by make) +# COMMAND ${CMAKE_COMMAND} -DFESOM_ORIGINAL_VERSION_FILE=${FESOM_ORIGINAL_VERSION_FILE} -DFESOM_GENERATED_VERSION_FILE=${FESOM_GENERATED_VERSION_FILE} -P GitRepositoryInfo.cmake +# WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} +# COMMENT "determining ${PROJECT_NAME} git SHA ...") + +#if(${FESOM_STANDALONE}) +# list(REMOVE_ITEM sources_Fortran ${src_home}/cpl_driver.F90) +#endif() +#list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90) +#list(REMOVE_ITEM sources_C ${src_home}/fort_part.c) + +# depends on the metis library +#add_subdirectory(../lib/metis-5.1.0 ${PROJECT_BINARY_DIR}/metis) +#include_directories(../lib/metis-5.1.0/include) +# depends on the parms library +#add_subdirectory(../lib/parms ${PROJECT_BINARY_DIR}/parms) + +#add_subdirectory(async_threads_cpp) + +#include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindNETCDF.cmake) + +#add_library(${PROJECT_NAME}_C ${sources_C}) +#target_compile_definitions(${PROJECT_NAME}_C PRIVATE PARMS USE_MPI REAL=double DBL HAS_BLAS FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 SGI LINUX UNDER_ MPI2) +#target_link_libraries(${PROJECT_NAME}_C parms) #metis + +# create our binary (set its name to name of this project) +add_executable(${PROJECT_NAME} ${sources_Fortran}) +#target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS -DMETIS_VERSION=5 -DPART_WEIGHTED -DMETISRANDOMSEED=35243) +#if(${DISABLE_MULTITHREADING}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE DISABLE_MULTITHREADING) +#endif() +#if(${FESOM_COUPLED}) +# include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindOASIS.cmake) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __oasis) +#endif() +#if(${OIFS_COUPLED}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __oifs) +#endif() +#if(${USE_ICEPACK}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __icepack) +#endif() +if(${VERBOSE}) + target_compile_definitions(${PROJECT_NAME} PRIVATE VERBOSE) +endif() + + +# CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) +if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) +# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) + target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) + target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + endif() +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) +endif() + + +#elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) +# target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) +#endif() +#target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) +#target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) +#target_include_directories(${PROJECT_NAME} PRIVATE ${SCRIP_Fortran_INCLUDE_DIRECTORIES}) +#target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRARIES} ${OASIS_Fortran_LIBRARIES}) +#target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) +#target_link_libraries(${PROJECT_NAME} async_threads_cpp) +#set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) + +set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") +get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) +get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) +install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) diff --git a/dwarf/dwarf_ice/dwarf_ini/fesom.F90 b/dwarf/dwarf_ice/dwarf_ini/fesom.F90 new file mode 100644 index 000000000..9dcc0d083 --- /dev/null +++ b/dwarf/dwarf_ice/dwarf_ini/fesom.F90 @@ -0,0 +1,104 @@ +!=============================================================================! +! +! Finite Volume Sea-ice Ocean Model +! +!=============================================================================! +! The main driving routine +!=============================================================================! + +program main +USE MOD_MESH +USE MOD_PARTIT +USE MOD_ICE +USE MOD_PARSUP +USE g_comm_auto +USE par_support_interfaces +USE restart_derivedtype_module +use fortran_utils +IMPLICIT NONE + +character(LEN=500) :: resultpath, npepath +character(LEN=256) :: npes_string +logical :: dir_exist +type(t_mesh) , target, save :: mesh +type(t_partit), target, save :: partit +type(t_ice) , target, save :: ice +integer :: i, n, nzmax, nzmin +real(kind=WP) , allocatable :: UV(:,:,:), wvel(:,:), wvel_i(:,:), wvel_e(:,:) +integer :: node_size, elem_size + +!_______________________________________________________________________________ +resultpath="/work/ollie/pscholz/results_fesom2.0/test_binaryrestart" + +!_______________________________________________________________________________ +call MPI_INIT(i) +call par_init(partit) + +!_______________________________________________________________________________ +! check if resultpath exist +INQUIRE(directory=trim(resultpath), EXIST=dir_exist) +if (.not. dir_exist) then + if (partit%mype==0) print *, achar(27)//'[1;31m'//' -ERROR-> could not find:'//trim(resultpath)//achar(27)//'[0m' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) +end if + +npepath =trim(resultpath)//"/fesom_bin_restart/np"//int_to_txt(partit%npes) +INQUIRE(directory=trim(npepath), EXIST=dir_exist) +if (.not. dir_exist) then + if (partit%mype==0) print *, achar(27)//'[1;31m'//' -ERROR-> could not find:'//trim(npepath)//achar(27)//'[0m' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) +end if + +!_______________________________________________________________________________ +! read derived type binary restart files +call read_all_bin_restarts(npepath, ice=ice, partit=partit, mesh=mesh) + +!_______________________________________________________________________________ +! even though the partitioning has been read some things regarding MPI shall be computed during the runtime +! these include: MPI_TYPE_COMMIT etc. +! used to be call set_par_support(partit, mesh) +call init_mpi_types(partit, mesh) +call init_gatherLists(partit) + +!_______________________________________________________________________________ +node_size=partit%myDim_nod2D +partit%eDim_nod2D +elem_size=partit%myDim_elem2D+partit%eDim_elem2D + +!_______________________________________________________________________________ +do i=1, 10 + if (partit%mype==0) write(*,*) i + !___________________________________________________________________________ + ! Dynamics + select case (ice%whichEVP) + case (0) + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics...'//achar(27)//'[0m' + call EVPdynamics (ice, partit, mesh) + case (1) + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics_m...'//achar(27)//'[0m' + call EVPdynamics_m(ice, partit, mesh) + case (2) + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics_a...'//achar(27)//'[0m' + call EVPdynamics_a(ice, partit, mesh) + case default + if (partit%mype==0) write(*,*) 'a non existing EVP scheme specified!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end select + + !___________________________________________________________________________ + ! Advection + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_TG_rhs_div...'//achar(27)//'[0m' + call ice_TG_rhs_div (ice, partit, mesh) + + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_fct_solve...'//achar(27)//'[0m' + call ice_fct_solve (ice, partit, mesh) + + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_update_for_div...'//achar(27)//'[0m' + call ice_update_for_div(ice, partit, mesh) + +end do + +call par_ex(partit%MPI_COMM_FESOM, partit%mype) + +end program main + diff --git a/dwarf/dwarf_ice/dwarf_linkfiles.sh b/dwarf/dwarf_ice/dwarf_linkfiles.sh new file mode 100755 index 000000000..56673caeb --- /dev/null +++ b/dwarf/dwarf_ice/dwarf_linkfiles.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +#_______________________________________________________________________________ +if [ -d "src/" ] ; then rm -r src/ ; fi +# fi +mkdir src/ +cd src/ + +#_______________________________________________________________________________ +ln -s ../dwarf_ini/fesom.F90 fesom.F90 +ln -s ../dwarf_ini/CMakeLists.txt CMakeLists.txt + +#_______________________________________________________________________________ +export which_path="../../../src/" + +# export which_branch=refactoring +# export which_branch=refactoring_dwarf_ice +# export which_path=https://raw.githubusercontent.com/FESOM/fesom2/${which_branch}/src + +#_______________________________________________________________________________ +export which_files="associate_mesh_def.h + associate_mesh_ass.h + associate_part_def.h + associate_part_ass.h + MOD_MESH.F90 + MOD_PARTIT.F90 + MOD_TRACER.F90 + MOD_DYN.F90 + MOD_ICE.F90 + MOD_READ_BINARY_ARRAYS.F90 + MOD_WRITE_BINARY_ARRAYS.F90 + ice_modules.F90 + ice_EVP.F90 + ice_maEVP.F90 + ice_fct.F90 + gen_halo_exchange.F90 + gen_modules_partitioning.F90 + gen_modules_config.F90 + io_restart_derivedtype.F90 + fortran_utils.F90 + oce_modules.F90 + " +#_______________________________________________________________________________ +for file in ${which_files}; do + ln -s ${which_path}/${file} ${file} + # wget ${which_path}/${file} + # cp ${which_path}/${file} . +done + +#_______________________________________________________________________________ +cd ../ + diff --git a/dwarf/dwarf_ice/env.sh b/dwarf/dwarf_ice/env.sh new file mode 100755 index 000000000..f568651e3 --- /dev/null +++ b/dwarf/dwarf_ice/env.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# - - - +# # synopsis +# determine which environment directory is to be used for the current host +# # usage +# *source* to silently source the environment for this host system +# *execute* to print the environment directory for this host system +# - - - + + +# see if we are being sourced or executed +# as we use bash to execute (see shebang), BASH_SOURCE is set when executing +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + BEING_EXECUTED=true +else + BEING_EXECUTED=false +fi + +# if an arg is given, use it as hostname +if [ -z "$1" ]; then + # no argument given + LOGINHOST="$(hostname -f)" +else + LOGINHOST=$1 +fi + +if [[ $LOGINHOST =~ ^m[A-Za-z0-9]+\.hpc\.dkrz\.de$ ]]; then + STRATEGY="mistral.dkrz.de" +elif [[ $LOGINHOST =~ ^ollie[0-9]$ ]] || [[ $LOGINHOST =~ ^prod-[0-9]{4}$ ]]; then + STRATEGY="ollie" +elif [[ $LOGINHOST =~ ^h[A-Za-z0-9]+\.hsn\.hlrn\.de$ ]]; then + STRATEGY="hlogin.hlrn.de" +elif [[ $LOGINHOST =~ ^b[A-Za-z0-9]+\.hsn\.hlrn\.de$ ]]; then + STRATEGY="blogin.hlrn.de" +elif [[ $LOGINHOST =~ \.hww\.de$ ]] || [[ $LOGINHOST =~ ^nid[0-9]{5}$ ]]; then + STRATEGY="hazelhen.hww.de" +elif [[ $LOGINHOST =~ \.jureca$ ]]; then + STRATEGY="jureca" +elif [[ $LOGINHOST = ubuntu ]]; then + STRATEGY="ubuntu" +elif [[ $LOGINHOST = bsc ]]; then + STRATEGY="bsc" +elif [[ $LOGINHOST =~ ^juwels[0-9][0-9].ib.juwels.fzj.de$ ]]; then + STRATEGY="juwels" +elif [[ $LOGINHOST =~ ^jwlogin[0-9][0-9].juwels$ ]]; then + STRATEGY="juwels" +elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then + STRATEGY="ecaccess.ecmwf.int" +else + echo "can not determine environment for host: "$LOGINHOST + [ $BEING_EXECUTED = true ] && exit 1 + return # if we are being sourced, return from this script here +fi + +if [ -n "$BASH_VERSION" ]; then + # assume bash + SOURCE="${BASH_SOURCE[0]}" +elif [ -n "$ZSH_VERSION" ]; then + # assume zsh + SOURCE=${(%):-%N} +fi + +DIR="$( cd "$( dirname "${SOURCE}" )" && pwd )" + +if [ $BEING_EXECUTED = true ]; then + # file is being executed + echo $DIR/env/$STRATEGY +else + # file is being sourced + source $DIR/env/$STRATEGY/shell +fi diff --git a/dwarf/dwarf_ice/env/blogin.hlrn.de/shell b/dwarf/dwarf_ice/env/blogin.hlrn.de/shell new file mode 100755 index 000000000..7ca9794ea --- /dev/null +++ b/dwarf/dwarf_ice/env/blogin.hlrn.de/shell @@ -0,0 +1,15 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others + +module load gcc/4.9.3 +module load intel/17.0.4.196 + +module swap PrgEnv-cray PrgEnv-intel +module swap cray-mpich cray-mpich +module unload cray-hdf5 +module load cray-netcdf +module load cray-hdf5 + +export CRAYPE_LINK_TYPE=dynamic +export PATH=/gfs1/work/hbxeeeee/sw/cmake/bin:$PATH + +export FC=ftn CC=cc CXX=g++ diff --git a/dwarf/dwarf_ice/env/bsc/shell b/dwarf/dwarf_ice/env/bsc/shell new file mode 100644 index 000000000..a5ccfb77a --- /dev/null +++ b/dwarf/dwarf_ice/env/bsc/shell @@ -0,0 +1,7 @@ + +module load intel/2017.4 impi/2017.4 mkl/2017.4 bsc/1.0 netcdf/4.2 + +export FC=mpiifort CC=mpiicc CXX=mpiicpc + + + diff --git a/dwarf/dwarf_ice/env/ecaccess.ecmwf.int/shell b/dwarf/dwarf_ice/env/ecaccess.ecmwf.int/shell new file mode 100644 index 000000000..743116a12 --- /dev/null +++ b/dwarf/dwarf_ice/env/ecaccess.ecmwf.int/shell @@ -0,0 +1,11 @@ +export PATH=/home/rd/natr/cmake-3.11.2-Linux-x86_64/bin:$PATH + +module unload cray-hdf5 +module load cray-netcdf +module load cray-hdf5 + +#export CRAYPE_LINK_TYPE=dynamic + +# enable full MPI thread support level (MPI_THREAD_MULTIPLE) +export MPICH_MAX_THREAD_SAFETY=multiple # to also switch to an alternative (probably with faster locking) multi threading implementation of the cray-mpich library, use the compiler flag -craympich-mt +export FC=ftn CC=cc CXX=CC diff --git a/dwarf/dwarf_ice/env/hazelhen.hww.de/shell b/dwarf/dwarf_ice/env/hazelhen.hww.de/shell new file mode 100755 index 000000000..27b142a0a --- /dev/null +++ b/dwarf/dwarf_ice/env/hazelhen.hww.de/shell @@ -0,0 +1,24 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others + +module load gcc/4.9.3 +module load intel/17.0.2.174 + +module swap PrgEnv-cray PrgEnv-intel +module swap cray-mpich cray-mpich +module unload cray-hdf5 +module load cray-netcdf +module load cray-hdf5/1.10.0 + +module load tools/cmake/3.4.2 + +# enable us to use cdo +# module load tools/netcdf_utils contains cdo but breaks ftn which can not compile fortran code using mpi anymore: +# "error #7012: The module file cannot be read. Its format requires a more recent F90 compiler. [MPI] +# USE mpi" +# so we append the dir which contains cdo manually to PATH: +export PATH=$PATH:/opt/hlrs/tools/netcdf_utils/4.4.0-intel/install/bin + +export CRAYPE_LINK_TYPE=dynamic +export PATH=/zhome/academic/HLRS/xaw/xawjhege/sw/ruby/bin:$PATH + +export FC=ftn CC=cc CXX=g++ diff --git a/dwarf/dwarf_ice/env/hlogin.hlrn.de/shell b/dwarf/dwarf_ice/env/hlogin.hlrn.de/shell new file mode 100755 index 000000000..9dc02bbe7 --- /dev/null +++ b/dwarf/dwarf_ice/env/hlogin.hlrn.de/shell @@ -0,0 +1,15 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others + +module load gcc/4.9.3 +module load intel/17.0.1.132 + +module swap PrgEnv-cray PrgEnv-intel +module swap cray-mpich cray-mpich +module unload cray-hdf5 +module load cray-netcdf +module load cray-hdf5 + +export CRAYPE_LINK_TYPE=dynamic +export PATH=/gfs1/work/hbxeeeee/sw/cmake/bin:$PATH + +export FC=ftn CC=cc CXX=g++ diff --git a/dwarf/dwarf_ice/env/jureca/shell b/dwarf/dwarf_ice/env/jureca/shell new file mode 100644 index 000000000..35d77df62 --- /dev/null +++ b/dwarf/dwarf_ice/env/jureca/shell @@ -0,0 +1,9 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others + +module load CMake Intel IntelMPI imkl netCDF netCDF-Fortran #intel.compiler intel.mpi netcdf/4.4.0_intel +# the netcdf c++ path given by the netcdf module is broken, we have to workaround this +export FC=mpiifort CC=mpiicc CXX=mpiicpc +export NETCDF_Fortran_INCLUDE_DIRECTORIES=/usr/local/software/jureca/Stages/2018a/software/netCDF-Fortran/4.4.4-iimpi-2018a/include +export NETCDF_C_INCLUDE_DIRECTORIES=/usr/local/software/jureca/Stages/2018a/software/netCDF/4.6.1-iimpi-2018a/include +export NETCDF_CXX_INCLUDE_DIRECTORIES=/usr/local/software/jureca/Stages/2018a/software/netCDF/4.6.1-iimpi-2018a/include + diff --git a/dwarf/dwarf_ice/env/juwels/shell b/dwarf/dwarf_ice/env/juwels/shell new file mode 100644 index 000000000..0b5451c82 --- /dev/null +++ b/dwarf/dwarf_ice/env/juwels/shell @@ -0,0 +1,22 @@ +########## +module --force purge +module use /gpfs/software/juwels/otherstages +module load Stages/2019a +module load StdEnv +# For intel MPI +#module load Intel/2019.3.199-GCC-8.3.0 IntelMPI/2018.5.288 imkl/2019.3.199 +#export FC=mpiifort CC=mpiicc CXX=mpiicpc + +# For ParaStation MPI +module load Intel/2019.3.199-GCC-8.3.0 ParaStationMPI/5.4 imkl/2019.5.281 +export FC=mpifort CC=mpicc CXX=mpicxx + +module load netCDF/4.6.3 +module load netCDF-Fortran/4.4.5 +module load CMake +export NETCDF_DIR=$EBROOTNETCDF +export NETCDFF_DIR=$EBROOTNETCDFMINFORTRAN +export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ +export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ +export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ + diff --git a/dwarf/dwarf_ice/env/mistral.dkrz.de/shell b/dwarf/dwarf_ice/env/mistral.dkrz.de/shell new file mode 100755 index 000000000..48e2a4279 --- /dev/null +++ b/dwarf/dwarf_ice/env/mistral.dkrz.de/shell @@ -0,0 +1,67 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others + +module load gcc/4.8.2 +export LD_LIBRARY_PATH=/sw/rhel6-x64/gcc/gcc-4.8.2/lib64:$LD_LIBRARY_PATH # avoid GLIBCXX_3.4.15 not found error +module unload intel && module load intel/18.0.1 + +#export FC=mpiifort CC=mpiicc CXX=mpiicpc; module unload intelmpi && module load intelmpi/2018.1.163 +export FC=mpiifort CC=mpiicc CXX=mpiicpc; module unload intelmpi && module load intelmpi/2017.0.098 +#export FC=mpif90 CC=mpicc CXX=mpicxx; module load mxm/3.3.3002 fca/2.5.2393 bullxmpi_mlx/bullxmpi_mlx-1.2.8.3 +#export FC=mpif90 CC=mpicc CXX=mpicxx; module load mxm/3.4.3082 fca/2.5.2393 bullxmpi_mlx/bullxmpi_mlx-1.2.9.2 +#export FC=mpif90 CC=mpicc CXX=mpicxx OPENMPI=TRUE; module unload intelmpi && module load openmpi/2.0.2p1_hpcx-intel14 + +# intelmpi settings from DKRZ +export I_MPI_FABRICS=shm:dapl +export I_MPI_FALLBACK=disable +export I_MPI_SLURM_EXT=1 +export I_MPI_LARGE_SCALE_THRESHOLD=8192 # Set to a value larger than the number of your MPI-tasks if you use 8192 or more tasks. +export I_MPI_DYNAMIC_CONNECTION=0 +export DAPL_NETWORK_NODES=$SLURM_NNODES +export DAPL_NETWORK_PPN=$SLURM_NTASKS_PER_NODE +export DAPL_WR_MAX=500 + +# bullxmpi settings from DKRZ +# Settings for Open MPI and MXM (MellanoX Messaging) library +export OMPI_MCA_pml=cm +export OMPI_MCA_mtl=mxm +export OMPI_MCA_mtl_mxm_np=0 +export MXM_RDMA_PORTS=mlx5_0:1 +export MXM_LOG_LEVEL=FATAL +# Disable GHC algorithm for collective communication +export OMPI_MCA_coll=^ghc + +# openmpi settings from DKRZ (note, that some of above variables will be redefined) +if test "${OPENMPI}" == "TRUE"; then +export OMPI_MCA_pml=cm # sets the point-to-point management layer +export OMPI_MCA_mtl=mxm # sets the matching transport layer (MPI-2 one-sided comm.) +export MXM_RDMA_PORTS=mlx5_0:1 +export MXM_LOG_LEVEL=ERROR +export MXM_HANDLE_ERRORS=bt +export UCX_HANDLE_ERRORS=bt + +# enable HCOLL based collectives +export OMPI_MCA_coll=^fca # disable FCA for collective MPI routines +export OMPI_MCA_coll_hcoll_enable=1 # enable HCOLL for collective MPI routines +export OMPI_MCA_coll_hcoll_priority=95 +export OMPI_MCA_coll_hcoll_np=8 # use HCOLL for all communications with more than 8 tasks +export HCOLL_MAIN_IB=mlx5_0:1 +export HCOLL_ENABLE_MCAST=1 +export HCOLL_ENABLE_MCAST_ALL=1 + +# disable specific HCOLL functions (strongly depends on the application) +export HCOLL_ML_DISABLE_BARRIER=1 +export HCOLL_ML_DISABLE_IBARRIER=1 +export HCOLL_ML_DISABLE_BCAST=1 +export HCOLL_ML_DISABLE_REDUCE=1 +fi + +module unload netcdf && module load netcdf_c/4.3.2-gcc48 +module unload cmake && module load cmake +# we will get a segfault at runtime if we use a gcc from any of the provided gcc modules +export PATH=/sw/rhel6-x64/gcc/binutils-2.24-gccsys/bin:${PATH} + +export NETCDF_Fortran_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_fortran-4.4.2-intel14/include +export NETCDF_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_c-4.3.2-intel14/include +export NETCDF_CXX_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_cxx-4.2.1-gcc48/include + +export HDF5_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/hdf5/hdf5-1.8.14-threadsafe-intel14/include diff --git a/dwarf/dwarf_ice/env/mistral.dkrz.de/shell~ b/dwarf/dwarf_ice/env/mistral.dkrz.de/shell~ new file mode 100644 index 000000000..9b1c8acaa --- /dev/null +++ b/dwarf/dwarf_ice/env/mistral.dkrz.de/shell~ @@ -0,0 +1,40 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others + +module load gcc/4.8.2 +export LD_LIBRARY_PATH=/sw/rhel6-x64/gcc/gcc-4.8.2/lib64:$LD_LIBRARY_PATH # avoid GLIBCXX_3.4.15 not found error +module unload intel && module load intel/18.0.0 + +export FC=mpiifort CC=mpiicc CXX=mpiicpc; module unload intelmpi && module load intelmpi/2017.0.098 +#export FC=mpif90 CC=mpicc CXX=mpicxx; module load mxm/3.3.3002 fca/2.5.2393 bullxmpi_mlx/bullxmpi_mlx-1.2.8.3 +#export FC=mpif90 CC=mpicc CXX=mpicxx; module load mxm/3.4.3082 fca/2.5.2393 bullxmpi_mlx/bullxmpi_mlx-1.2.9.2 + +# intelmpi settings from DKRZ +export I_MPI_FABRICS=shm:dapl +export I_MPI_FALLBACK=disable +export I_MPI_SLURM_EXT=1 +export I_MPI_LARGE_SCALE_THRESHOLD=8192 # Set to a value larger than the number of your MPI-tasks if you use 8192 or more tasks. +export I_MPI_DYNAMIC_CONNECTION=0 +export DAPL_NETWORK_NODES=$SLURM_NNODES +export DAPL_NETWORK_PPN=$SLURM_NTASKS_PER_NODE +export DAPL_WR_MAX=500 + +# bullxmpi settings from DKRZ +# Settings for Open MPI and MXM (MellanoX Messaging) library +export OMPI_MCA_pml=cm +export OMPI_MCA_mtl=mxm +export OMPI_MCA_mtl_mxm_np=0 +export MXM_RDMA_PORTS=mlx5_0:1 +export MXM_LOG_LEVEL=FATAL +# Disable GHC algorithm for collective communication +export OMPI_MCA_coll=^ghc + +module unload netcdf && module load netcdf_c/4.3.2-gcc48 +module unload cmake && module load cmake +# we will get a segfault at runtime if we use a gcc from any of the provided gcc modules +export PATH=/sw/rhel6-x64/gcc/binutils-2.24-gccsys/bin:${PATH} + +export NETCDF_Fortran_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_fortran-4.4.2-intel14/include +export NETCDF_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_c-4.3.2-intel14/include +export NETCDF_CXX_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_cxx-4.2.1-gcc48/include + +export HDF5_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/hdf5/hdf5-1.8.14-threadsafe-intel14/include diff --git a/dwarf/dwarf_ice/env/ollie/shell b/dwarf/dwarf_ice/env/ollie/shell new file mode 100644 index 000000000..3b9efbf52 --- /dev/null +++ b/dwarf/dwarf_ice/env/ollie/shell @@ -0,0 +1,11 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others + +module load intel.compiler intel.mpi netcdf/4.4.0_intel +module load centoslibs # required on compute nodes + +# the netcdf c++ path given by the netcdf module is broken, we have to workaround this +export NETCDF_CXX_INCLUDE_DIRECTORIES=/global/AWIsoft/tkleiner/netcdf/4.4.0_intel_impi/include + +export PATH=/home/ollie/dsidoren/cmake-3.13.2-Linux-x86_64/bin:$PATH + +export FC="mpiifort -mkl" CC=mpiicc CXX=mpiicpc diff --git a/dwarf/dwarf_ice/env/ollie/shell_cray b/dwarf/dwarf_ice/env/ollie/shell_cray new file mode 100644 index 000000000..4c7d399f8 --- /dev/null +++ b/dwarf/dwarf_ice/env/ollie/shell_cray @@ -0,0 +1,6 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others + +module load cmake +module load PrgEnv-cray + +export FC=ftn CC=cc CXX=CC diff --git a/dwarf/dwarf_ice/env/ubuntu/shell b/dwarf/dwarf_ice/env/ubuntu/shell new file mode 100644 index 000000000..29fc59dbc --- /dev/null +++ b/dwarf/dwarf_ice/env/ubuntu/shell @@ -0,0 +1,5 @@ +export FC=mpifort CC=mpicc CXX=mpicxx +export BLAS_LIBRARIES=/usr/lib/x86_64-linux-gnu/blas/ +export UBUNTU_BLAS_LIBRARY="libblas.a" + + diff --git a/dwarf/dwarf_ice/work/job_ollie b/dwarf/dwarf_ice/work/job_ollie new file mode 100755 index 000000000..0fd30349d --- /dev/null +++ b/dwarf/dwarf_ice/work/job_ollie @@ -0,0 +1,36 @@ +#!/bin/bash +#SBATCH --job-name=fesom2.0 +#SBATCH -p mpp +#SBATCH --ntasks=288 +#SBATCH --time=00:05:00 +#SBATCH -o fesom2.0_%x_%j.out +#SBATCH -e fesom2.0_%x_%j.out +module load intel.compiler intel.mpi netcdf/4.4.0_intel +module load centoslibs + +set -x + +ulimit -s unlimited + +# determine JOBID +###JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1` +jobid=$(echo $SLURM_JOB_ID | cut -d"." -f1) + +ln -s ../bin/fesom.x . # cp -n ../bin/fesom.x +cp -n ../config/namelist.config . +cp -n ../config/namelist.forcing . +cp -n ../config/namelist.oce . +cp -n ../config/namelist.ice . +cp -n ../config/namelist.io . +cp -n ../config/namelist.icepack . + +date +###srun --mpi=pmi2 ./fesom.x > "fesom2.0.out" +srun --mpi=pmi2 ./fesom.x > "fesom2.0_${SLURM_JOB_NAME}_${jobid}.out" +date + +#qstat -f $PBS_JOBID +#export EXITSTATUS=$? +#if [ ${EXITSTATUS} -eq 0 ] || [ ${EXITSTATUS} -eq 127 ] ; then +#sbatch job_ollie +#fi From b3d943c9e102db02844b6ff99614f0e1b2625439 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 15 Feb 2022 19:04:05 +0100 Subject: [PATCH 839/909] after hard day work with Nikolay we added an advection dwarf in the same manner as Patrick did it for sea ice :) --- dwarf/dwarf_tracer/CMakeLists.txt | 17 ++++ dwarf/dwarf_tracer/dwarf_ini/CMakeLists.txt | 98 +++++++++++++++++++++ dwarf/dwarf_tracer/dwarf_ini/fesom.F90 | 89 +++++++++++++++++++ dwarf/dwarf_tracer/dwarf_linkfiles.sh | 53 +++++++++++ dwarf/dwarf_tracer/work/job_ollie | 27 ++++++ 5 files changed, 284 insertions(+) create mode 100644 dwarf/dwarf_tracer/CMakeLists.txt create mode 100644 dwarf/dwarf_tracer/dwarf_ini/CMakeLists.txt create mode 100755 dwarf/dwarf_tracer/dwarf_ini/fesom.F90 create mode 100755 dwarf/dwarf_tracer/dwarf_linkfiles.sh create mode 100755 dwarf/dwarf_tracer/work/job_ollie diff --git a/dwarf/dwarf_tracer/CMakeLists.txt b/dwarf/dwarf_tracer/CMakeLists.txt new file mode 100644 index 000000000..95b7e7b78 --- /dev/null +++ b/dwarf/dwarf_tracer/CMakeLists.txt @@ -0,0 +1,17 @@ +cmake_minimum_required(VERSION 3.4) + +# set default build type cache entry (do so before project(...) is called, which would create this cache entry on its own) +if(NOT CMAKE_BUILD_TYPE) + message(STATUS "setting default build type: Release") + set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.") +endif() + +project(FESOM2.0) +set(TOPLEVEL_DIR ${CMAKE_CURRENT_LIST_DIR}) +set(FESOM_COUPLED OFF CACHE BOOL "compile fesom standalone or with oasis support (i.e. coupled)") +set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. (Also needs FESOM_COUPLED to work)") +set(CRAY OFF CACHE BOOL "compile with cray ftn") +set(USE_ICEPACK OFF CACHE BOOL "compile fesom with the Iceapck modules for sea ice column physics.") +#set(VERBOSE OFF CACHE BOOL "toggle debug output") +#add_subdirectory(oasis3-mct/lib/psmile) +add_subdirectory(src) diff --git a/dwarf/dwarf_tracer/dwarf_ini/CMakeLists.txt b/dwarf/dwarf_tracer/dwarf_ini/CMakeLists.txt new file mode 100644 index 000000000..c7cedc905 --- /dev/null +++ b/dwarf/dwarf_tracer/dwarf_ini/CMakeLists.txt @@ -0,0 +1,98 @@ +cmake_minimum_required(VERSION 3.4) + +project(fesom Fortran) + +option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) + +# get our source files +set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt +#if(${USE_ICEPACK}) +# file(GLOB sources_Fortran ${src_home}/*.F90 +# ${src_home}/icepack_drivers/*.F90 +# ${src_home}/icepack_drivers/Icepack/columnphysics/*.F90) +#else() +file(GLOB sources_Fortran ${src_home}/*.F90) +#endif() +#list(REMOVE_ITEM sources_Fortran ${src_home}/fesom_partition_init.F90) +#file(GLOB sources_C ${src_home}/*.c) + +# generate a custom file from fesom_version_info.F90 which includes the current git SHA +#set(FESOM_ORIGINAL_VERSION_FILE ${src_home}/fesom_version_info.F90) +#set(FESOM_GENERATED_VERSION_FILE ${CMAKE_CURRENT_BINARY_DIR}/fesom_version_info-generated.F90) +#list(REMOVE_ITEM sources_Fortran ${FESOM_ORIGINAL_VERSION_FILE}) # we want to compile the generated file instead +#list(APPEND sources_Fortran ${FESOM_GENERATED_VERSION_FILE}) +#add_custom_command(OUTPUT 5303B6F4_E4F4_45B2_A6E5_8E2B9FB5CDC4 ${FESOM_GENERATED_VERSION_FILE} # the first arg to OUTPUT is a name for a file we never create to make sure this command will run on every re-build (let our file be the second arg, as the first file is inadvertently removed by make) +# COMMAND ${CMAKE_COMMAND} -DFESOM_ORIGINAL_VERSION_FILE=${FESOM_ORIGINAL_VERSION_FILE} -DFESOM_GENERATED_VERSION_FILE=${FESOM_GENERATED_VERSION_FILE} -P GitRepositoryInfo.cmake +# WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} +# COMMENT "determining ${PROJECT_NAME} git SHA ...") + +#if(${FESOM_STANDALONE}) +# list(REMOVE_ITEM sources_Fortran ${src_home}/cpl_driver.F90) +#endif() +#list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90) +#list(REMOVE_ITEM sources_C ${src_home}/fort_part.c) + +# depends on the metis library +#add_subdirectory(../lib/metis-5.1.0 ${PROJECT_BINARY_DIR}/metis) +#include_directories(../lib/metis-5.1.0/include) +# depends on the parms library +#add_subdirectory(../lib/parms ${PROJECT_BINARY_DIR}/parms) + +#add_subdirectory(async_threads_cpp) + +#include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindNETCDF.cmake) + +#add_library(${PROJECT_NAME}_C ${sources_C}) +#target_compile_definitions(${PROJECT_NAME}_C PRIVATE PARMS USE_MPI REAL=double DBL HAS_BLAS FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 SGI LINUX UNDER_ MPI2) +#target_link_libraries(${PROJECT_NAME}_C parms) #metis + +# create our binary (set its name to name of this project) +add_executable(${PROJECT_NAME} ${sources_Fortran}) +#target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS -DMETIS_VERSION=5 -DPART_WEIGHTED -DMETISRANDOMSEED=35243) +#if(${DISABLE_MULTITHREADING}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE DISABLE_MULTITHREADING) +#endif() +#if(${FESOM_COUPLED}) +# include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindOASIS.cmake) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __oasis) +#endif() +#if(${OIFS_COUPLED}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __oifs) +#endif() +#if(${USE_ICEPACK}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __icepack) +#endif() +if(${VERBOSE}) + target_compile_definitions(${PROJECT_NAME} PRIVATE VERBOSE) +endif() + + +# CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) +if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) +# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) + target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) + target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + endif() +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) +endif() + + +#elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) +# target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) +#endif() +#target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) +#target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) +#target_include_directories(${PROJECT_NAME} PRIVATE ${SCRIP_Fortran_INCLUDE_DIRECTORIES}) +#target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRARIES} ${OASIS_Fortran_LIBRARIES}) +#target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) +#target_link_libraries(${PROJECT_NAME} async_threads_cpp) +#set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) + +set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") +get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) +get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) +install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) diff --git a/dwarf/dwarf_tracer/dwarf_ini/fesom.F90 b/dwarf/dwarf_tracer/dwarf_ini/fesom.F90 new file mode 100755 index 000000000..d6467d29b --- /dev/null +++ b/dwarf/dwarf_tracer/dwarf_ini/fesom.F90 @@ -0,0 +1,89 @@ +!=============================================================================! +! +! Finite Volume Sea-ice Ocean Model +! +!=============================================================================! +! The main driving routine +!=============================================================================! + +program main +USE MOD_MESH +USE MOD_PARTIT +USE MOD_TRACER +USE MOD_DYN +USE MOD_ICE +USE MOD_PARSUP +USE g_comm_auto +USE par_support_interfaces +USE restart_derivedtype_module +USE fortran_utils +IMPLICIT NONE + +character(LEN=500) :: resultpath, npepath +character(LEN=256) :: npes_string +logical :: dir_exist +logical :: L_EXISTS +type(t_mesh), target, save :: mesh +type(t_tracer), target, save :: tracers +type(t_partit), target, save :: partit +type(t_dyn), target, save :: dyn +type(t_ice), target, save :: ice +integer :: i, n, nzmax, nzmin + + +call MPI_INIT(i) +call par_init(partit) + +resultpath='/work/ollie/pscholz/results_fesom2.0/test_binaryrestart' + +! check if resultpath exist +INQUIRE(directory=trim(resultpath), EXIST=dir_exist) +if (.not. dir_exist) then + if (partit%mype==0) print *, achar(27)//'[1;31m'//' -ERROR-> could not find:'//trim(resultpath)//achar(27)//'[0m' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop +end if + +npepath =trim(resultpath)//"/fesom_bin_restart/np"//int_to_txt(partit%npes) +INQUIRE(directory=trim(npepath), EXIST=dir_exist) +if (.not. dir_exist) then + if (partit%mype==0) print *, achar(27)//'[1;31m'//' -ERROR-> could not find:'//trim(npepath)//achar(27)//'[0m' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop +end if + +!_______________________________________________________________________________ +! read derived type binary restart files +call read_all_bin_restarts(npepath, dynamics=dyn, tracers=tracers, partit=partit, mesh=mesh) + +! even though the partitioning has been read some things regarding MPI shall be computed during the runtime +! these include: MPI_TYPE_COMMIT etc. +! used to be call set_par_support(partit, mesh) +call init_mpi_types(partit, mesh) +call init_gatherLists(partit) + +do i=1, 10 + !___________________________________________________________________________ + ! ale tracer advection + tracers%work%del_ttf_advhoriz = 0.0_WP + tracers%work%del_ttf_advvert = 0.0_WP +! if (mype==0) write(*,*) 'start advection part.......' + call do_oce_adv_tra(1.e-3, dyn%uv, dyn%w, dyn%w_i, dyn%w_e, 1, dyn, tracers, partit, mesh) +! if (mype==0) write(*,*) 'advection part completed...' + if (partit%mype==0) write(*,*) minval(tracers%data(1)%values), maxval(tracers%data(1)%values), sum(tracers%data(1)%values) + !_____________________________________________________ + !___________________________________________________________________________ + ! update array for total tracer flux del_ttf with the fluxes from horizontal + ! and vertical advection + tracers%work%del_ttf=tracers%work%del_ttf+tracers%work%del_ttf_advhoriz+tracers%work%del_ttf_advvert + + do n=1, partit%myDim_nod2D + nzmax=mesh%nlevels_nod2D(n)-1 + nzmin=mesh%ulevels_nod2D(n) + tracers%data(1)%values(nzmin:nzmax,n)=tracers%data(1)%values(nzmin:nzmax,n)+tracers%work%del_ttf(nzmin:nzmax,n)/mesh%hnode_new(nzmin:nzmax,n) ! LINFS + end do + call exchange_nod(tracers%data(1)%values(:,:), partit) + call exchange_nod(tracers%data(2)%values(:,:), partit) +end do +call par_ex(partit%MPI_COMM_FESOM, partit%mype) +end program main diff --git a/dwarf/dwarf_tracer/dwarf_linkfiles.sh b/dwarf/dwarf_tracer/dwarf_linkfiles.sh new file mode 100755 index 000000000..a9b254fb4 --- /dev/null +++ b/dwarf/dwarf_tracer/dwarf_linkfiles.sh @@ -0,0 +1,53 @@ +#!/bin/bash +#_______________________________________________________________________________ +ln -s ../../env env +ln -s ../../env.sh env.sh +ln -s ../../configure.sh configure.sh +#_______________________________________________________________________________ +if [ -d "src/" ] ; then rm -r src/ ; fi +# fi +mkdir src/ +cd src/ + +#_______________________________________________________________________________ +ln -s ../dwarf_ini/fesom.F90 fesom.F90 +ln -s ../dwarf_ini/CMakeLists.txt CMakeLists.txt + +#_______________________________________________________________________________ +export which_path="../../../src/" + +# export which_branch=refactoring +# export which_branch=refactoring_dwarf_ice +# export which_path=https://raw.githubusercontent.com/FESOM/fesom2/${which_branch}/src +#_______________________________________________________________________________ +export which_files="associate_mesh_def.h + associate_mesh_ass.h + associate_part_def.h + associate_part_ass.h + MOD_MESH.F90 + MOD_PARTIT.F90 + MOD_TRACER.F90 + MOD_DYN.F90 + MOD_ICE.F90 + MOD_READ_BINARY_ARRAYS.F90 + MOD_WRITE_BINARY_ARRAYS.F90 + io_restart_derivedtype.F90 + fortran_utils.F90 + gen_halo_exchange.F90 + oce_adv_tra_driver.F90 + oce_adv_tra_hor.F90 + oce_modules.F90 + gen_modules_partitioning.F90 + oce_adv_tra_fct.F90 + oce_adv_tra_ver.F90 + " +#_______________________________________________________________________________ +for file in ${which_files}; do + ln -s ${which_path}/${file} ${file} + # wget ${which_path}/${file} + # cp ${which_path}/${file} . +done + +#_______________________________________________________________________________ +cd ../ + diff --git a/dwarf/dwarf_tracer/work/job_ollie b/dwarf/dwarf_tracer/work/job_ollie new file mode 100755 index 000000000..df38d528d --- /dev/null +++ b/dwarf/dwarf_tracer/work/job_ollie @@ -0,0 +1,27 @@ +#!/bin/bash +#SBATCH --job-name=fesom2.0 +#SBATCH -p mpp +#SBATCH --ntasks=72 +#SBATCH --time=00:30:00 +#SBATCH -o slurm-out.out +#SBATCH -e slurm-err.out +module load intel.compiler intel.mpi netcdf/4.4.0_intel +module load centoslibs + +set -x + +ulimit -s unlimited + +# determine JOBID +JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1` + +ln -s ../bin/fesom.x . # cp -n ../bin/fesom.x +date +srun --mpi=pmi2 ./fesom.x > "fesom2.0.out" +date + +#qstat -f $PBS_JOBID +#export EXITSTATUS=$? +#if [ ${EXITSTATUS} -eq 0 ] || [ ${EXITSTATUS} -eq 127 ] ; then +#sbatch job_ollie +#fi From 66e0de962d7ec4f826db4c8c6afaf3c7009d097c Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 16 Feb 2022 13:22:14 +0100 Subject: [PATCH 840/909] make the dwarf structure more slim, let the directories be created by dwarf_linkfiles.sh script --- dwarf/dwarf_ice/CMakeLists.txt | 17 ----- dwarf/dwarf_ice/bin/.gitignore | 4 -- dwarf/dwarf_ice/configure.sh | 9 --- dwarf/dwarf_ice/env.sh | 72 -------------------- dwarf/dwarf_ice/env/blogin.hlrn.de/shell | 15 ---- dwarf/dwarf_ice/env/bsc/shell | 7 -- dwarf/dwarf_ice/env/ecaccess.ecmwf.int/shell | 11 --- dwarf/dwarf_ice/env/hazelhen.hww.de/shell | 24 ------- dwarf/dwarf_ice/env/hlogin.hlrn.de/shell | 15 ---- dwarf/dwarf_ice/env/jureca/shell | 9 --- dwarf/dwarf_ice/env/juwels/shell | 22 ------ dwarf/dwarf_ice/env/mistral.dkrz.de/shell | 67 ------------------ dwarf/dwarf_ice/env/mistral.dkrz.de/shell~ | 40 ----------- dwarf/dwarf_ice/env/ollie/shell | 11 --- dwarf/dwarf_ice/env/ollie/shell_cray | 6 -- dwarf/dwarf_ice/env/ubuntu/shell | 5 -- 16 files changed, 334 deletions(-) delete mode 100644 dwarf/dwarf_ice/CMakeLists.txt delete mode 100644 dwarf/dwarf_ice/bin/.gitignore delete mode 100755 dwarf/dwarf_ice/configure.sh delete mode 100755 dwarf/dwarf_ice/env.sh delete mode 100755 dwarf/dwarf_ice/env/blogin.hlrn.de/shell delete mode 100644 dwarf/dwarf_ice/env/bsc/shell delete mode 100644 dwarf/dwarf_ice/env/ecaccess.ecmwf.int/shell delete mode 100755 dwarf/dwarf_ice/env/hazelhen.hww.de/shell delete mode 100755 dwarf/dwarf_ice/env/hlogin.hlrn.de/shell delete mode 100644 dwarf/dwarf_ice/env/jureca/shell delete mode 100644 dwarf/dwarf_ice/env/juwels/shell delete mode 100755 dwarf/dwarf_ice/env/mistral.dkrz.de/shell delete mode 100644 dwarf/dwarf_ice/env/mistral.dkrz.de/shell~ delete mode 100644 dwarf/dwarf_ice/env/ollie/shell delete mode 100644 dwarf/dwarf_ice/env/ollie/shell_cray delete mode 100644 dwarf/dwarf_ice/env/ubuntu/shell diff --git a/dwarf/dwarf_ice/CMakeLists.txt b/dwarf/dwarf_ice/CMakeLists.txt deleted file mode 100644 index 95b7e7b78..000000000 --- a/dwarf/dwarf_ice/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -cmake_minimum_required(VERSION 3.4) - -# set default build type cache entry (do so before project(...) is called, which would create this cache entry on its own) -if(NOT CMAKE_BUILD_TYPE) - message(STATUS "setting default build type: Release") - set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.") -endif() - -project(FESOM2.0) -set(TOPLEVEL_DIR ${CMAKE_CURRENT_LIST_DIR}) -set(FESOM_COUPLED OFF CACHE BOOL "compile fesom standalone or with oasis support (i.e. coupled)") -set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. (Also needs FESOM_COUPLED to work)") -set(CRAY OFF CACHE BOOL "compile with cray ftn") -set(USE_ICEPACK OFF CACHE BOOL "compile fesom with the Iceapck modules for sea ice column physics.") -#set(VERBOSE OFF CACHE BOOL "toggle debug output") -#add_subdirectory(oasis3-mct/lib/psmile) -add_subdirectory(src) diff --git a/dwarf/dwarf_ice/bin/.gitignore b/dwarf/dwarf_ice/bin/.gitignore deleted file mode 100644 index 0fa27a178..000000000 --- a/dwarf/dwarf_ice/bin/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -gnore everything in this directory -* -# Except this file -!.gitignore diff --git a/dwarf/dwarf_ice/configure.sh b/dwarf/dwarf_ice/configure.sh deleted file mode 100755 index b4b6e27b8..000000000 --- a/dwarf/dwarf_ice/configure.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -set -e - -source env.sh # source this from your run script too -mkdir build || true # make sure not to commit this to svn or git -cd build -cmake .. # not required when re-compiling -make install -j`nproc --all` diff --git a/dwarf/dwarf_ice/env.sh b/dwarf/dwarf_ice/env.sh deleted file mode 100755 index f568651e3..000000000 --- a/dwarf/dwarf_ice/env.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env bash - -# - - - -# # synopsis -# determine which environment directory is to be used for the current host -# # usage -# *source* to silently source the environment for this host system -# *execute* to print the environment directory for this host system -# - - - - - -# see if we are being sourced or executed -# as we use bash to execute (see shebang), BASH_SOURCE is set when executing -if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then - BEING_EXECUTED=true -else - BEING_EXECUTED=false -fi - -# if an arg is given, use it as hostname -if [ -z "$1" ]; then - # no argument given - LOGINHOST="$(hostname -f)" -else - LOGINHOST=$1 -fi - -if [[ $LOGINHOST =~ ^m[A-Za-z0-9]+\.hpc\.dkrz\.de$ ]]; then - STRATEGY="mistral.dkrz.de" -elif [[ $LOGINHOST =~ ^ollie[0-9]$ ]] || [[ $LOGINHOST =~ ^prod-[0-9]{4}$ ]]; then - STRATEGY="ollie" -elif [[ $LOGINHOST =~ ^h[A-Za-z0-9]+\.hsn\.hlrn\.de$ ]]; then - STRATEGY="hlogin.hlrn.de" -elif [[ $LOGINHOST =~ ^b[A-Za-z0-9]+\.hsn\.hlrn\.de$ ]]; then - STRATEGY="blogin.hlrn.de" -elif [[ $LOGINHOST =~ \.hww\.de$ ]] || [[ $LOGINHOST =~ ^nid[0-9]{5}$ ]]; then - STRATEGY="hazelhen.hww.de" -elif [[ $LOGINHOST =~ \.jureca$ ]]; then - STRATEGY="jureca" -elif [[ $LOGINHOST = ubuntu ]]; then - STRATEGY="ubuntu" -elif [[ $LOGINHOST = bsc ]]; then - STRATEGY="bsc" -elif [[ $LOGINHOST =~ ^juwels[0-9][0-9].ib.juwels.fzj.de$ ]]; then - STRATEGY="juwels" -elif [[ $LOGINHOST =~ ^jwlogin[0-9][0-9].juwels$ ]]; then - STRATEGY="juwels" -elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then - STRATEGY="ecaccess.ecmwf.int" -else - echo "can not determine environment for host: "$LOGINHOST - [ $BEING_EXECUTED = true ] && exit 1 - return # if we are being sourced, return from this script here -fi - -if [ -n "$BASH_VERSION" ]; then - # assume bash - SOURCE="${BASH_SOURCE[0]}" -elif [ -n "$ZSH_VERSION" ]; then - # assume zsh - SOURCE=${(%):-%N} -fi - -DIR="$( cd "$( dirname "${SOURCE}" )" && pwd )" - -if [ $BEING_EXECUTED = true ]; then - # file is being executed - echo $DIR/env/$STRATEGY -else - # file is being sourced - source $DIR/env/$STRATEGY/shell -fi diff --git a/dwarf/dwarf_ice/env/blogin.hlrn.de/shell b/dwarf/dwarf_ice/env/blogin.hlrn.de/shell deleted file mode 100755 index 7ca9794ea..000000000 --- a/dwarf/dwarf_ice/env/blogin.hlrn.de/shell +++ /dev/null @@ -1,15 +0,0 @@ -# make the contents as shell agnostic as possible so we can include them with bash, zsh and others - -module load gcc/4.9.3 -module load intel/17.0.4.196 - -module swap PrgEnv-cray PrgEnv-intel -module swap cray-mpich cray-mpich -module unload cray-hdf5 -module load cray-netcdf -module load cray-hdf5 - -export CRAYPE_LINK_TYPE=dynamic -export PATH=/gfs1/work/hbxeeeee/sw/cmake/bin:$PATH - -export FC=ftn CC=cc CXX=g++ diff --git a/dwarf/dwarf_ice/env/bsc/shell b/dwarf/dwarf_ice/env/bsc/shell deleted file mode 100644 index a5ccfb77a..000000000 --- a/dwarf/dwarf_ice/env/bsc/shell +++ /dev/null @@ -1,7 +0,0 @@ - -module load intel/2017.4 impi/2017.4 mkl/2017.4 bsc/1.0 netcdf/4.2 - -export FC=mpiifort CC=mpiicc CXX=mpiicpc - - - diff --git a/dwarf/dwarf_ice/env/ecaccess.ecmwf.int/shell b/dwarf/dwarf_ice/env/ecaccess.ecmwf.int/shell deleted file mode 100644 index 743116a12..000000000 --- a/dwarf/dwarf_ice/env/ecaccess.ecmwf.int/shell +++ /dev/null @@ -1,11 +0,0 @@ -export PATH=/home/rd/natr/cmake-3.11.2-Linux-x86_64/bin:$PATH - -module unload cray-hdf5 -module load cray-netcdf -module load cray-hdf5 - -#export CRAYPE_LINK_TYPE=dynamic - -# enable full MPI thread support level (MPI_THREAD_MULTIPLE) -export MPICH_MAX_THREAD_SAFETY=multiple # to also switch to an alternative (probably with faster locking) multi threading implementation of the cray-mpich library, use the compiler flag -craympich-mt -export FC=ftn CC=cc CXX=CC diff --git a/dwarf/dwarf_ice/env/hazelhen.hww.de/shell b/dwarf/dwarf_ice/env/hazelhen.hww.de/shell deleted file mode 100755 index 27b142a0a..000000000 --- a/dwarf/dwarf_ice/env/hazelhen.hww.de/shell +++ /dev/null @@ -1,24 +0,0 @@ -# make the contents as shell agnostic as possible so we can include them with bash, zsh and others - -module load gcc/4.9.3 -module load intel/17.0.2.174 - -module swap PrgEnv-cray PrgEnv-intel -module swap cray-mpich cray-mpich -module unload cray-hdf5 -module load cray-netcdf -module load cray-hdf5/1.10.0 - -module load tools/cmake/3.4.2 - -# enable us to use cdo -# module load tools/netcdf_utils contains cdo but breaks ftn which can not compile fortran code using mpi anymore: -# "error #7012: The module file cannot be read. Its format requires a more recent F90 compiler. [MPI] -# USE mpi" -# so we append the dir which contains cdo manually to PATH: -export PATH=$PATH:/opt/hlrs/tools/netcdf_utils/4.4.0-intel/install/bin - -export CRAYPE_LINK_TYPE=dynamic -export PATH=/zhome/academic/HLRS/xaw/xawjhege/sw/ruby/bin:$PATH - -export FC=ftn CC=cc CXX=g++ diff --git a/dwarf/dwarf_ice/env/hlogin.hlrn.de/shell b/dwarf/dwarf_ice/env/hlogin.hlrn.de/shell deleted file mode 100755 index 9dc02bbe7..000000000 --- a/dwarf/dwarf_ice/env/hlogin.hlrn.de/shell +++ /dev/null @@ -1,15 +0,0 @@ -# make the contents as shell agnostic as possible so we can include them with bash, zsh and others - -module load gcc/4.9.3 -module load intel/17.0.1.132 - -module swap PrgEnv-cray PrgEnv-intel -module swap cray-mpich cray-mpich -module unload cray-hdf5 -module load cray-netcdf -module load cray-hdf5 - -export CRAYPE_LINK_TYPE=dynamic -export PATH=/gfs1/work/hbxeeeee/sw/cmake/bin:$PATH - -export FC=ftn CC=cc CXX=g++ diff --git a/dwarf/dwarf_ice/env/jureca/shell b/dwarf/dwarf_ice/env/jureca/shell deleted file mode 100644 index 35d77df62..000000000 --- a/dwarf/dwarf_ice/env/jureca/shell +++ /dev/null @@ -1,9 +0,0 @@ -# make the contents as shell agnostic as possible so we can include them with bash, zsh and others - -module load CMake Intel IntelMPI imkl netCDF netCDF-Fortran #intel.compiler intel.mpi netcdf/4.4.0_intel -# the netcdf c++ path given by the netcdf module is broken, we have to workaround this -export FC=mpiifort CC=mpiicc CXX=mpiicpc -export NETCDF_Fortran_INCLUDE_DIRECTORIES=/usr/local/software/jureca/Stages/2018a/software/netCDF-Fortran/4.4.4-iimpi-2018a/include -export NETCDF_C_INCLUDE_DIRECTORIES=/usr/local/software/jureca/Stages/2018a/software/netCDF/4.6.1-iimpi-2018a/include -export NETCDF_CXX_INCLUDE_DIRECTORIES=/usr/local/software/jureca/Stages/2018a/software/netCDF/4.6.1-iimpi-2018a/include - diff --git a/dwarf/dwarf_ice/env/juwels/shell b/dwarf/dwarf_ice/env/juwels/shell deleted file mode 100644 index 0b5451c82..000000000 --- a/dwarf/dwarf_ice/env/juwels/shell +++ /dev/null @@ -1,22 +0,0 @@ -########## -module --force purge -module use /gpfs/software/juwels/otherstages -module load Stages/2019a -module load StdEnv -# For intel MPI -#module load Intel/2019.3.199-GCC-8.3.0 IntelMPI/2018.5.288 imkl/2019.3.199 -#export FC=mpiifort CC=mpiicc CXX=mpiicpc - -# For ParaStation MPI -module load Intel/2019.3.199-GCC-8.3.0 ParaStationMPI/5.4 imkl/2019.5.281 -export FC=mpifort CC=mpicc CXX=mpicxx - -module load netCDF/4.6.3 -module load netCDF-Fortran/4.4.5 -module load CMake -export NETCDF_DIR=$EBROOTNETCDF -export NETCDFF_DIR=$EBROOTNETCDFMINFORTRAN -export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ -export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ -export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ - diff --git a/dwarf/dwarf_ice/env/mistral.dkrz.de/shell b/dwarf/dwarf_ice/env/mistral.dkrz.de/shell deleted file mode 100755 index 48e2a4279..000000000 --- a/dwarf/dwarf_ice/env/mistral.dkrz.de/shell +++ /dev/null @@ -1,67 +0,0 @@ -# make the contents as shell agnostic as possible so we can include them with bash, zsh and others - -module load gcc/4.8.2 -export LD_LIBRARY_PATH=/sw/rhel6-x64/gcc/gcc-4.8.2/lib64:$LD_LIBRARY_PATH # avoid GLIBCXX_3.4.15 not found error -module unload intel && module load intel/18.0.1 - -#export FC=mpiifort CC=mpiicc CXX=mpiicpc; module unload intelmpi && module load intelmpi/2018.1.163 -export FC=mpiifort CC=mpiicc CXX=mpiicpc; module unload intelmpi && module load intelmpi/2017.0.098 -#export FC=mpif90 CC=mpicc CXX=mpicxx; module load mxm/3.3.3002 fca/2.5.2393 bullxmpi_mlx/bullxmpi_mlx-1.2.8.3 -#export FC=mpif90 CC=mpicc CXX=mpicxx; module load mxm/3.4.3082 fca/2.5.2393 bullxmpi_mlx/bullxmpi_mlx-1.2.9.2 -#export FC=mpif90 CC=mpicc CXX=mpicxx OPENMPI=TRUE; module unload intelmpi && module load openmpi/2.0.2p1_hpcx-intel14 - -# intelmpi settings from DKRZ -export I_MPI_FABRICS=shm:dapl -export I_MPI_FALLBACK=disable -export I_MPI_SLURM_EXT=1 -export I_MPI_LARGE_SCALE_THRESHOLD=8192 # Set to a value larger than the number of your MPI-tasks if you use 8192 or more tasks. -export I_MPI_DYNAMIC_CONNECTION=0 -export DAPL_NETWORK_NODES=$SLURM_NNODES -export DAPL_NETWORK_PPN=$SLURM_NTASKS_PER_NODE -export DAPL_WR_MAX=500 - -# bullxmpi settings from DKRZ -# Settings for Open MPI and MXM (MellanoX Messaging) library -export OMPI_MCA_pml=cm -export OMPI_MCA_mtl=mxm -export OMPI_MCA_mtl_mxm_np=0 -export MXM_RDMA_PORTS=mlx5_0:1 -export MXM_LOG_LEVEL=FATAL -# Disable GHC algorithm for collective communication -export OMPI_MCA_coll=^ghc - -# openmpi settings from DKRZ (note, that some of above variables will be redefined) -if test "${OPENMPI}" == "TRUE"; then -export OMPI_MCA_pml=cm # sets the point-to-point management layer -export OMPI_MCA_mtl=mxm # sets the matching transport layer (MPI-2 one-sided comm.) -export MXM_RDMA_PORTS=mlx5_0:1 -export MXM_LOG_LEVEL=ERROR -export MXM_HANDLE_ERRORS=bt -export UCX_HANDLE_ERRORS=bt - -# enable HCOLL based collectives -export OMPI_MCA_coll=^fca # disable FCA for collective MPI routines -export OMPI_MCA_coll_hcoll_enable=1 # enable HCOLL for collective MPI routines -export OMPI_MCA_coll_hcoll_priority=95 -export OMPI_MCA_coll_hcoll_np=8 # use HCOLL for all communications with more than 8 tasks -export HCOLL_MAIN_IB=mlx5_0:1 -export HCOLL_ENABLE_MCAST=1 -export HCOLL_ENABLE_MCAST_ALL=1 - -# disable specific HCOLL functions (strongly depends on the application) -export HCOLL_ML_DISABLE_BARRIER=1 -export HCOLL_ML_DISABLE_IBARRIER=1 -export HCOLL_ML_DISABLE_BCAST=1 -export HCOLL_ML_DISABLE_REDUCE=1 -fi - -module unload netcdf && module load netcdf_c/4.3.2-gcc48 -module unload cmake && module load cmake -# we will get a segfault at runtime if we use a gcc from any of the provided gcc modules -export PATH=/sw/rhel6-x64/gcc/binutils-2.24-gccsys/bin:${PATH} - -export NETCDF_Fortran_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_fortran-4.4.2-intel14/include -export NETCDF_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_c-4.3.2-intel14/include -export NETCDF_CXX_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_cxx-4.2.1-gcc48/include - -export HDF5_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/hdf5/hdf5-1.8.14-threadsafe-intel14/include diff --git a/dwarf/dwarf_ice/env/mistral.dkrz.de/shell~ b/dwarf/dwarf_ice/env/mistral.dkrz.de/shell~ deleted file mode 100644 index 9b1c8acaa..000000000 --- a/dwarf/dwarf_ice/env/mistral.dkrz.de/shell~ +++ /dev/null @@ -1,40 +0,0 @@ -# make the contents as shell agnostic as possible so we can include them with bash, zsh and others - -module load gcc/4.8.2 -export LD_LIBRARY_PATH=/sw/rhel6-x64/gcc/gcc-4.8.2/lib64:$LD_LIBRARY_PATH # avoid GLIBCXX_3.4.15 not found error -module unload intel && module load intel/18.0.0 - -export FC=mpiifort CC=mpiicc CXX=mpiicpc; module unload intelmpi && module load intelmpi/2017.0.098 -#export FC=mpif90 CC=mpicc CXX=mpicxx; module load mxm/3.3.3002 fca/2.5.2393 bullxmpi_mlx/bullxmpi_mlx-1.2.8.3 -#export FC=mpif90 CC=mpicc CXX=mpicxx; module load mxm/3.4.3082 fca/2.5.2393 bullxmpi_mlx/bullxmpi_mlx-1.2.9.2 - -# intelmpi settings from DKRZ -export I_MPI_FABRICS=shm:dapl -export I_MPI_FALLBACK=disable -export I_MPI_SLURM_EXT=1 -export I_MPI_LARGE_SCALE_THRESHOLD=8192 # Set to a value larger than the number of your MPI-tasks if you use 8192 or more tasks. -export I_MPI_DYNAMIC_CONNECTION=0 -export DAPL_NETWORK_NODES=$SLURM_NNODES -export DAPL_NETWORK_PPN=$SLURM_NTASKS_PER_NODE -export DAPL_WR_MAX=500 - -# bullxmpi settings from DKRZ -# Settings for Open MPI and MXM (MellanoX Messaging) library -export OMPI_MCA_pml=cm -export OMPI_MCA_mtl=mxm -export OMPI_MCA_mtl_mxm_np=0 -export MXM_RDMA_PORTS=mlx5_0:1 -export MXM_LOG_LEVEL=FATAL -# Disable GHC algorithm for collective communication -export OMPI_MCA_coll=^ghc - -module unload netcdf && module load netcdf_c/4.3.2-gcc48 -module unload cmake && module load cmake -# we will get a segfault at runtime if we use a gcc from any of the provided gcc modules -export PATH=/sw/rhel6-x64/gcc/binutils-2.24-gccsys/bin:${PATH} - -export NETCDF_Fortran_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_fortran-4.4.2-intel14/include -export NETCDF_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_c-4.3.2-intel14/include -export NETCDF_CXX_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_cxx-4.2.1-gcc48/include - -export HDF5_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/hdf5/hdf5-1.8.14-threadsafe-intel14/include diff --git a/dwarf/dwarf_ice/env/ollie/shell b/dwarf/dwarf_ice/env/ollie/shell deleted file mode 100644 index 3b9efbf52..000000000 --- a/dwarf/dwarf_ice/env/ollie/shell +++ /dev/null @@ -1,11 +0,0 @@ -# make the contents as shell agnostic as possible so we can include them with bash, zsh and others - -module load intel.compiler intel.mpi netcdf/4.4.0_intel -module load centoslibs # required on compute nodes - -# the netcdf c++ path given by the netcdf module is broken, we have to workaround this -export NETCDF_CXX_INCLUDE_DIRECTORIES=/global/AWIsoft/tkleiner/netcdf/4.4.0_intel_impi/include - -export PATH=/home/ollie/dsidoren/cmake-3.13.2-Linux-x86_64/bin:$PATH - -export FC="mpiifort -mkl" CC=mpiicc CXX=mpiicpc diff --git a/dwarf/dwarf_ice/env/ollie/shell_cray b/dwarf/dwarf_ice/env/ollie/shell_cray deleted file mode 100644 index 4c7d399f8..000000000 --- a/dwarf/dwarf_ice/env/ollie/shell_cray +++ /dev/null @@ -1,6 +0,0 @@ -# make the contents as shell agnostic as possible so we can include them with bash, zsh and others - -module load cmake -module load PrgEnv-cray - -export FC=ftn CC=cc CXX=CC diff --git a/dwarf/dwarf_ice/env/ubuntu/shell b/dwarf/dwarf_ice/env/ubuntu/shell deleted file mode 100644 index 29fc59dbc..000000000 --- a/dwarf/dwarf_ice/env/ubuntu/shell +++ /dev/null @@ -1,5 +0,0 @@ -export FC=mpifort CC=mpicc CXX=mpicxx -export BLAS_LIBRARIES=/usr/lib/x86_64-linux-gnu/blas/ -export UBUNTU_BLAS_LIBRARY="libblas.a" - - From d4dc2b45ef562241ceb7d88b11c1770c0657dd0d Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 16 Feb 2022 13:23:23 +0100 Subject: [PATCH 841/909] link env/, env.sh and configure.sh also from the main repository level, makes the dwarf more slim within the respository --- dwarf/dwarf_ice/dwarf_linkfiles.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/dwarf/dwarf_ice/dwarf_linkfiles.sh b/dwarf/dwarf_ice/dwarf_linkfiles.sh index 56673caeb..f09de5e33 100755 --- a/dwarf/dwarf_ice/dwarf_linkfiles.sh +++ b/dwarf/dwarf_ice/dwarf_linkfiles.sh @@ -1,8 +1,11 @@ #!/bin/bash +#_______________________________________________________________________________ +ln -s ../../env env +ln -s ../../env.sh env.sh +ln -s ../../configure.sh configure.sh #_______________________________________________________________________________ if [ -d "src/" ] ; then rm -r src/ ; fi -# fi mkdir src/ cd src/ @@ -13,6 +16,7 @@ ln -s ../dwarf_ini/CMakeLists.txt CMakeLists.txt #_______________________________________________________________________________ export which_path="../../../src/" +# for downloading from specific github branch replace ln -s with wget # export which_branch=refactoring # export which_branch=refactoring_dwarf_ice # export which_path=https://raw.githubusercontent.com/FESOM/fesom2/${which_branch}/src From bc7b91dcb8d4800d3641a108efaee1c900475a21 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 16 Feb 2022 13:29:09 +0100 Subject: [PATCH 842/909] add some comments --- dwarf/dwarf_ice/dwarf_linkfiles.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/dwarf/dwarf_ice/dwarf_linkfiles.sh b/dwarf/dwarf_ice/dwarf_linkfiles.sh index f09de5e33..7e01a16d7 100755 --- a/dwarf/dwarf_ice/dwarf_linkfiles.sh +++ b/dwarf/dwarf_ice/dwarf_linkfiles.sh @@ -1,15 +1,18 @@ #!/bin/bash #_______________________________________________________________________________ +# link environment variables and configure.sh files from the main repository level ln -s ../../env env ln -s ../../env.sh env.sh ln -s ../../configure.sh configure.sh #_______________________________________________________________________________ +# create local source folder for the dwarf if [ -d "src/" ] ; then rm -r src/ ; fi mkdir src/ cd src/ #_______________________________________________________________________________ +# link main dwarf files to the local src/ folder ln -s ../dwarf_ini/fesom.F90 fesom.F90 ln -s ../dwarf_ini/CMakeLists.txt CMakeLists.txt @@ -44,7 +47,8 @@ export which_files="associate_mesh_def.h fortran_utils.F90 oce_modules.F90 " -#_______________________________________________________________________________ +#_______________________________________________________________________________ +# link the ther necessary main src files to local src directory for file in ${which_files}; do ln -s ${which_path}/${file} ${file} # wget ${which_path}/${file} From 59bcea151ffc4b544d510cb0630655074c7cbb59 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 16 Feb 2022 13:35:27 +0100 Subject: [PATCH 843/909] put back Cmakelist.txt --- dwarf/dwarf_ice/CMakeLists.txt | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 dwarf/dwarf_ice/CMakeLists.txt diff --git a/dwarf/dwarf_ice/CMakeLists.txt b/dwarf/dwarf_ice/CMakeLists.txt new file mode 100644 index 000000000..4feed315f --- /dev/null +++ b/dwarf/dwarf_ice/CMakeLists.txt @@ -0,0 +1,17 @@ +cmake_minimum_required(VERSION 3.9) + +# set default build type cache entry (do so before project(...) is called, which would create this cache entry on its own) +if(NOT CMAKE_BUILD_TYPE) + message(STATUS "setting default build type: Release") + set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.") +endif() + +project(FESOM2.0) +set(TOPLEVEL_DIR ${CMAKE_CURRENT_LIST_DIR}) +set(FESOM_COUPLED OFF CACHE BOOL "compile fesom standalone or with oasis support (i.e. coupled)") +set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. (Also needs FESOM_COUPLED to work)") +set(CRAY OFF CACHE BOOL "compile with cray ftn") +set(USE_ICEPACK OFF CACHE BOOL "compile fesom with the Iceapck modules for sea ice column physics.") +#set(VERBOSE OFF CACHE BOOL "toggle debug output") +#add_subdirectory(oasis3-mct/lib/psmile) +add_subdirectory(src) From 16347979c47eabbadcd632afb05438f7a6a198ff Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 22 Feb 2022 09:58:30 +0100 Subject: [PATCH 844/909] declare parms functions before they are used --- lib/parms/src/parms_pc_schurras.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/parms/src/parms_pc_schurras.c b/lib/parms/src/parms_pc_schurras.c index bb37885e6..979acdfe9 100755 --- a/lib/parms/src/parms_pc_schurras.c +++ b/lib/parms/src/parms_pc_schurras.c @@ -324,6 +324,9 @@ int parms_PCCreate_Schurras(parms_PC self) +int parms_OperatorGetU(parms_Operator, void **); +int parms_MatGetOffDiag(parms_Mat, void **); +int parms_CommGetOdvlist(parms_Comm, int **); static int parms_PC_GetS(parms_PC self, parms_Operator op,parms_Mat *mat) { From 02c75da83101422eaf883b2971c1d86e115d203b Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 22 Feb 2022 12:34:33 +0100 Subject: [PATCH 845/909] declare parms functions before they are used --- lib/parms/src/parms_pc_schurras.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/parms/src/parms_pc_schurras.c b/lib/parms/src/parms_pc_schurras.c index bb37885e6..979acdfe9 100755 --- a/lib/parms/src/parms_pc_schurras.c +++ b/lib/parms/src/parms_pc_schurras.c @@ -324,6 +324,9 @@ int parms_PCCreate_Schurras(parms_PC self) +int parms_OperatorGetU(parms_Operator, void **); +int parms_MatGetOffDiag(parms_Mat, void **); +int parms_CommGetOdvlist(parms_Comm, int **); static int parms_PC_GetS(parms_PC self, parms_Operator op,parms_Mat *mat) { From 83594107683e0232472f4b3182fddfc8f51621ed Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 23 Feb 2022 08:30:40 +0100 Subject: [PATCH 846/909] - add environment to build+run FESOM on juwels booster+cluster with the Intel compiler - add environment to build+run FESOM on juwels booster+cluster with the Intel compiler with custom ECMWF netcdf library --- env/juwels/shell_2022+intel | 21 +++++++++++++++++++++ env/juwels/shell_2022+intel+customnetcdf | 22 ++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 env/juwels/shell_2022+intel create mode 100644 env/juwels/shell_2022+intel+customnetcdf diff --git a/env/juwels/shell_2022+intel b/env/juwels/shell_2022+intel new file mode 100644 index 000000000..3ffbc7970 --- /dev/null +++ b/env/juwels/shell_2022+intel @@ -0,0 +1,21 @@ +########## +module --force purge +module use $OTHERSTAGES +module load Stages/2022 +module load Intel/2021.4.0 +module load ParaStationMPI/5.5.0-1 +module load CMake/3.21.1 +module load imkl/2021.4.0 +module load netCDF-Fortran/4.5.3 +module load netCDF/4.8.1 +module load Perl/5.34.0 + +export LC_ALL=en_US.UTF-8 +export TMPDIR=/tmp +export FC=mpifort +export F77=mpifort +export MPIFC=mpifort +export CC=mpicc +export CXX=mpic++ + +export NETCDF_Fortran_INCLUDE_DIRECTORIES=$EBROOTNETCDFMINFORTRAN/include diff --git a/env/juwels/shell_2022+intel+customnetcdf b/env/juwels/shell_2022+intel+customnetcdf new file mode 100644 index 000000000..c49641b3b --- /dev/null +++ b/env/juwels/shell_2022+intel+customnetcdf @@ -0,0 +1,22 @@ +########## +module --force purge +module use $OTHERSTAGES +module load Stages/2022 +module load Intel/2021.4.0 +module load ParaStationMPI/5.5.0-1 +module load CMake/3.21.1 +module load imkl/2021.4.0 + +export LC_ALL=en_US.UTF-8 +export TMPDIR=/tmp +export FC=mpifort +export F77=mpifort +export MPIFC=mpifort +export CC=mpicc +export CXX=mpic++ + +export IO_LIB_ROOT=/p/project/pra127/rackow1/RAPS20_fesom/flexbuild/external/intel.juwels/install +export LD_LIBRARY_PATH=${IO_LIB_ROOT}/lib:$LD_LIBRARY_PATH +export NETCDF_Fortran_INCLUDE_DIRECTORIES=${IO_LIB_ROOT}/include +export NETCDF_C_INCLUDE_DIRECTORIES=${IO_LIB_ROOT}/include +export NETCDF_CXX_INCLUDE_DIRECTORIES=${IO_LIB_ROOT}/include From 363b248389e0cc99a1fb1c16cd9ae18de543a5a8 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 17 Feb 2022 10:10:27 +0100 Subject: [PATCH 847/909] set shaped pointers in a way compatible with nvfortran 22.1 --- src/associate_mesh_ass.h | 162 ++++++++++++--------------------------- src/ice_thermo_oce.F90 | 9 +-- src/io_fesom_file.F90 | 4 +- src/io_meandata.F90 | 2 +- src/oce_ale.F90 | 45 ++++------- src/oce_muscl_adv.F90 | 9 +-- 6 files changed, 72 insertions(+), 159 deletions(-) diff --git a/src/associate_mesh_ass.h b/src/associate_mesh_ass.h index 72a4d1246..882fc053e 100644 --- a/src/associate_mesh_ass.h +++ b/src/associate_mesh_ass.h @@ -6,124 +6,64 @@ ocean_area => mesh%ocean_area nl => mesh%nl nn_size => mesh%nn_size ocean_areawithcav => mesh%ocean_areawithcav -#ifdef __PGI -coord_nod2D => mesh%coord_nod2D (1:2,1:myDim_nod2D+eDim_nod2D) -geo_coord_nod2D => mesh%geo_coord_nod2D (1:2,1:myDim_nod2D+eDim_nod2D) -elem2D_nodes => mesh%elem2D_nodes (1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) -edges => mesh%edges (1:2,1:myDim_edge2D+eDim_edge2D) -edge_tri => mesh%edge_tri (1:2,1:myDim_edge2D+eDim_edge2D) -elem_edges => mesh%elem_edges (1:3,1:myDim_elem2D) -elem_area => mesh%elem_area (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) -edge_dxdy => mesh%edge_dxdy (1:2,1:myDim_edge2D+eDim_edge2D) -edge_cross_dxdy => mesh%edge_cross_dxdy (1:4,1:myDim_edge2D+eDim_edge2D) -elem_cos => mesh%elem_cos (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) -metric_factor => mesh%metric_factor (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) -elem_neighbors => mesh%elem_neighbors (1:3,1:myDim_elem2D) -nod_in_elem2D => mesh%nod_in_elem2D !(maxval(rmax),myDim_nod2D+eDim_nod2D) -x_corners => mesh%x_corners !(myDim_nod2D, maxval(rmax)) -y_corners => mesh%y_corners !(myDim_nod2D, maxval(rmax)) -nod_in_elem2D_num => mesh%nod_in_elem2D_num (1:myDim_nod2D+eDim_nod2D) -depth => mesh%depth (1:myDim_nod2D+eDim_nod2D) -gradient_vec => mesh%gradient_vec (1:6,1:myDim_elem2D) -gradient_sca => mesh%gradient_sca (1:6,1:myDim_elem2D) -bc_index_nod2D => mesh%bc_index_nod2D (1:myDim_nod2D+eDim_nod2D) -zbar => mesh%zbar (1:mesh%nl) -Z => mesh%Z (1:mesh%nl-1) -elem_depth => mesh%elem_depth! never used, not even allocated -nlevels => mesh%nlevels (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) -nlevels_nod2D => mesh%nlevels_nod2D (1:myDim_nod2D+eDim_nod2D) -nlevels_nod2D_min => mesh%nlevels_nod2D_min (1:myDim_nod2D+eDim_nod2D) -area => mesh%area (1:mesh%nl,1:myDim_nod2d+eDim_nod2D) -areasvol => mesh%areasvol (1:mesh%nl,1:myDim_nod2d+eDim_nod2D) -area_inv => mesh%area_inv (1:mesh%nl,1:myDim_nod2d+eDim_nod2D) -areasvol_inv => mesh%areasvol_inv (1:mesh%nl,1:myDim_nod2d+eDim_nod2D) -mesh_resolution => mesh%mesh_resolution (1:myDim_nod2d+eDim_nod2D) -ssh_stiff => mesh%ssh_stiff -lump2d_north => mesh%lump2d_north (1:myDim_nod2d) -lump2d_south => mesh%lump2d_south (1:myDim_nod2d) -cavity_flag_n => mesh%cavity_flag_n (1:myDim_nod2D+eDim_nod2D) -cavity_flag_e => mesh%cavity_flag_e (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) -cavity_depth => mesh%cavity_depth (1:myDim_nod2D+eDim_nod2D) -ulevels => mesh%ulevels (1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) -ulevels_nod2D => mesh%ulevels_nod2D (1:myDim_nod2D+eDim_nod2D) -ulevels_nod2D_max => mesh%ulevels_nod2D_max (1:myDim_nod2D+eDim_nod2D) -nn_num => mesh%nn_num (1:myDim_nod2D) -nn_pos => mesh%nn_pos (1:mesh%nn_size, 1:myDim_nod2D) -hnode => mesh%hnode (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) -hnode_new => mesh%hnode_new (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) -zbar_3d_n => mesh%zbar_3d_n (1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) -Z_3d_n => mesh%Z_3d_n (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) -helem => mesh%helem (1:mesh%nl-1, 1:myDim_elem2D) -bottom_elem_thickness => mesh%bottom_elem_thickness (1:myDim_elem2D) -bottom_node_thickness => mesh%bottom_node_thickness (1:myDim_nod2D+eDim_nod2D) -dhe => mesh%dhe (1:myDim_elem2D) -hbar => mesh%hbar (1:myDim_nod2D+eDim_nod2D) -hbar_old => mesh%hbar_old (1:myDim_nod2D+eDim_nod2D) -zbar_n_bot => mesh%zbar_n_bot (1:myDim_nod2D+eDim_nod2D) -zbar_e_bot => mesh%zbar_e_bot (1:myDim_elem2D+eDim_elem2D) -zbar_n_srf => mesh%zbar_n_srf (1:myDim_nod2D+eDim_nod2D) -zbar_e_srf => mesh%zbar_e_srf (1:myDim_elem2D+eDim_elem2D) -#else -coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D -geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D -elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes -edges(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edges -edge_tri(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_tri -elem_edges(1:3,1:myDim_elem2D) => mesh%elem_edges -elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_area -edge_dxdy(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_dxdy -edge_cross_dxdy(1:4,1:myDim_edge2D+eDim_edge2D) => mesh%edge_cross_dxdy -elem_cos(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_cos -metric_factor(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%metric_factor -elem_neighbors(1:3,1:myDim_elem2D) => mesh%elem_neighbors +coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D(:,:) +geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D(:,:) +elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes(:,:) +edges(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edges(:,:) +edge_tri(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_tri(:,:) +elem_edges(1:3,1:myDim_elem2D) => mesh%elem_edges(:,:) +elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_area(:) +edge_dxdy(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_dxdy(:,:) +edge_cross_dxdy(1:4,1:myDim_edge2D+eDim_edge2D) => mesh%edge_cross_dxdy(:,:) +elem_cos(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_cos(:) +metric_factor(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%metric_factor(:) +elem_neighbors(1:3,1:myDim_elem2D) => mesh%elem_neighbors(:,:) nod_in_elem2D => mesh%nod_in_elem2D ! (maxval(rmax),myDim_nod2D+eDim_nod2D) x_corners => mesh%x_corners ! (myDim_nod2D, maxval(rmax)) y_corners => mesh%y_corners ! (myDim_nod2D, maxval(rmax)) -nod_in_elem2D_num(1:myDim_nod2D+eDim_nod2D) => mesh%nod_in_elem2D_num -depth(1:myDim_nod2D+eDim_nod2D) => mesh%depth -gradient_vec(1:6,1:myDim_elem2D) => mesh%gradient_vec -gradient_sca(1:6,1:myDim_elem2D) => mesh%gradient_sca -bc_index_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%bc_index_nod2D -zbar(1:mesh%nl) => mesh%zbar -Z(1:mesh%nl-1) => mesh%Z +nod_in_elem2D_num(1:myDim_nod2D+eDim_nod2D) => mesh%nod_in_elem2D_num(:) +depth(1:myDim_nod2D+eDim_nod2D) => mesh%depth(:) +gradient_vec(1:6,1:myDim_elem2D) => mesh%gradient_vec(:,:) +gradient_sca(1:6,1:myDim_elem2D) => mesh%gradient_sca(:,:) +bc_index_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%bc_index_nod2D(:) +zbar(1:mesh%nl) => mesh%zbar(:) +Z(1:mesh%nl-1) => mesh%Z(:) elem_depth => mesh%elem_depth ! never used, not even allocated -nlevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%nlevels -nlevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D -nlevels_nod2D_min(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D_min -area(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area -areasvol(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol -area_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area_inv -areasvol_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol_inv -mesh_resolution(1:myDim_nod2d+eDim_nod2D) => mesh%mesh_resolution +nlevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%nlevels(:) +nlevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D(:) +nlevels_nod2D_min(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D_min(:) +area(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area(:,:) +areasvol(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol(:,:) +area_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area_inv(:,:) +areasvol_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol_inv(:,:) +mesh_resolution(1:myDim_nod2d+eDim_nod2D) => mesh%mesh_resolution(:) ssh_stiff => mesh%ssh_stiff -lump2d_north(1:myDim_nod2d) => mesh%lump2d_north -lump2d_south(1:myDim_nod2d) => mesh%lump2d_south -cavity_flag_n(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_flag_n -cavity_flag_e(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%cavity_flag_e +lump2d_north(1:myDim_nod2d) => mesh%lump2d_north(:) +lump2d_south(1:myDim_nod2d) => mesh%lump2d_south(:) +cavity_flag_n(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_flag_n(:) +cavity_flag_e(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%cavity_flag_e(:) !!$cavity_lev_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_lev_nod2D !!$cavity_lev_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%cavity_lev_elem2D -cavity_depth(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_depth -ulevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%ulevels -ulevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D -ulevels_nod2D_max(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D_max -nn_num(1:myDim_nod2D) => mesh%nn_num -nn_pos(1:mesh%nn_size, 1:myDim_nod2D) => mesh%nn_pos -hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode -hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new -zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n -Z_3d_n(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%Z_3d_n -helem(1:mesh%nl-1, 1:myDim_elem2D) => mesh%helem -bottom_elem_thickness(1:myDim_elem2D) => mesh%bottom_elem_thickness -bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_thickness -dhe(1:myDim_elem2D) => mesh%dhe -hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar -hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old +cavity_depth(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_depth(:) +ulevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%ulevels(:) +ulevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D(:) +ulevels_nod2D_max(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D_max(:) +nn_num(1:myDim_nod2D) => mesh%nn_num(:) +nn_pos(1:mesh%nn_size, 1:myDim_nod2D) => mesh%nn_pos(:,:) +hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode(:,:) +hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new(:,:) +zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n(:,:) +Z_3d_n(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%Z_3d_n(:,:) +helem(1:mesh%nl-1, 1:myDim_elem2D) => mesh%helem(:,:) +bottom_elem_thickness(1:myDim_elem2D) => mesh%bottom_elem_thickness(:) +bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_thickness(:) +dhe(1:myDim_elem2D) => mesh%dhe(:) +hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar(:) +hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old(:) !zbar_n(1:mesh%nl) => mesh%zbar_n !Z_n(1:mesh%nl-1) => mesh%Z_n -zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot -zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot -zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf -zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf -#endif - +zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot(:) +zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot(:) +zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf(:) +zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf(:) diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index cdd8ef455..6cfdd0641 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -185,13 +185,8 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux myDim_nod2d => partit%myDim_nod2D eDim_nod2D => partit%eDim_nod2D -#ifdef __PGI - ulevels_nod2D => mesh%ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) - geo_coord_nod2D => mesh%geo_coord_nod2D(1:2, 1:myDim_nod2D+eDim_nod2D) -#else - ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D - geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D -#endif + ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D(:) + geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D(:,:) u_ice => ice%uice(:) v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index 88e36fa80..21b599ee1 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -371,7 +371,7 @@ subroutine specify_node_var_2d(this, name, longname, units, local_data) level_diminfo = obtain_diminfo(this, m_nod2d) - external_local_data_ptr(1:1,1:size(local_data)) => local_data + external_local_data_ptr(1:1,1:size(local_data)) => local_data(:) call specify_variable(this, name, [level_diminfo%idx, this%time_dimidx], level_diminfo%len, external_local_data_ptr, .false., longname, units) end subroutine @@ -404,7 +404,7 @@ subroutine specify_elem_var_2d(this, name, longname, units, local_data) level_diminfo = obtain_diminfo(this, m_elem2d) - external_local_data_ptr(1:1,1:size(local_data)) => local_data + external_local_data_ptr(1:1,1:size(local_data)) => local_data(:) call specify_variable(this, name, [level_diminfo%idx, this%time_dimidx], level_diminfo%len, external_local_data_ptr, .true., longname, units) end subroutine diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 17a49071e..fad2cdd2b 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -1105,7 +1105,7 @@ subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, fr call associate_new_stream(name, entry) ! 2d specific - entry%ptr3(1:1,1:size(data)) => data + entry%ptr3(1:1,1:size(data)) => data(:) if (accuracy == i_real8) then allocate(entry%local_values_r8(1, lcsize)) diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index fe5f69554..072c0162d 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -229,37 +229,20 @@ subroutine init_ale(dynamics, partit, mesh) allocate(mesh%zbar_n_srf(myDim_nod2D+eDim_nod2D)) ! reassociate after the allocation (no pointer exists before) -#ifdef __PGI - hnode => mesh%hnode (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) - hnode_new => mesh%hnode_new (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) - zbar_3d_n => mesh%zbar_3d_n (1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) - Z_3d_n => mesh%Z_3d_n (1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) - helem => mesh%helem (1:mesh%nl-1, 1:myDim_elem2D) - bottom_elem_thickness => mesh%bottom_elem_thickness (1:myDim_elem2D) - bottom_node_thickness => mesh%bottom_node_thickness (1:myDim_nod2D+eDim_nod2D) - dhe => mesh%dhe (1:myDim_elem2D) - hbar => mesh%hbar (1:myDim_nod2D+eDim_nod2D) - hbar_old => mesh%hbar_old (1:myDim_nod2D+eDim_nod2D) - zbar_n_bot => mesh%zbar_n_bot (1:myDim_nod2D+eDim_nod2D) - zbar_e_bot => mesh%zbar_e_bot (1:myDim_elem2D+eDim_elem2D) - zbar_n_srf => mesh%zbar_n_srf (1:myDim_nod2D+eDim_nod2D) - zbar_e_srf => mesh%zbar_e_srf (1:myDim_elem2D+eDim_elem2D) -#else - hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode - hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new - zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n - Z_3d_n(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%Z_3d_n - helem(1:mesh%nl-1, 1:myDim_elem2D) => mesh%helem - bottom_elem_thickness(1:myDim_elem2D) => mesh%bottom_elem_thickness - bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_thickness - dhe(1:myDim_elem2D) => mesh%dhe - hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar - hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old - zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot - zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot - zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf - zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf -#endif + hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode(:,:) + hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new(:,:) + zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n(:,:) + Z_3d_n(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%Z_3d_n(:,:) + helem(1:mesh%nl-1, 1:myDim_elem2D) => mesh%helem(:,:) + bottom_elem_thickness(1:myDim_elem2D) => mesh%bottom_elem_thickness(:) + bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_thickness(:) + dhe(1:myDim_elem2D) => mesh%dhe(:) + hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar(:) + hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old(:) + zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot(:) + zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot(:) + zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf(:) + zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf(:) !___initialize______________________________________________________________ hbar = 0.0_WP hbar_old = 0.0_WP diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index a12cbafc2..03fdb74e0 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -76,13 +76,8 @@ subroutine muscl_adv_init(twork, partit, mesh) nn_size=k !___________________________________________________________________________ allocate(mesh%nn_num(myDim_nod2D), mesh%nn_pos(nn_size,myDim_nod2D)) -#ifdef __PGI - nn_num => mesh%nn_num(1:myDim_nod2D) - nn_pos => mesh%nn_pos(1:nn_size, 1:myDim_nod2D) -#else - nn_num(1:myDim_nod2D) => mesh%nn_num - nn_pos(1:nn_size, 1:myDim_nod2D) => mesh%nn_pos -#endif + nn_num(1:myDim_nod2D) => mesh%nn_num(:) + nn_pos(1:nn_size, 1:myDim_nod2D) => mesh%nn_pos(:,:) ! These are the same arrays that we also use in quadratic reconstruction !MOVE IT TO SOMEWHERE ELSE !$OMP PARALLEL DO From 26d608a0d4cc3976b634c382e8d0ff90895ccffb Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Mar 2022 14:38:56 +0100 Subject: [PATCH 848/909] remove unused variable in shell environment --- env/juwels/shell | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env/juwels/shell b/env/juwels/shell index f5e8a951f..98e8a6db7 100644 --- a/env/juwels/shell +++ b/env/juwels/shell @@ -15,7 +15,7 @@ export TMPDIR=/tmp export FC=mpifort export F77=mpifort export MPIFC=mpifort -export FCFLAGS=-free +#export FCFLAGS=-free export CC=mpicc export CXX=mpic++ From 20aad6b186265066dc21ce56b3a49d8a60117952 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Mar 2022 14:41:12 +0100 Subject: [PATCH 849/909] explicitly allocate character variable to try to work around nvfortran allocation errors --- src/io_restart_file_group.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 index 0acbd78bb..772c8e1d0 100644 --- a/src/io_restart_file_group.F90 +++ b/src/io_restart_file_group.F90 @@ -108,7 +108,7 @@ subroutine add_file(g, name, must_exist_on_read, mesh_nod2d, mesh_elem2d, mesh_n f => g%files(g%nfiles) f%path = "" - f%varname = name + allocate(f%varname,source=name) f%must_exist_on_read = must_exist_on_read call f%fesom_file_type%init(mesh_nod2d, mesh_elem2d, mesh_nl, partit) ! this is specific for a restart file From f33812c5d47f45903cf0e371a753891be66896e2 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Mar 2022 16:04:24 +0100 Subject: [PATCH 850/909] finalize MPI before FESOM prints its stats block, otherwise there is sometimes output from other processes from an earlier time in the programm AFTER the starts block (with parastationMPI) --- src/fesom_module.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index b320908f6..b637a31c4 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -429,6 +429,7 @@ subroutine fesom_finalize() call MPI_AllREDUCE(MPI_IN_PLACE, max_rtime, 14, MPI_REAL, MPI_MAX, f%MPI_COMM_FESOM, f%MPIerr) call MPI_AllREDUCE(MPI_IN_PLACE, min_rtime, 14, MPI_REAL, MPI_MIN, f%MPI_COMM_FESOM, f%MPIerr) + if(f%fesom_did_mpi_init) call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) ! finalize MPI before FESOM prints its stats block, otherwise there is sometimes output from other processes from an earlier time in the programm AFTER the starts block (with parastationMPI) if (f%mype==0) then 41 format (a35,a10,2a15) !Format for table heading 42 format (a30,3f15.4) !Format for table content @@ -465,7 +466,6 @@ subroutine fesom_finalize() write(*,*) end if ! call clock_finish - if(f%fesom_did_mpi_init) call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) end subroutine end module From f0b923deb2281fe7b1921cd18af07c0e98c2382f Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Mar 2022 16:22:18 +0100 Subject: [PATCH 851/909] add missing variable declaration for ALEPH_CRAYMPICH_WORKAROUNDS=ON --- src/io_meandata.F90 | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index fad2cdd2b..907aede95 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -725,6 +725,7 @@ subroutine write_mean(entry, entry_index) integer tag integer :: i, size1, size2, size_gen, size_lev, order integer :: c, lev + integer mpierr ! Serial output implemented so far @@ -744,7 +745,7 @@ subroutine write_mean(entry, entry_index) do lev=1, size1 #ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS ! aleph cray-mpich workaround - call MPI_Barrier(entry%comm, MPIERR) + call MPI_Barrier(entry%comm, mpierr) #endif if(.not. entry%is_elem_based) then call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%p_partit) @@ -768,7 +769,7 @@ subroutine write_mean(entry, entry_index) do lev=1, size1 #ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS ! aleph cray-mpich workaround - call MPI_Barrier(entry%comm, MPIERR) + call MPI_Barrier(entry%comm, mpierr) #endif if(.not. entry%is_elem_based) then call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%p_partit) From 9214ac3961796bbcff91e3d308de670c9325e915 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Mar 2022 17:08:34 +0100 Subject: [PATCH 852/909] explicitly allocate attribute type variable to try to work around nvfortran allocation errors --- src/io_netcdf_file_module.F90 | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index ceb15e111..d393f3480 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -205,13 +205,15 @@ subroutine add_var_att_text(this, varindex, att_name, att_text) character(len=*), intent(in) :: att_text ! EO parameters type(att_type_wrapper), allocatable :: tmparr(:) + type(att_type_text) att allocate( tmparr(size(this%vars(varindex)%atts)+1) ) tmparr(1:size(this%vars(varindex)%atts)) = this%vars(varindex)%atts deallocate(this%vars(varindex)%atts) call move_alloc(tmparr, this%vars(varindex)%atts) - this%vars(varindex)%atts( size(this%vars(varindex)%atts) )%it = att_type_text(name=att_name, text=att_text) + att = att_type_text(name=att_name, text=att_text) + allocate( this%vars(varindex)%atts( size(this%vars(varindex)%atts) )%it, source=att ) end subroutine @@ -222,13 +224,15 @@ subroutine add_var_att_int(this, varindex, att_name, att_val) integer, intent(in) :: att_val ! EO parameters type(att_type_wrapper), allocatable :: tmparr(:) + type(att_type_int) att allocate( tmparr(size(this%vars(varindex)%atts)+1) ) tmparr(1:size(this%vars(varindex)%atts)) = this%vars(varindex)%atts deallocate(this%vars(varindex)%atts) call move_alloc(tmparr, this%vars(varindex)%atts) - this%vars(varindex)%atts( size(this%vars(varindex)%atts) )%it = att_type_int(name=att_name, val=att_val) + att = att_type_int(name=att_name, val=att_val) + allocate( this%vars(varindex)%atts( size(this%vars(varindex)%atts) )%it, source=att ) end subroutine From 8fdfd9957e8288e56cf526c0f5fee1c8f892d2fe Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Mar 2022 17:14:57 +0100 Subject: [PATCH 853/909] use nvfortran style shaped array copy --- src/io_netcdf_file_module.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index d393f3480..6ee8da822 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -151,7 +151,7 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) ! assume the vars array is allocated allocate( tmparr(size(this%vars)+1) ) - tmparr(1:size(this%vars)) = this%vars + tmparr(1:size(this%vars)) = this%vars(:) deallocate(this%vars) call move_alloc(tmparr, this%vars) From fd6480688b7040b890e9024d024b319771a962c0 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Mar 2022 17:18:23 +0100 Subject: [PATCH 854/909] use a fixed size array to store our netcdf variable attributes as nvfortran seems to loose allocation of derived types which contain allocatable types when copying the array --- src/io_netcdf_file_module.F90 | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 index 6ee8da822..9eb6a245a 100644 --- a/src/io_netcdf_file_module.F90 +++ b/src/io_netcdf_file_module.F90 @@ -18,7 +18,8 @@ module io_netcdf_file_module character(:), allocatable :: name integer, allocatable :: dim_indices(:) integer datatype - type(att_type_wrapper), allocatable :: atts(:) + type(att_type_wrapper) :: atts(15) ! use a fixed size array to store our netcdf variable attributes as nvfortran seems to loose allocation of derived types which contain allocatable types when copying the array + integer :: atts_count = 0 integer ncid end type @@ -145,9 +146,6 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) ! EO parameters include "netcdf.inc" type(var_type), allocatable :: tmparr(:) - type(att_type_wrapper), allocatable :: empty_atts(:) - - allocate(empty_atts(0)) ! if we use a fixed size array with size 0 there is a segfault at runtime when compiled with cray ftn ! assume the vars array is allocated allocate( tmparr(size(this%vars)+1) ) @@ -161,7 +159,6 @@ function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) this%vars(varindex)%name = name this%vars(varindex)%dim_indices= dim_indices this%vars(varindex)%datatype = netcdf_datatype - this%vars(varindex)%atts = empty_atts this%vars(varindex)%ncid = -1 end function @@ -207,13 +204,12 @@ subroutine add_var_att_text(this, varindex, att_name, att_text) type(att_type_wrapper), allocatable :: tmparr(:) type(att_type_text) att - allocate( tmparr(size(this%vars(varindex)%atts)+1) ) - tmparr(1:size(this%vars(varindex)%atts)) = this%vars(varindex)%atts - deallocate(this%vars(varindex)%atts) - call move_alloc(tmparr, this%vars(varindex)%atts) + ! add this att_type instance to atts array + this%vars(varindex)%atts_count = this%vars(varindex)%atts_count +1 + call assert(size(this%vars(varindex)%atts) >= this%vars(varindex)%atts_count, __LINE__) att = att_type_text(name=att_name, text=att_text) - allocate( this%vars(varindex)%atts( size(this%vars(varindex)%atts) )%it, source=att ) + allocate( this%vars(varindex)%atts( this%vars(varindex)%atts_count )%it, source=att ) end subroutine @@ -226,13 +222,12 @@ subroutine add_var_att_int(this, varindex, att_name, att_val) type(att_type_wrapper), allocatable :: tmparr(:) type(att_type_int) att - allocate( tmparr(size(this%vars(varindex)%atts)+1) ) - tmparr(1:size(this%vars(varindex)%atts)) = this%vars(varindex)%atts - deallocate(this%vars(varindex)%atts) - call move_alloc(tmparr, this%vars(varindex)%atts) + ! add this att_type instance to atts array + this%vars(varindex)%atts_count = this%vars(varindex)%atts_count +1 + call assert(size(this%vars(varindex)%atts) >= this%vars(varindex)%atts_count, __LINE__) att = att_type_int(name=att_name, val=att_val) - allocate( this%vars(varindex)%atts( size(this%vars(varindex)%atts) )%it, source=att ) + allocate( this%vars(varindex)%atts( this%vars(varindex)%atts_count )%it, source=att ) end subroutine @@ -439,7 +434,7 @@ subroutine open_write_create(this, filepath) end do call assert_nc( nf_def_var(this%ncid, this%vars(i)%name, this%vars(i)%datatype, var_ndims, var_dimids, this%vars(i)%ncid) , __LINE__) - do ii=1, size(this%vars(i)%atts) + do ii=1, this%vars(i)%atts_count call this%vars(i)%atts(ii)%it%define_in_var(this%ncid, this%vars(i)%ncid) end do end do From 1d592c3d7479b9d3d47c82b9a19d20f78596bc3a Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Tue, 1 Mar 2022 17:21:32 +0100 Subject: [PATCH 855/909] avoid lots of empty lines on the console output --- src/gen_ic3d.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 6bd4da6e4..f3fa32d34 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -351,9 +351,9 @@ SUBROUTINE getcoeffld(tracers, partit, mesh) iost = nf_inq_varid(ncid, varname, id_data) iost = nf_inq_var_fill(ncid, id_data, NO_FILL, FILL_VALUE) ! FillValue defined? if (NO_FILL==1) then - print *, 'No _FillValue is set in ', filename, ', trying dummy =', dummy, FILL_VALUE + print *, 'No _FillValue is set in ', trim(filename), ', trying dummy =', dummy, FILL_VALUE else - print *, 'The FillValue in ', filename, ' is set to ', FILL_VALUE ! should set dummy accordingly + print *, 'The FillValue in ', trim(filename), ' is set to ', FILL_VALUE ! should set dummy accordingly end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) From 5045f470271dc70df406545d1549b96a0d0dd0f1 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 10 Mar 2022 11:10:07 +0100 Subject: [PATCH 856/909] merged NVFORTRAN (PGI) branch link_ifs_47r3_nvhpc into master. 1. env/juwels contains several shell files 2. in different places nvfortran does not like what other compilers do. hence, a few __PGI directives to preprocessor were necessary. 3. derived type IO is switched off for PGI ("symbol $sd is an inconsistent array descriptor" ERROR) --- env/juwels/shell | 18 ++--- env/juwels/shell_2020+intel+cluster | 44 ++++++++++++ env/juwels/shell_2022+nvfortran | 44 ++++++++++++ src/MOD_DYN.F90 | 36 ++++++---- src/MOD_ICE.F90 | 20 ++++-- src/MOD_MESH.F90 | 2 + src/MOD_PARTIT.F90 | 6 +- src/MOD_TRACER.F90 | 26 ++++--- src/io_restart.F90 | 24 +++---- src/io_restart_derivedtype.F90 | 102 +++++++++++++++------------- 10 files changed, 224 insertions(+), 98 deletions(-) create mode 100644 env/juwels/shell_2020+intel+cluster create mode 100644 env/juwels/shell_2022+nvfortran diff --git a/env/juwels/shell b/env/juwels/shell index 98e8a6db7..7217017ff 100644 --- a/env/juwels/shell +++ b/env/juwels/shell @@ -1,21 +1,22 @@ ########## module --force purge module use /gpfs/software/juwels/otherstages -module load Stages/2022 -module load NVHPC/22.1 # older versions of pgf90/nvfortran can not compile mixed assumed shape and assumed rank parameters -module load ParaStationMPI/5.5.0-1 -module load CMake/3.21.1 -#module load imkl/2021.2.0 +module load Stages/2020 +module load Intel/2020.2.254-GCC-9.3.0 +module load ParaStationMPI/5.4.7-1 +module load CMake/3.18.0 +module load imkl/2020.2.254 module load netCDF-Fortran/4.5.3 -module load netCDF/4.8.1 -module load Perl/5.34.0 +module load netCDF/4.7.4 +module load Perl/5.32.0 +module load netCDF export LC_ALL=en_US.UTF-8 export TMPDIR=/tmp export FC=mpifort export F77=mpifort export MPIFC=mpifort -#export FCFLAGS=-free +export FCFLAGS=-free export CC=mpicc export CXX=mpic++ @@ -41,4 +42,3 @@ export NETCDF_Fortran_LIBRARIES=$EBROOTNETCDFMINFORTRAN/lib #export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ #export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ #export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ - diff --git a/env/juwels/shell_2020+intel+cluster b/env/juwels/shell_2020+intel+cluster new file mode 100644 index 000000000..7217017ff --- /dev/null +++ b/env/juwels/shell_2020+intel+cluster @@ -0,0 +1,44 @@ +########## +module --force purge +module use /gpfs/software/juwels/otherstages +module load Stages/2020 +module load Intel/2020.2.254-GCC-9.3.0 +module load ParaStationMPI/5.4.7-1 +module load CMake/3.18.0 +module load imkl/2020.2.254 +module load netCDF-Fortran/4.5.3 +module load netCDF/4.7.4 +module load Perl/5.32.0 +module load netCDF + +export LC_ALL=en_US.UTF-8 +export TMPDIR=/tmp +export FC=mpifort +export F77=mpifort +export MPIFC=mpifort +export FCFLAGS=-free +export CC=mpicc +export CXX=mpic++ + +export NETCDF_Fortran_INCLUDE_DIRECTORIES=$EBROOTNETCDFMINFORTRAN/include +export NETCDF_Fortran_LIBRARIES=$EBROOTNETCDFMINFORTRAN/lib + +#module use /gpfs/software/juwels/otherstages +#module load Stages/2019a +#module load StdEnv +## For intel MPI +##module load Intel/2019.3.199-GCC-8.3.0 IntelMPI/2018.5.288 imkl/2019.3.199 +##export FC=mpiifort CC=mpiicc CXX=mpiicpc + +## For ParaStation MPI +#module load Intel/2019.3.199-GCC-8.3.0 ParaStationMPI/5.4 imkl/2019.5.281 +#export FC=mpifort CC=mpicc CXX=mpicxx + +#module load netCDF/4.6.3 +#module load netCDF-Fortran/4.4.5 +#module load CMake +#export NETCDF_DIR=$EBROOTNETCDF +#export NETCDFF_DIR=$EBROOTNETCDFMINFORTRAN +#export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ +#export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ +#export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ diff --git a/env/juwels/shell_2022+nvfortran b/env/juwels/shell_2022+nvfortran new file mode 100644 index 000000000..98e8a6db7 --- /dev/null +++ b/env/juwels/shell_2022+nvfortran @@ -0,0 +1,44 @@ +########## +module --force purge +module use /gpfs/software/juwels/otherstages +module load Stages/2022 +module load NVHPC/22.1 # older versions of pgf90/nvfortran can not compile mixed assumed shape and assumed rank parameters +module load ParaStationMPI/5.5.0-1 +module load CMake/3.21.1 +#module load imkl/2021.2.0 +module load netCDF-Fortran/4.5.3 +module load netCDF/4.8.1 +module load Perl/5.34.0 + +export LC_ALL=en_US.UTF-8 +export TMPDIR=/tmp +export FC=mpifort +export F77=mpifort +export MPIFC=mpifort +#export FCFLAGS=-free +export CC=mpicc +export CXX=mpic++ + +export NETCDF_Fortran_INCLUDE_DIRECTORIES=$EBROOTNETCDFMINFORTRAN/include +export NETCDF_Fortran_LIBRARIES=$EBROOTNETCDFMINFORTRAN/lib + +#module use /gpfs/software/juwels/otherstages +#module load Stages/2019a +#module load StdEnv +## For intel MPI +##module load Intel/2019.3.199-GCC-8.3.0 IntelMPI/2018.5.288 imkl/2019.3.199 +##export FC=mpiifort CC=mpiicc CXX=mpiicpc + +## For ParaStation MPI +#module load Intel/2019.3.199-GCC-8.3.0 ParaStationMPI/5.4 imkl/2019.5.281 +#export FC=mpifort CC=mpicc CXX=mpicxx + +#module load netCDF/4.6.3 +#module load netCDF-Fortran/4.4.5 +#module load CMake +#export NETCDF_DIR=$EBROOTNETCDF +#export NETCDFF_DIR=$EBROOTNETCDFMINFORTRAN +#export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ +#export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ +#export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ + diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 index bc85259f7..23ee1b243 100644 --- a/src/MOD_DYN.F90 +++ b/src/MOD_DYN.F90 @@ -27,11 +27,13 @@ MODULE MOD_DYN !!! real(kind=WP), allocatable :: rr(:), zz(:), pp(:), App(:) contains - private - procedure WRITE_T_SOLVERINFO - procedure READ_T_SOLVERINFO - generic :: write(unformatted) => WRITE_T_SOLVERINFO - generic :: read(unformatted) => READ_T_SOLVERINFO +#if defined(__PGI) + private +#endif + procedure WRITE_T_SOLVERINFO + procedure READ_T_SOLVERINFO + generic :: write(unformatted) => WRITE_T_SOLVERINFO + generic :: read(unformatted) => READ_T_SOLVERINFO END TYPE T_SOLVERINFO ! ! @@ -43,11 +45,13 @@ MODULE MOD_DYN ! easy backscatter contribution real(kind=WP), allocatable, dimension(:,:) :: u_b, v_b contains - private - procedure WRITE_T_DYN_WORK - procedure READ_T_DYN_WORK - generic :: write(unformatted) => WRITE_T_DYN_WORK - generic :: read(unformatted) => READ_T_DYN_WORK +#if defined(__PGI) + private +#endif + procedure WRITE_T_DYN_WORK + procedure READ_T_DYN_WORK + generic :: write(unformatted) => WRITE_T_DYN_WORK + generic :: read(unformatted) => READ_T_DYN_WORK END TYPE T_DYN_WORK ! ! @@ -110,11 +114,13 @@ MODULE MOD_DYN !___________________________________________________________________________ contains - private - procedure WRITE_T_DYN - procedure READ_T_DYN - generic :: write(unformatted) => WRITE_T_DYN - generic :: read(unformatted) => READ_T_DYN +#if defined(__PGI) + private +#endif + procedure WRITE_T_DYN + procedure READ_T_DYN + generic :: write(unformatted) => WRITE_T_DYN + generic :: read(unformatted) => READ_T_DYN END TYPE T_DYN contains diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 index 22e81cd07..8b5d0ae84 100644 --- a/src/MOD_ICE.F90 +++ b/src/MOD_ICE.F90 @@ -18,7 +18,9 @@ MODULE MOD_ICE integer :: ID !___________________________________________________________________________ contains - private +#if defined(__PGI) + private +#endif procedure WRITE_T_ICE_DATA procedure READ_T_ICE_DATA generic :: write(unformatted) => WRITE_T_ICE_DATA @@ -39,7 +41,9 @@ MODULE MOD_ICE real(kind=WP), allocatable, dimension(:) :: ice_strength, inv_areamass, inv_mass !___________________________________________________________________________ contains - private +#if defined(__PGI) + private +#endif procedure WRITE_T_ICE_WORK procedure READ_T_ICE_WORK generic :: write(unformatted) => WRITE_T_ICE_WORK @@ -84,7 +88,9 @@ MODULE MOD_ICE real(kind=WP) :: albim = 0.68 ! melting ice real(kind=WP) :: albw = 0.066 ! open water, LY2004 contains - private +#if defined(__PGI) + private +#endif procedure WRITE_T_ICE_THERMO procedure READ_T_ICE_THERMO generic :: write(unformatted) => WRITE_T_ICE_THERMO @@ -107,7 +113,9 @@ MODULE MOD_ICE #endif /* (__oifs) */ !___________________________________________________________________________ contains - private +#if defined(__PGI) + private +#endif procedure WRITE_T_ICE_ATMCOUPL procedure READ_T_ICE_ATMCOUPL generic :: write(unformatted) => WRITE_T_ICE_ATMCOUPL @@ -194,7 +202,9 @@ MODULE MOD_ICE logical :: ice_update = .true. !___________________________________________________________________________ contains - private +#if defined(__PGI) + private +#endif procedure WRITE_T_ICE procedure READ_T_ICE generic :: write(unformatted) => WRITE_T_ICE diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index a318e784b..dd3c40c72 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -136,7 +136,9 @@ MODULE MOD_MESH character(:), allocatable :: representative_checksum contains +#if defined(__PGI) private +#endif procedure write_t_mesh procedure read_t_mesh generic :: write(unformatted) => write_t_mesh diff --git a/src/MOD_PARTIT.F90 b/src/MOD_PARTIT.F90 index 5c16ffcd5..c51ae2221 100644 --- a/src/MOD_PARTIT.F90 +++ b/src/MOD_PARTIT.F90 @@ -28,7 +28,9 @@ module MOD_PARTIT integer :: nreq ! number of requests for MPI_Wait ! (to combine halo exchange of several fields) contains - private +#if defined(__PGI) + private +#endif procedure WRITE_T_COM_STRUCT procedure READ_T_COM_STRUCT generic :: write(unformatted) => WRITE_T_COM_STRUCT @@ -83,7 +85,9 @@ module MOD_PARTIT integer(omp_lock_kind), allocatable :: plock(:) #endif contains +#if defined(__PGI) private +#endif procedure WRITE_T_PARTIT procedure READ_T_PARTIT generic :: write(unformatted) => WRITE_T_PARTIT diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 index 2ce390267..6f1912d54 100644 --- a/src/MOD_TRACER.F90 +++ b/src/MOD_TRACER.F90 @@ -18,11 +18,13 @@ MODULE MOD_TRACER integer :: ID contains - private - procedure WRITE_T_TRACER_DATA - procedure READ_T_TRACER_DATA - generic :: write(unformatted) => WRITE_T_TRACER_DATA - generic :: read(unformatted) => READ_T_TRACER_DATA +#if defined(__PGI) +private +#endif +procedure WRITE_T_TRACER_DATA +procedure READ_T_TRACER_DATA +generic :: write(unformatted) => WRITE_T_TRACER_DATA +generic :: read(unformatted) => READ_T_TRACER_DATA END TYPE T_TRACER_DATA @@ -46,11 +48,13 @@ MODULE MOD_TRACER real(kind=WP),allocatable,dimension(:,:,:) :: edge_up_dn_grad contains - private - procedure WRITE_T_TRACER_WORK - procedure READ_T_TRACER_WORK - generic :: write(unformatted) => WRITE_T_TRACER_WORK - generic :: read(unformatted) => READ_T_TRACER_WORK +#if defined(__PGI) +private +#endif +procedure WRITE_T_TRACER_WORK +procedure READ_T_TRACER_WORK +generic :: write(unformatted) => WRITE_T_TRACER_WORK +generic :: read(unformatted) => READ_T_TRACER_WORK END TYPE T_TRACER_WORK ! auxury type for reading namelist.tra @@ -79,7 +83,9 @@ MODULE MOD_TRACER !logical :: i_vert_diff = .true. contains +#if defined(__PGI) private +#endif procedure WRITE_T_TRACER procedure READ_T_TRACER generic :: write(unformatted) => WRITE_T_TRACER diff --git a/src/io_restart.F90 b/src/io_restart.F90 index d29c934ea..841cfc474 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -250,17 +250,17 @@ subroutine restart(istep, l_read, which_readr, ice, dynamics, tracers, partit, m which_readr = 2 if (use_ice) then call read_all_bin_restarts(bin_restart_dirpath, & + partit = partit, & + mesh = mesh, & ice = ice, & dynamics = dynamics, & - tracers = tracers, & - partit = partit, & - mesh = mesh) + tracers = tracers ) else call read_all_bin_restarts(bin_restart_dirpath, & - dynamics = dynamics, & - tracers = tracers, & partit = partit, & - mesh = mesh) + mesh = mesh, & + dynamics = dynamics, & + tracers = tracers ) end if !___________________________________________________________________________ ! read netcdf file restart @@ -284,11 +284,11 @@ subroutine restart(istep, l_read, which_readr, ice, dynamics, tracers, partit, m call write_all_bin_restarts((/globalstep+istep, int(ctime), yearnew/), & bin_restart_dirpath, & bin_restart_infopath, & + partit, & + mesh, & ice, & dynamics, & - tracers, & - partit, & - mesh) + tracers ) end if end if end if @@ -338,11 +338,11 @@ subroutine restart(istep, l_read, which_readr, ice, dynamics, tracers, partit, m call write_all_bin_restarts((/globalstep+istep, int(ctime), yearnew/), & bin_restart_dirpath, & bin_restart_infopath, & + partit, & + mesh, & ice, & dynamics, & - tracers, & - partit, & - mesh) + tracers ) end if ! actualize clock file to latest restart point diff --git a/src/io_restart_derivedtype.F90 b/src/io_restart_derivedtype.F90 index c884ba555..c6f4d9049 100644 --- a/src/io_restart_derivedtype.F90 +++ b/src/io_restart_derivedtype.F90 @@ -1,6 +1,6 @@ module restart_derivedtype_module interface - subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, ice, dynamics, tracers, partit, mesh) + subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, partit, mesh, ice, dynamics, tracers) use MOD_ICE use MOD_DYN use MOD_TRACER @@ -9,32 +9,32 @@ subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, ice, dynamics, trace integer, dimension(3) , intent(in) :: ctarr character(len=*), intent(in) :: path_in character(len=*), intent(in) :: pathi_in + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh type(t_ice) , intent(inout), target, optional :: ice type(t_dyn) , intent(inout), target, optional :: dynamics - type(t_tracer), intent(inout), target, optional :: tracers - type(t_partit), intent(inout), target, optional :: partit - type(t_mesh) , intent(inout), target, optional :: mesh + type(t_tracer), intent(inout), target, optional :: tracers end subroutine - subroutine read_all_bin_restarts(path_in, ice, dynamics, tracers, partit, mesh) + subroutine read_all_bin_restarts(path_in, partit, mesh, ice, dynamics, tracers) use MOD_ICE use MOD_DYN use MOD_TRACER use MOD_PARTIT use MOD_MESH character(len=*), intent(in) :: path_in + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh type(t_ice) , intent(inout), target, optional :: ice type(t_dyn) , intent(inout), target, optional :: dynamics - type(t_tracer), intent(inout), target, optional :: tracers - type(t_partit), intent(inout), target, optional :: partit - type(t_mesh) , intent(inout), target, optional :: mesh + type(t_tracer), intent(inout), target, optional :: tracers end subroutine end interface end module ! ! !_______________________________________________________________________________ -subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, ice, dynamics, tracers, partit, mesh) +subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, partit, mesh, ice, dynamics, tracers) use MOD_ICE use MOD_DYN use MOD_TRACER @@ -46,15 +46,22 @@ subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, ice, dynamics, trace integer, dimension(3) , intent(in) :: ctarr ! //cstep,ctime,cyear// character(len=*) , intent(in) :: path_in character(len=*) , intent(in) :: pathi_in + type(t_partit), target, intent(in) :: partit + type(t_mesh) , target, intent(in) :: mesh type(t_ice) , target, intent(in), optional :: ice type(t_dyn) , target, intent(in), optional :: dynamics type(t_tracer), target, intent(in), optional :: tracers - type(t_partit), target, intent(in) :: partit - type(t_mesh) , target, intent(in) :: mesh - + ! EO parameters integer fileunit, fileunit_i - + +#if defined(__PGI) + if (partit%mype == 0) then + write(*,*) 'write_all_bin_restarts is deactivated for PGI compiler because of T_TRACER%DATA & T_ICE%DATA cause write call to crash' + write(*,*) '*** checked for NVHPC/22.1 ***' + end if +#else + !___________________________________________________________________________ ! write info file if(partit%mype == 0) then @@ -94,23 +101,23 @@ subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, ice, dynamics, trace write(fileunit_i, '(1(g0))') "! t_partit" print *, achar(27)//'[33m'//' > write derived type t_partit'//achar(27)//'[0m' end if - + !___________________________________________________________________________ - ! tracer derived type + ! tracer derived type if (present(tracers)) then fileunit = partit%mype+300 open(newunit = fileunit, & file = trim(path_in)//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & status = 'replace', & form = 'unformatted') - write(fileunit) tracers + write(fileunit) tracers close(fileunit) - if(partit%mype == 0) then + if(partit%mype == 0) then write(fileunit_i, '(1(g0))') "! t_tracer" print *, achar(27)//'[33m'//' > write derived type t_tracer'//achar(27)//'[0m' - end if - end if - + end if + end if + !___________________________________________________________________________ ! dynamics derived type if (present(dynamics)) then @@ -142,10 +149,9 @@ subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, ice, dynamics, trace print *, achar(27)//'[33m'//' > write derived type t_ice'//achar(27)//'[0m' end if end if - !___________________________________________________________________________ if(partit%mype == 0) close(fileunit_i) - +#endif !defined(__PGI) end subroutine ! ! @@ -164,41 +170,44 @@ subroutine read_all_bin_restarts(path_in, ice, dynamics, tracers, partit, mesh) ! do optional here for the usage with dwarfs, since there only specific derived ! types will be needed character(len=*), intent(in) :: path_in + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh type(t_ice) , intent(inout), target, optional :: ice type(t_dyn) , intent(inout), target, optional :: dynamics type(t_tracer), intent(inout), target, optional :: tracers - type(t_partit), intent(inout), target, optional :: partit - type(t_mesh) , intent(inout), target, optional :: mesh integer fileunit - + +#if defined(__PGI) + if (partit%mype == 0) then + write(*,*) 'read_all_bin_restarts is deactivated for PGI compiler because of T_TRACER%DATA & T_ICE%DATA cause write call to crash' + write(*,*) '*** checked for NVHPC/22.1 ***' + end if +#else + !___________________________________________________________________________ if (partit%mype==0) print *, achar(27)//'[1;33m'//' --> read restarts from derived type binary'//achar(27)//'[0m' !___________________________________________________________________________ ! mesh derived type - if (present(mesh)) then - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = trim(path_in)//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) mesh - close(fileunit) - if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_mesh'//achar(27)//'[0m' - end if + fileunit = partit%mype+300 + open( newunit = fileunit, & + file = trim(path_in)//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) mesh + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_mesh'//achar(27)//'[0m' !___________________________________________________________________________ ! partit derived type - if (present(partit)) then - fileunit = partit%mype+300 - open(newunit = fileunit, & - file = trim(path_in)//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & - status = 'old', & - form = 'unformatted') - read(fileunit) partit - close(fileunit) - if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_partit'//achar(27)//'[0m' - end if + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) partit + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_partit'//achar(27)//'[0m' !___________________________________________________________________________ ! tracer derived type @@ -237,6 +246,7 @@ subroutine read_all_bin_restarts(path_in, ice, dynamics, tracers, partit, mesh) read(fileunit) ice close(fileunit) if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_ice'//achar(27)//'[0m' - end if + end if +#endif !defined(__PGI) end subroutine From 17d8123f443d7db067010f8996dbb699416613a9 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 10 Mar 2022 12:16:37 +0100 Subject: [PATCH 857/909] when building FESOM as library, do not force to build as a shared library as this does not work on some platforms --- lib/parms/CMakeLists.txt | 2 +- src/CMakeLists.txt | 4 ++-- src/async_threads_cpp/CMakeLists.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/parms/CMakeLists.txt b/lib/parms/CMakeLists.txt index 2ae7547eb..733d3dfb7 100644 --- a/lib/parms/CMakeLists.txt +++ b/lib/parms/CMakeLists.txt @@ -10,7 +10,7 @@ include("${CMAKE_CURRENT_LIST_DIR}/../../cmake/FindBLAS.cmake") # create our library (set its name to name of this project) if(${BUILD_FESOM_AS_LIBRARY}) - add_library(${PROJECT_NAME} SHARED ${all_sources}) + add_library(${PROJECT_NAME} ${all_sources}) else() add_library(${PROJECT_NAME} ${all_sources}) endif() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9971af279..d9ff42944 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -77,7 +77,7 @@ add_subdirectory(async_threads_cpp) include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindNETCDF.cmake) if(${BUILD_FESOM_AS_LIBRARY}) - add_library(${PROJECT_NAME}_C SHARED ${sources_C}) + add_library(${PROJECT_NAME}_C ${sources_C}) else() add_library(${PROJECT_NAME}_C ${sources_C}) endif() @@ -89,7 +89,7 @@ target_link_libraries(${PROJECT_NAME}_C parms) #metis # we do not always build the library along with the executable to avoid having two targets here in the CMakeLists.txt # two targets would allow e.g. setting different compiler options or preprocessor definition, which would be error prone. if(${BUILD_FESOM_AS_LIBRARY}) - add_library(${PROJECT_NAME} SHARED ${sources_Fortran}) + add_library(${PROJECT_NAME} ${sources_Fortran}) else() add_executable(${PROJECT_NAME} ${sources_Fortran} ${src_home}/fesom_main.F90) endif() diff --git a/src/async_threads_cpp/CMakeLists.txt b/src/async_threads_cpp/CMakeLists.txt index 9dfbf2fb9..d72d7ce7d 100644 --- a/src/async_threads_cpp/CMakeLists.txt +++ b/src/async_threads_cpp/CMakeLists.txt @@ -9,7 +9,7 @@ include(FortranCInterface) FortranCInterface_HEADER(ThreadsManagerFCMacros.h MACRO_NAMESPACE "ThreadsManagerFCMacros_" SYMBOLS init_ccall begin_ccall end_ccall) if(${BUILD_FESOM_AS_LIBRARY}) - add_library(${PROJECT_NAME} SHARED ${sources_CXX}) + add_library(${PROJECT_NAME} ${sources_CXX}) else() add_library(${PROJECT_NAME} ${sources_CXX}) endif() From 0f53ac0b49e806418cf3d885f10e85e6c06430f3 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 10 Mar 2022 12:25:07 +0100 Subject: [PATCH 858/909] add the existing cmake variable to toggle SHARED vs STATIC library builds to our CMakeLists.txt as a reference --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4feed315f..bde6aaecd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,6 +7,7 @@ if(NOT CMAKE_BUILD_TYPE) endif() project(FESOM2.0) +option(BUILD_SHARED_LIBS "Build using shared libraries" OFF) # cmake-internal switch to toggle if library targets are being build as STATIC or SHARED, see https://cmake.org/cmake/help/latest/guide/tutorial/Selecting%20Static%20or%20Shared%20Libraries.html set(TOPLEVEL_DIR ${CMAKE_CURRENT_LIST_DIR}) set(FESOM_COUPLED OFF CACHE BOOL "compile fesom standalone or with oasis support (i.e. coupled)") set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. (Also needs FESOM_COUPLED to work)") From 639a0ad1121e55fc9c3584e90fea6c9e6b3b11a7 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 10 Mar 2022 14:12:21 +0100 Subject: [PATCH 859/909] remove last occurence of execute_command_line. It is failing too often (e.g. on ollie, aleph, juwels) --- src/oce_mesh.F90 | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index f30bb3758..34df05a01 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -704,20 +704,8 @@ SUBROUTINE read_mesh(partit, mesh) deallocate(rbuff, ibuff) deallocate(mapping) - ! try to calculate checksum and distribute it to every process - ! the shell command is probably not very portable and might fail, in which case we just do not have a checksum - mesh%representative_checksum = ' ' ! we use md5 which is 32 chars long, so set default value to the same length - if(mype==0) then - call execute_command_line("md5sum "//trim(MeshPath)//"nod2d.out | cut -d ' ' -f 1 > "//trim(ResultPath)//"mesh_checksum") - ! we can not check if execute_command_line succeeded (e.g. with cmdstat), as the pipe will swallow any error from the initial command - ! so we have to thoroughly check if the file exists and if it contains our checksum - open(newunit=fileunit, file=trim(ResultPath)//"mesh_checksum", action="READ", iostat=iostat) - if(iostat==0) read(fileunit, *, iostat=iostat) mesh_checksum - close(fileunit) - if(iostat==0 .and. len_trim(mesh_checksum)==32) mesh%representative_checksum = mesh_checksum - end if - call MPI_BCAST(mesh%representative_checksum, len(mesh%representative_checksum), MPI_CHAR, 0, MPI_COMM_FESOM, MPIerr) - mesh%representative_checksum = trim(mesh%representative_checksum) ! if we did not get a checksum, the string is empty +! no checksum for now, execute_command_line is failing too often. if you think it is important, please drop me a line and I will try to revive it: jan.hegewald@awi.de +mesh%representative_checksum = '' CALL MPI_BARRIER(MPI_COMM_FESOM, MPIerr) t1=MPI_Wtime() From bb7c1a963bbbabf97dc02b11db623f8629e4b9e8 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Fri, 11 Mar 2022 13:48:00 +0100 Subject: [PATCH 860/909] bug fix in pressure_bv (depth computatiopn for pressure contribution) --- src/oce_ale_pressure_bv.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index a8c5ca7ec..633c2f965 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -424,7 +424,7 @@ subroutine pressure_bv(tracers, partit, mesh) flag1=.true. flag2=.true. do nz=nzmin+1,nzmax-1 - zmean = 0.5_WP*sum(Z_3d_n(nz-1:nz, node), node) + zmean = 0.5_WP*sum(Z_3d_n(nz-1:nz, node)) bulk_up = bulk_0(nz-1) + zmean*(bulk_pz(nz-1) + zmean*bulk_pz2(nz-1)) bulk_dn = bulk_0(nz) + zmean*(bulk_pz(nz) + zmean*bulk_pz2(nz)) rho_up = bulk_up*rhopot(nz-1) / (bulk_up + 0.1_WP*zmean*real(state_equation)) From ba88a951b66e4656bfe717126572fccb0319802d Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 15 Mar 2022 16:37:28 +0100 Subject: [PATCH 861/909] activated coupling of ice_temp & ice_alb with IFS --- src/gen_forcing_couple.F90 | 2 +- src/ifs_interface/ifs_interface.F90 | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 0e0465cdf..15ac1b446 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -172,7 +172,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) do i=1,nsend exchange =0. if (i.eq.1) then -#if defined (__oifs) || defined (__ifsinterface) +#if defined (__oifs) ! AWI-CM3 outgoing state vectors do n=1,myDim_nod2D+eDim_nod2D exchange(n)=tracers%data(1)%values(1, n)+tmelt ! sea surface temperature [K] diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 9504d5161..18ea78739 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -369,7 +369,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & integer, dimension(:,:) , pointer :: elem2D_nodes integer, pointer :: myDim_nod2D, eDim_nod2D integer, pointer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D - real(kind=wpIFS), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=wpIFS), dimension(:), pointer :: a_ice, m_ice, m_snow, ice_temp, ice_alb real(kind=wpIFS) , pointer :: tmelt ! Message passing information @@ -400,9 +400,10 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & a_ice => fesom%ice%data(1)%values(:) m_ice => fesom%ice%data(2)%values(:) m_snow => fesom%ice%data(3)%values(:) + ice_temp => fesom%ice%data(4)%values(:) + ice_alb => fesom%ice%atmcoupl%ice_alb(:) tmelt => fesom%ice%thermo%tmelt ! scalar const. - ! =================================================================== ! ! Pack SST data and convert to K. 'pgsst' is on Gauss grid. do n=1,myDim_nod2D @@ -426,7 +427,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & ! =================================================================== ! ! Pack ice temperature data (already in K) - zsend(:)=273.15 + zsend(:)=ice_temp ! Interpolate ice surface temperature: 'pgist' on Gaussian grid. CALL parinter_fld( mype, npes, icomm, Ttogauss, & @@ -436,7 +437,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & ! =================================================================== ! ! Pack ice albedo data and interpolate: 'pgalb' on Gaussian grid. - zsend(:)=0.7 + zsend(:)=ice_alb ! Interpolate ice albedo CALL parinter_fld( mype, npes, icomm, Ttogauss, & From c9be18e0758985f31c3ca7b3b6941ab320d16e77 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Thu, 24 Mar 2022 18:59:14 +0100 Subject: [PATCH 862/909] avoid floating point exception when coupled with IFS avoid floating point exception when coupled with IFS --- src/ice_setup_step.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index ebcbdbc17..eab219213 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -188,7 +188,7 @@ subroutine ice_timestep(step, ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) !$OMP PARALLEL DO do i=1,myDim_nod2D+eDim_nod2D - if (a_ice(i)>0.0_WP) ice_temp(i) = ice_temp(i)/a_ice(i) + if (a_ice(i)>0.0_WP) ice_temp(i) = ice_temp(i)/max(a_ice(i), 1.e-6_WP) end do !$OMP END PARALLEL DO #endif /* (__oifs) */ From 82ab94784fe0d9b19997aed1bab66b48924fb02a Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Fri, 25 Mar 2022 13:01:50 +0100 Subject: [PATCH 863/909] adapting oasis coupling interface to new datastructures --- src/cpl_driver.F90 | 17 +++++++++++------ src/fesom_module.F90 | 2 +- src/gen_forcing_couple.F90 | 10 ++++++++-- src/gen_modules_partitioning.F90 | 25 ++++++++++++++----------- src/ice_thermo_cpl.F90 | 17 ++++++++++------- src/io_meandata.F90 | 2 +- 6 files changed, 45 insertions(+), 28 deletions(-) diff --git a/src/cpl_driver.F90 b/src/cpl_driver.F90 index d52faf1f5..6a45a917b 100755 --- a/src/cpl_driver.F90 +++ b/src/cpl_driver.F90 @@ -15,6 +15,7 @@ module cpl_driver use mod_oasis ! oasis module use g_config, only : dt use o_param, only : rad + USE MOD_PARTIT implicit none save ! @@ -91,7 +92,8 @@ module cpl_driver contains - subroutine cpl_oasis3mct_init( localCommunicator ) + subroutine cpl_oasis3mct_init(partit, localCommunicator ) + USE MOD_PARTIT implicit none save @@ -103,6 +105,7 @@ subroutine cpl_oasis3mct_init( localCommunicator ) ! Arguments ! integer, intent(OUT) :: localCommunicator + type(t_partit), intent(inout), target :: partit ! ! Local declarations ! @@ -141,12 +144,12 @@ subroutine cpl_oasis3mct_init( localCommunicator ) ENDIF ! Get MPI size and rank - CALL MPI_Comm_Size ( localCommunicator, npes, ierror ) + CALL MPI_Comm_Size ( localCommunicator, partit%npes, ierror ) IF (ierror /= 0) THEN CALL oasis_abort(comp_id, 'cpl_oasis3mct_init', 'comm_size failed.') ENDIF - CALL MPI_Comm_Rank ( localCommunicator, mype, ierror ) + CALL MPI_Comm_Rank ( localCommunicator, partit%mype, ierror ) IF (ierror /= 0) THEN CALL oasis_abort(comp_id, 'cpl_oasis3mct_init', 'comm_rank failed.') ENDIF @@ -210,8 +213,8 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh) integer :: my_number_of_points integer :: number_of_all_points - integer :: counts_from_all_pes(npes) - integer :: displs_from_all_pes(npes) + integer :: counts_from_all_pes(partit%npes) + integer :: displs_from_all_pes(partit%npes) integer :: my_displacement integer,allocatable :: unstr_mask(:,:) @@ -232,6 +235,7 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh) #include "associate_part_ass.h" #include "associate_mesh_ass.h" + #ifdef VERBOSE print *, '==============================================================' print *, 'cpl_oasis3mct_define_unstr : coupler definition for OASIS3-MCT' @@ -595,6 +599,7 @@ subroutine cpl_oasis3mct_recv(ind, data_array, action, partit) integer, intent( IN ) :: ind ! variable Id logical, intent( OUT ) :: action ! real(kind=WP), intent( OUT ) :: data_array(:) + type(t_partit), intent(inout), target :: partit ! ! Local declarations ! @@ -624,7 +629,7 @@ subroutine cpl_oasis3mct_recv(ind, data_array, action, partit) ! and delivered back to FESOM. action=(info==3 .OR. info==10 .OR. info==11 .OR. info==12 .OR. info==13) if (action) then - data_array(1:myDim_nod2d) = exfld + data_array(1:partit%myDim_nod2d) = exfld call exchange_nod(data_array, partit) end if t3=MPI_Wtime() diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index be37b1b67..2eb2fb046 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -115,7 +115,7 @@ subroutine fesom_init(fesom_total_nsteps) #if defined (__oasis) - call cpl_oasis3mct_init(partit%MPI_COMM_FESOM) + call cpl_oasis3mct_init(f%partit,f%partit%MPI_COMM_FESOM) #endif f%t1 = MPI_Wtime() diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 0e0465cdf..35561d198 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -133,6 +133,8 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) #endif #if defined (__oifs) || defined (__ifsinterface) real(kind=WP), dimension(:), pointer :: ice_temp, ice_alb, enthalpyoffuse + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), pointer :: tmelt #endif real(kind=WP) , pointer :: rhoair #include "associate_part_def.h" @@ -146,9 +148,13 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) stress_atmice_x => ice%stress_atmice_x(:) stress_atmice_y => ice%stress_atmice_y(:) #if defined (__oifs) || defined (__ifsinterface) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) ice_temp => ice%data(4)%values(:) ice_alb => ice%atmcoupl%ice_alb(:) enthalpyoffuse => ice%atmcoupl%enthalpyoffuse(:) + tmelt => ice%thermo%tmelt #endif #if defined (__oasis) || defined (__ifsinterface) oce_heat_flux => ice%atmcoupl%oce_flx_h(:) @@ -676,7 +682,7 @@ SUBROUTINE net_rec_from_atm(action, partit) type(t_partit), intent(inout), target :: partit INTEGER :: my_global_rank, ierror INTEGER :: n - INTEGER :: status(MPI_STATUS_SIZE,npes) + INTEGER :: status(MPI_STATUS_SIZE,partit%npes) INTEGER :: request(2) real(kind=WP) :: aux(nrecv) #if defined (__oifs) @@ -692,7 +698,7 @@ SUBROUTINE net_rec_from_atm(action, partit) CALL MPI_IRecv(atm_net_fluxes_south(1), nrecv, MPI_DOUBLE_PRECISION, source_root, 112, MPI_COMM_WORLD, request(2), partit%MPIerr) CALL MPI_Waitall(2, request, status, partit%MPIerr) end if - call MPI_Barrier(partit%MPI_COMM_FESOM, MPIerr) + call MPI_Barrier(partit%MPI_COMM_FESOM, partit%MPIerr) call MPI_AllREDUCE(atm_net_fluxes_north(1), aux, nrecv, MPI_DOUBLE_PRECISION, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) atm_net_fluxes_north=aux call MPI_AllREDUCE(atm_net_fluxes_south(1), aux, nrecv, MPI_DOUBLE_PRECISION, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index ae1cf3eab..defbb3d46 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -94,7 +94,7 @@ subroutine par_ex(COMM, mype, abort) ! finalizes MPI #ifndef __oasis if (present(abort)) then - if (mype==0) write(*,*) 'Run finished unexpectedly!' + if (partit%mype==0) write(*,*) 'Run finished unexpectedly!' call MPI_ABORT(COMM, 1 ) else call MPI_Barrier(COMM, error) @@ -102,28 +102,31 @@ subroutine par_ex(COMM, mype, abort) ! finalizes MPI endif #else if (.not. present(abort)) then - if (mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' + if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' call MPI_Barrier(MPI_COMM_WORLD, error) end if call prism_terminate_proto(error) - if (mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' + if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' call MPI_Barrier(MPI_COMM_WORLD, error) - if (mype==0) print *, 'FESOM calls MPI_Finalize' + if (partit%mype==0) print *, 'FESOM calls MPI_Finalize' call MPI_Finalize(error) #endif - if (mype==0) print *, 'fesom should stop with exit status = 0' + if (partit%mype==0) print *, 'fesom should stop with exit status = 0' #endif #if defined (__oifs) -!OIFS coupling doesnt call prism_terminate_proto and uses MPI_COMM_FESOM +!OIFS coupling doesnt call prism_terminate_proto and uses COMM instead of MPI_COMM_WORLD implicit none - integer,optional :: abort + integer, intent(in) :: COMM + integer, intent(in) :: mype + integer, optional, intent(in) :: abort + integer :: error if (present(abort)) then - if (mype==0) write(*,*) 'Run finished unexpectedly!' - call MPI_ABORT(COMM, 1 ) + if (mype==0) write(*,*) 'Run finished unexpectedly!' + call MPI_ABORT(COMM, 1 ) else - call MPI_Barrier(COMM, error) - call MPI_Finalize(error) + call MPI_Barrier(COMM, error) + call MPI_Finalize(error) endif #endif diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 929ec1369..f85b265cb 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -151,11 +151,11 @@ subroutine thermodynamics(ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) !---- different lead closing parameter for NH and SH if (geo_coord_nod2D(2, inod)>0) then - h0min = 0.3 - h0max = 0.3 + h0min = 0.5 + h0max = 1.5 else h0min = 1.0 - h0max = 1.0 + h0max = 1.5 endif !---- For AWI-CM3 we calculate ice surface temp and albedo in fesom, @@ -537,6 +537,7 @@ subroutine ice_albedo(ithermp, h, hsn, t, alb, geolat) real(kind=WP) :: alb real(kind=WP) :: geolat real(kind=WP) :: melt_pool_alb_reduction + real(kind=WP) :: nh_winter_reduction real(kind=WP), pointer :: albsn, albi, albsnm, albim albsn => ice%thermo%albsn albi => ice%thermo%albi @@ -545,17 +546,19 @@ subroutine ice_albedo(ithermp, h, hsn, t, alb, geolat) ! set albedo ! ice and snow, freezing and melting conditions are distinguished - if (geolat.gt.0.) then !SH does not have melt ponds + if (geolat.lt.0.) then !SH does not have melt ponds melt_pool_alb_reduction = 0.0_WP + nh_winter_reduction = 0.0_WP else - melt_pool_alb_reduction = 0.12_WP + melt_pool_alb_reduction = 0.20_WP + nh_winter_reduction = 0.06_WP endif if (h>0.0_WP) then if (t<273.15_WP) then ! freezing condition if (hsn.gt.0.001_WP) then ! snow cover present - alb=albsn + alb=albsn-nh_winter_reduction else ! no snow cover - alb=albi + alb=albi-nh_winter_reduction endif else ! melting condition if (hsn.gt.0.001_WP) then ! snow cover present diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 907aede95..99dbb7670 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -538,7 +538,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream(nod2D , myDim_nod2D , 'ch', 'transfer coeff. sensible heat', '', ch_atm_oce_arr(:), 1, 'm', i_real4, partit, mesh) call def_stream(nod2D , myDim_nod2D , 'ce', 'transfer coeff. evaporation ' , '', ce_atm_oce_arr(:), 1, 'm', i_real4, partit, mesh) #if defined (__oasis) - call def_stream(nod2D, myDim_nod2D, 'subli', 'sublimation', 'm/s', sublimation(:), 1, 'm', i_real4, mesh) + call def_stream(nod2D, myDim_nod2D, 'subli', 'sublimation', 'm/s', sublimation(:), 1, 'm', i_real4, partit, mesh) #endif end if From f15bf1d765822c160378d1cc03336c2fb39bd1f7 Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Mon, 28 Mar 2022 20:58:35 +0200 Subject: [PATCH 864/909] change default 3d outfreq to monthly --- config/namelist.io | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/namelist.io b/config/namelist.io index 0a3270c4a..e765b8f3a 100644 --- a/config/namelist.io +++ b/config/namelist.io @@ -31,8 +31,8 @@ io_list = 'sst ',1, 'm', 4, 'MLD2 ',1, 'm', 4, 'tx_sur ',1, 'm', 4, 'ty_sur ',1, 'm', 4, - 'temp ',1, 'y', 4, - 'salt ',1, 'y', 8, + 'temp ',1, 'm', 4, + 'salt ',1, 'm', 8, 'N2 ',1, 'y', 4, 'Kv ',1, 'y', 4, 'u ',1, 'y', 4, From 5f1f1e749f7337e46898abd082c7d4a023ae1c3e Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Mon, 28 Mar 2022 22:07:40 +0200 Subject: [PATCH 865/909] something close to the namelist we used before --- config/namelist.oce | 6 +++--- config/namelist.tra | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/config/namelist.oce b/config/namelist.oce index 7af6867f7..555190b36 100644 --- a/config/namelist.oce +++ b/config/namelist.oce @@ -6,17 +6,17 @@ A_ver= 1.e-4 ! Vertical viscosity, m^2/s scale_area=5.8e9 ! Visc. and diffus. are for an element with scale_area SPP=.false. ! Salt Plume Parameterization Fer_GM=.true. ! to swith on/off GM after Ferrari et al. 2010 -K_GM_max = 2000.0 ! max. GM thickness diffusivity (m2/s) +K_GM_max = 3000.0 ! max. GM thickness diffusivity (m2/s) K_GM_min = 2.0 ! max. GM thickness diffusivity (m2/s) K_GM_bvref = 2 ! def of bvref in ferreira scaling 0=srf,1=bot mld,2=mean over mld,3=weighted mean over mld K_GM_rampmax = -1.0 ! Resol >K_GM_rampmax[km] GM on K_GM_rampmin = -1.0 ! Resol Date: Tue, 29 Mar 2022 13:08:44 +0200 Subject: [PATCH 866/909] combined changes for flux paper --- config/namelist.ice | 4 ++-- src/cpl_driver.F90 | 4 +++- src/fesom_module.F90 | 2 +- src/gen_forcing_couple.F90 | 34 ++++++++++++++++++++++++---------- src/ice_thermo_cpl.F90 | 28 ++++++---------------------- 5 files changed, 36 insertions(+), 36 deletions(-) diff --git a/config/namelist.ice b/config/namelist.ice index bcd86f145..6c08cdf2d 100644 --- a/config/namelist.ice +++ b/config/namelist.ice @@ -23,8 +23,8 @@ emiss_ice=0.97 ! Emissivity of Snow/Ice, emiss_wat=0.97 ! Emissivity of open water albsn=0.81 ! Albedo: frozen snow albsnm=0.77 ! melting snow -albi=0.7 ! frozen ice -albim=0.68 ! melting ice +albi=0.6 ! frozen ice +albim=0.43 ! melting ice albw=0.1 ! open water con=2.1656 ! Thermal conductivities: ice; W/m/K consn=0.31 ! snow diff --git a/src/cpl_driver.F90 b/src/cpl_driver.F90 index 6a45a917b..b3e4ef749 100755 --- a/src/cpl_driver.F90 +++ b/src/cpl_driver.F90 @@ -23,7 +23,7 @@ module cpl_driver ! #if defined (__oifs) - integer, parameter :: nsend = 5 + integer, parameter :: nsend = 7 integer, parameter :: nrecv = 13 #else integer, parameter :: nsend = 4 @@ -393,6 +393,8 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh) cpl_send( 3)='snt_feom' ! 3. snow thickness [m] -> cpl_send( 4)='ist_feom' ! 4. sea ice surface temperature [K] -> cpl_send( 5)='sia_feom' ! 5. sea ice albedo [%-100] -> + cpl_send( 6)='u_feom' ! 6. eastward surface velocity [m/s] -> + cpl_send( 7)='v_feom' ! 7. northward surface velocity [m/s] -> #else cpl_send( 1)='sst_feom' ! 1. sea surface temperature [°C] -> cpl_send( 2)='sit_feom' ! 2. sea ice thickness [m] -> diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 index 2eb2fb046..c66d1a9e9 100755 --- a/src/fesom_module.F90 +++ b/src/fesom_module.F90 @@ -343,7 +343,7 @@ subroutine fesom_runloop(current_nsteps) !___compute update of atmospheric forcing____________________________ if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' f%t0_frc = MPI_Wtime() - call update_atm_forcing(n, f%ice, f%tracers, f%partit, f%mesh) + call update_atm_forcing(n, f%ice, f%tracers, f%dynamics, f%partit, f%mesh) f%t1_frc = MPI_Wtime() !___compute ice step________________________________________________ if (f%ice%ice_steps_since_upd>=f%ice%ice_ave_steps-1) then diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 35561d198..866b2f5e1 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -45,17 +45,19 @@ subroutine integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, module update_atm_forcing_interface interface - subroutine update_atm_forcing(istep, ice, tracers, partit,mesh) + subroutine update_atm_forcing(istep, ice, tracers, dynamics, partit, mesh) USE MOD_TRACER USE MOD_ICE USE MOD_PARTIT USE MOD_PARSUP USE MOD_MESH + USE MOD_DYN integer, intent(in) :: istep type(t_ice), intent(inout), target :: ice type(t_tracer), intent(in), target :: tracers type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics end subroutine end interface end module @@ -72,13 +74,14 @@ subroutine net_rec_from_atm(action, partit) end module ! Routines for updating ocean surface forcing fields !------------------------------------------------------------------------- -subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) +subroutine update_atm_forcing(istep, ice, tracers, dynamics, partit, mesh) use o_PARAM use MOD_MESH USE MOD_PARTIT USE MOD_PARSUP use MOD_TRACER use MOD_ICE + use MOD_DYN use o_arrays use g_forcing_param use g_forcing_arrays @@ -102,6 +105,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) type(t_tracer), intent(in), target :: tracers type(t_partit), intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(in), target :: dynamics !_____________________________________________________________________________ integer :: i, itime,n2,n,nz,k,elem real(kind=WP) :: i_coef, aux @@ -135,6 +139,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) real(kind=WP), dimension(:), pointer :: ice_temp, ice_alb, enthalpyoffuse real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow real(kind=WP), pointer :: tmelt + real(kind=WP), dimension(:,:,:), pointer :: UVnode #endif real(kind=WP) , pointer :: rhoair #include "associate_part_def.h" @@ -155,6 +160,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) ice_alb => ice%atmcoupl%ice_alb(:) enthalpyoffuse => ice%atmcoupl%enthalpyoffuse(:) tmelt => ice%thermo%tmelt + UVnode => dynamics%uvnode(:,:,:) #endif #if defined (__oasis) || defined (__ifsinterface) oce_heat_flux => ice%atmcoupl%oce_flx_h(:) @@ -180,18 +186,26 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) if (i.eq.1) then #if defined (__oifs) || defined (__ifsinterface) ! AWI-CM3 outgoing state vectors - do n=1,myDim_nod2D+eDim_nod2D - exchange(n)=tracers%data(1)%values(1, n)+tmelt ! sea surface temperature [K] - end do + do n=1,myDim_nod2D+eDim_nod2D + exchange(n)=tracers%data(1)%values(1, n)+tmelt ! sea surface temperature [K] + end do elseif (i.eq.2) then - exchange(:) = a_ice(:) ! ice concentation [%] + exchange(:) = a_ice(:) ! ice concentation [%] elseif (i.eq.3) then - exchange(:) = m_snow(:) ! snow thickness + exchange(:) = m_snow(:) ! snow thickness elseif (i.eq.4) then - exchange(:) = ice_temp(:) ! ice surface temperature + exchange(:) = ice_temp(:) ! ice surface temperature elseif (i.eq.5) then - exchange(:) = ice_alb(:) ! ice albedo - else + exchange(:) = ice_alb(:) ! ice albedo + elseif (i.eq.6) then + do n=1,myDim_nod2D+eDim_nod2D + exchange(n) = UVnode(1,1,n) + end do + elseif (i.eq.7) then + do n=1,myDim_nod2D+eDim_nod2D + exchange(n) = UVnode(2,1,n) + end do + else print *, 'not installed yet or error in cpl_oasis3mct_send', mype #else ! AWI-CM2 outgoing state vectors diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index f85b265cb..2e197a482 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -149,14 +149,6 @@ subroutine thermodynamics(ice, partit, mesh) end if #if defined (__oifs) || defined (__ifsinterface) - !---- different lead closing parameter for NH and SH - if (geo_coord_nod2D(2, inod)>0) then - h0min = 0.5 - h0max = 1.5 - else - h0min = 1.0 - h0max = 1.5 - endif !---- For AWI-CM3 we calculate ice surface temp and albedo in fesom, ! then send those to OpenIFS where they are used to calucate the @@ -169,7 +161,7 @@ subroutine thermodynamics(ice, partit, mesh) ! Freezing temp of saltwater in K ice_temp(inod) = -0.0575_WP*S_oc_array(inod) + 1.7105e-3_WP*sqrt(S_oc_array(inod)**3) -2.155e-4_WP*(S_oc_array(inod)**2)+273.15_WP endif - call ice_albedo(ice%thermo, h, hsn, t, alb, geolat) + call ice_albedo(ice%thermo, h, hsn, t, alb) ice_alb(inod) = alb #endif call ice_growth @@ -520,12 +512,11 @@ subroutine ice_surftemp(ithermp, h,hsn,a2ihf,t) t=min(273.15_WP,t) end subroutine ice_surftemp - subroutine ice_albedo(ithermp, h, hsn, t, alb, geolat) + subroutine ice_albedo(ithermp, h, hsn, t, alb) ! INPUT: ! h - ice thickness [m] ! hsn - snow thickness [m] ! t - temperature of snow/ice surface [C] - ! geolat - lattitude ! ! OUTPUT: ! alb - selected broadband albedo @@ -546,25 +537,18 @@ subroutine ice_albedo(ithermp, h, hsn, t, alb, geolat) ! set albedo ! ice and snow, freezing and melting conditions are distinguished - if (geolat.lt.0.) then !SH does not have melt ponds - melt_pool_alb_reduction = 0.0_WP - nh_winter_reduction = 0.0_WP - else - melt_pool_alb_reduction = 0.20_WP - nh_winter_reduction = 0.06_WP - endif if (h>0.0_WP) then if (t<273.15_WP) then ! freezing condition if (hsn.gt.0.001_WP) then ! snow cover present - alb=albsn-nh_winter_reduction + alb=albsn else ! no snow cover - alb=albi-nh_winter_reduction + alb=albi endif else ! melting condition if (hsn.gt.0.001_WP) then ! snow cover present - alb=albsnm-melt_pool_alb_reduction + alb=albsnm else ! no snow cover - alb=albim-melt_pool_alb_reduction + alb=albim endif endif else From 6444a17c767810b0900e345367674ce4825b3b5d Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Wed, 30 Mar 2022 08:53:16 +0200 Subject: [PATCH 867/909] chaneging tracer diffusion smoothing settings --- config/namelist.tra | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/namelist.tra b/config/namelist.tra index 66eb9eb11..7031735c4 100644 --- a/config/namelist.tra +++ b/config/namelist.tra @@ -4,8 +4,8 @@ num_tracers=100 !number of tracers to allocate. shallbe large or equal to the nu &tracer_list nml_tracer_list = -1 , 'MFCT', 'QR4C', 'FCT ', 1., 1., -2 , 'MFCT', 'QR4C', 'FCT ', 1., 1., +1 , 'MFCT', 'QR4C', 'FCT ', 0., 1., +2 , 'MFCT', 'QR4C', 'FCT ', 0., 1., !101, 'UPW1', 'UPW1', 'NON ', 0., 0. / From 59244ff106c18f36cccd68fe92b14b6677c0efe3 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 1 Apr 2022 15:53:48 +0200 Subject: [PATCH 868/909] Update ifs_interface.F90 The coupling interface has changed to allow more diagnostics based on the first tasks only. diff --git a/ifs/nemo/ininemo.F90 b/ifs/nemo/ininemo.F90 index 0424029558..b16d5b25be 100644 --- a/ifs/nemo/ininemo.F90 +++ b/ifs/nemo/ininemo.F90 @@ -103,10 +103,10 @@ IF (LNEMOIFSLOG) THEN WRITE(NULOUT,*) WRITE(NULOUT,*)'INITIALIZING NEMO.' WRITE(NULOUT,*) - CALL NEMOGCMCOUP_INIT( MPL_COMM, IDATE, ITIME, ITINI, ITEND,& + CALL NEMOGCMCOUP_INIT( MYPROC-1, MPL_COMM, IDATE, ITIME, ITINI, ITEND,& & ZTSTEP_O, .FALSE., NULOUT, LOUTPUT ) ELSE - CALL NEMOGCMCOUP_INIT( MPL_COMM, IDATE, ITIME, ITINI, ITEND,& + CALL NEMOGCMCOUP_INIT( MYPROC-1, MPL_COMM, IDATE, ITIME, ITINI, ITEND,& & ZTSTEP_O, .FALSE., -1, LOUTPUT ) ENDIF #else --- src/ifs_interface/ifs_interface.F90 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 9504d5161..be4b28b99 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -9,7 +9,7 @@ MODULE nemogcmcoup_steps INTEGER :: substeps !per IFS timestep END MODULE nemogcmcoup_steps -SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & +SUBROUTINE nemogcmcoup_init( mype, icomm, inidate, initime, itini, itend, zstp, & & lwaveonly, iatmunit, lwrite ) ! Initialize the FESOM model for single executable coupling @@ -26,6 +26,7 @@ SUBROUTINE nemogcmcoup_init( icomm, inidate, initime, itini, itend, zstp, & ! Input arguments ! Message passing information + INTEGER, INTENT(IN) :: mype ! was added to ifs/nemo/ininemo.F90 to allow diagnostics based on the first tasks only INTEGER, INTENT(IN) :: icomm ! Initial date (e.g. 20170906), time, initial timestep and final time step INTEGER, INTENT(OUT) :: inidate, initime, itini, itend From 6a39c4711f109715875a7c2d47146dcc916928c2 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 11 Apr 2022 09:03:19 +0200 Subject: [PATCH 869/909] Fix illegal POINTER assignment for NVHPC Fix 'Illegal POINTER assignment - pointer target must be simply contiguous' in ice_thermo_cpl.F90: - coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D(:,:) --- src/ice_thermo_cpl.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 929ec1369..a8bd8c58c 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -73,7 +73,7 @@ subroutine thermodynamics(ice, partit, mesh) myDim_nod2d=>partit%myDim_nod2D eDim_nod2D =>partit%eDim_nod2D ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D - geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D + geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D(:,:) u_ice => ice%uice(:) v_ice => ice%vice(:) a_ice => ice%data(1)%values(:) From f98f2968c89f92cd1ccc884bb568eb0f637b83b6 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 11 Apr 2022 09:09:26 +0200 Subject: [PATCH 870/909] Fix more illegal POINTER assignments for NVHPC Fix 'Illegal POINTER assignment - pointer target must be simply contiguous' compiler errors with NVHPC in ifs_interface for GPU Version of IFS-FESOM --- src/ifs_interface/ifs_interface.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index be4b28b99..4849a366e 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -396,8 +396,8 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & eDim_elem2D => fesom%partit%eDim_elem2D eXDim_elem2D => fesom%partit%eXDim_elem2D - coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D - elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => fesom%mesh%elem2D_nodes + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D(:,:) + elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => fesom%mesh%elem2D_nodes(:,:) a_ice => fesom%ice%data(1)%values(:) m_ice => fesom%ice%data(2)%values(:) m_snow => fesom%ice%data(3)%values(:) @@ -625,7 +625,7 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & real(kind=wpIFS), dimension(:) , pointer :: oce_heat_flux, ice_heat_flux myDim_nod2D => fesom%partit%myDim_nod2D eDim_nod2D => fesom%partit%eDim_nod2D - coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D(:,:) stress_atmice_x => fesom%ice%stress_atmice_x stress_atmice_y => fesom%ice%stress_atmice_y oce_heat_flux => fesom%ice%atmcoupl%oce_flx_h(:) From 91330ce42bda7fdaf1d674a002e7366659ccc2a4 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Wed, 13 Apr 2022 14:52:30 +0200 Subject: [PATCH 871/909] fixed treatment of sw_pene in cvmix_KPP --- src/gen_modules_cvmix_kpp.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 7883158d9..0f7d7916c 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -776,7 +776,7 @@ subroutine calc_cvmix_kpp(ice, dynamics, tracers, partit, mesh) ! --> interpolate contribution that comes from shortwave penetration ! to the depth of the obldepth aux_surfbuoyflx_nl(1) = kpp_sbuoyflx(node) - if (use_sw_pene .and. kpp_use_fesomkpp .eqv. .true.) then + if (use_sw_pene .and. kpp_use_fesomkpp) then aux_nz = int(kpp_nzobldepth(node)) ! take only penetrated shortwave radiation heatflux into account ! that reached until the obldepth --> do linear interpolation @@ -791,7 +791,7 @@ subroutine calc_cvmix_kpp(ice, dynamics, tracers, partit, mesh) ! MOM6 provides different option how buoyancy flux is influenced by ! short wave penetration flux ! --> mxl comes closest to what FESOM1.4 was doing - elseif (use_sw_pene .and. kpp_use_fesomkpp .eqv. .false.) then + elseif (use_sw_pene .and. (.not. kpp_use_fesomkpp)) then if (trim(kpp_sw_method) == 'all') then aux_surfbuoyflx_nl(1) = aux_surfbuoyflx_nl(1)+aux_coeff*sw_3d(1,node) elseif (trim(kpp_sw_method) == 'mxl') then From 7ee511626617d79323b36687144bd9f570e02529 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 13 Apr 2022 23:52:47 +0200 Subject: [PATCH 872/909] skip wind stress computations when coupled to IFS moved #endif for (#ifndef __ifsfesom) case after the wind stress computation, skipping the whole part that is usually done when not using (__oasis) --- src/gen_forcing_couple.F90 | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 15ac1b446..c29c5f7f9 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -298,7 +298,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) mask=1. call force_flux_consv(runoff, mask, i, 0,action, partit, mesh) end if -#if defined (__oifs) || defined (__ifsinterface) +#if defined (__oifs) elseif (i.eq.13) then if (action) then @@ -342,7 +342,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) end if END DO !$OMP END PARALLEL DO -#endif + if (use_cavity) then !$OMP PARALLEL DO do i=1,myDim_nod2d+eDim_nod2d @@ -404,6 +404,7 @@ subroutine update_atm_forcing(istep, ice, tracers, partit, mesh) end do !$OMP END PARALLEL DO ! heat and fresh water fluxes are treated in i_therm and ice2ocean +#endif /* skip all in case of __ifsinterface */ #endif /* (__oasis) */ t2=MPI_Wtime() From 58981764c8e88a42b66529ac918f1eef84bb0849 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Tue, 19 Apr 2022 16:33:28 +0200 Subject: [PATCH 873/909] Add -fpe0 to Intel compile options for IFS-FESOM Added -fpe0 to Intel compile options for running IFS-FESOM in the RAPS environment. Without it, the model often fails with GM and Redi if those are not set to .false. (model blowup or floating point exceptions) --- src/CMakeLists.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d9ff42944..b9be2a55d 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -115,7 +115,11 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) - target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + if(${BUILD_FESOM_AS_LIBRARY}) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin -fpe0) # add -fpe0 for RAPS environment + else() + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + endif() # target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) # target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) From 3727474d2ead0c32d7fe6d3818c8ff52e8563960 Mon Sep 17 00:00:00 2001 From: Dmitri Sidorenko Date: Wed, 27 Apr 2022 14:06:59 +0200 Subject: [PATCH 874/909] a bug fix in io_meandata.F90 -> io_r2g; the rotation was not properly made --- src/io_meandata.F90 | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index b9cb8f4aa..240ca6e37 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -1293,7 +1293,11 @@ subroutine io_r2g(n, partit, mesh) !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J) DO J=1, size(entry_x%local_values_r8,dim=2) DO I=1, size(entry_x%local_values_r8,dim=1) - call vector_r2g(entry_x%local_values_r8(I,J), entry_y%local_values_r8(I,J), mesh%coord_nod2D(1, n), mesh%coord_nod2D(2, n), 0) + if (entry_x%is_elem_based) then + call vector_r2g(entry_x%local_values_r8(I,J), entry_y%local_values_r8(I,J), sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP, sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP, 0) + else + call vector_r2g(entry_x%local_values_r8(I,J), entry_y%local_values_r8(I,J), mesh%coord_nod2D(1, J), mesh%coord_nod2D(2, J), 0) + end if END DO END DO !$OMP END PARALLEL DO @@ -1305,7 +1309,11 @@ subroutine io_r2g(n, partit, mesh) DO I=1, size(entry_x%local_values_r4,dim=1) temp_x=real(entry_x%local_values_r4(I,J), real64) temp_y=real(entry_y%local_values_r4(I,J), real64) - call vector_r2g(temp_x, temp_y, mesh%coord_nod2D(1, n), mesh%coord_nod2D(2, n), 0) + if (entry_x%is_elem_based) then + call vector_r2g(temp_x, temp_y, sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP, sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP, 0) + else + call vector_r2g(temp_x, temp_y, mesh%coord_nod2D(1, J), mesh%coord_nod2D(2, J), 0) + end if entry_x%local_values_r4(I,J)=real(temp_x, real32) entry_y%local_values_r4(I,J)=real(temp_y, real32) END DO From 3efc3da34b3beff677ff454e51729084262fd789 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Wed, 27 Apr 2022 16:11:54 +0200 Subject: [PATCH 875/909] add indication if data are autorotated or not --- src/io_meandata.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 240ca6e37..79979389b 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -678,7 +678,7 @@ subroutine create_new_file(entry, ice, dynamics, partit, mesh) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'opt_visc' , NF_INT, 1, dynamics%opt_visc), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_wsplit' , NF_INT, 1, dynamics%use_wsplit), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell', NF_INT, 1, use_partial_cell), __LINE__) - + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'autorotate_back_to_geo', NF_INT, 1, vec_autorotate), __LINE__) From bd27a60c3e173a6518dd9fccb3b3c1504fc198e5 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 3 May 2022 17:00:37 +0200 Subject: [PATCH 876/909] improved efficiency for autorotation inside IO --- src/io_meandata.F90 | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 79979389b..95206a8b7 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -1265,6 +1265,7 @@ subroutine io_r2g(n, partit, mesh) integer :: I, J type(Meandata), pointer :: entry_x, entry_y real(kind=WP) :: temp_x, temp_y + real(kind=WP) :: xmean, ymean logical :: do_rotation if (n==io_NSTREAMS) RETURN @@ -1292,12 +1293,15 @@ subroutine io_r2g(n, partit, mesh) IF ((entry_x%accuracy == i_real8) .AND. (entry_y%accuracy == i_real8)) THEN !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J) DO J=1, size(entry_x%local_values_r8,dim=2) + if (entry_x%is_elem_based) then + xmean=sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP + ymean=sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP + else + xmean=mesh%coord_nod2D(1, J) + ymean=mesh%coord_nod2D(2, J) + end if DO I=1, size(entry_x%local_values_r8,dim=1) - if (entry_x%is_elem_based) then - call vector_r2g(entry_x%local_values_r8(I,J), entry_y%local_values_r8(I,J), sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP, sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP, 0) - else - call vector_r2g(entry_x%local_values_r8(I,J), entry_y%local_values_r8(I,J), mesh%coord_nod2D(1, J), mesh%coord_nod2D(2, J), 0) - end if + call vector_r2g(entry_x%local_values_r8(I,J), entry_y%local_values_r8(I,J), xmean, ymean, 0) END DO END DO !$OMP END PARALLEL DO @@ -1306,14 +1310,17 @@ subroutine io_r2g(n, partit, mesh) IF ((entry_x%accuracy == i_real4) .AND. (entry_y%accuracy == i_real4)) THEN !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J, temp_x, temp_y) DO J=1, size(entry_x%local_values_r4,dim=2) + if (entry_x%is_elem_based) then + xmean=sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP + ymean=sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP + else + xmean=mesh%coord_nod2D(1, J) + ymean=mesh%coord_nod2D(2, J) + end if DO I=1, size(entry_x%local_values_r4,dim=1) temp_x=real(entry_x%local_values_r4(I,J), real64) temp_y=real(entry_y%local_values_r4(I,J), real64) - if (entry_x%is_elem_based) then - call vector_r2g(temp_x, temp_y, sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP, sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP, 0) - else - call vector_r2g(temp_x, temp_y, mesh%coord_nod2D(1, J), mesh%coord_nod2D(2, J), 0) - end if + call vector_r2g(temp_x, temp_y, xmean, ymean, 0) entry_x%local_values_r4(I,J)=real(temp_x, real32) entry_y%local_values_r4(I,J)=real(temp_y, real32) END DO From 95aafe57ee1e035954336a7c3f77184c4340ce76 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Tue, 3 May 2022 17:02:53 +0200 Subject: [PATCH 877/909] fixed OMP in autoratote for IO --- src/io_meandata.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 95206a8b7..52bd17eed 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -1291,7 +1291,7 @@ subroutine io_r2g(n, partit, mesh) END IF IF ((entry_x%accuracy == i_real8) .AND. (entry_y%accuracy == i_real8)) THEN -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J, xmean, ymean) DO J=1, size(entry_x%local_values_r8,dim=2) if (entry_x%is_elem_based) then xmean=sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP @@ -1308,7 +1308,7 @@ subroutine io_r2g(n, partit, mesh) END IF IF ((entry_x%accuracy == i_real4) .AND. (entry_y%accuracy == i_real4)) THEN -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J, temp_x, temp_y) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J, temp_x, temp_y, xmean, ymean) DO J=1, size(entry_x%local_values_r4,dim=2) if (entry_x%is_elem_based) then xmean=sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP From c3033983f0931ef68917316a5c81ffc974a573aa Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Wed, 4 May 2022 11:22:36 +0200 Subject: [PATCH 878/909] add levante environment --- env.sh | 2 ++ env/levante.dkrz.de/shell | 12 ++++++++++++ work/job_levante | 38 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 52 insertions(+) create mode 100755 env/levante.dkrz.de/shell create mode 100755 work/job_levante diff --git a/env.sh b/env.sh index 89d4143e8..ce8c31def 100755 --- a/env.sh +++ b/env.sh @@ -27,6 +27,8 @@ fi if [[ $LOGINHOST =~ ^m[A-Za-z0-9]+\.hpc\.dkrz\.de$ ]]; then STRATEGY="mistral.dkrz.de" +elif [[ $LOGINHOST =~ ^l[A-Za-z0-9]+\.atos\.local$ ]]; then + STRATEGY="levante.dkrz.de" elif [[ $LOGINHOST =~ ^ollie[0-9]$ ]] || [[ $LOGINHOST =~ ^prod-[0-9]{4}$ ]]; then STRATEGY="ollie" elif [[ $LOGINHOST =~ ^h[A-Za-z0-9]+\.hsn\.hlrn\.de$ ]]; then diff --git a/env/levante.dkrz.de/shell b/env/levante.dkrz.de/shell new file mode 100755 index 000000000..bfa6db2a5 --- /dev/null +++ b/env/levante.dkrz.de/shell @@ -0,0 +1,12 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others +export LC_ALL=en_US.UTF-8 +export CPU_MODEL=AMD_EPYC_ZEN3 + +module load intel-oneapi-compilers/2022.0.1-gcc-11.2.0 +module load intel-oneapi-mkl/2022.0.1-gcc-11.2.0 +module load openmpi/4.1.2-intel-2021.5.0 +export FC=mpif90 CC=mpicc CXX=mpicxx ; +export LD_LIBRARY_PATH=/sw/spack-levante/intel-oneapi-mkl-2022.0.1-ttdktf/mkl/2022.0.1/lib/intel64:$LD_LIBRARY_PATH + +module load netcdf-c/4.8.1-openmpi-4.1.2-intel-2021.5.0 +module load netcdf-fortran/4.5.3-openmpi-4.1.2-intel-2021.5.0 diff --git a/work/job_levante b/work/job_levante new file mode 100755 index 000000000..3e45afcb0 --- /dev/null +++ b/work/job_levante @@ -0,0 +1,38 @@ +#!/bin/bash +#SBATCH --job-name=ref +#SBATCH -p compute +#SBATCH --ntasks-per-node=128 +#SBATCH --ntasks=512 +#SBATCH --time=08:00:00 +#SBATCH -o slurm-out.out +#SBATCH -e slurm-err.out +#SBATCH -A ab0995 + +source /sw/etc/profile.levante +source ../env/levante.dkrz.de/shell + +ulimit -s 102400 + +echo Submitted job: $jobid +squeue -u $USER + +# determine JOBID +JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1` + +ln -s ../bin/fesom.x . # cp -n ../bin/fesom.x +cp -n ../config/namelist.config . +cp -n ../config/namelist.forcing . +cp -n ../config/namelist.oce . +cp -n ../config/namelist.ice . +cp -n ../config/namelist.icepack . + +date +srun -l fesom.x > "fesom2.0.out" +date + +# qstat -f $PBS_JOBID +#export EXITSTATUS=$? +#if [ ${EXITSTATUS} -eq 0 ] || [ ${EXITSTATUS} -eq 127 ] ; then +#sbatch job_mistral +#fi + From 99095a9500a3080e02b16439a6f828c9f57b77e2 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Wed, 4 May 2022 11:59:56 +0200 Subject: [PATCH 879/909] add partitioning script --- work/job_ini_levante | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100755 work/job_ini_levante diff --git a/work/job_ini_levante b/work/job_ini_levante new file mode 100755 index 000000000..c84232fab --- /dev/null +++ b/work/job_ini_levante @@ -0,0 +1,38 @@ +#!/bin/bash +#SBATCH --job-name=ref +#SBATCH -p compute +#SBATCH --ntasks-per-node=1 +#SBATCH --ntasks=1 +#SBATCH --time=01:00:00 +#SBATCH -o slurm-out.out +#SBATCH -e slurm-err.out +#SBATCH -A ab0995 + +source /sw/etc/profile.levante +source ../env/levante.dkrz.de/shell + +ulimit -s 102400 + +echo Submitted job: $jobid +squeue -u $USER + +# determine JOBID +JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1` + +ln -s ../bin/fesom_ini.x . # cp -n ../bin/fesom.x +cp -n ../config/namelist.config . +cp -n ../config/namelist.forcing . +cp -n ../config/namelist.oce . +cp -n ../config/namelist.ice . +cp -n ../config/namelist.icepack . + +date +srun -l fesom_ini.x > "fesom_ini.out" +date + +# qstat -f $PBS_JOBID +#export EXITSTATUS=$? +#if [ ${EXITSTATUS} -eq 0 ] || [ ${EXITSTATUS} -eq 127 ] ; then +#sbatch job_mistral +#fi + From 5292f3c63d3c4c13facd1684071f3f54e78f2b90 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 4 May 2022 17:09:26 +0200 Subject: [PATCH 880/909] update TKE, add langmuire parameterisation from NEMO done by oliver gutjahr in ICON --- src/cvmix_tke.F90 | 266 +++++++++++++++++++++------------- src/gen_modules_cvmix_tke.F90 | 99 ++++++++++++- 2 files changed, 265 insertions(+), 100 deletions(-) diff --git a/src/cvmix_tke.F90 b/src/cvmix_tke.F90 index 5f6ecc72b..a0e9b8129 100644 --- a/src/cvmix_tke.F90 +++ b/src/cvmix_tke.F90 @@ -1,6 +1,6 @@ -module cvmix_tke -!! This module contains the main computations of diffusivities based on +module cvmix_tke +!! This module contains the main computations of diffusivities based on !! TKE (following Gaspar'90) with the calculation of the mixing length following (Blanke, B., P. Delecluse) !! !! @see Gaspar, P., Y. Grégoris, and J.-M. Lefevre @@ -13,6 +13,10 @@ module cvmix_tke !! @author Hannah Kleppin, MPIMET/University of Hamburg !! @author Oliver Gutjahr, MPIMET !! +!! @par Copyright +!! 2002-2013 by MPI-M +!! This software is provided for non-commercial use only. +!! See the LICENSE and the WARRANTY conditions. !! use cvmix_kinds_and_types, only : cvmix_r8, & @@ -68,6 +72,7 @@ module cvmix_tke c_eps ,& ! dissipation parameter cd ,& ! alpha_tke ,& ! + clc ,& ! factor for Langmuir turbulence mxl_min ,& ! minimum value for mixing length kappaM_min ,& ! minimum value for Kappa momentum kappaM_max ,& ! maximum value for Kappa momentum @@ -81,6 +86,7 @@ module cvmix_tke logical :: & only_tke ,& + l_lc ,& use_ubound_dirichlet ,& use_lbound_dirichlet @@ -94,7 +100,7 @@ module cvmix_tke subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, & tke_mxl_choice, use_ubound_dirichlet, use_lbound_dirichlet, & - handle_old_vals, only_tke, tke_min, tke_surf_min, & + handle_old_vals, only_tke, l_lc, clc, tke_min, tke_surf_min, & tke_userdef_constants) ! This subroutine sets user or default values for TKE parameters @@ -108,6 +114,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, KappaM_min ,& KappaM_max ,& tke_surf_min ,& + clc ,& tke_min integer, intent(in),optional :: & @@ -116,6 +123,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, logical, intent(in), optional :: & only_tke ,& + l_lc ,& use_ubound_dirichlet ,& use_lbound_dirichlet @@ -124,23 +132,23 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, ! FIXME: not sure about the allowed ranges for TKE parameters if (present(c_k)) then - if(c_k.lt. 0.05d0 .or. c_k .gt. 0.3d0) then + if(c_k.lt. 0.0d0 .or. c_k .gt. 1.5d0) then print*, "ERROR:c_k can only be allowed_range" stop 1 end if call put_tke('c_k', c_k, tke_userdef_constants) else - call put_tke('c_k',0.1_cvmix_r8 , tke_userdef_constants) + call put_tke('c_k',0.1d0 , tke_userdef_constants) end if if (present(c_eps)) then - if(c_eps.lt. 0.5d0 .or. c_eps .gt. 1.d0) then + if(c_eps.lt. 0.d0 .or. c_eps .gt. 10.d0) then print*, "ERROR:c_eps can only be allowed_range" stop 1 end if call put_tke('c_eps', c_eps, tke_userdef_constants) else - call put_tke('c_eps', 0.7_cvmix_r8, tke_userdef_constants) + call put_tke('c_eps', 0.7d0, tke_userdef_constants) end if if (present(cd)) then @@ -150,17 +158,17 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('cd', cd, tke_userdef_constants) else - call put_tke('cd', 3.75_cvmix_r8, tke_userdef_constants) + call put_tke('cd', 3.75d0, tke_userdef_constants) end if if (present(alpha_tke)) then - if(alpha_tke.lt. 1.d0 .or. alpha_tke .gt. 30.d0) then + if(alpha_tke.lt. 1.d0 .or. alpha_tke .gt. 90.d0) then print*, "ERROR:alpha_tke can only be allowed_range" stop 1 end if call put_tke('alpha_tke', alpha_tke, tke_userdef_constants) else - call put_tke('alpha_tke', 30._cvmix_r8, tke_userdef_constants) + call put_tke('alpha_tke', 30.d0, tke_userdef_constants) end if if (present(mxl_min)) then @@ -170,7 +178,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('mxl_min', mxl_min, tke_userdef_constants) else - call put_tke('mxl_min', 1._cvmix_r8-8, tke_userdef_constants) + call put_tke('mxl_min', 1.d-8, tke_userdef_constants) end if if (present(KappaM_min)) then @@ -180,17 +188,17 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('kappaM_min', KappaM_min, tke_userdef_constants) else - call put_tke('kappaM_min', 0._cvmix_r8, tke_userdef_constants) + call put_tke('kappaM_min', 0.d0, tke_userdef_constants) end if if (present(KappaM_max)) then - if(KappaM_max.lt. 1.d0 .or. KappaM_max .gt. 100.d0) then + if(KappaM_max.lt. 10.d0 .or. KappaM_max .gt. 1000.d0) then print*, "ERROR:kappaM_max can only be allowed_range" stop 1 end if call put_tke('kappaM_max', KappaM_max, tke_userdef_constants) else - call put_tke('kappaM_max', 100._cvmix_r8, tke_userdef_constants) + call put_tke('kappaM_max', 100.d0, tke_userdef_constants) end if if (present(tke_mxl_choice)) then @@ -213,24 +221,35 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, call put_tke('handle_old_vals', 1, tke_userdef_constants) end if +if (present(clc)) then + if(clc.lt. 0.0 .or. clc .gt. 30.0) then + print*, "ERROR:clc can only be allowed_range" + stop 1 + end if + call put_tke('clc', clc, tke_userdef_constants) +else + call put_tke('clc',0.15d0 , tke_userdef_constants) +end if + + if (present(tke_min)) then - if(tke_min.lt. 1.d-7 .or. tke_min.gt. 1.d-4 ) then - print*, "ERROR:tke_min can only be 10^-7 to 10^-4" + if(tke_min.lt. 1.d-9 .or. tke_min.gt. 1.d-2 ) then + print*, "ERROR:tke_min can only be allowed_range" stop 1 end if call put_tke('tke_min', tke_min, tke_userdef_constants) else - call put_tke('tke_min', 1._cvmix_r8-6, tke_userdef_constants) + call put_tke('tke_min', 1.d-6, tke_userdef_constants) end if if (present(tke_surf_min)) then if(tke_surf_min.lt. 1.d-7 .or. tke_surf_min.gt. 1.d-2 ) then - print*, "ERROR:tke_surf_min can only be 10^-7 to 10^-4" + print*, "ERROR:tke_surf_min can only be allowed_range" stop 1 end if call put_tke('tke_surf_min', tke_surf_min, tke_userdef_constants) else - call put_tke('tke_surf_min', 1._cvmix_r8-4, tke_userdef_constants) + call put_tke('tke_surf_min', 1.d-4, tke_userdef_constants) end if if (present(use_ubound_dirichlet)) then @@ -252,6 +271,13 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, call put_tke('only_tke', .true., tke_userdef_constants) end if +if (present(l_lc)) then + + call put_tke('l_lc', l_lc, tke_userdef_constants) +else + call put_tke('l_lc', .false., tke_userdef_constants) +end if + end subroutine init_tke !================================================================================= @@ -285,6 +311,7 @@ subroutine tke_wrap(Vmix_vars, Vmix_params, tke_userdef_constants) !tke ,& tke_Lmix ,& tke_Pr ,& + tke_plc ,& !by_Oliver new_KappaM ,& ! new_KappaH ,& ! new_tke ,& ! @@ -351,6 +378,7 @@ subroutine tke_wrap(Vmix_vars, Vmix_params, tke_userdef_constants) tke_Ttot = tke_Ttot, & tke_Lmix = tke_Lmix, & tke_Pr = tke_Pr, & + tke_plc = tke_plc, & !by_Oliver ! debugging cvmix_int_1 = cvmix_int_1, & cvmix_int_2 = cvmix_int_2, & @@ -414,6 +442,7 @@ subroutine integrate_tke( & !tke, & tke_Lmix, & ! diagnostic tke_Pr, & ! diagnostic + tke_plc, & ! langmuir turbulence forc_tke_surf, & E_iw, & dtime, & @@ -427,6 +456,7 @@ subroutine integrate_tke( & grav, & ! FIXME: today: put to initialize alpha_c, & ! FIXME: today: put to initialize tke_userdef_constants) +!subroutine integrate_tke(jc, blockNo, tstep_count) type(tke_type), intent(in), optional, target :: & tke_userdef_constants @@ -450,9 +480,12 @@ subroutine integrate_tke( & real(cvmix_r8), dimension(nlev), intent(in) :: & dzw ! + + ! Langmuir turbulence + real(cvmix_r8), dimension(nlev+1), intent(in), optional :: & + tke_plc ! IDEMIX variables, if run coupled iw_diss is added as forcing to TKE -!!PS real(cvmix_r8), dimension(max_nlev), intent(in), optional :: & real(cvmix_r8), dimension(nlev+1), intent(in), optional :: & E_iw ,& ! alpha_c ,& ! @@ -469,7 +502,7 @@ subroutine integrate_tke( & real(cvmix_r8), intent(in) :: & forc_tke_surf - !real(cvmix_r8),dimension(nlev+1), intent(in), optional :: & + !real(cvmix_r8),dimension(max_nlev+1), intent(in), optional :: & ! Kappa_GM ! ! NEW values @@ -480,7 +513,7 @@ subroutine integrate_tke( & KappaH_out ! diagnostics - real(cvmix_r8), dimension(nlev+1) :: & + real(cvmix_r8), dimension(nlev+1), intent(out) :: & tke_Tbpr ,& tke_Tspr ,& tke_Tdif ,& @@ -492,6 +525,7 @@ subroutine integrate_tke( & !tke ,& tke_Lmix ,& tke_Pr !,& + real(cvmix_r8), dimension(nlev+1), intent(out) :: & cvmix_int_1 ,& cvmix_int_2 ,& @@ -520,11 +554,12 @@ subroutine integrate_tke( & KappaM_max ,& ! mxl_min ,& ! {1e-8} c_k ,& ! {0.1} + clc ,& ! {0.15} tke_surf_min ,& ! {1e-4} tke_min ! {1e-6} integer :: tke_mxl_choice - logical :: only_tke, use_ubound_dirichlet, use_lbound_dirichlet + logical :: only_tke, use_ubound_dirichlet, use_lbound_dirichlet,l_lc real(cvmix_r8) :: & zzw ,& ! depth of interface k @@ -557,7 +592,7 @@ subroutine integrate_tke( & tke_constants_in => tke_userdef_constants end if - ! FIXME: nils: What should we do with height of last grid box dzt(nlev+1)? + ! FIXME: nils: What should we do with height of last grid box dzt(max_nlev+1)? ! This should not be as thick as the distance to the next tracer ! point (which is a dry point). ! Be careful if you divide by 0.5 here. Maybe later we use ddpo @@ -572,6 +607,19 @@ subroutine integrate_tke( & tke_Tiwf = 0.0 tke_Tbck = 0.0 tke_Ttot = 0.0 + cvmix_int_1 = 0.0 + cvmix_int_2 = 0.0 + cvmix_int_3 = 0.0 + + tke_new = 0.0 + tke_upd = 0.0 + + a_dif = 0.0 + b_dif = 0.0 + c_dif = 0.0 + a_tri = 0.0 + b_tri = 0.0 + c_tri = 0.0 !--------------------------------------------------------------------------------- ! set tke_constants locally @@ -587,9 +635,25 @@ subroutine integrate_tke( & tke_surf_min = tke_constants_in%tke_surf_min tke_mxl_choice = tke_constants_in%tke_mxl_choice only_tke = tke_constants_in%only_tke + l_lc = tke_constants_in%l_lc + clc = tke_constants_in%clc use_ubound_dirichlet = tke_constants_in%use_ubound_dirichlet use_lbound_dirichlet = tke_constants_in%use_lbound_dirichlet + !c_k = 0.1 + !c_eps = 0.7 + !alpha_tke = 30.0 + !mxl_min = 1.d-8 + !kappaM_min = 0.0 + !kappaM_max = 100.0 + !cd = 3.75 + !tke_min = 1.d-6 + !tke_mxl_choice = 2 + !tke_surf_min = 1.d-4 + !only_tke = .true. + !use_ubound_dirichlet = .false. + !use_lbound_dirichlet = .false. + ! FIXME: nils: Is kappaM_min ever used? ! FIXME: use kappaM_min from namelist ! FIXME: where is kappaM_min used? @@ -637,8 +701,6 @@ subroutine integrate_tke( & !--------------------------------------------------------------------------------- ! see. Blanke and Delecluse 1993, eq. 2.25 KappaM_out = min(KappaM_max,c_k*mxl*sqrttke) - - ! Richardson number --> see. Blanke and Delecluse 1993, eq. 2.18 Rinum = Nsqr/max(Ssqr,1d-12) ! FIXME: nils: Check this later if IDEMIX is coupled. @@ -670,6 +732,11 @@ subroutine integrate_tke( & P_diss_v(1) = -forc_rho_surf*grav/rho_ref forc = forc + K_diss_v - P_diss_v + ! --- additional langmuir turbulence term + if (l_lc) then + forc = forc + tke_plc + endif + ! --- forcing by internal wave dissipation if (.not.only_tke) then forc = forc + iw_diss @@ -706,7 +773,7 @@ subroutine integrate_tke( & a_dif(1) = 0.d0 ! not part of the diffusion matrix, thus value is arbitrary ! copy tke_old - tke_upd = tke_old + tke_upd(1:nlev+1) = tke_old(1:nlev+1) ! upper boundary condition if (use_ubound_dirichlet) then @@ -797,7 +864,7 @@ subroutine integrate_tke( & ! restrict values of TKE to tke_min, if IDEMIX is not used if (only_tke) then - tke_new = MAX(tke_new, tke_min) + tke_new(1:nlev+1) = MAX(tke_new(1:nlev+1), tke_min) end if !--------------------------------------------------------------------------------- @@ -805,8 +872,8 @@ subroutine integrate_tke( & !--------------------------------------------------------------------------------- ! tke_Ttot = tke_Tbpr + tke_Tspr + tke_Tdif + tke_Tdis ! + tke_Twin + tke_Tiwf - tke_Tbpr = -P_diss_v - tke_Tspr = K_diss_v + tke_Tbpr(1:nlev+1) = -P_diss_v(1:nlev+1) + tke_Tspr(1:nlev+1) = K_diss_v(1:nlev+1) !tke_Tdif is set above !tke_Tdis = -tke_diss_out tke_Tbck = (tke_new-tke_unrest)/dtime @@ -827,92 +894,93 @@ subroutine integrate_tke( & tke_Twin(nlev+1) = 0.0 endif - tke_Tiwf = iw_diss + tke_Tiwf(1:nlev+1) = iw_diss(1:nlev+1) tke_Ttot = (tke_new-tke_old)/dtime !tke = tke_new - tke_Lmix = mxl - tke_Pr = prandtl + tke_Lmix(nlev+1:) = 0.0 + tke_Lmix(1:nlev+1) = mxl(1:nlev+1) + tke_Pr(nlev+1:) = 0.0 + tke_Pr(1:nlev+1) = prandtl(1:nlev+1) ! ----------------------------------------------- ! the rest is for debugging ! ----------------------------------------------- - cvmix_int_1 = KappaM_out - cvmix_int_2 = 0.0 - cvmix_int_2(1) = tke_surf + cvmix_int_1 = KappaH_out + cvmix_int_2 = KappaM_out cvmix_int_3 = Nsqr !cvmix_int_1 = forc !cvmix_int_2 = Nsqr !cvmix_int_3 = Ssqr if (.false.) then - ! write(*,*) 'i = ', i, 'j = ', j, 'tstep_count = ', tstep_count - if (i==45 .and. j==10) then + write(*,*) 'i = ', i, 'j = ', j, 'tstep_count = ', tstep_count + if (i==8 .and. j==10) then !if (i==45 .and. j==10 .and. tstep_count==10) then ! ----------------------------------------------- write(*,*) '================================================================================' write(*,*) 'i = ', i, 'j = ', j, 'tstep_count = ', tstep_count - write(*,*) 'nlev = ', nlev - write(*,*) 'dtime = ', dtime - write(*,*) 'dzt = ', dzt - write(*,*) 'dzw = ', dzw - write(*,*) 'Nsqr = ', Nsqr - write(*,*) 'Ssqr = ', Ssqr - !write(*,*) 'tho = ', tho(i,j,1:nlev) - !write(*,*) 'sao = ', sao(i,j,1:nlev) - !write(*,*) 'bottom_fric = ', bottom_fric - !write(*,*) 'forc_tke_surf = ', forc_tke_surf +!!! write(*,*) 'nlev = ', nlev +!!! write(*,*) 'dtime = ', dtime +!!! write(*,*) 'dzt = ', dzt +!!! write(*,*) 'dzw = ', dzw +!!! write(*,*) 'Nsqr = ', Nsqr +!!! write(*,*) 'Ssqr = ', Ssqr +!!! !write(*,*) 'tho = ', tho(i,j,1:nlev) +!!! !write(*,*) 'sao = ', sao(i,j,1:nlev) +!!! !write(*,*) 'bottom_fric = ', bottom_fric +!!! !write(*,*) 'forc_tke_surf = ', forc_tke_surf write(*,*) 'sqrttke = ', sqrttke - write(*,*) 'mxl = ', mxl - write(*,*) 'KappaM_out = ', KappaM_out - write(*,*) 'KappaH_out = ', KappaH_out - write(*,*) 'forc = ', forc - !write(*,*) 'Rinum = ', Rinum - write(*,*) 'prandtl = ', prandtl - !write(*,*) 'checkpoint d_tri' - !write(*,*) 'K_diss_v = ', K_diss_v - !write(*,*) 'P_diss_v = ', P_diss_v - !write(*,*) 'delta = ', delta - write(*,*) 'ke = ', ke - write(*,*) 'a_tri = ', a_tri - write(*,*) 'b_tri = ', b_tri - write(*,*) 'c_tri = ', c_tri - write(*,*) 'd_tri = ', d_tri - !write(*,*) 'tke_old = ', tke_old - write(*,*) 'tke_new = ', tke_new - write(*,*) 'tke_Tbpr = ', tke_Tbpr - write(*,*) 'tke_Tspr = ', tke_Tspr - write(*,*) 'tke_Tdif = ', tke_Tdif - write(*,*) 'tke_Tdis = ', tke_Tdis - write(*,*) 'tke_Twin = ', tke_Twin - write(*,*) 'tke_Tiwf = ', tke_Tiwf - write(*,*) 'tke_Ttot = ', tke_Ttot - write(*,*) 'tke_Ttot - tke_Tsum = ', & - tke_Ttot-(tke_Tbpr+tke_Tspr+tke_Tdif+tke_Tdis+tke_Twin+tke_Tiwf) - !write(*,*) 'dzw = ', dzw - !write(*,*) 'dzt = ', dzt - ! FIXME: partial bottom cells!! - ! namelist parameters - write(*,*) 'c_k = ', c_k - write(*,*) 'c_eps = ', c_eps - write(*,*) 'alpha_tke = ', alpha_tke - write(*,*) 'mxl_min = ', mxl_min - write(*,*) 'kappaM_min = ', kappaM_min - write(*,*) 'kappaM_max = ', kappaM_max - ! FIXME: Make tke_mxl_choice available! - !write(*,*) 'tke_mxl_choice = ', tke_mxl_choice - !write(*,*) 'cd = ', cd - write(*,*) 'tke_min = ', tke_min - write(*,*) 'tke_surf_min = ', tke_surf_min - write(*,*) 'only_tke = ', only_tke - write(*,*) 'use_ubound_dirichlet = ', use_ubound_dirichlet - write(*,*) 'use_lbound_dirichlet = ', use_lbound_dirichlet - !write(*,*) 'tke(nlev) = ', tke(nlev), 'tke(nlev+1) = ', tke(nlev+1) - !write(*,*) 'tke(nlev+2) = ', tke(nlev+2) +!!! write(*,*) 'mxl = ', mxl +!!! write(*,*) 'KappaM_out = ', KappaM_out +!!! write(*,*) 'KappaH_out = ', KappaH_out +!!! write(*,*) 'forc = ', forc +!!! !write(*,*) 'Rinum = ', Rinum +!!! write(*,*) 'prandtl = ', prandtl +!!! !write(*,*) 'checkpoint d_tri' +!!! !write(*,*) 'K_diss_v = ', K_diss_v +!!! !write(*,*) 'P_diss_v = ', P_diss_v +!!! !write(*,*) 'delta = ', delta +!!! write(*,*) 'ke = ', ke +!!! write(*,*) 'a_tri = ', a_tri +!!! write(*,*) 'b_tri = ', b_tri +!!! write(*,*) 'c_tri = ', c_tri +!!! write(*,*) 'd_tri = ', d_tri +!!! !write(*,*) 'tke_old = ', tke_old +!!! write(*,*) 'tke_new = ', tke_new +!!! write(*,*) 'tke_Tbpr = ', tke_Tbpr +!!! write(*,*) 'tke_Tspr = ', tke_Tspr +!!! write(*,*) 'tke_Tdif = ', tke_Tdif +!!! write(*,*) 'tke_Tdis = ', tke_Tdis +!!! write(*,*) 'tke_Twin = ', tke_Twin +!!! write(*,*) 'tke_Tiwf = ', tke_Tiwf +!!! write(*,*) 'tke_Ttot = ', tke_Ttot +!!! write(*,*) 'tke_Ttot - tke_Tsum = ', & +!!! tke_Ttot-(tke_Tbpr+tke_Tspr+tke_Tdif+tke_Tdis+tke_Twin+tke_Tiwf) +!!! !write(*,*) 'dzw = ', dzw +!!! !write(*,*) 'dzt = ', dzt +!!! ! FIXME: partial bottom cells!! +!!! ! namelist parameters +!!! write(*,*) 'c_k = ', c_k +!!! write(*,*) 'c_eps = ', c_eps +!!! write(*,*) 'alpha_tke = ', alpha_tke +!!! write(*,*) 'mxl_min = ', mxl_min +!!! write(*,*) 'kappaM_min = ', kappaM_min +!!! write(*,*) 'kappaM_max = ', kappaM_max +!!! ! FIXME: Make tke_mxl_choice available! +!!! !write(*,*) 'tke_mxl_choice = ', tke_mxl_choice +!!! !write(*,*) 'cd = ', cd +!!! write(*,*) 'tke_min = ', tke_min +!!! write(*,*) 'tke_surf_min = ', tke_surf_min +!!! write(*,*) 'only_tke = ', only_tke +!!! write(*,*) 'use_ubound_dirichlet = ', use_ubound_dirichlet +!!! write(*,*) 'use_lbound_dirichlet = ', use_lbound_dirichlet +!!! !write(*,*) 'tke(nlev) = ', tke(nlev), 'tke(nlev+1) = ', tke(nlev+1) +!!! !write(*,*) 'tke(nlev+2) = ', tke(nlev+2) write(*,*) '================================================================================' !end if !if (i==45 .and. j==10 .and. tstep_count==10) then - !stop +! stop end if ! if (i==, j==, tstep==) end if ! if (.true./.false.) end subroutine integrate_tke @@ -966,7 +1034,9 @@ subroutine cvmix_tke_put_tke_logical(varname,val,tke_userdef_constants) case('use_ubound_dirichlet') tke_constants_out%use_ubound_dirichlet=val case('use_lbound_dirichlet') - tke_constants_out%use_lbound_dirichlet=val + tke_constants_out%use_lbound_dirichlet=val + case('l_lc') + tke_constants_out%l_lc=val case DEFAULT print*, "ERROR:", trim(varname), " not a valid choice" stop 1 @@ -1008,7 +1078,9 @@ subroutine cvmix_tke_put_tke_real(varname,val,tke_userdef_constants) case('kappaM_max') tke_constants_out%kappaM_max = val case('tke_min') - tke_constants_out%tke_min = val + tke_constants_out%tke_min = val + case('clc') + tke_constants_out%clc = val case('tke_surf_min') tke_constants_out%tke_surf_min = val case DEFAULT diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index 53a1fdcda..4d64768aa 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -54,6 +54,9 @@ module g_cvmix_tke logical :: use_ubound_dirichlet = .false. logical :: use_lbound_dirichlet = .false. + logical :: tke_dolangmuir = .false. + real(kind=WP) :: tke_clangmuir = 0.3 + ! apply time relaxation to avo/dvo ! FIXME: nils: Do we need that logical :: timerelax_tke = .false. @@ -64,7 +67,7 @@ module g_cvmix_tke namelist /param_tke/ tke_c_k, tke_c_eps, tke_alpha, tke_mxl_min, tke_kappaM_min, tke_kappaM_max, & tke_cd, tke_surf_min, tke_min, tke_mxl_choice, & use_ubound_dirichlet, use_lbound_dirichlet, & - timerelax_tke, relne, relax + timerelax_tke, relne, relax, tke_dolangmuir, tke_clangmuir !___________________________________________________________________________ ! CVMIX-TKE 3D variables @@ -110,7 +113,13 @@ module g_cvmix_tke ! nils integer :: tstep_count - + + !___________________________________________________________________________ + ! Langmuir parameterisation + real(kind=WP), allocatable, dimension(:,:) :: tke_langmuir, langmuir_wlc + real(kind=WP), allocatable, dimension(:) :: langmuir_hlc, langmuir_ustoke + + contains ! ! @@ -227,9 +236,23 @@ subroutine init_cvmix_tke(partit, mesh) write(*,*) " tke_kappaM_min = ", tke_kappaM_min write(*,*) " tke_kappaM_max = ", tke_kappaM_max write(*,*) " tke_mxl_choice = ", tke_mxl_choice + write(*,*) " tke_dolangmuir = ", tke_dolangmuir write(*,*) end if + !_______________________________________________________________________ + !langmuir parameterisation + if (tke_dolangmuir) then + allocate(tke_langmuir(nl,node_size)) + allocate(langmuir_wlc(nl,node_size)) + allocate(langmuir_hlc(node_size)) + allocate(langmuir_ustoke(node_size)) + tke_langmuir = 0.0_WP + langmuir_wlc = 0.0_WP + langmuir_hlc = 0.0_WP + langmuir_ustoke = 0.0_WP + end if + !_______________________________________________________________________ ! call tke initialisation routine from cvmix library call init_tke(c_k = tke_c_k, & @@ -243,6 +266,8 @@ subroutine init_cvmix_tke(partit, mesh) use_ubound_dirichlet = use_ubound_dirichlet, & use_lbound_dirichlet = use_lbound_dirichlet, & only_tke = tke_only, & + l_lc = tke_dolangmuir, & + clc = tke_clangmuir, & tke_min = tke_min, & tke_surf_min = tke_surf_min ) end subroutine init_cvmix_tke @@ -257,7 +282,7 @@ subroutine calc_cvmix_tke(dynamics, partit, mesh) type(t_partit), intent(inout), target :: partit type(t_dyn), intent(inout), target :: dynamics integer :: node, elem, nelem, nz, nln, nun, elnodes(3), node_size - real(kind=WP) :: tvol + real(kind=WP) :: tvol, aux real(kind=WP) :: dz_trr(mesh%nl), bvfreq2(mesh%nl), vshear2(mesh%nl) real(kind=WP) :: tke_Av_old(mesh%nl), tke_Kv_old(mesh%nl), tke_old(mesh%nl) real(kind=WP), dimension(:,:,:), pointer :: UVnode @@ -329,6 +354,73 @@ subroutine calc_cvmix_tke(dynamics, partit, mesh) dz_trr(nun) = hnode(nun,node)/2.0_WP dz_trr(nln+1) = hnode(nln,node)/2.0_WP + !___________________________________________________________________ + ! calculate Langmuir cell additional term after Axell (2002) + ! --> adapted from ICON and Oliver Gutjahr + if (tke_dolangmuir) then + !_______________________________________________________________ + ! calculate Stoke's drift + ! Approximation if there is no information about the wave field + ! As done in Nemo + ! FIXME: do we need to divide tau by rho? + + ! Option used in NEMO model (https://www.nemo-ocean.eu/wp-content/ + ! uploads/NEMO_book.pdf, p.197) see also Breivik et al. (2015) + ! They assume rhoair=1.2 kg/m3 and cd=1.5e-03: + ! u_stokes = 0.016/(1.2 * 1.5e-03)^0.5 * |tau|^0.5; although they + ! seem to use rhoair=1.2 kg/m3 + ! langmuir_ustoke(node) = 0.377_wp * SQRT(tau_abs) ! [tau]=N2/m2 + ! langmuir_ustoke(node) = 0.016_wp/SQRT(1.2_wp * 1.5e-03_wp)*SQRT(tau_abs) + ! [tau]=N2/m2, rhoair=1.2, cd=1.5*10e-03 + langmuir_ustoke(node) = 0.016_WP/sqrt(1.2_WP * 1.5e-03_WP)*sqrt(tke_forc2d_normstress(node)*density_0) + + ! --> This is done in Coulevard et al (2020, doi:10.5194/gmd-13-3067-2020), see Fig.2 + ! langmuir_ustoke(node) = 0.377_wp * SQRT(forc_tke_surf_2D(jc,blockNo)) + ! --> other option from Li and Garrett (1993) + ! langmuir_ustoke(node) = 0.016_wp * fu10(jc,blockNo) + ! --> or original version from Axell (2002) + ! LLC = 0.12_wp*(u10**2/g) + ! langmuir_ustoke(node) = 0.0016*u10*EXP(depth/LLC) + + !_______________________________________________________________ + ! find depth of langmuir cell (hlc). hlc is the depth to which a water + ! parcel with kinetic energy 0.5*u_stokes**2 can reach on its own by + ! converting its kinetic energy to potential energy. + langmuir_hlc(node) = 0.0_wp + do nz=nun+1,nln + !!PS k_hlc = nz + aux = sum( bvfreq2(2:nz+1)*zbar_3d_n(2:nz+1,node) ) + if(aux > 0.5_wp*langmuir_ustoke(node)**2.0_wp) then + !!PS k_hlc = nz + langmuir_hlc(node) = zbar_3d_n(nz,node) + exit + end if + end do + + !_______________________________________________________________ + ! calculate langmuir cell velocity scale (wlc) + ! Note: Couvelard et al (2020) set clc=0.3 instead of default 0.15 from + ! Axell (2002); results in deeper MLDs and better spatial MLD pattern. + langmuir_wlc(:,node) = 0.0_wp + do nz=nun+1,nln + if(zbar_3d_n(nz,node) <= langmuir_hlc(node)) then + langmuir_wlc(nz,node) = tke_clangmuir * langmuir_ustoke(node) * & + sin(pi*zbar_3d_n(nz,node)/langmuir_hlc(node)) + !!PS else + !!PS langmuir_wlc(nz,node) = 0.0_wp + endif + end do + + !_______________________________________________________________ + ! calculate langmuir turbulence term (tke_plc) + if (langmuir_hlc(node) > 0.0_wp) then + tke_langmuir(:,node) = langmuir_wlc(:,node)**3.0_wp / langmuir_hlc(node) + else + tke_langmuir(:,node) = 0.0_wp + end if + + end if + !___________________________________________________________________ ! main cvmix call to calculate tke tke_Av_old = tke_Av(:,node) @@ -362,6 +454,7 @@ subroutine calc_cvmix_tke(dynamics, partit, mesh) bottom_fric = tke_forc2d_botfrict( node), & ! in iw_diss = tke_in3d_iwdis(nun:nln+1,node), & ! in ! diagnostics + tke_plc = tke_langmuir(nun:nln+1,node), & ! in tke_Tbpr = tke_Tbpr(nun:nln+1,node), & ! buoyancy production tke_Tspr = tke_Tspr(nun:nln+1,node), & ! shear production tke_Tdif = tke_Tdif(nun:nln+1,node), & ! vertical diffusion d/dz(k d/dz)TKE From 5b6badb0a2b20ad9a2a3e17120f498c31a923c38 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 4 May 2022 17:10:29 +0200 Subject: [PATCH 881/909] update IDEMIX, add new default parameters from F. Pohlmann 2017 --- src/cvmix_idemix.F90 | 29 ++++-- src/gen_modules_cvmix_idemix.F90 | 149 +++++++++++++++++-------------- 2 files changed, 103 insertions(+), 75 deletions(-) diff --git a/src/cvmix_idemix.F90 b/src/cvmix_idemix.F90 index 88396c143..b8a215a92 100644 --- a/src/cvmix_idemix.F90 +++ b/src/cvmix_idemix.F90 @@ -476,12 +476,29 @@ subroutine integrate_idemix( & type(idemix_type), pointer ::idemix_constants_in ! initialize diagnostics - iwe_Ttot = 0.0 - iwe_Tdif = 0.0 - iwe_Tdis = 0.0 - iwe_Tsur = 0.0 - iwe_Tbot = 0.0 - + iwe_new = 0.0 + cvmix_int_1 = 0.0 + cvmix_int_2 = 0.0 + cvmix_int_3 = 0.0 + iwe_Ttot = 0.0 + iwe_Tdif = 0.0 + iwe_Tdis = 0.0 + iwe_Tsur = 0.0 + iwe_Tbot = 0.0 + c0 = 0.0 + v0 = 0.0 + alpha_c = 0.0 + a_dif = 0.0 + b_dif = 0.0 + c_dif = 0.0 + a_tri = 0.0 + b_tri = 0.0 + c_tri = 0.0 + d_tri = 0.0 + delta = 0.0 + iwe_max = 0.0 + forc = 0.0 + ! FIXME: nils: Is this necessary? idemix_constants_in => idemix_constants_saved if (present(idemix_userdef_constants)) then diff --git a/src/gen_modules_cvmix_idemix.F90 b/src/gen_modules_cvmix_idemix.F90 index 828826706..8a772f2b0 100644 --- a/src/gen_modules_cvmix_idemix.F90 +++ b/src/gen_modules_cvmix_idemix.F90 @@ -38,7 +38,8 @@ module g_cvmix_idemix !___________________________________________________________________________ ! OCECTL/CVMIX_IDEMIX_PARAM namelist parameters ! time scale for vertical symmetrisation (sec) - real(kind=WP) :: idemix_tau_v = 86400.0 + ! real(kind=WP) :: idemix_tau_v = 86400.0 ! old + real(kind=WP) :: idemix_tau_v = 172800.0 ! from Pollman et al. (2017), use 2days ! time scale for horizontal symmetrisation, only necessary for lateral diffusion (sec) real(kind=WP) :: idemix_tau_h = 1296000.0 @@ -47,25 +48,31 @@ module g_cvmix_idemix real(kind=WP) :: idemix_gamma = 1.570 ! spectral bandwidth in modes (dimensionless) - real(kind=WP) :: idemix_jstar = 10.0 + ! real(kind=WP) :: idemix_jstar = 10.0 ! old + real(kind=WP) :: idemix_jstar = 5.0 ! from Pollman et al. (2017) ! dissipation parameter (dimensionless) - real(kind=WP) :: idemix_mu0 = 1.33333333 + ! real(kind=WP) :: idemix_mu0 = 1.33333333 ! old + real(kind=WP) :: idemix_mu0 = 0.33333333 ! from Pollman et al. (2017), use 2days ! amount of surface forcing that is used real(kind=WP) :: idemix_sforcusage = 0.2 - integer :: idemix_n_hor_iwe_prop_iter = 1 + ! integer :: idemix_n_hor_iwe_prop_iter = 1 ! old + integer :: idemix_n_hor_iwe_prop_iter = 5 ! from Pollman et al. (2017) ! filelocation for idemix surface forcing character(MAX_PATH):: idemix_surforc_file = './fourier_smooth_2005_cfsr_inert_rgrid.nc' - + character(MAX_PATH):: idemix_surforc_vname= 'var706' + ! filelocation for idemix bottom forcing character(MAX_PATH):: idemix_botforc_file = './tidal_energy_gx1v6_20090205_rgrid.nc' - + character(MAX_PATH):: idemix_botforc_vname= 'wave_dissipation' + namelist /param_idemix/ idemix_tau_v, idemix_tau_h, idemix_gamma, idemix_jstar, idemix_mu0, idemix_n_hor_iwe_prop_iter, & - idemix_sforcusage, idemix_surforc_file, idemix_botforc_file + idemix_sforcusage, idemix_surforc_file, idemix_surforc_vname, & + idemix_botforc_file, idemix_botforc_vname @@ -212,8 +219,8 @@ subroutine init_cvmix_idemix(partit, mesh) write(*,*) " idemix_jstar = ", idemix_jstar write(*,*) " idemix_mu0 = ", idemix_mu0 write(*,*) " idemix_n_hor_iwe_...= ", idemix_n_hor_iwe_prop_iter - write(*,*) " idemix_surforc_file = ", idemix_surforc_file - write(*,*) " idemix_botforc_file = ", idemix_botforc_file + write(*,*) " idemix_surforc_file = ", trim(idemix_surforc_file) + write(*,*) " idemix_botforc_file = ", trim(idemix_botforc_file) write(*,*) end if @@ -224,7 +231,7 @@ subroutine init_cvmix_idemix(partit, mesh) inquire(file=trim(idemix_surforc_file),exist=file_exist) if (file_exist) then if (mype==0) write(*,*) ' --> read IDEMIX near inertial wave surface forcing' - call read_other_NetCDF(idemix_surforc_file, 'var706', 1, forc_iw_surface_2D, .true., partit, mesh) + call read_other_NetCDF(idemix_surforc_file, idemix_surforc_vname, 1, forc_iw_surface_2D, .true., partit, mesh) ! only 20% of the niw-input are available to penetrate into the deeper ocean forc_iw_surface_2D = forc_iw_surface_2D/density_0 * idemix_sforcusage @@ -247,7 +254,7 @@ subroutine init_cvmix_idemix(partit, mesh) inquire(file=trim(idemix_surforc_file),exist=file_exist) if (file_exist) then if (mype==0) write(*,*) ' --> read IDEMIX near tidal bottom forcing' - call read_other_NetCDF(idemix_botforc_file, 'wave_dissipation', 1, forc_iw_bottom_2D, .true., partit, mesh) + call read_other_NetCDF(idemix_botforc_file, idemix_botforc_vname, 1, forc_iw_bottom_2D, .true., partit, mesh) ! convert from W/m^2 to m^3/s^3 forc_iw_bottom_2D = forc_iw_bottom_2D/density_0 @@ -374,64 +381,68 @@ subroutine calc_cvmix_idemix(partit, mesh) ! to calculate edge contribution that crosses the halo call exchange_nod(iwe, partit) - !___________________________________________________________________ - ! calculate inverse volume and restrict iwe_v0 to fullfill stability - ! criterium --> CFL - ! CFL Diffusion : CFL = v0^2 * dt/dx^2, CFL < 0.5 - ! --> limit v0 to CFL=0.2 - ! --> v0 = sqrt(CFL * dx^2 / dt) - cflfac = 0.2_WP - ! |--> FROM NILS: "fac=0.2 ist geschätzt. Würde ich erstmal so - ! probieren. Der kommt aus dem stabilitätskriterium für Diffusion - ! (ähnlich berechnet wie das CFL Kriterium nur halt für den - ! Diffusions anstatt für den Advektionsterm). Normalerweise - ! sollte der Grenzwert aber nicht zu oft auftreten. Ich hatte - ! mal damit rum-experimentiert, aber letztendlich war die Lösung - ! das Iterativ zu machen und ggf. idemix_n_hor_iwe_prop_iter zu erhöhen. - ! Du kannst IDEMIX erstmal ohne den Term ausprobieren und sehen, - ! ob es läuft, dann kannst du den dazuschalten und hoffen, dass - ! es nicht explodiert. Eigentlich sollte der Term alles glatter - ! machen, aber nahe der ML kann der schon Probleme machen". - do node = 1,node_size - - ! temporarily store old iwe values for diag - iwe_Thdi(:,node) = iwe(:,node) - - ! number of above bottom levels at node - nln = nlevels_nod2D(node)-1 - uln = ulevels_nod2D(node) - - ! thickness of mid-level to mid-level interface at node - dz_trr = 0.0_WP - dz_trr(uln+1:nln) = Z_3d_n(uln:nln-1,node)-Z_3d_n(uln+1:nln,node) - dz_trr(uln) = hnode(uln,node)/2.0_WP - dz_trr(nln+1) = hnode(nln,node)/2.0_WP - - ! surface cell - vol_wcelli(uln,node) = 1/(areasvol(uln,node)*dz_trr(uln)) - aux = sqrt(cflfac*(area(uln,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) - iwe_v0(uln,node) = min(iwe_v0(uln,node),aux) - - ! bulk cells - !!PS do nz=2,nln - do nz=uln+1,nln - ! inverse volumne - vol_wcelli(nz,node) = 1/(areasvol(nz-1,node)*dz_trr(nz)) - - ! restrict iwe_v0 - aux = sqrt(cflfac*(area(nz-1,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) - ! `--------+-------------´ - ! |-> comes from mesh_resolution=sqrt(area(1, :)/pi)*2._WP - iwe_v0(nz,node) = min(iwe_v0(nz,node),aux) - end do - - ! bottom cell - vol_wcelli(nln+1,node) = 1/(areasvol(nln,node)*dz_trr(nln+1)) - aux = sqrt(cflfac*(area(nln,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) - iwe_v0(nln+1,node) = min(iwe_v0(nln+1,node),aux) - - end do !-->do node = 1,node_size - call exchange_nod(vol_wcelli, partit) +! ! !___________________________________________________________________ +! ! ! calculate inverse volume and restrict iwe_v0 to fullfill stability +! ! ! criterium --> CFL +! ! ! CFL Diffusion : CFL = v0^2 * dt/dx^2, CFL < 0.5 +! ! ! --> limit v0 to CFL=0.2 +! ! ! --> v0 = sqrt(CFL * dx^2 / dt) +! ! cflfac = 0.2_WP +! ! ! |--> FROM NILS: "fac=0.2 ist geschätzt. Würde ich erstmal so +! ! ! probieren. Der kommt aus dem stabilitätskriterium für Diffusion +! ! ! (ähnlich berechnet wie das CFL Kriterium nur halt für den +! ! ! Diffusions anstatt für den Advektionsterm). Normalerweise +! ! ! sollte der Grenzwert aber nicht zu oft auftreten. Ich hatte +! ! ! mal damit rum-experimentiert, aber letztendlich war die Lösung +! ! ! das Iterativ zu machen und ggf. idemix_n_hor_iwe_prop_iter zu erhöhen. +! ! ! Du kannst IDEMIX erstmal ohne den Term ausprobieren und sehen, +! ! ! ob es läuft, dann kannst du den dazuschalten und hoffen, dass +! ! ! es nicht explodiert. Eigentlich sollte der Term alles glatter +! ! ! machen, aber nahe der ML kann der schon Probleme machen". +! ! +! ! ! temporarily store old iwe values for diag +! ! iwe_Thdi = iwe +! ! +! ! do node = 1,node_size +! ! +! ! ! temporarily store old iwe values for diag +! ! iwe_Thdi(:,node) = iwe(:,node) +! ! +! ! ! number of above bottom levels at node +! ! nln = nlevels_nod2D(node)-1 +! ! uln = ulevels_nod2D(node) +! ! +! ! ! thickness of mid-level to mid-level interface at node +! ! dz_trr = 0.0_WP +! ! dz_trr(uln+1:nln) = Z_3d_n(uln:nln-1,node)-Z_3d_n(uln+1:nln,node) +! ! dz_trr(uln) = hnode(uln,node)/2.0_WP +! ! dz_trr(nln+1) = hnode(nln,node)/2.0_WP +! ! +! ! ! surface cell +! ! vol_wcelli(uln,node) = 1/(areasvol(uln,node)*dz_trr(uln)) +! ! aux = sqrt(cflfac*(area(uln,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) +! ! iwe_v0(uln,node) = min(iwe_v0(uln,node),aux) +! ! +! ! ! bulk cells +! ! !!PS do nz=2,nln +! ! do nz=uln+1,nln +! ! ! inverse volumne +! ! vol_wcelli(nz,node) = 1/(areasvol(nz-1,node)*dz_trr(nz)) +! ! +! ! ! restrict iwe_v0 +! ! aux = sqrt(cflfac*(area(nz-1,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) +! ! ! `--------+-------------´ +! ! ! |-> comes from mesh_resolution=sqrt(area(1, :)/pi)*2._WP +! ! iwe_v0(nz,node) = min(iwe_v0(nz,node),aux) +! ! end do +! ! +! ! ! bottom cell +! ! vol_wcelli(nln+1,node) = 1/(areasvol(nln,node)*dz_trr(nln+1)) +! ! aux = sqrt(cflfac*(area(nln,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) +! ! iwe_v0(nln+1,node) = min(iwe_v0(nln+1,node),aux) +! ! +! ! end do !-->do node = 1,node_size +! ! call exchange_nod(vol_wcelli, partit) call exchange_nod(iwe_v0, partit) !___________________________________________________________________ From 5572a39280563f05219d9e92e981adc61fce602f Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 4 May 2022 17:12:08 +0200 Subject: [PATCH 882/909] add new cvmix namelist, with switch for tke langmuir parameterisation tke_dolangmuir=False/True and the new parameters for IDEMIX from F. Pohlmann 2017 --- config/namelist.cvmix | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/config/namelist.cvmix b/config/namelist.cvmix index 00754cca1..18a90c979 100644 --- a/config/namelist.cvmix +++ b/config/namelist.cvmix @@ -13,15 +13,16 @@ tke_min = 1.0e-6 ! tke_mxl_choice ... Can only be 1 or 2, choice of calculation of mixing ! length; currently only Blanke, B., P. Delecluse option is implemented tke_mxl_choice = 2 +tke_dolangmuir = .false. / -! namelist for IDEMIX +! namelist for IDEMIX von Pollman et al. (2017) ¶m_idemix -idemix_tau_v = 86400.0 ! time scale for vertical symmetrisation (sec) -idemix_tau_h = 1296000.0 ! time scale for horizontal symmetrisation +idemix_tau_v = 172800.0 ! 2days ! time scale for vertical symmetrisation (sec) +idemix_tau_h = 1296000.0 !15days ! time scale for horizontal symmetrisation idemix_gamma = 1.570 ! constant of order one derived from the shape of the spectrum in m space (dimensionless) -idemix_jstar = 10.0 ! spectral bandwidth in modes (dimensionless) -idemix_mu0 = 1.33333333 ! dissipation parameter (dimensionless) +idemix_jstar = 5.0 ! spectral bandwidth in modes (dimensionless) +idemix_mu0 = 0.33333333 ! dissipation parameter (dimensionless) idemix_sforcusage = 0.2 idemix_n_hor_iwe_prop_iter = 5 ! iterations for contribution from horiz. wave propagation idemix_surforc_file = '/work/ollie/clidyn/forcing/IDEMIX/fourier_smooth_2005_cfsr_inert_rgrid.nc' From be019467f9d495fc9502aa7fb2fe58ac59fcc15d Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 4 May 2022 17:29:32 +0200 Subject: [PATCH 883/909] be sure tke_surf is initialised in cvmix_tke.F90 --- src/cvmix_tke.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cvmix_tke.F90 b/src/cvmix_tke.F90 index a0e9b8129..3bf25558d 100644 --- a/src/cvmix_tke.F90 +++ b/src/cvmix_tke.F90 @@ -613,6 +613,7 @@ subroutine integrate_tke( & tke_new = 0.0 tke_upd = 0.0 + tke_surf= 0.0 a_dif = 0.0 b_dif = 0.0 From 27f3b325bcb64da852e2cd4c8c25cf6a4956e661 Mon Sep 17 00:00:00 2001 From: Dmitry Sidorenko Date: Thu, 5 May 2022 12:04:11 +0200 Subject: [PATCH 884/909] Chlorophyll climatology can be used. Shall be activated through namelist.forcing (see chl_data_source, nm_chl_data_file, chl_const). Sweeney et al. 2005 data is on ollie and shall be put to the forcing folder /work/ollie/dsidoren/input/forcing/Sweeney_2005.(nc, txt) A constant chlorophyll concentration will be used per default! --- config/namelist.forcing | 7 +++++-- src/gen_surface_forcing.F90 | 39 ++++++++++++++++++++++++++++++++----- 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/config/namelist.forcing b/config/namelist.forcing index 79eb1374f..825d6fc61 100644 --- a/config/namelist.forcing +++ b/config/namelist.forcing @@ -50,11 +50,14 @@ landice_end_mon=10 nm_nc_freq = 1 ! data points per day (i.e. 86400 if the time axis is in seconds) nm_nc_tmid = 0 ! 1 if the time stamps are given at the mid points of the netcdf file, 0 otherwise (i.e. 1 in CORE1, CORE2; 0 in JRA55) l_xwind=.true. l_ywind=.true. l_humi=.true. l_qsr=.true. l_qlw=.true. l_tair=.true. l_prec=.true. l_mslp=.false. l_cloud=.false. l_snow=.true. - nm_runoff_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/CORE2_runoff.nc' runoff_data_source ='CORE2' !Dai09, CORE2 + nm_runoff_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/CORE2_runoff.nc' !nm_runoff_file ='/work/ollie/qwang/FESOM2_input/mesh/CORE2_finaltopo_mean/forcing_data_on_grid/runoff_clim.nc' !runoff_data_source ='Dai09' !Dai09, CORE2, JRA55 !runoff_climatology =.true. - nm_sss_data_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/PHC2_salx.nc' sss_data_source ='CORE2' + nm_sss_data_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/PHC2_salx.nc' + chl_data_source ='Sweeney' !'Sweeney' monthly chlorophyll climatology or 'NONE' for constant chl_const (below). Make use_sw_pene=.TRUE. in namelist.config! + nm_chl_data_file ='/work/ollie/dsidoren/input/forcing/Sweeney_2005.nc' + chl_const = 0.1 / diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index 5dc444642..43a4f1fa3 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -43,7 +43,7 @@ MODULE g_sbf USE g_rotate_grid USE g_config, only: dummy, ClimateDataPath, dt USE g_clock, only: timeold, timenew, dayold, daynew, yearold, yearnew, cyearnew - USE g_forcing_arrays, only: runoff + USE g_forcing_arrays, only: runoff, chl USE g_read_other_NetCDF, only: read_other_NetCDF, read_2ddata_on_grid_netcdf IMPLICIT NONE @@ -82,12 +82,17 @@ MODULE g_sbf logical :: l_cloud = .false. logical :: l_snow = .false. - character(10), save :: runoff_data_source='CORE2' + character(10), save :: runoff_data_source='CORE2' character(len=MAX_PATH), save :: nm_runoff_file ='runoff.nc' - character(10), save :: sss_data_source ='CORE2' + character(10), save :: sss_data_source ='CORE2' character(len=MAX_PATH), save :: nm_sss_data_file ='PHC2_salx.nc' + character(10), save :: chl_data_source ='None' ! 'Sweeney' Chlorophyll climatology Sweeney et al. 2005 + character(len=MAX_PATH), save :: nm_chl_data_file ='/work/ollie/dsidoren/input/forcing/Sweeney_2005.nc' + real(wp), save :: chl_const = 0.1 + + logical :: runoff_climatology =.false. real(wp), allocatable, save, dimension(:), public :: qns ! downward non solar heat over the ocean [W/m2] @@ -123,7 +128,7 @@ MODULE g_sbf character(len=256), save :: nm_prec_file = 'prec.dat' ! name of file with total precipitation, if netcdf file then provide only name from "nameyyyy.nc" yyyy.nc will be added by model character(len=256), save :: nm_snow_file = 'snow.dat' ! name of file with snow precipitation, if netcdf file then provide only name from "nameyyyy.nc" yyyy.nc will be added by model character(len=256), save :: nm_mslp_file = 'mslp.dat' ! name of file with mean sea level pressure, if netcdf file then provide only name from "nameyyyy.nc" yyyy.nc will be added by model - character(len=256), save :: nm_cloud_file = 'cloud.dat' ! name of file with clouds, if netcdf file then provide only name from "nameyyyy.nc" yyyy.nc will be added by model + character(len=256), save :: nm_cloud_file = 'cloud.dat' ! name of file with clouds, if netcdf file then provide only name from "nameyyyy.nc" yyyy.nc will be added by model character(len=34), save :: nm_xwind_var = 'uwnd' ! name of variable in file with wind character(len=34), save :: nm_ywind_var = 'vwnd' ! name of variable in file with wind @@ -907,7 +912,8 @@ SUBROUTINE sbc_ini(partit, mesh) nm_qsr_var, nm_qlw_var, nm_tair_var, nm_prec_var, nm_snow_var, & nm_mslp_var, nm_cloud_var, nm_cloud_file, nm_nc_iyear, nm_nc_imm, nm_nc_idd, nm_nc_freq, nm_nc_tmid, y_perpetual, & l_xwind, l_ywind, l_humi, l_qsr, l_qlw, l_tair, l_prec, l_mslp, l_cloud, l_snow, & - nm_runoff_file, runoff_data_source, runoff_climatology, nm_sss_data_file, sss_data_source + nm_runoff_file, runoff_data_source, runoff_climatology, nm_sss_data_file, sss_data_source, & + chl_data_source, nm_chl_data_file, chl_const #include "associate_part_def.h" #include "associate_mesh_def.h" @@ -1051,6 +1057,16 @@ SUBROUTINE sbc_ini(partit, mesh) runoff=runoff/1000.0_WP ! Kg/s/m2 --> m/s end if + if (use_sw_pene) then + if (chl_data_source == 'Sweeney') then + if (mype==0) write(*,*) trim(chl_data_source) //' chlorophyll climatology will be used' + if (mype==0) write(*,*) 'nm_chl_data_file=', trim(nm_chl_data_file) + else + if (mype==0) write(*,*) 'using constant chlorophyll concentration: ', chl_const + chl=chl_const + end if + end if + if (mype==0) write(*,*) "DONE: Ocean forcing inizialization." if (mype==0) write(*,*) 'Parts of forcing data (only constant in time fields) are read' END SUBROUTINE sbc_ini @@ -1138,6 +1154,19 @@ SUBROUTINE sbc_do(partit, mesh) end if end if + ! read in CHL for applying shortwave penetration + if (use_sw_pene) then + if (chl_data_source=='Sweeney') then + if (update_monthly_flag) then + i=month + if (mstep > 1) i=i+1 + if (i > 12) i=1 + if (mype==0) write(*,*) 'Updating chlorophyll climatology for month ', i + call read_other_NetCDF(nm_chl_data_file, 'chl', i, chl, .true., partit, mesh) + end if + end if + end if + ! runoff if(runoff_data_source=='Dai09' .or. runoff_data_source=='JRA55') then From bce8620b8200fe8392f33ae345bd8b87a884f6f6 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 5 May 2022 15:45:32 +0200 Subject: [PATCH 885/909] load git module to build on levante --- env/levante.dkrz.de/shell | 1 + 1 file changed, 1 insertion(+) diff --git a/env/levante.dkrz.de/shell b/env/levante.dkrz.de/shell index bfa6db2a5..60b4e3633 100755 --- a/env/levante.dkrz.de/shell +++ b/env/levante.dkrz.de/shell @@ -10,3 +10,4 @@ export LD_LIBRARY_PATH=/sw/spack-levante/intel-oneapi-mkl-2022.0.1-ttdktf/mkl/20 module load netcdf-c/4.8.1-openmpi-4.1.2-intel-2021.5.0 module load netcdf-fortran/4.5.3-openmpi-4.1.2-intel-2021.5.0 +module load git # to be able to determine the fesom git SHA when compiling From 84c35c9199f816da3de3bfd79881e840b55fb201 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 5 May 2022 16:16:48 +0200 Subject: [PATCH 886/909] export our platform strategy name when sourcing the environment --- env.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/env.sh b/env.sh index ce8c31def..52868001c 100755 --- a/env.sh +++ b/env.sh @@ -76,5 +76,6 @@ if [ $BEING_EXECUTED = true ]; then echo $DIR/env/$STRATEGY else # file is being sourced + export FESOM_PLATFORM_STRATEGY=$STRATEGY source $DIR/env/$STRATEGY/shell fi From a1b36a27ed0edc45aef1fe38d3248f005d66c322 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Thu, 5 May 2022 16:28:02 +0200 Subject: [PATCH 887/909] - create a cmake cache variable for the platform strategy - set special compile flags for intel compiler on levante for fesom and parms --- lib/parms/CMakeLists.txt | 3 +++ src/CMakeLists.txt | 13 ++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/lib/parms/CMakeLists.txt b/lib/parms/CMakeLists.txt index 733d3dfb7..3cb7b48cf 100644 --- a/lib/parms/CMakeLists.txt +++ b/lib/parms/CMakeLists.txt @@ -22,6 +22,9 @@ target_include_directories(${PROJECT_NAME} target_link_libraries(${PROJECT_NAME} INTERFACE ${BLAS_C_LIBRARIES} $ENV{UBUNTU_BLAS_LIBRARY}) if(${CMAKE_C_COMPILER_ID} STREQUAL "Intel") target_compile_options(${PROJECT_NAME} PRIVATE -no-prec-div -no-prec-sqrt -fast-transcendentals -fp-model precise) + if(${FESOM_PLATFORM_STRATEGY} STREQUAL levante.dkrz.de ) + target_compile_options(${PROJECT_NAME} PRIVATE -march=core-avx2 -mtune=core-avx2) + endif() endif() if(${BUILD_FESOM_AS_LIBRARY}) target_compile_options(${PROJECT_NAME} PRIVATE -fPIC) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d9ff42944..5c9f7fd28 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -2,6 +2,12 @@ cmake_minimum_required(VERSION 3.9) project(fesom C Fortran) +if(DEFINED ENV{FESOM_PLATFORM_STRATEGY}) + set(FESOM_PLATFORM_STRATEGY $ENV{FESOM_PLATFORM_STRATEGY} CACHE STRING "switch to platform specific compile settings, this is usually determined via the env.sh script") +else() + set(FESOM_PLATFORM_STRATEGY "notset" CACHE STRING "switch to platform specific compile settings, this is usually determined via the env.sh script") +endif() + if(DEFINED ENV{ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS}) # be able to set the initial cache value from our env settings for aleph, not only via cmake command option(ALEPH_CRAYMPICH_WORKAROUNDS "workaround for performance issues on aleph" ON) else() @@ -115,8 +121,13 @@ if(${VERBOSE}) endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) - target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -ip -init=zero -no-wrap-margin) # target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) + if(${FESOM_PLATFORM_STRATEGY} STREQUAL levante.dkrz.de ) + target_compile_options(${PROJECT_NAME} PRIVATE -march=core-avx2 -mtune=core-avx2) + else() + target_compile_options(${PROJECT_NAME} PRIVATE -xHost) + endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) # target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) target_compile_options(${PROJECT_NAME} PRIVATE -O2 -g -ffloat-store -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) From f57c2b79a94a8ff00d6addb0cccacfa41232d754 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Thu, 5 May 2022 17:32:59 +0200 Subject: [PATCH 888/909] Sweeney chlorophyll climatology is off per default Sweeney chlorophyll climatology is off per default --- config/namelist.forcing | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/namelist.forcing b/config/namelist.forcing index 825d6fc61..82f24cbf0 100644 --- a/config/namelist.forcing +++ b/config/namelist.forcing @@ -57,7 +57,7 @@ landice_end_mon=10 !runoff_climatology =.true. sss_data_source ='CORE2' nm_sss_data_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/PHC2_salx.nc' - chl_data_source ='Sweeney' !'Sweeney' monthly chlorophyll climatology or 'NONE' for constant chl_const (below). Make use_sw_pene=.TRUE. in namelist.config! + chl_data_source ='None' !'Sweeney' monthly chlorophyll climatology or 'NONE' for constant chl_const (below). Make use_sw_pene=.TRUE. in namelist.config! nm_chl_data_file ='/work/ollie/dsidoren/input/forcing/Sweeney_2005.nc' chl_const = 0.1 / From 67f47dd1eb594c7b08be3f32f8d342821571f3b6 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Thu, 5 May 2022 21:17:22 +0200 Subject: [PATCH 889/909] Change path to pool directory --- config/namelist.forcing | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/namelist.forcing b/config/namelist.forcing index 82f24cbf0..c71eebf83 100644 --- a/config/namelist.forcing +++ b/config/namelist.forcing @@ -58,6 +58,6 @@ landice_end_mon=10 sss_data_source ='CORE2' nm_sss_data_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/PHC2_salx.nc' chl_data_source ='None' !'Sweeney' monthly chlorophyll climatology or 'NONE' for constant chl_const (below). Make use_sw_pene=.TRUE. in namelist.config! - nm_chl_data_file ='/work/ollie/dsidoren/input/forcing/Sweeney_2005.nc' + nm_chl_data_file ='/work/ollie/clidyn/forcing/Sweeney/Sweeney_2005.nc' chl_const = 0.1 / From 8c38e5f7f6bcf28f9a273f80d18a8db77449edcf Mon Sep 17 00:00:00 2001 From: Patrick Scholz Date: Fri, 6 May 2022 14:08:41 +0200 Subject: [PATCH 890/909] fix bug foor gfortran, put native cvmix real format in put_tke --- src/cvmix_tke.F90 | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/src/cvmix_tke.F90 b/src/cvmix_tke.F90 index 3bf25558d..0925a64de 100644 --- a/src/cvmix_tke.F90 +++ b/src/cvmix_tke.F90 @@ -35,7 +35,6 @@ module cvmix_tke !public member functions - public :: init_tke public :: cvmix_coeffs_tke public :: put_tke @@ -138,7 +137,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('c_k', c_k, tke_userdef_constants) else - call put_tke('c_k',0.1d0 , tke_userdef_constants) + call put_tke('c_k',0.1_cvmix_r8 , tke_userdef_constants) end if if (present(c_eps)) then @@ -148,7 +147,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('c_eps', c_eps, tke_userdef_constants) else - call put_tke('c_eps', 0.7d0, tke_userdef_constants) + call put_tke('c_eps', 0.7_cvmix_r8, tke_userdef_constants) end if if (present(cd)) then @@ -158,7 +157,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('cd', cd, tke_userdef_constants) else - call put_tke('cd', 3.75d0, tke_userdef_constants) + call put_tke('cd', 3.75_cvmix_r8, tke_userdef_constants) end if if (present(alpha_tke)) then @@ -168,7 +167,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('alpha_tke', alpha_tke, tke_userdef_constants) else - call put_tke('alpha_tke', 30.d0, tke_userdef_constants) + call put_tke('alpha_tke', 30.0_cvmix_r8, tke_userdef_constants) end if if (present(mxl_min)) then @@ -178,7 +177,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('mxl_min', mxl_min, tke_userdef_constants) else - call put_tke('mxl_min', 1.d-8, tke_userdef_constants) + call put_tke('mxl_min', 1.0e-8_cvmix_r8, tke_userdef_constants) end if if (present(KappaM_min)) then @@ -188,7 +187,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('kappaM_min', KappaM_min, tke_userdef_constants) else - call put_tke('kappaM_min', 0.d0, tke_userdef_constants) + call put_tke('kappaM_min', 0.0_cvmix_r8, tke_userdef_constants) end if if (present(KappaM_max)) then @@ -198,7 +197,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('kappaM_max', KappaM_max, tke_userdef_constants) else - call put_tke('kappaM_max', 100.d0, tke_userdef_constants) + call put_tke('kappaM_max', 100.0_cvmix_r8, tke_userdef_constants) end if if (present(tke_mxl_choice)) then @@ -228,7 +227,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('clc', clc, tke_userdef_constants) else - call put_tke('clc',0.15d0 , tke_userdef_constants) + call put_tke('clc',0.15_cvmix_r8 , tke_userdef_constants) end if @@ -239,7 +238,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('tke_min', tke_min, tke_userdef_constants) else - call put_tke('tke_min', 1.d-6, tke_userdef_constants) + call put_tke('tke_min', 1.0e-6_cvmix_r8, tke_userdef_constants) end if if (present(tke_surf_min)) then @@ -249,7 +248,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('tke_surf_min', tke_surf_min, tke_userdef_constants) else - call put_tke('tke_surf_min', 1.d-4, tke_userdef_constants) + call put_tke('tke_surf_min', 1.e-4_cvmix_r8, tke_userdef_constants) end if if (present(use_ubound_dirichlet)) then From 9f850604900e22626e3587e9bcf71b14d7de3560 Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Sun, 8 May 2022 00:14:27 +0200 Subject: [PATCH 891/909] salt plume parameterization not hemispherical --- src/oce_spp.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/oce_spp.F90 b/src/oce_spp.F90 index 56c099e97..9631f1a62 100644 --- a/src/oce_spp.F90 +++ b/src/oce_spp.F90 @@ -87,7 +87,7 @@ subroutine app_rejected_salt(ttf, partit, mesh) nzmin = ulevels_nod2D(row) nzmax = nlevels_nod2D(row) if (ttf(nzmin,row) < 10.0_WP) cycle - if (geo_coord_nod2D(2,row)>0.0_WP) then !NH + !if (geo_coord_nod2D(2,row)>0.0_WP) then !NH kml=1 !!PS spar(1)=0.0_WP spar(nzmin)=0.0_WP @@ -95,7 +95,7 @@ subroutine app_rejected_salt(ttf, partit, mesh) !!PS do k=1, nlevels_nod2D(row) do k=nzmin, nzmax drhodz=bvfreq(k, row)*density_0/g - if (drhodz>=drhodz_cri .or. Z_3d_n(k,row)<-50.0_WP) exit + if (drhodz>=drhodz_cri .or. Z_3d_n(k,row)<-80.0_WP) exit kml=kml+1 spar(k+1)=area(k+1,row)*hnode(k+1,row)*(Z_3d_n(1,row)-Z_3d_n(k+1,row))**n_distr end do @@ -107,7 +107,7 @@ subroutine app_rejected_salt(ttf, partit, mesh) ttf(k,row)=ttf(k,row)+ice_rejected_salt(row)*spar(k)/areasvol(k,row)/hnode(k,row) end do endif - endif + !endif end do end subroutine app_rejected_salt From 92c3d9e3a81e50d7513fc0fb3b8c56399915bf37 Mon Sep 17 00:00:00 2001 From: Paul Gierz Date: Wed, 18 May 2022 17:21:17 +0200 Subject: [PATCH 892/909] docs: unified docs, attempt 1 --- README.md | 9 + docs/Makefile | 20 + docs/_static/css/custom.css | 20 + docs/conf.py | 116 +++ docs/data_processing/data_processing.rst | 136 ++++ docs/fesom_logo.png | Bin 0 -> 44770 bytes docs/forcing_configuration.rst | 81 ++ .../general_configuration.rst | 89 +++ docs/geometry.rst | 72 ++ docs/getting_started/getting_started.rst | 423 ++++++++++ docs/icepack_in_fesom.rst | 161 ++++ docs/img/call_seq.png | Bin 0 -> 309998 bytes docs/img/fig_geometry.pdf | Bin 0 -> 42566 bytes docs/img/fig_geometry.png | Bin 0 -> 189273 bytes docs/img/fig_vertical.pdf | Bin 0 -> 3537 bytes docs/img/fig_vertical.png | Bin 0 -> 112725 bytes docs/index.rst | 93 +++ ...isoneutral_diffusion_triangular_prisms.rst | 159 ++++ docs/main_equations.rst | 57 ++ docs/meshes/meshes.rst | 121 +++ docs/mybib_fesom2.bib | 733 ++++++++++++++++++ .../ocean_configuration.rst | 105 +++ docs/requirements.txt | 2 + docs/seaice_configuration.rst | 35 + docs/spatial_discretization.rst | 506 ++++++++++++ docs/subcycling_instead_solver.rst | 58 ++ docs/temporal_discretization.rst | 193 +++++ docs/time_stepping_transport.rst | 72 ++ docs/vertical_discretization.rst | 88 +++ docs/zreferences.rst | 11 + 30 files changed, 3360 insertions(+) create mode 100644 docs/Makefile create mode 100644 docs/_static/css/custom.css create mode 100644 docs/conf.py create mode 100644 docs/data_processing/data_processing.rst create mode 100644 docs/fesom_logo.png create mode 100644 docs/forcing_configuration.rst create mode 100644 docs/general_configuration/general_configuration.rst create mode 100644 docs/geometry.rst create mode 100644 docs/getting_started/getting_started.rst create mode 100644 docs/icepack_in_fesom.rst create mode 100644 docs/img/call_seq.png create mode 100755 docs/img/fig_geometry.pdf create mode 100644 docs/img/fig_geometry.png create mode 100755 docs/img/fig_vertical.pdf create mode 100644 docs/img/fig_vertical.png create mode 100644 docs/index.rst create mode 100644 docs/isoneutral_diffusion_triangular_prisms.rst create mode 100644 docs/main_equations.rst create mode 100644 docs/meshes/meshes.rst create mode 100755 docs/mybib_fesom2.bib create mode 100644 docs/ocean_configuration/ocean_configuration.rst create mode 100644 docs/requirements.txt create mode 100644 docs/seaice_configuration.rst create mode 100644 docs/spatial_discretization.rst create mode 100644 docs/subcycling_instead_solver.rst create mode 100644 docs/temporal_discretization.rst create mode 100644 docs/time_stepping_transport.rst create mode 100644 docs/vertical_discretization.rst create mode 100644 docs/zreferences.rst diff --git a/README.md b/README.md index de34d9620..c9160c82b 100644 --- a/README.md +++ b/README.md @@ -27,3 +27,12 @@ References * **[Version coupled with ECHAM6 atmosphere]** Sidorenko, D., Goessling, H. F., Koldunov, N. V., Scholz, P., Danilov, S., Barbi, D., et al ( 2019). Evaluation of FESOM2.0 coupled to ECHAM6.3: Pre‐industrial and HighResMIP simulations. Journal of Advances in Modeling Earth Systems, 11. https://doi.org/10.1029/2019MS001696 * **[Version with ICEPACK sea ice thermodynamics]** Zampieri, Lorenzo, Frank Kauker, Jörg Fröhle, Hiroshi Sumata, Elizabeth C. Hunke, and Helge Goessling. Impact of Sea-Ice Model Complexity on the Performance of an Unstructured-Mesh Sea-ice/ocean Model Under Different Atmospheric Forcings. Washington: American Geophysical Union, 2020. https://dx.doi.org/10.1002/essoar.10505308.1. + +Documentation for FESOM2 +************************ + +Here lives FESOM2 documentation + +Rendered version: https://fesom2.readthedocs.io/en/latest/ + + diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 000000000..d4bb2cbb9 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_static/css/custom.css b/docs/_static/css/custom.css new file mode 100644 index 000000000..11b47c4b7 --- /dev/null +++ b/docs/_static/css/custom.css @@ -0,0 +1,20 @@ +/* Copied from MITgcm version: */ +/* https://github.com/jahn/altMITgcm/blob/master/doc/_static/css/custom.css */ +/* Make equation numbers float to the right */ +.eqno { + margin-left: 5px; + float: right; +} +/* Hide the link... */ +.math .headerlink { + display: none; + visibility: hidden; +} +/* ...unless the equation is hovered */ +.math:hover .headerlink { + display: inline-block; + visibility: visible; + /* Place link in margin and keep equation number aligned with boundary */ + margin-right: -0.7em; +} + diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000..eb459b927 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,116 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'fesom2' +copyright = '2021, FESOM2 team' +author = u'Sergey Danilov, Dmitry Sidorenko, Nikolay Koldunov, Patrick Scholz, Qiang Wang, Thomas Rackow, Helge Goessling and Lorenzo Zampieri' + +# The full version, including alpha/beta/rc tags +release = '0.2' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['sphinx.ext.mathjax', 'sphinxcontrib.bibtex'] +bibtex_bibfiles = ['mybib_fesom2.bib'] + +numfig = True +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The master toctree document. +master_doc = 'index' + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +html_logo = 'fesom_logo.png' + +project = u'FESOM2' +copyright = u'2014-, FESOM2 contributors' + + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = 'FESOM2doc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + 'papersize': 'a4paper', + + # The font size ('10pt', '11pt' or '12pt'). + # + 'pointsize': '12pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + 'preamble': r''' + \setcounter{secnumdepth}{3} + ''', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'FESOM2.tex', u'FESOM2 Documentation', + u'Sergey Danilov, Dmitry Sidorenko, \\and Nikolay Koldunov, Patrick Scholz, \\and Qiang Wang, Thomas Rackow, \\and Helge Goessling, Lorenzo Zampieri', 'manual'), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'fesom2', u'FESOM2 Documentation', + [author], 1) +] + + +def setup(app): + app.add_css_file('css/custom.css') diff --git a/docs/data_processing/data_processing.rst b/docs/data_processing/data_processing.rst new file mode 100644 index 000000000..fc908b6ae --- /dev/null +++ b/docs/data_processing/data_processing.rst @@ -0,0 +1,136 @@ +.. _chap_data_processing: + +Data pre/post processing +************************ + +netCDF files for initial conditions +=================================== + +The netCDF files have to satisfy the following criteria: + +- should have DIMENSIONS named ``lon/longitude/LON`` , ``lat/latitude/LAT`` and ``depth`` +- should have VARIABLES named ``lon/longitude/LON`` , lat/latitude/LAT and ``depth`` +- ``lon/lat`` dimentions should be one dimentional (e.g ``lon(lon)``) +- each variable with initial conditions should have only three dimentions (e.g. ``temp(depth, lat, lon)``) +- The fields should start from ``0th`` meridian and longitudes should have values from ``0 to 360`` +- the missing values should have values larger than ``1e11`` + +The file that would be read potentially without problems can be created with the following python code (variables lat,lon_reshaped, depth, salt_reshaped, temp_reshaped should be prepeared from the original file): + +.. code-block:: python + + from netCDF4 import Dataset + + fw = Dataset('woa18_netcdf.nc', 'w', ) + + fw.createDimension('time', 1) + fw.createDimension('lat', lat.shape[0]) + fw.createDimension('lon', lon_reshaped.shape[0]) + fw.createDimension('depth', depth.shape[0]) + + latitude = fw.createVariable('lat', 'd', ('lat',)) + latitude[:] = lat[:] + + longitude = fw.createVariable('lon', 'd', ('lon',)) + longitude[:] = lon_reshaped[:] + + ddepth = fw.createVariable('depth', 'd', ('depth',)) + ddepth[:] = depth[:] + + salinity = fw.createVariable('salt','d', ('depth', 'lat', 'lon'), fill_value= 1e+20) + salinity[:] = salt_reshaped[:] + salinity.missing_value = 1e+20 + + temperature = fw.createVariable('temp','d', ('depth', 'lat', 'lon'), fill_value= 1e+20) + temperature[:] = temp_reshaped[:] + temperature.missing_value = 1e+20 + +We will try to provide convertion instructions in the form of jupyter notebooks to all files with initial conditions. + + +Convert grid to netCDF that CDO understands +=========================================== + +We are going to use ``spheRlab`` for conversion. You have to have **R** already installed. + +Clone ``spheRlab``: + +:: + + git clone https://github.com/FESOM/spheRlab.git spheRlab + +Build package: + +:: + + cd spheRlab/ + R CMD build spheRlab + +Make sure you have cdo installed (``cdo -V``) and launch R (type ``R``). + +Install the package: + +:: + + R>install.packages("spheRlab_1.1.0.tar.gz",repos=NULL) + +If you don't have netCDF library installed, you also have to do: + +:: + + R>install.packages("ncdf4") + +Load libraries: + +:: + + R>library(spheRlab) + R>library(ncdf4) + +You can get help (for any function) by typing, e.g.: + +:: + + R>?sl.grid.writeCDO + +Define path to the mesh: + +:: + + R>meshpath="/work/ollie/dsidoren/input/fesom2.0/meshes/mesh_CORE2_final/" + +Read the grid in to R structure (the arguments rot etc. might be different for different meshes, but this is the standard): + +For rotated meshes: + +:: + + R>grid = sl.grid.readFESOM(griddir=meshpath,rot=TRUE,rot.invert=TRUE,rot.abg=c(50,15,-90)) + +For unrotated meshes: + +:: + + R>grid = sl.grid.readFESOM(griddir=meshpath,rot=FALSE,rot.invert=FALSE,rot.abg=c(0,0,0), threeD=FALSE) + +Define path to the output file: + +:: + + R>ofile = paste0(meshpath, "sl.grid.CDO", sep = "") + +Directrly write netCDF file with mesh description: + +:: + + R>sl.grid.writeCDO(grid, ofile=ofile, netcdf=TRUE, depth=FALSE) + +Conservative remapping with cdo (interpolate topography to mesh) +---------------------------------------------------------------- +:: + + $bash> export MESHPATH=/work/ollie/dsidoren/input/fesom2.0/meshes/mesh_CORE2_final/ + $bash> export DATAPATH=/work/ollie/dsidoren/ETOPO5/etopo5_lonlat.nc + $bash> cdo remapycon,$MESHPATH/sl.grid.CDO.nc -selname,topo $DATAPATH $MESHPATH/topo.nc + + diff --git a/docs/fesom_logo.png b/docs/fesom_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..24acc742ce1efb7aa0fac7d7e0269f72bae3d7fd GIT binary patch literal 44770 zcmeFacT`kM);`>UAP9I#B1p1HN`@v!kt|VyAd*8PIW#$T8=wIRqJ$=^h#-5=13RQiQKsoVm=Mnb-N&`u! z`G`BY`Z>Fahxzz}+7O6}dYHeHicJZUU@}vDOp7YX&F&oX-O$L2}wl>DH$x_Ee2|c zp~HQHoWjI>(R{l~{-H<94eb)>;UDDT=gUjh>*VYg9HfdskQ@5v&(CrB`2VvZU-VDx z0E!Y}PW}>7;*t{oMaanL{}%J{`Hzz5ATzhWM)n^YL|cUWyGfY2q5XmbUEDx*zW>w( zbdYuk7}8IO{s%?cA#T1w|525z%Rlw_2M2op0NT|>!p+;w2Z*2n3R3@ZEdJ;Gg8b0u z{Qe7o|9t&lVGQy>{TD3AZ~m*8TbRdx5hK6Z75l-d-KkaqL{`!abaM*w3$*a_^H$rX ztLZ;_&#S3Prl&mbQ6nc84`1?-j;ToeUFa|B{H4!L%PGiBjVvoBsUU`wvXGKdl9o}D zl@^hdR+5z5qhz<{-*g!Hxq6_&_vla*lawLrkdahEO77LMr{>>v08?~z3Ud0NwCt_> zuX>D(l=OYkK~BCdZu(kkVC>=^9GbAli*=JgSI)SkFJ*!cQvvFh3=ae;?}4 z{>cCL{`{NhU;P2fRd!#y{@0Nz{7Um4(Z4kR6NbNr`Zs0+=uHAWfMa*^-h&l+2LJ!S z{2x^8=>*UQn$hz>0}Be@(>{6JWcOD zBJsn~{o~nRo!XTkH?&*fKg{X>M&ExE|L<*to^$j4Y3~2gRCiVVsP*5Kp#4xmp-zEr z8t%X%{x@H=tM^|8|9b;2=bU`q-CWfq{(;>;9{gQIMS?te-X8xtce_vjbxQw1!+)oM zimQtf$}iBzDM-!3$I0DI!r#|jMdGiAf3>Od&-Fk_)6d&45co7VHKdBfe|o$}io6n# z84kd5`augR6^Va)_V@bMWc&9H{!w3ES`z$7NRj{iDZ2Od-$l>--=ce8|6TMiNlRak zAT{Zqt^Mus9x3v2wF`zh2+`csz5Git?(PJy=eTh z{2Q)cLbSKSZ@BiN@yqgWxPA%I-U`3r+Ka|7%fI3JB}98G{Dx~U8ow<6hU=FQ?XB<| zuDxjdviuvaUqZCE!f&|tqVdb}{|OiEU*BH3`GOBFL&2AoOV?^6z*m{PE_!B05J)`u z)blbu1WH*Cfna7K5Hm^$l=UnGB3A{0@Myq#M?D~rpn82R4U4chpWcQxa9K8DmeviR zFzm%K#$rA_^TZo6lx^o(O^;XEvRYVJH1P6LC?{S@qOZAm!}?`8LRcFL)sFD%Q|AAJ z_m5|xJJ|`GCtkdsA&y0&dcH*#d@nZbc^Eo~$IE&d6}3MnLNYm)Z6coa2tkve%$-BU z%N^RC zv3@q2kp~WJx^oNf6vSgYUhK@@UoI@G0SQMs8Z={#!-)?~H&+fVe?udEp7qRd=JE(T9Ozk^)FgZ-JR-a$ zQD>S#^{VCAVlS z=hv0j>S~ZtHpcZ_Q_oQar4ml%p`)?3jhxC>9Ltl5#GMmy1s|3a+k4)n+**gPYqbV3 zpRD6TvcXRu>v)kdy}aV~!Ya3&NZmOEM{jQSjz5f@t>T+tR6ms-Arq1+QN^ypW?ic? zZ?_*<5upP+bP3+W0o!RP+zDo=WbnlycdOLi z*JWpTx*cq5%m0S`_Tp;Vx467(#Fn#5+@x+?Sih^8NqgTC!DdRKNxV%vbtZFi){_t> zGVp>KefehX!bXeBv($CscCeO;lXO|1OONFXDYTeleqgh0IVbWF`;9j>^VsE$#rZ(n zhvl3z^Y1@oh=*pmHVh@IQA=L%D+)(HPhYMk=2bTr&wOQi5E$);0b(Cylm@(Ms>Fu!n{#j?g==X3)bilmKZI6zXMpYO|y{u2W{z@tedw_~Oz!;$v6(u@1CT6eR;Ic=mF>JSOFDm9 zS03cf>Ywq`GFK-iHpB>z-BYjEcJ$-2Q3(2tuttT&6OJy4``c@+;zgbFu`(IAg(&=P zW%K8KUl}t1`dWxdB;5N&mWeK z>*h8A?^AH>)2E)czmQ^4)@#O1AeNGj3Vu^b88S^>`eMW=50lTRI*iTD#Tdp^7nE>>EZ}TO%ge z{?!1KXf;>0LAc6l6jfw=E<0h0aE8ExMIt7izuemY$X{~nz=$Wom>`1rd~6XOHHQb4^gW57P9>D9$vWs+_ z`Xns%?Zk`BjXbsFF_{8IDTSc(i#k?M^)gZ>ufFi28i7i{3*0koB55~-{3AA%Mh>?Z zQx*(8J~&*jQqS{hf-@H0Lc8UBpKVq^)$8sV_As7B2FFVWZ#PhHFfLBv8r=AIU_y|Y z??=uys5llzUyq0`FD>CN&Sq(jJHl3{t8`S4HTBljPN|@o<+>Y|7UuQKU%FVWuDQ%j zF)m>|`lkW{*SZ*$Gpjyh+C*=1=V&jD4tO0sZRlW?5CdHO{kXRmAymo7T z{?-L!h}sHV4(psTo_lm2wdCt1`#LD`Iid3y_r(|*e|s$z6i0Bls2y#X>LMyJ>GkB@ z8&nUooXr-^NYW%kqI-vm8%MHpp6&UGX`CH43*Kp?ExYQkNM(P9+uWYUw(o=fZ5hnu z#tD}4tq`#)uDjkD*kF49lbcFab2H0&^z19AuUmguv+*Jhu@fFX3OLTOWLy@U;utl@M;GZZYamQcOj%hgGxzA7e%_Djo z)i|6`g=I!esG!efPrg4%@WKp1l_iIJ#Ck~&sT&%i>?myiV3g0%hO4DW7zR9D^bLFJ zfA*~%H@tvit82xh&h=BIfIp)8zHHnSBT1Dp;Ks3Hr&Q!s%z~MyVDDlc?I$kAb+K8$ z%Likt<*3%6YhQA+ZRThl$8icBEFA9?l;S2D1;3fv_Sz2%dtM)hf75QQDCxCre{T80 zrE3U&medu>;k^!HgUpzD7lyW!-;#w)ZVWGng-Yaod4+sSvjy95(#rE=pr1Xo;Y8Y> zdFYA+WM?uN)(7jr%VNi&A8l^B$itPjTkBT7<=$}PDb**8u^dPpCHCN2MM-vyOI+rq z7ixlnNsqDh@Nyfii5QZl{njZ(9dU-{8kOl$ZlUk}_OFg|Tkj0u8u~arhpo4_0y)Q- z3!N$`A2Mt}+gwSHyRJjosny|}&IzYBr55Gj;L1U>}Xto3q1(FFIO58Cl8}|i- zmoyXg2{Mc|VGv3ZUA$z#9@W3Y%sm&Ss zr*~dFB7|b9bX&WoO)s>!zQq<=-!RkFy;y$i)@`!g#D+20m3_prX|>*0+&Muv8-GrS zG)c&8S?{ZUq=<;|rOF^TE2fqTIbDmB5jBC9YqxTh9NLgtzQn?o6xuOUcrrhmrui5m z#)Wo(WWByRWxZ{dRGNf>K8B{OQ0|`asqx+Bk^IeN&9p4;PFF=c4gb?cs94`f+>po7WGR~M+@9O zymttVLL#l?FSP0gELWW!%f|P-$?)Vo4M9z365KJLqYG~_K2$`g9P(Iyvh7P?aa2vx z<6C=*8y=y9%c>be#-N=0?m97+F=d&0pPF1-nH#Wfp=--rTYw6l)rt-gS=1(N;KGQU zcb+S38j~mtf(8l^&O*r2wn-utp^ZhJ@{aO6bdbG?^_<_agNmcZRoZzdq9)k=p1 z%g^E5P^vTHT*+ zF$0(F%dooHu&$FD9{Fd-LTQ&v7ds&&bGFGux7DdOmXZE@wWwi$)bvdPqUE-k+v#N7 z2(dTEuemczC@=Di=AFysWx48d;uOvM6*wi%>%9%q(fZ^_73?AEP{P@qBt6HsVr48O}dFDkR2+uSy{Mj zYadH$U85RO-E{0S|7st#Q7_W#UGSm0R~?{q6;t1{t?eQR>4b0k!Ea|=xK%tCeqMzn zjpibWlDwwmDf9;dVbyq-$Ia^lWwf#IhE=n&1RtZ^KdUUmzG5O{d zJT)W>He#(cmvhOex3nhU5^?KD+xx;Ejf@Dtdo&hLtheKqBc5boZ;9T3Rx9~4Ev^%z zY83n3{NyxxDiX>e1LEd}z35!G=^WD)8UoRlAGyP}!B?HWkjtNZPSlQlgLzRPFE5Q> z8tsjW!8nV&R~19(Cv$enKO?@vI*{gA=}W}0wa67+eB%wpegWm2A_=t|ybH1DAZOJh znBRlXA2-xW+Idpyuq&ALZ}$3qxQ;$6SXP*+g#HdYo4jbMR4Qd=HI84_m(u}(d~D4< z>FdW>M>vc1Lar1MIsF(c6$->@yj>Gotf_~)=sdB<1ZQ)7u9q-l@QsHTB^+JGh4eXI zDk#Yz)hV|MCay21_pb#6M4^nK;xKzhMt#rs*4q&zVUi}M$HXkxkV1`mO9WXj7cac5 z!u(}`u@csg;i>7ddo0tb6=Q z*c0pY9EFssJp5x=OhPL4sfRxM@~+}L+)9+$&)pxC(r~CqEr-Ysr$}n-bZopagk(~S z?@)8TkK_Zfm%$LFIK{S8cE`Lg^F*f39n3NlnWUdKt1 z`Aa^~Q`+G-a>`eutmTXhl8wjw>VZ~I!t1anHt9OZ%oYj-HtSA~bWUdUbK(}|qNSr# zlAhUQ`%(sSC6o}lgs__;+91ZxnM4CCN?)7MxBvQV( z!SS=EyMV`gmk+s^#}I=vn&~dizO=UcnxAl__Yb>_7+~%09uez}A>6`mBFsxQjUj_i z8S(6>UI1| zp8Sp(xpJ1!v)C?I#_R=&?IQ9?qwgvf>%6A0f_hd*^;4|AtOo=fg(`4a_tk5V6~!D< zDBw_iWtsj}hC8K;*aID>K3MaED&rIzY>VTAX*#Z={w)3|RHZXR*{PHu)*`C+VZI`c z?gsrG*=o^$gg~|rWRuSFlEFU!ZE>X0Pl;D#+>rH8ljSeFGP$7)NLg2LB>9aTK^xzA z^;mKHFhy7$y9}fxyU0F54S0_c()&XG6t-5yDYIB7_LM6r|liFxVx`Gb72Pn`r!pQe7a{cfe{60vn;m~(>*IG4V2?S0W07qKJHmn4T^ z+YH_)dVSB)L))ArPSWSWv?c_G4?FHU&5zcrt9L`S{7L6oor*7tiI9U@+ZQRaTL1Ye zU1p*t$SP5}SfK3ldtILNuf}j6>Hvr|~cb8oBJtsD}@SBRH z!vuvSJym&{4LelJ2#Z+-mnTg2(;+`|M=OJVbBWVeI1hOHcX zhs@+T$T)R@-wzc-Fyp7s)%88!Y^auTJ{~_uUnaLrtssSDlfGnOU~X6x$2sEz%-hq) zkZpX64O%E)!=L;j9wVQvVb*CPe7dM1tob}(!>P&j7lhY@q>3X0-35eWQ?z#atM@8^ zfBZu?yJ)OQ8Z_Ps5t?k-PM684`O-#snVHtVT;nfA3MG*Ft&=?Nb;@wt2Q>e{9-3!g z*`Hw{;agtX#PiVy@(Gm?aro6Gc{R)eOeEKeOl@OW%td)KJqw#4q?2vq#=p__KHo*$@iSimO-UVG+nWrM-`hK1W6syhrLnappI=MPa0# z8QOJPhoBf{Ufs`s*!(U<5R>VhtYd+hgB=+dv$uv)SkDk*{lV%ift>3vSTPIG2U2AKZmY!` z3B@6WhN28xOn$;wiv^S4dO>f8bBfSkq(3GI6~dpmb15IWLWwd>=47}mtBJHA<1GR! zl)cTL>?s;eIbSHIKMom`zQ8X#eJF$#EShR7CLElHVYl6;Z^yw>z@M*jmcPPNMBI7PIlsIr!Wh z4OxFlAb#zX2?)JH2;3#ticcx-3#(-3ud_r|#`PS}0KFbbf<1=IRmaIxhfuW?;+<&A z7E)xep8nn2AqFy?*{51QxL_o5Po?X~W>!*gl0oLChHo64KYB#ph`qi8CImlgqsb8# z2`QAn&>va}vv|x!ds$wy_Hqa%DHeK|EIIWG({nnlAGJf@JW7||f2adiC4HfvgXcMM zJzHb$;RU0jn4?xZpIs8B*^!Cz(Xd4t=97J;Em+@u&0S3CHYyC_si7GLW>?!kAAl~@ zUhG$Lcuus+(U>cLEB&gXe&7HYW9iGgkf;OI=`;c!7HTvbDwO&@Yi^GB*!j!0rO|Ap zLk4;kGE@q$pHr7r!4&?an_F5MPh?TnxO08HS$Da_q&RyugFkue@N6cb5*WcmfdARV znHPHLbLyz7{ae{D)lX`fp^YeFuHp@HPRt#)g5DWopgJh0iA?@Nh>msiSkdn*<@AN= z`()riEnr0uS2)Bv6t6jeM#!U8pI5j*e`+B3!J=o%7 zEUTfA{>?xf4S6!fX`@D&();)Az?<9Y(x?6XQ7waXW)7FzEHQK2;op{lvsr2n+9*kd zGDw0TXzuVV8Y6$^gjrHF#QG&&y2ZE2L7#w()Z*}Oz}q@el}W4xvKBRc(j?0%*+DmK z5+Ah)=<}_|BtER;r!VL*l#InIGAi^k0nw$1GbH}hvjSZmt|WJSW7@Ie0yGc~=WL*> zek_nv^@`?r;ddeW2Ok7yzsmQvVM5QG;IMvWSbcnfB2(@H|G?L5SB!sD*q;7FK3wg!i_Rjy-8$5rN|+z4^~ zJN4hOc0^u}Yz87RD_3K#v4(IRW!g6LxpMn3Z$6+#ePc0pZ5# z3RIPj0PHH_=)R3+0sdg#qXpCrmqGVOI)JYjrVdygCmb52a72m2n!9ZKj(@|`G>IVP zspV=f%Jdq`JyGc(ND-i!X~j0F1Cv;0?4^n$Wtt)|2Ve*2$pF@(JIV+1=$glwF1R#^ zKtd@qWwKQLX+U>oa1BJdl`kA5MHJAXOgB7Cm##Bwgm27c2YnDiTFG7z z_J>G5rvnO@F@EwHGQA9`xDTk=0;27{;ipV}gown6Bd>Gn*YU^GJ)@hy!btuGdK|Z^ z+)K{37(9~Y-oUZTyl{E)yuqu7z>d5uA4BOVWUdeYSWj3&%{JL#Zgug}-__)sU_ugC z!UF9`Q(!D_QeI7B7hojrxYMoJM?+)0GPEx&^|&FB+N^z z_*hf-Kx`lBE7f8+k~kD;#Ooo>Evw$g8Wmw!n^C>J|IpLd49NQiuN#ks|W^e15~GfjqV2w!w?^2-~- zmCQ7Ynkv*Z6Dlp@g(Zfg?0dk>J**50Dg+aOO2}Q6G4-WfyNr|9z8Gq^-YyP^lScI- zoREZ=U^dlMKYbn$vHSn0N2K$TwdgY3`MFl#$arRGEK+Y@)_9nGn^$8a&J?1T4V z{#Lw1e%UvoBps42;V@=Kr!}>!&)iQ2+aHF+8epm*BU-J4i~%iY;@nJ_^>UINvWs@4 z(FYu<4fN*H*IlPeKo>n96K&h+=D-q7HQYeCwK|ktDm4%H6?Rx>2!E^oK7;U-V9s*& z!~wj|qIRo=$QKh-%1!IjHJ2%o1EB1HU$I9+B{HQT#h$5x_XsoB0H)ej-h`MBXi z;VO!YC0Q=hK2uAxc2i-Btl6hqxVZx9a;6r#{OGu-4Q&arp-d*@r3zHm&f+_tP7rQP z^$0C~p}pOPS<;NLy|Fcu3C<>JceWFz9C8hpY`-M5Ol4$i)XW)v=eo&8O^zq+Wa6Af zdf6U&#q}eTPTieWYGvVQUn1RT`07`boZFMD&bUBvcdF2UL`#Yy@qmmftMRg4-6~UJ zQrw-a4s0xx?V7s1aSbbeWa8vN1l(6dejf?~_D82wYCq>Jcbsi%xHK6ptEOL|7XbdB zfrQG+6Q9cJ~1K?hd4Ixxd~qq3n`U=2MJRB;`XpQ8qY~j(h#8#i$*xd=@(Z<*3xje zAM}@M>q5m+Z`0TwW;DT69}@{U~KS6Y# zxks^PDzBzz7?+9_i(4*ZADZ25vQ+)Pe?w|<1lKUgS@CT}kJri4!3Y7 z^?M^Rb#Gx!r(?TY=%RX=;Gauz+0Im5Q$0Humlgg5cs^)v2#z5|AD^$MPDqQ1U{*IkhYB z*1dmxgOul7=bDy$z=T0`Ft4ND^`jNpr^#3>%6 z;_?v_oM=7a&y;uFQ<^cy0MzMwPf zF@%kIQ9X%0y~E|1uyzQUb>PX#DeTpznNXHcmxq^9WEvhwX$*1Ef3oy75PU9Wx(HeH zbxgu3yy8rc%bSs)0KrpTi0q;RfrD{Lv2ds#k82P^Fu+bj87SuOoves{L1AXXm5pca z;wNyA69Uy}+2I`VCw=zSX5i7cQSBZUuZPdB?K;;d% z*h_I^$lBmIR#l}a%jQYV{m7IgTu-*fozMDh5d`VXS$qc;obrA|{5imoJ}oDn3TL>d z^;yNDMdG7YYkjn_R~=&F3T}g?&4s-MGF%@60{VV)kQTDvXt*&5(w(8clp(=Wg)$q` zXqDHW#CvP(49D;g-DiujKSCM=-nbJvc`icaVU^oN!K5!kY$~FgGobO%Khv8H$*oCEa2fsZevg z@@Auyrno>!5-F5@Jp&+bhOUG}iB~tA5njjRHXgDU4-U6d*wvV5jk;5nt=U4~MzcXi zv|B%yq}_*9-3~jL^K~JTu);TCVg5x|%T+7t@bJEkT5V6KA)@U#AkAG&Ippl^<|Cg> z4er0nW5(oW7YABF3&GYzIOW!WvJ}D?dBNfvb5Z~c+d#i68w?zn$e8PK#HD5zEtHsj z+V>>|JEsm^^Hf9LD5b?W zxP}gv^tcblK|WBQ06hF@^M0#M(YIn4bj3^g{m=yvqnh(PH}o6zj(-cOxACnZ_f=B= zmYNf+%uX83?#OOnG0jNoM5S@KFf#{r#pb2!Yh;HK-$d(A zdcrTDR2#sW(U9CYYgSH)SzaI;{?dFQXWjf*t)FGi4*@DDjOMAXxu;4%NhaPV#eN z>g*pqd61xQJ#$+K1h?#k{*%U#(=#1{X&i2{j?L`C!BUHGps+VaaJH-iGo;gcrsNPO z;}V}O`>O*B(%AkLbi*T}r1gt|SY4YFBnrp+hOZ``5>}5bCiO{HU@qDY^WF))){~ar zp8$3(!+$cVBAJH=o>{%)_-&9dinmpLbv9DBwU8*;fS3>=2#LN#CYgD=h?a#8wpwB?Oy3mj z_0|-ESRr)ClN;RLjkfVOjHvLE^yuP$f<2O~1da8ATizp=B#bbiKrY;a^G?U~@&Ww$6n&Lrl z8o}`j(+LRGohVpJ^g}o|vGWJd`UuV@Bw?a$Z@m>~TspLAI%o z)VsPqsrQf7*d@mwEf_+S<%4S(?c;pTne4^wz(*>nBeBKCuG4`8V{p(zjUCc%y;*Xu z5Z6E-#=LkKwP0h^xh#Slw6aH1DL}F z)B2AT4!yFYY34(4OI-qHmWu@|m!$zr8RTXfjWBjyy+=W^GMudB8cb_GL|e@+jrOwretR=p6*_A- zax*lw$LoqsPfgI5N_lL*-c=*h1a^=S;Pq&bUTGvBC*>$(HHk%y2{?*1%7SX0p=LRd zG6lQwT#_)MM4^u&hX+g&x)$^UEy|@@zjKXq$0l0kgYY?huH^~a&}d0SRCn(5F+?Sm znpZ;Q8Q5?)NTP6}&PQ|-D=_(D*Eh+$0Em08%D=sY(H52RG=`k)dcP()uox$;pAU!_ z*`0zb#A`hDm+_Bh=bR~qu7HD)OYELLi8S|l0r5L7sFuCu)GZIl1^meIK7Vr52d4it z18)G8sL>jV8m8b#!G*CXj567+*c56Du6D67_pd4V?*}M2`GdySLF2L=FFjQpVfrc9 zbQ#eq?m*)z&+jsbZ6~T=uwl4`noYD^JagOZvG)gI!W*T23JwM^=t;`Mr&S5ydPi0^vV#VY96%DRE0b>C(Mv@3oj__mi{E#zM(T4 zHQZqqX&pdyyP4 zdiAU4Su5yZT+g<7$N2G%-0MtWdOlAdEK0d)mfILu!BJU!O~&?Nc2OWr)F2&4GER@g zoH?7A4_wp$3-g!F;{s?nDLK+J=xTcKyns3&;-3@L`H{nI;zwX1(N9 z9F6chilX989#J=OYLy?FNx1!35wTNcZqjHwFy7Eonh5fE zvjk+!x;YKZWb+```ccLtD|Z8sN=aqQEeaf$$2QGU3zp`|pV0fho-B!EgfGbWv1 zpQEA-I4b~{j6A;aD0v2h&9qcJsDcf&Mj5u+m@)!krg!4)>?ntWsVD!E%E0N3Qr#Cm zi<(XZVGo-Gq#b|VOkE1Ql_%S+gm1jdUTm|GP1D4Ow1ThIT-52LIj*11Z~4xb*#dzX z%GT(!-MHNA`IaWnk7||RhdqNT?cZ<2JMia6X*3)e)OMW^hCFv43?9&gbzWA(Ekqt$ z+_-o?v@XW3(lM(^7%>m+1}1r32B*pCf06wTSgkCd|7^pU*#Z?$v>gF?UJ~Vk=l3@h z%PWJ{XS0vrNq%cfY2A3if3;9};s;oF{NrhaSC0T}E272*<}yo(G9)@=X?%>^`IF(A z7cF=yVRzTE{j^E>nWmOMyW=aRack^U!InM~VHyOi9w2EzHM24?WBg&g0jD@zBLPWKhgd=E5(wA~E=|$*3Y%1<5Rkvl!lux4i}rN5$#X4Sj^7Jb_=Q&syIr=cNO1Bs@>_Qv^PxS2l;sI z)(Zr9X6a}Wc0EVKvx;cTVm-o?5wKeLHTbR!mja;p>sAW0&keNx?-?i#A}`#T-o-4i zj;3j1#g|gJ?#+ryXjMPEpeawjFNjg(kwQ5dcQR^Nn9s}|Z~W>PYfySvB>}W$2C(~- zqKzqxxMJX0F@rBMbI&~<9aKppT-QQV{N^>->>@RFfeWwviB)4ig^dM5hty>EF z^%$ZAYGrdn8n}(Q=a!hK>l&aDalmNugn+f`8$-9@!XFuL+E~R0HXqVhz>T%6I)E=O zXopkhI)RfBX@87BztJwrOpHss{x=1h<{OBkBXoVFP7p-{%3SVoFy~TWwBzziOPlj5 zB-*~DTL~s-rRM~9J9cZoVK4J|Cs7q-lbh=xh2PI43oV_gyA!|P$P_>0Ivs}8tGmCi z5|$?fX4GnzWf|Q_jh>Xr*fjit%q5xLL1_)Q%0v)b7I8FiEY?~l*EF28xlt9as;Cn`nul8k?IwH=V-RZoRKKP*rj5gCdScZqW!=COW5H%GbK5FQuC zq|nKvx^7RBpQ`2Or%#augZU{002~v9Q0H7Xt=z7Sk7(bu%g=_)U)sb8U`K#GR*bD_ zq7geTT?euGrqJ;b5h6P^#iR`{-G!bz1s zz$I@DSwKSJTHsjUoTamR1O|kGZz+^!?Sgj;F~LL%G5FG@Masf&mJvDK8Omx-!?uvN z$>oMcIb0%dDOFq(PTuu}l`*dM5Fb=Ujj&liNj)F2>p_B_E*SQP#>2pMg#n`@vgxrQ z<>-7G3(cXNBBrEobgpHL2jOGmgAy&a-$Sc;;Hx=5{ODZVcuvv52UOzS#J4~yW#o-z z$LE7nGlA6=l(NiMdAX#qHYb9wMP6Esp?PA9*b%%0` zq(y*LE&7q&7L)u)A)RsA+G@M5G-3{5$VfpnV)M^ z#L@JeB4g>#rU)M;a8Y{yTJ3BU8*ge{q^sz{tgM-;5U(sbO#?h?4Y18Zu z@1|07Pfro0|D-@ znVt&JQkz5&QdDG?wk`{rroNT!s7i(SN*d@gcs~&)Jn~<4xUI&{h*o>J$xwS41OdP= z%;n&~Eo8iar(Za!n5ZI)Ec9X^XRT=*j~H0|>5eSx&MN|E!-kess&XBQJ%*&Ks1MJM zSHXzH0B{;;z7Fr`np$_yM!nx9V|{cNMRE<&KO7ZvfApS?S!_ejEv3rp^unGe_4jG| zU0zKuN2-H;WWEwFQiyJL=mA@GiC=(jN&@Z!dNaCkN`-cJ>PCzyxTQ5+3?XVpQj8n4 zXuarnZJ*{wJz$%goMHRx9Yr(-47xWDPa!)?vP<|n%~etytJOH$<{m$dj#Lg*Ut~pE z{G2~aEfrYQ5L0rk++sXYMF2S@8Q*W-2_m}n?BlW_g*3vGaz%G8@W8F`*ZHUqfSp18 z#N1=!F0a)>om~T2{j-K(jyhL71haCR?m(hXq4iYX9V{9E&O0~+EU1$&JXY|*@ekd#B*vOpQj#9QE>3zoKC5H#(2>j&#FqAvhJ^ij|SOO9sR>L{&0om!H) zz$J?VKcx%k94NL%nc!}WBvaB?Vw5{@!3i>+lQOkM=t%QCdMm?mRCC z2c>df#6IE5cOrgb@a77bjYA5Za3Khr`?T?NnvR=3vxV~=n)fsJWd(DO2b)#GzJ&Yd zpJ5#?_t<#_Y??3nn-Z8)?w=s2v9~a7JP@E7j3e+MSNe%x#H~GC!A?e?1b;ln_^e9+ zb7|a+>SN08dM#asE@A-vx7ab(;nw;}zRK;7j6a-p{{bF2xAfEFBY0<7k~|U}j@IMC zgQ&$&7DR*(+9{YzOTgZV&#SM<5(AE$8NmTV+OD-V;=nEbYXVeb)+9TY9%>a8X7X#_>mmVoZAUt7sZV6JkJh(lci@=Z$sVB+~A`g`d;#c7)4oq%g++T zD?={3Jt!1<>JAyN*ZSy9m50aQcAZw5PAka78*f|A&@h)mxdERJ%40o!6KRz0sa5AL zd^iq9z_ZJM80CEpJ#-acY)FYj6>(&iTs#Fq0qp!w0l@+pASj2O6Ge3Y@DMjD%OOa# z5NNo}r15Lj`mTq0Y!bIXxmK&?l?-2__I*Z#BVX+g^HeT$vE-7FE~AN}a+Do~Ss3{j z(wI(@r7IzEB&k z!io~XY+pbFX)K2`dGtrB%NLw(zNL-op!*KCQ((X1c?2;}y#_;_(IgBK8u6R2h_+8T zJ+FWnAesxB!W+yiJ%J!>5+V+U-!?&9>i6m*TMvXf|nn%26im(KAX>~tzr zhZ^?*x66g^`?%wX@=Yw?Q35XEr)Y3JkADgKeMk}{9y;Vc{Sb-1t6w~r>jaY}hyvLV zmNe9%2VzL{ZSEcURGCO%16F*KhNj8H=3&~on;n>s7&OKlF(H1n6l+hizN3~)WlOn5 zxe=|U;+!6V!wE^R&=bO0dSb3lyN8p)NZ;|{rdoNPOl5UsDi3_Ooq0i2!B1??a1>}_ zUqvkO2!m@+p37u&)dBHQce|izp^tXW_W`R;c>Gho37y(fje}@WOZsM(VYX}#)j-vl z8GSn~Qkf^Ik>Fa>0^#1lmy+K5wvHf8;NX~{(+~loD2}fR)rZ>#*KDeNkm!dvg<9$z zF{_>6X+ba=5nreUAKr3GmeY#E^U<`n_)w1PXvlI$}fjEy@ z8cm7T10D!9qi6*avbn)$-t?X48F6M3te4TF1F@@MmnqY55ZY3J@5lz%WW5DQCio8_ zqN|FEE&IZ_NcxwQR2Ra9+E$?InRw>izHL%0USp5*86l2VcJ3r=c8y*+jN!?-9wYQ; zI@O4-^vX?e7l!fhfxK+Ia`p{w!~|WUyDRZtoYMV6Z3hj8+@?p6uU3wLz4pL6&Y6fj zl@As}g0o;#o#=B$HmdM4Skz1<3Kma_v10(Ukzjbq)G^n^KUjcUJ0FHUTGxf?tXYnVG(oX4PJQ0YP zi$FTR3-b!Oa*b#^#7PdOCO5lQ38A7duN<@7*>Cq<9sy{5ObBvoZJ1>kT&$82{^mCA zitL}`f31Jq@?9N)6(4R3wi^s!1gp=5uy{bX{D;-Fi#GuUn1QSmkPUe~0`fA#Nsuae z%|{9}rHPj**QkLRUyw#S0h;rdb41EC;vM&Hy<_ z^zd#D{wk4I|K>d`%^dj3M6)Ji(q+(;@-($EthbQLgdA;`R1{Ls zSq?b>_Cxs~N`?P$BK>#)LsLJ-0P$*i|0G$9>+ z+LKTqNN$v4H%OqLpjojbBw;%5UZ+i_R)dYE;Q>*X1h0SNv|B;46iSLFm>)dF*38nti!>n;Eq_3h*zGnCZ*fjYiSliTb!l{)jm6*P z<4MJxL7k)8UCc7C&Qw8_;p=BL-)`WbkGl@-381>2*I6%tA{Q?x$ALFv*CmVIr~eei1$OsDjU2)^+?S+M`Q#3UG~C}os9vT*70V`3Ed zBKt}Zxz(v3%chMOLYztt9^(lltrH(6SM?C#(&%P=a-+nR%rXKSFjy<7klVC01mDQD zDCBtR)gEybd`cMj-V3q@=dQ&yc}uq;b;Y~DwL160~P2}yUab6Xz(|Kw|ICcZp}EVf_40nfncl8}-N&6IqK0}}6eqJR(3 zHz~AmFj}%D^gulrR|&b~RXz>e;3fN(oYX3k@bT3+KyqMNpt1)8-C8wRKtd2;v#K$xCT zAc-SOlPU?|9`)|#Qig|7N)<6i`H61SlX3M~>?xqR{umjs4#gKP;AXff8`u2Ycjh5=N zH0d5O$^jKe?zTlGZ0oK-Kc%j=R?a`FE2+O$+dq70LuRp9yC0qeb%1W?;Jto;PZ(N~ zqP#QM1}>sG{%{!l$;Y*bINVcA7iLBD2KQ^K27cs$XNEm9K=+$y<7J9AqXaWc+wbzn zRh&SsHC-pPFPeQ3LCPn5KX~eZA*6*O?+U&g4`!%rpyfh2#43n|yKmsQg?onE zkgH}`G$JSq4(ykp1T3h}IRPsFe@%UPAk^I#_iRYEXR;KDo@s2^V(eK`V=HYUvQ7#i zTe4)GY0*R|Wf_D@F_g$-D=|HhmO;r@mL?t)#YnRAo-h60_x=BVzxUpA&OP@mpL1@- z2;m?q_wmK;oY8vp;Hu2o_Pw6L;>*-awF7U%x^GZrJ%+6q`X3QaKK$j!;U%3CQ$rwV z5Pt(+GTre(#-%)58IEZ}A>Z#j2ccP;BcR(W#up*gdn4_gB5V@1YLO8`Mu<^I$x#>F zo!$j}smvR#vs20u2oLv+C{cjJpoVzYQjFDw|I9}sf1`Kk)#`-)OFid{!bZY#Q3smY zLu|O~+B2twp)MsK=)qFB3-3IK<&IjUeq&nR&eh#%AHbDpUN8vM08hqY zRcTonX`Xa?S3w&Layk^!^^@hzn0wC_L~H}0#krS9SWT+8g{^^doVC#7=+c!rpoMH> zJWBw6ojVmRHG9re%^z;Pd|DVdyzSgrmd&3kt1kr!tSMtMkI8SK{s&~{Put>>= z?|NjgaZ|=6s2MUmV!fk#O`7JT&dY#LHpT%$luXlNPY(N9!`=Xpdq2D~OG|ykqJinT z33ANfI$3EWqpmm7+`NV@R!5bNNR(6Ws@$Ff!}F5mLotr{021e$5W>9Nvwn ze<{lXID5^vidP)Z4gi&jO!GK65jtFLN}63e&jbc&dT;fQfg@s2ai$#x6=%Y0lTI6X z$L=8X^VOpCS_z})f6#(8gS8Rf3S}SRPGTbD0l`3DHPdaH%^J+$)>`2mub^+QlnM0r z^gp?}Z-Zn@jwP7=O7f>fR`qXwTz0KRLl`v#IA9mx!THYa?Sx(eR@e}kpzVQmau;Ex zeA~vh5Vl&`ofH^4OF$dJ5|bW>oruFXM*Z{?q_Nzq=1Hn^A0+HvF50! zok}f#GvlHP7fidFh($vm7>tK&UgCUSyrcz#FYC`0NVuRff}P$eJiK!}+}<Q4YJ`1{l?!|< z$9o>;6$F=zp>3UKoGF19$ut!TGcLU>*=_-dIPe~FLNq9s*}d3VOlgXpmdFX(4N~w2 zdNn}(fIvTkD7ss%-ve3mZiB;R30)%AgQ-oD-Y3VbYMas$cI7kke8EW2W&KCSLF81ZqW z#w-O+sj1_0C!0gi~rzq_CxVEBv)^aB++#>v-q-@+QDHng*|_Pn#yf zfC4Lmr9tg68kLk4;qZasvnmopH^5%Hg5W&XE}eY5NH~@OI#axa6>6LWsQ*_la@;&gP`d4ND<~DFZPF)2=P3_us%7h#cDc;XC z`DFs{_9%S>c?PENiKK4<=(-bE%ptKjyq4*-O1(n2APoG~D`9)-9`#WMa@-CZC+hyv zel8|@DrQYp+d}N?6CAdP6P|V83jz7{)563lOF%w>HS;jP1k_n-T{a_uf&4)u3FDb@ zyAt*%u$I5fFUdUN^v=XFKlLcu7>-Oz{!8f>pZTQ&t~j2V&W&qay-KZ&4#Rm^&q-vd zN_gKA@fwa;-Q3l?5K!B)UxOC6X8u|kiR0^WoqWbZhR3gWZW4+5C^&js&U!0k3X^(V zcpOXstya2r;W9@E1Mlg2g57^r@%Y_@)J9H3#bT$&#$;LIM$|3b?QyYRZ9Ce!QgD9sb_&LvP6#u7Y13Roey#q(ad(fhrP* zm0i-N0a(nQ1evKzun!xLB%YQXd)#{0MKuVH-dy9n&uc$ZzWy&zmX)sRgJIg;&)PLQ zS~jOb5#fC9{K~vcb=fxX_mM)x<3+ae@b8}U{$J!6TzQh=TO=x?3E7h^Btvf= z=2&dYINDXX*FP~QT&!E4pr(7t{C_LohzB@Z604u}OhR&#toHNr@UXBDpoI{pqQ(bW z4NQ+$FGf&&IU9V>c5Fc)iH1jBT@XYzja4Vatb4#0)gy)dGRLU7AlWV_kOkkZXZaXH zs;HppzbYGROw0!?rxNw&O#2UPSh&%hT=Crj@~=GX5ZOtGi&%v-A}zsRZwYjcnVQX&)H-K z#O3rLG$}z^4G)j@Xv`gBd5X1$8aENPoe?jUEj>WaJ}GSXNAwcK@uY3;>r3^y2NSfW z{BXauU&I=Eo_}Ml1Cd29yQ70jqL$G{^0^>-bh@8n<=BcW=h5=V>_-W-gD5BS23~wn z6xq;U!tV9cFRK@-D;eT#PKQq`%mq>K#aeHAAR>aFyqG%_Zb8e_*#d`ep=H|n{ZmQM`U#i> zDg;}}Bi!JH)p`BYnk(CncTIe~`bBsuPZP(dqn%K>*fv6iiAi+1Buf3Hy~ktzcGLk|dk8u2P0>mh zRY7K#a==f9gX?peuh;*&V`MQuDg(eVKxHKQ{zCp5@f%}J=d`cKPuf#NIkvv-ooS4k zI7!g*uTBC(ozY*bm8@*+|0p_b!hkVlBj+wS;mmtonJj^i;*8Unj$@u`yjROF4&EnP z_5E4o(JS<;^wxp2ypW1w{@QCU*F7M0d^afzZ1vQ&Gv=s7XaRS#rPBOiEs>Od`rHL( zjNtaNQ%L89j_#lGfxS10Os7BVbHoRUly2kt`L@5*ql~2KhO?4QJ&r=bU)>ck31vLPNyZG=&t$MPjy`+q)KF*d=Z1|^tvd|VG=`t1MPHgPKRt!w(0m`WH(W>vEKI=>b}X5LYCdHxoXmVIFB<$u`e z*1~?!hNLweXtjsjP*3%vIz4=E@8`<%6?ZlSb^uwunLrR&P{lcG$ug_VJ!EH*+I7{> zrJ|?qNW}gqfjDNc^;VNyY-^T0c(DSjRwvP0jWD1?*_n;|p8n8j@nfWP#;`@+hGNLT z81wb7Zsp?|h4RlSDxj(E}L>#)noWy)=x-v`XM|2-|#s) z-HCP!;!N*@ohoO2!`f=3{L2cToH_HvAPnl_^1aqv))a}Ygc6*q2>T!NgMOa|9`%V+ zI~){|8x9P|Z==1(F|eiPH7zU^N1s(LeIyrA)GSIx_A@GfNC?mdTac~%-wkCD2A;C` z0gJ!iaj{}2U=czf)9l!aOG-~6hWt4V?Ti_0I^(uN=PS0H<9x;E(H=KO1v0D&ms-<^ z9Gk2)8EU7|!==NOxz*YccODj3E&p(GV@&Vg+EhtfMWqC7tiUc)2V~;C2r^fzE z-1txN;9lj9y`D)}D5a1a-yXn*cvtXRU@OIZxSZ7i0MVld`VkP=i@K#$hn=Kn%eM`V zVK;fj?d?4cJ05PqQdZ9wjSgVI?hP(gt_k&__UNN0j>q$FWUA@xF!fjlgdz<5Lz>(4 zS!`gM+!LQ^;I;1TZ+lNs(O0c^2lI(meruODF|p-~{MVumpo-fB>~}v1Lvqp$I4@V$ z_NuiWAEd(#+jqA4F{9?xzWU!|Q47lDS0JAkv=tG)Hp8_;YIQg40soUxebuEWAf+wx z@`tuSH-S&jD$0=AjQ%%U|0CK$^E++jieF=)9{WO$zGmM897%UKF7=!iG*JT!CV0)$ zBPIg!OL&pJ!KK`JU;VffO%iDG@n77}V7)^n$SIjxLjV!&21n$-W{Q8BHM3|?sb#s` z+X!MR%=g9)i4*oD+y(`ZQ^Jn1N(8QPgrom zgeLOr;RZ>uy#)#9jf)>}sb_YZKl^1MYWoJ6D(-*OIGVZ31;NujQ$b)}E-P`@YBWmY z9{NJ!qzyBfMTRvvI(JqCk3T-x4m!(=IZaYLE(CVG;*gSf}_F(6! zeS-RE!Bhjt0uY6g|2J`v{Gs&lFHh%9il1hgy8KhczdbAoZVM?HP)bZG5eB9GWflNH zw?blbD5}K$Ie@pGHRDF(z&23Gc>2W|;iVxk!A9E?#!8TA#McwvZBa{D?nnEMPXtn~ z7nEF&QRSKsF_qzRB>foJV0+ihc;4@_)be4}5K9Z> zwuc=i8$uy>N?hv&C##~DH{>^ISAn@L6mMsJ2weJo_(@{m6S?hG!llKMwltA}arwT$ zOj!EpRiLeH;y3wC6>@-jhfX4>Vp#l<{anF<%sk~JZn!P?VA_KMmSvhyt27625>olTGrh7Jwsv7g6zxdt)S+eFD~GfH-P)FZ`HAhz5tA z61t}Lz5eG5%o-9|6@Wbk-^=pkFfRUwiQTh$a)UK&x?OUWqDS^0;z#bA{Z%=DM-&z{ zOejV@J@J+u1-)D_vbdu=_Si2z2MGy5G;>iL0w`FWK?gL19(wk4K}MdhaXMp=o%dyX zRjhHthQ8MVi=E(x{rNx??!T9~l>E3z8vhfhmGoNJJihzA>KJw*!*q+@hK!SOC9@Xf zUTA#mH~f}gO8d)%A84ful@$4hE=^c%g!S^!+0TTgX*Y0v_v zbm!^^y}W0C<@h2W{5W;Kq`DtB^(&GRC;4Rf0;7f@sin@CI|>bP&Nge~gDydMKx&m_ z00K+u{{%9oogqr?3##BBRwLX8uuq?sgx=qvcXtQBVc(;wJKZUc@q{bQe3fu5(ZK$Z zllPqpAjqIVQb4O$knO86fy=eK`dzZnP){^Gu!(YvbhZPGU7|OFddeT|^*eu9fD#SN zlKNUPD_H0s1RpGmRH1)V9`pi8>$}s88fC69bS8rZGLQv=9Lh0afk#gq5rwZ#>586B zY>mxK28h^a$2-Lmya&z{tVf?5e#5F#eIEiqW5~&@iq41{AVfsrn@l3f9_8wNr6fZN zzoPR3a|;O5bItv&MMY$-EOrs)@JY&LW)BS1Lcnk~A>%tt>pA0QbSp2Oyzr-s7ol>=|eMNEybK>!nTkYjYq$@(S8@=zW?ria1Tb@fcT*DZXYZ%SQM z@F3Y46mOg&oO@vcJ57Q6^V9P-~ImfiLZcoOxg>fyDs!qow; zoNd0qvI9I*mGwJP59V{Lwf#k*Qh@)?s+PuSM+|2HADM^)X;a}3H0pfI7J@|=6|=L~ zMg64j6je^*nE$X<%4c%>0M!}+ialp=^IAqHY$KdL%%Gi}+ zdqS4)sLmeyrDohMrK;wIdvXz@&lq#ZNP=l+eol`I(0S6Fd9k(iTtrDxN=6k8?r2e8}{fIfGk zT*3ObxpmViHV1dL!rb17ck!*@tps{KI^E&Uh;R!VVA3b{lsC;bD&{~RBDaYpL&7-p z+4$ew04xomg7(se($D*nPsRhly_7Wnt_SGVI@UqW;PXnE06e}MzTA7DGq4u|w|^lw zTE`P9E`z7zoSCOTaLxt{u@zK61^FPTpu1-_aPuVR2llg$F=|qjq_pb-FpJpm+uHL3 zZurSCI3%{Ji1hD<>tnV`Zo4+vz40;8dx!PnG694IJNsS=6vE4UJO2b zPpa2_{uTO|vaY0kExtq>Bu&{P$wnXT%135pqZ5Dm{^~2ke`#J1+LMN`{#gQ`*GJHo zk-zL?%!P8gSUBHb?Gq4}R0o)>E1z>FL!D%%`to=5n_!*XnXXt7^k*$#LLZ`}?IsAK zXB)o&#N2B15?eZpk&CV4yINs+&}dz@f1D4wL7b(OR(L6uYT_|GLkd0?{LACCn*wP? zFlw^{n^3`>riZ^m6!~RjDP{C(p6aFzE^|54H`vaNVk!Cluc4!t35xI7UG)Q@%GGb6 ze7WMM4j}jdeHW)6p;@Ay%{Zc%(@}TV%x4hK*#sPFH$u)sm^0;ki31wCT^-Q8L}dCf z<_tJzI76LZvvL`Ej`&FtPI6jdsvJ-t*7N@0{+XA*G#E9yf!cw@Mv|nREL3$az)7-M zMGp1_!L>C!$MQaa*_%?7!cg`aK1g*#zjeqD7{|3}?XiczysF<(nXY9yQnZXq#o&Cg zQqI&2mJ*WC-Xh4ELOAvmQMTh?7(A^+9cX2X@BiCMipkEt#^5{rK=@-HA$lf(9{xtK zBY{2@C{DTc1Lb^;QQ$fJmaQ?M5$)_bex0#`ixD_|GWFBE`PxnP1PNb`Ce>q&>+e6+ zxm$hc@g*g=*I%(O0UCzG%L!(Yi`)O50i^w9M&9pG-sPJ}b4ypox3tt3n?Iz#VvAt9 zunOobr+b|;&%JJdU5f>Dz*<)Y@~fE*&b^a~KO|4JOtEZ8ya2<2{Oz;FIC==NsqVxo*xq_i25&LZeP@ z$?xKV!Ts+xg<1AHPgDV0X&`DZEboYC^tkhpCOVN z(&|RN(EN^6UWHpy3M7-AeK#=AX$D(R+&JQ5qOrjXkuVu{JP%qAhrDVHz>f2b$J4(_ z_)U9x8*!SUPL@}KS@BJ*=Qr~$Vc-pWQMmD9b*9IMpCM;GU4<8=W=%M`7Z$&>+0M9b zsqih04WEl*#H4AGc04aak~t&sDH?L%2BCuvqJ*6@Ic*GGh1y zz?~Yc-#cthd-Q^KS#v4?`h1jAVTG@GlZO%@R#k}&&Cghzit^dvEE3g>ECqpZx69!* z?ixzQGPEOdbKN70U1(XFQ({2~61Jh#Y_&G;zZ=AKjcSV) zr?0YWan&e5X7dNS%w7HYBysQtbz1SLj=yW2&cD8P7e=qq!~b1f8J}HByGy|Uj#B^7 zG^k*qNptG6SeV5MkDda-Upu+ojZve@b)wwPTvS)JtLyyE9n0XchK?!FU}fZYM27p= ziX54ENR>PhH+p$G2qwDNV0!LO>aPe?x# zua>Bms%8{awbhAMYZGL~{_QrpGH|S}7^Q5}d#zlekrdO#lQ#9kC$ZifsZF)A2|!%Y zzGdZg< zXEmH5CjAqd?e*tW4SB zwyamP=v(gQ-^cuzo3(09^dpzd6GcKvGuu~D&9x{djW1NmZBH|sy#VRTct4H(OP@`UoDa>O zFJ~{3#*g0q_aZd(R)Wq)HEm8Q`O7YpYs8SQ04VJ7eu-7SsqS4EsX29$(@n4Lm)@4n z@WmiE@_tR458w=zW=!L~8qWFcvawIPr6kj|6|xVMtDsEAqB?0>dTD^AEoot^pByI0 zOO_d(`yH=g^&{v)ljYJzy!}Avbjp(v7Ic>fBW)f@pN!T`(VU>Z=Vlq}GA;P+NH;Us zUtgi32MLw9m5W6&@dS1DQe{nSzfn;LfqxXNesfV#UH$Eb;X_efE~5&$m4+Kd9Q$JN~(U5mBy)`_Q$do}YoPK?LhBvP_ z<>C5TIB;4A6)YM)dKNyS>j}-?mm?;=`y1NCAT}T$vIIzi$#A;TuJL~Ih2UxM^rpaT zl2Tu`Q8#ksLPoVXdWiLdq)BZ8piZJ~7x-F4?-JuJ_^OQC%LI1=u0(Im?lx}UzOrdA zz+U9fBf-0fF%`~w66{J}{#w3Qjnr(dWac)48cw+CV*c@nKR($gl7Se#{WIwmG~g6^ z&5{xHF|0YyYmO65_{nn!F_CbT zP43NBTm6?Cz&NEEu7|!MBx{YprjlgrBfjy{xOT^lmI?mvQ03ed$kYu z>&IruLG)&iv=rV%bI(}v@m$Pr-WXpYZ;JF5yanPxc_I`a9n#~RMkt=S;7E>oDTo0a zGgbEsF;8nc!M(efSL$%Jx`Z}BEUXQVBMfAL!HWz& z!&~Z9)3z_OE9m?8AG>0`g&-%KL>GacL{@DS#CIC?mF0X!Hm?oEpQH4^tTU>I^!ckd zbEADNE5fQ<7jF=~cdYJT94~UnS#MG;67uW^3kLtoH84qwUJr-^9vG=55N=P7LO>Hj zNVbLfk}x34T1J)-{nLq)dSKc6V$)+uJRgS7aZgdIBrA(B;B+6{|G?E>Zv+4m^xy9g0I4ECk8LpS!l zHZRD~i~PXsq8Nl{5=qd!{-RT2aeC7~lK3N37nHr6RmW3&_jW=I1r7%S;<56{qgP*b zm&>~W5l(Ndx^=bi8jJR&`LoUoe2|*M_*$mUiI*NDJD)~AKrC{f!oP1&XB* z#=K#S`b@F1I8`fbg*TK=pRs5_Fmd^SEV-GF#^Xh>kI+fwaYU=_a)BzP@FIh+u=e-a z-xdIzTsgKUB5jbC2>5p3WPS}I+Ps!zLvg$;s^4@H6%In{+J)&cZ$RTG3-#OI4fgbG z8_)(3>yx-%o#}2i6NA_214xUv>*{3C{srqQ*n0|=={OR^QRK|wCv}5Q?Me!WUH^bD zdso?GJD;)cIxBLvAAsG-w$w1F)=p<>*3Bi{IgPR~MG^;T_$C6+Hh7Y>3 zhZ@1*47VfqQee7vBsRn3Aq@1eUQ(SJEsF$lk8QI%u|4pwQd4rayKu$V+0dh8NYwI3 z3$aV%d0$;%T~<1}wA6y^kBICqO>tsVv{I>wYk2yTq2r>0J=*fu%!l|n39L+f&{d~@ znT<`=9{`RLKZ0G}lq*u`nWb^^^a^Bqj&A_I*&srkKqi|v*wcilZjR)l>>Fb*!s7Vz z@eF=xkq2AwvRCMQ)&x^>uQWc==6l2!9|!H`BP@Q_lcDbAJSz}}Q^>hj6~kAVH*BW4 zpmIftc1`@OFALZC^OM-PI=%<8cyP;9<*yG%K*N7>t#G&0>|Hb4;DgZKyEB0vwzG0y zO8)lMpGe?iQ93wd@}%J3R(FG+^2Zr1I}0xMZ)n3bqv|jZ#PJh~Rlm~y;Y^T5%o>!b z8wcWdbwCxS6S!Y1;`pvCGV~en6Kw1=ms+x_A!^%@*;nc6SYMRI7ZN-FeRVr-%g4_e z7MTSL)PtyEi-rXkaYEv(!%Xf_JS&kVYhzt&x%6`#S1vxwpns?#7of@iP2I9ME*MxZ zY2N(oqnx%Y**cM~#0sX}eS71G)vd{z&|8T+bMZvq|#?gbwex@;oT!<*f{C=t3(^$OOlTd{5fBy|6C_7b6S^vepFp;I;vca!Z%Fx?m@=H12_5?Pd2i zm(jFxCI3)IBZBc}{E__xp^IbITjxd|^1pKg{Z( zR-xWHlBWVi0}0$0^uPp7ZNKau{A{_L!S6++3$4w;rs4q1#AC7ryvj6v!m=R@+thP! zP;@SF+knpzOP_DLoJp3ll04Rp=W2Pp<9`O6Li+4=r6&e0xKf!?kGW5n?oFF~w`g;m zzi7q#@x+SaKR}4$cX*gZe3niBI+f$8oliw)c^dr>jhK@&$wFH`OWf~1cS6!vuqSuh zz!0=dHFh-;sTSLGBJucwHCfu=EM~_WY(`g)XLtW*Jap70ntH0)#fY3EH3r+hbTY$! zb&P*{G8UaJiH9QW^hwXd6cRkA#s6HD)}eA&D*Uh6EnFu~JH*L|ZM&jZj&17Z565z) zdd-;o{m}=|F}&G>8hN~@oOj}8ugb-NCgCYrzA>-k8qE`)D;MWsP#Q|^6$icckKysY z7IT5KXUWp;yjGcGxPV#+w( z=T@fEFU`byBA00cceqF7uRBggd#P~UDfaPx#a(4@D9;*M_0$z?jEixxgH%Hqw0w}~ z5`0zO@oht_xM{yyM=`wu!>+EyXGVk=)L+5;`si5$Zs^YD4pA1mWbsD+@z@KlIEX8( zkKcJe*Gs%(XwYg>@V~b-UwB2U1XFURqlIo)|34Rp8jh+w~?*_`IF1AU2Nks8_x;jE3f)(8X+K(np-5 z9zNJnYseE7{7 z4WInnf!BCp_;B-umxm3H1d76oP}9^9S_N1Ce&*9+TDj7=juDBl?6<+{#jNWTCjQor z%DWE7NY6DZNaFCoSH#>C_Q)J_Xt2|C$k!@iu}Lfb($>$UOSV~)K31G8R@Q?_!PtU~ z#hn3P172d>&Ciw21ICOZMA`78T2J%BfTWB))(dSCGH@{E$AN50C!`X<$a%_??rBwXv!7Kl`F3ougTUHM+oF_H@l3k$A|=cJW|n!- zhpz#&0CGcgjAp)Ll6?HG(lorpGq_2oN;c4pw+HHkb9m z>9^m3pt1(l1ufC))PavtRX0uQFS)n1S{a8#Iy=U@y9cVqTm7pYw^;r7lGK<4$A**g zGvqkA_mqT{(R}~o^^N7eZVoaN{00Aq4gioynbtg5F25-jA`EU98M6$X^m0|l~()&0^pH;;28S?G;yWaJ>JD<>W zlS=%RI$x?X#B9&W1$vMTPsvzmFXTKH1o@wOE311aJm$B1C!>HVCK{bCbk1dbDjOi& zaF+vS+%xHRHMfqpu^eY#;E+mM7*U>kRx;vw`1EZf@#Cv^kjc106k<(|2Vbu3 zl>RxTV^}R_~Jg3*#WWAwAQ+3=66P!98=LR_oAUH!eO)GEK4RA^%YLKZ=dzwq7fGw1L0BmB;2UE;OZ^ z$QpDCRjiwGyQddA=9`ERTfLk?OC@)x{^GITq5|>0$>tm`H=e`HU{BD)yy>Hea_*oK|Fg~|} aM)k*SIJ>3AR0Woc}+7qILWL literal 0 HcmV?d00001 diff --git a/docs/forcing_configuration.rst b/docs/forcing_configuration.rst new file mode 100644 index 000000000..b4a34ff21 --- /dev/null +++ b/docs/forcing_configuration.rst @@ -0,0 +1,81 @@ +.. _chap_forcing_configuration: + +Forcing configuration (namelist.forcing) +**************************************** + +Sections of the namelist +======================== + +Section &forcing_exchange_coeff +""""""""""""""""""""""""""""""" + +- **Ce_atm_oce=1.75e-3** Exchange coeff. of latent heat over open water. +- **Ch_atm_oce=1.75e-3** Exchange coeff. of sensible heat over open water. +- **Cd_atm_oce=1.0e-3** Drag coefficient between atmosphere and water. +- **Ce_atm_ice=1.75e-3** Exchange coeff. of latent heat over ice. +- **Ch_atm_ice=1.75e-3** Exchange coeff. of sensible heat over ice. +- **Cd_atm_ice=1.2e-3** Drag coefficient between atmosphere and ice. + +Section &forcing_bulk +""""""""""""""""""""" + +- **AOMIP_drag_coeff=.false.** +- **ncar_bulk_formulae=.true.** + + +Section &land_ice +""""""""""""""""" + +**use_landice_water=.false.** +**landice_start_mon=5** +**landice_end_mon=10** + +Section &nam_sbc +"""""""""""""""" + +Forcing file names should be in the form of ``variable.year.nc```. In the namelist you provide a full path to the file and ``variable.`` name in the form of:: + + nm_xwind_file = '/path/to/forcing/CORE2/u_10.' + +- **nm_xwind_file=''** Name of the file with winds. +- **nm_ywind_file=''** Name of the file with winds. +- **nm_humi_file=''** Name of the file with humidity. +- **nm_qsr_file=''** Name of the file with solar heat. +- **nm_qlw_file=''** Name of the file with Long wave. +- **nm_tair_file=''** Name of the file with 2m air temperature. +- **nm_prec_file=''** Name of the file with total precipitation. +- **nm_snow_file=''** Name of the file with snow precipitation. +- **nm_mslp_file=''** Name of the file with air pressure at sea level. +- **nm_xwind_var=''** Name of the variable in file with wind. +- **nm_ywind_var=''** Name of the variable in file with wind. +- **nm_humi_var=''** Name of the variable in file with humidity. +- **nm_qsr_var=''** Name of the variable in file with solar heat. +- **nm_qlw_var=''** Name of the variable in file with Long wave. +- **nm_tair_var=''** Name of the variable in file with 2m air temperature. +- **nm_prec_var=''** Name of the variable in file with total precipitation. +- **nm_snow_var=''** Name of the variable in file with total precipitation. +- **nm_mslp_var=''** Name of the variable in file with air pressure at sea level. +- **nm_nc_iyear=1948** First year of the forcing. +- **nm_nc_imm=1** Initial month of time axis in netCDF. +- **nm_nc_idd=1** Initial day of time axis in netCDF. +- **nm_nc_freq=1** Data points per day (i.e. 86400 if the time axis is in seconds) +- **nm_nc_tmid=1** It's 1 if the time stamps are given at the mid points of the netcdf file, 0 otherwise (i.e. 1 in CORE1, CORE2; 0 in JRA55). + +The following options control if the forcing files for particular variables are used or not. + +- **l_xwind=.true.** +- **l_ywind=.true.** +- **l_humi=.true.** +- **l_qsr=.true.** +- **l_qlw=.true.** +- **l_tair=.true.** +- **l_prec=.true.** +- **l_mslp=.false.** +- **l_cloud=.false.** +- **l_snow=.true.** + + +- **nm_runoff_file=''** Name of the runoff file. +- **runoff_data_source ='CORE2'** Other options are ``Dai09``, ``CORE2`` +- **nm_sss_data_file=''** Name of the sea surface salinity restoring data file. +- **sss_data_source='CORE2'** \ No newline at end of file diff --git a/docs/general_configuration/general_configuration.rst b/docs/general_configuration/general_configuration.rst new file mode 100644 index 000000000..7232885d1 --- /dev/null +++ b/docs/general_configuration/general_configuration.rst @@ -0,0 +1,89 @@ +.. _chap_general_configuration: + +General configuration (namelist.config) +*************************************** + +General configuation is defined in the ``namelist.conf``. Here you define time stepping and restart frequency, details of the ALE and mesh geometry. + +Sections of the namelist +======================== + +Section &modelname +"""""""""""""""""" + +- **runid='fesom'** define name of the run. It will be used as part of the file name in model output and restarts. Don't change it if you don't have a good reason, since many post processing scripts assume it to be ``fesom``. + +Section ×tep +""""""""""""""""" + +- **step_per_day=32** define how many steps per day the model will have. The variable ``step_per_day`` must be an integer multiple of 86400 ``(mod(86400,step_per_day)==0)``. Valid values are, for example: 32(45min), 36(40min), 48(30min), 60(24min), 72(20min), 144(10min), 288(5min), 1440(1min). +- **run_length= 62** length of the run in ``run_length_unit``. +- **run_length_unit='y'** units of the ``run_length``. Valid values are year (``y``), month (``m``), day (``d``), and model time step (``s``). + +Section &clockinit +"""""""""""""""""" + +- **timenew=0.0**, **daynew=1**, **yearnew=1948** gives the seconds, day and year of the initialisation time point, respectively. If the initialisation time is identical with the first line of the clock file runid.clock the model performs a cold start. If the initialisation time and the first line of the clock file are not identical the model assumes that a restart file must exist and tries to do a warm start. + + +Section &paths +"""""""""""""" + +- **Meshpath=''**, path to your mesh directory +- **ClimateDataPath=''**, path to the location of your 3D initialisation data for temperatur and salinity. +- **Resultpath=''**, directory where your results should be stored + + +Section &restart_log +"""""""""""""""""""" + +- **restart_length=1**, how often should restart file be written in units of ``restart_length_unit`` +- **restart_length_unit='y'** units of the ``restart_length``. Valid values are year (``y``), month (``m``), day (``d``), and model time step (``s``). +- **logfile_outfreq=960** the frequency (in timesteps), the model state information should be written into the job monitor .log/.out file. + + +Section &ale_def +"""""""""""""""" + +- **which_ALE='linfs'**, which Arbitrary Lagrangian Eulerian (ALE) approach should be used? Options are 1) ``linfs`` - vertical grid is fixed in time, 2) ``zlevel`` - only the surface layer is allowed to move with the change in ssh all other levels are fixed in time 3) ``zstar`` - all layers, except the bottom layer are allowed to move, the change in ssh is equally distributed over all layers. It is recommended to use either ``linfs`` or ``zstar``. +- **use_partial_cell=.false.**, switch if partial bottom cells should be used or not. Partial cell means that the bottom layer thickness can be different from the full depth levels to be closer to the real bottom topography +- **min_hnode=0.5**, for ``zlevel``: layer thickness should not become smaller than min_hnode [in fraction from 0 to 1] of original layer thickness. If it happens switch from ``zlevel`` to local ``zstar``. +- **lzstar_lev=4**, for ``zlevel`` in case min_hnode criteria is reached over how many level should ssh change be distributed for local zstar +- **max_ice_loading=5.0**, for ``use_floatice=.True.`` in case of floating sea ice how much ice loading is allowed [unit m] the excess is discarded + +Section &geometry +""""""""""""""""" + +- **cartesian =.false.**, use flat cartesian coordinates (idealized geometry) +- **fplane =.false.**, use fplane approximation, coriolis force is lat independent coriolis=2*omega*0.71 +- **rotated_grid =.true.**, should the model perform on rotated grid. +- **force_rotation=.false.**, if input mesh is unrotated it must be rotated in FESOM2.0 than ``.true.``, if input mesh is already rotated ``.false.`` + +- **alphaEuler =50.0**, rotated Euler angles, alpha [degree] +- **betaEuler =15.0**, rotated Euler angles, beta [degree] +- **gammaEuler =-90.0**, rotated Euler angles, gamma [degree] + + +Section &calendar +""""""""""""""""" + +- **include_fleapyear=.false.**, should be ``.true.`` when the forcing contains fleapyears (i.e. NCEP...) + + +Section &run_config +""""""""""""""""""" + +- **use_ice =.true.**, simulate ocean + sea ice +- **use_floatice=.false.**, allow floating sea ice only possible with ``zlevel`` or ``zstar`` +- **use_sw_pene =.true.**, use parameterisation for short wave penetration. Incoming short wave radiation is distributed over several layers + + +Section &machine +"""""""""""""""" + +- **n_levels = 2**, number of hierarchy level for mesh partitioning +- **n_part = 12, 36**, number of partitions on each hierarchy level, the last number should optimal corresponds with the number of cores per computational node + + + + diff --git a/docs/geometry.rst b/docs/geometry.rst new file mode 100644 index 000000000..d3d823b12 --- /dev/null +++ b/docs/geometry.rst @@ -0,0 +1,72 @@ +.. _geometry: + +Geometry +******** + +The placement of variables +========================== + +FESOM2 uses a cell-vertex placement of variables in the horizontal directions. The 3D mesh structure is defined by the surface triangular mesh and a system of (moving) level surfaces which split a triangular column in a set of smaller triangular prisms bounded by levels. In a horizontal plane, the horizontal velocities are located at cell (triangle) centroids, and scalar variables are at mesh (triangle) vertices. The vector control volumes are the prisms based on mesh surface cells, and the prisms based on median-dual control volumes are used for scalars (temperature, salinity, pressure and elevation). The latter are obtained by connecting cell centroids with edge midpoints, as illustrated in :numref:`labelgeometry`. The same cell-vertex placement of variables is also used in FVCOM :cite:`FVCOM`, however FESOM2 differs in almost every numerical aspect, including the implementation of time stepping, scalar and momentum advection and dissipation (see below). + +.. _labelgeometry: +.. figure:: img/fig_geometry.png + + Schematic of cell-vertex discretization (left) and the edge-based structure (right). The horizontal velocities are located at cell (triangle) centers (red circles) and scalar quantities (the elevation, pressure, temperature and salinity) are at vertices (blue circles). The vertical velocity and the curl of horizontal velocity (the relative vorticity) are computed at the scalar locations too. Scalar control volumes (here the volume around vertex :math:`v_1` is shown) are obtained by connecting the cell centers with midpoints of edges. Each cell is characterized by the sets of its vertices :math:`V(c)` which is :math:`(v_1,v_2,v_3)` for :math:`c=c_1` and the set of its nearest neighbors :math:`N(c)`. For :math:`c=c_1`, :math:`N(c)` includes :math:`c_2`, :math:`c_6` and a triangle (not shown) across the edge formed by :math:`v_2` and :math:`v_3`. One can also introduce :math:`C(v)` which is :math:`(c_1,c_2,c_3,c_4,c_5,c_6)` for :math:`v=v_1`, and other possible sets. Edge :math:`e` (right panel) is characterized by the set of its vertices :math:`V(e)=(v_1,v_2)` and the ordered set of cells :math:`C(e)=(c_1,c_2)` with :math:`c_1` on the left. The edge vector :math:`{\bf l}_e` connects vertex :math:`v_1` to vertex :math:`v_2`. The edge cross-vectors :math:`{\bf d}_{ec_1}` and :math:`{\bf d}_{ec_2}` connect the edge midpoint to the respective cell centers. + + +In the vertical direction, the horizontal velocities and scalars are +located at mid-levels. The velocities of inter-layer exchange (vertical velocities for flat layer surfaces) are located at full layers and at scalar points. :numref:`vertical` illustrates this arrangement. + +The layer thicknesses are defined at scalar locations (to be consistent with the elevation). There are also auxiliary layer thicknesses at the horizontal velocity locations. They are interpolated from the vertex layer thicknesses. + +.. _vertical: +.. figure:: img/fig_vertical.png + + Schematic of vertical discretization. The thick line represents the bottom, the thin lines represent the layer boundaries and vertical faces of prisms. The location of variables is shown for the left column only. The blue circles correspond to scalar quantities (temperature, salinity, pressure), the red circles to the horizontal velocities and the yellow ones to the vertical exchange velocities. The bottom can be represented with full cells (three left columns) or partial cells (the next two). The mesh levels can also be terrain following, and the number of layers may vary (the right part of the schematic). The layer thickness in the ALE procedure may vary in prisms above the blue line. The height of prisms in contact with the bottom is fixed. + + +The cell-vertex discretization of FESOM2 can be viewed as an analog of an Arakawa B-grid (see also below) while that of FESOM1.4 is an analog of A-grid. The cell-vertex discretization is free of pressure modes, which would be excited on the A-grid unless stabilized. However, the cell-vertex discretization allows spurious inertial modes because of excessively many degrees of freedom used to represent the horizontal velocities. They need to be filtered by the horizontal viscosity. Triangular A and B grids work on arbitrary triangular meshes in contrast to C-grids which require orthogonal meshes. + +Notation +======== + +For convenience of model description we introduce the following notation. +Quantities defined at cell centroids will be denoted with the lower index :math:`c`, and the quantities at vertices will be denoted with the lower index :math:`v`. The vertical index :math:`k` will appear as the first index, but it will be suppressed if this does not lead to ambiguities. The agreement is that the layer index increases downwards. The indices may appear in pairs or in triples. Thus the pair :math:`kc` means the vertical layer (or level for some quantities) :math:`k` and cell :math:`c`, and the triple :math:`kcv` means that the quantity relates to layer (level) :math:`k`, cell :math:`c` and vertex :math:`v` of this cell. We use the notation :math:`C(v)` for the set of cells that contain vertex :math:`v`, :math:`V(c)` for the set of vertices of cell :math:`c`, :math:`E(v)` for the set of edges emanating from vertex :math:`v` and so on. Each edge :math:`e` is characterized by its vertices :math:`V(e)`, the neighboring cells :math:`C(e)`, the length vector :math:`{\bf l}_e` directed from the first vertex in :math:`V(e)` to the second one and two cross-edge vectors :math:`{\bf d}_{ec}` directed from the edge center to the cell center of the left and right cells respectively (see :numref:`labelgeometry`). The cells in the set :math:`C(e)` are ordered so that the first one is on the left of the vector :math:`{\bf l}_e`. The boundary edges have only one (left) cell in the set :math:`C(e)`. The total number of vertices, cells and edges will be denoted as :math:`V, C, E` respectively. + + +Earth sphericity +================ + +We use spherical coordinate system with the north pole displaced to Greenland (commonly 75°N, 50°W). A local Carthesian reference frame is used on each cell with cellwise-constant metric coefficients (cosine of latitude). Gradients of scalar quantities and cell areas are computed with respect to local coordinates. The vectors :math:`{\bf d}_{ec}` are stored in local physical measure of respective cells :math:`c` for they always enter in combination with velocity (defined on cells) to give normal transports. Vectors :math:`{\bf l}_e` are stored in radian measure. Whenever their physical length is required, it is computed based on the mean of cosines on :math:`C(e)`. We will skip other details of spherical geometry (metric terms in momentum advection etc.) and ignore the difference in the representation of :math:`{\bf l}_e` (radian measure) and :math:`{\bf d}_{ec}` (physical measure) for brevity below. The :math:`x` and :math:`y` directions should be understood as local zonal and meridional directions. + +In contrast to regular-mesh models there is no cyclic boundary in FESOM meshes which commonly cover the entire ocean. + + +Bottom representation +===================== + +The bottom topography is commonly specified at scalar points because the elevation is defined there. However, for discretizations operating with full velocity vectors, this would imply that velocity points are also at topographic boundaries. In this case the only safe option is to use the no-slip boundary conditions, similar to the traditional B-grids. To avoid this constraint, we use the cellwise representation of bottom topography (same as in FESOM1.4). In this case velocity points never touch bottom and both no-slip and free slip boundary conditions are possible. Boundary conditions are implemented through ghost cells which are obtained from the boundary elements by reflection with respect to the boundary face (edge in 2D). + +The drawback of the elementwise bottom representation is that the total thickness is undefined at scalar points if the bottom is stepwise (geopotential vertical coordinate). The motion of level surfaces of the ALE vertical coordinate at each scalar location is then limited to the layers that do not contact the bottom topography (above the blue line in :numref:`vertical`). This is related to the implementation of partial cells which is much simpler if the thickness of the bottom layer stays fixed. +The layer thickness :math:`h_{kv}` is dynamically updated at scalar points (vertices) in the layers that are affected by the ALE algorithm and interpolated to the cells + +.. math:: + h_{kc}=(1/3)\sum_{v\in V(c)}h_{kv}. + + +The cell thicknesses :math:`h_{kc}` enter the discretized equations as the products with horizontal velocities. + +Because of cell-wise bottom representation, triangular prisms pointing into land (two lateral faces touch the land) may occur at certain levels on *z*-coordinate meshes even though such prisms were absent along the coast. Such prisms lead to instabilities in practice and have to be excluded. The opposite situation with land prisms pointing into the ocean is much less dangerous, yet it is better to avoid it too. We adjust the number of layers under each surface triangle at the stage of mesh design to exclude such potentially dangerous situations. This issue is absent in FESOM1.4 because of the difference in the placement of horizontal velocities and the necessity to use no-slip boundary conditions. Since the number of cells is nearly twice as large as the number of vertices, the cell-wise bottom representation may contain more detail than can be resolved by the field of vertical velocity. This may trigger extra noise in layers adjacent to the bottom. + +Partial cells +============= + +Partial cells on *z*-coordinate meshes are naturally taken into account in the ALE formulation (see below) because it always deals with variable layer thicknesses (heights of prisms). If :math:`K_{c}` is the number of layers under cell :math:`c`, we define + +.. math:: + K_{v}^+=\max_{c\in C(v)}K_{c},\quad K_{v}^-=\min_{c\in C(v)}K_{c}. + + +If the layer thickness are varied in the ALE procedure, this is limited to :math:`K_{v}^--1` layers. With this agreement, the thickness of the lowest layer on cells is kept as initially prescribed. In this case the implementation of partial cells reduces to taking the thicknesses of the lowest layers on cells as dictated by the bottom topography unless they are too thick (the real depth is deeper than the deepest standard level by more than half thickness of the last standard layer), in which case we bound them. The heights of scalar control prisms in the layers below :math:`K_{v}^-` are formally undefined, but their volumes are strictly defined, and thicknesses can be considered as the volume-mean ones if needed. Scalar and vector quantities defined at mid-layers are kept at their standard locations. This avoids creating spurious pressure gradients. The partial cells then work through the modified transports crossing the faces of control volumes. Since the horizontal velocities are located at cells, the transports entering scalar control volumes are uniquely defined. For vector control volumes the areas of vertical faces may be different on two prisms meeting through the face. Taking the minimum area to compute fluxes is the safest option in this case. + + diff --git a/docs/getting_started/getting_started.rst b/docs/getting_started/getting_started.rst new file mode 100644 index 000000000..c544a1b7c --- /dev/null +++ b/docs/getting_started/getting_started.rst @@ -0,0 +1,423 @@ +.. _chap_getting_started: + +Getting Started with FESOM2 +*************************** + +This chapter describes several ways of getting started with FESOM2. First we show a minimum set of comands that will lead to a working setup on systems where FESOM2 is used activelly. We also have instructions for Docker/Singularity and Ubuntu. + +TL;DR version for supported HPC systems +======================================= + +Supported systems are: generic ``ubuntu``, ``ollie`` at AWI, ``mistral`` at DKRZ, ``JURECA`` at JSC, ``HLRN``, ``Hazel Hen``, ``Marinostrum 4`` at BSC. During configuration the system will be recognised and apropriate environment variables and compiler options should be used. +:: + + git clone https://github.com/FESOM/fesom2.git + cd fesom2 + bash -l configure.sh + +Create file fesom.clock in the output directory with the following content (if you plan to run with COREII foring): + +:: + + 0 1 1948 + 0 1 1948 + +after that one has to adjust the run script for the target sustem and run it: +:: + + cd work + sbatch job_ollie + +Detailed steps of compiling and runing the code +=============================================== + +The following section assumes you are located on one of the supported HPC systems. To install FESOM2 on your local machine we recoment to use `Docker based installation`_ and read about `Necessary Ubuntu packages`_ if you decide not to use Docker. + +First thing is to checkout FESOM2 code from the repository. The code is developed in open repository on GitHub_. + +.. _GitHub: https://github.com/FESOM/fesom2/ + +Build model executable with Cmake +--------------------------------- + +Clone the GitHub repository with a git command: + +:: + + git clone https://github.com/FESOM/fesom2.git + + +The repository contains model code and two additional libraries: `Metis` (domain partitioner) and `Parms` (solver), necessary to run FESOM2. To build FESOM2 executable one have to compile Parms library and the code of the model (`src` folder). In order to build executable that is used for model domain partitioning (distribution of the model mesh between CPUs) one have to compile `Metis` library and also some code located in the src directory (see :ref:`partitioning`). Building of the model executable and the partitioner is usually done automatically with the use of CMake. If you going to build the code not on one of the supported platforms (ollie, DKRZ, HLRN, and HAZELHEN, general Ubuntu), you might need to do some (usually small) modifications described in `Adding new platform for compilation`_ section. + +Change to the `fesom2` folder and execute: + +:: + + bash -l ./configure.sh + +In the best case scenario, your platform will be recognized and the Parms library and model executable will be built and copied to the bin directory. If something went wrong have a look at Troubleshooting_ section. + +If you would like to select platform manually (which is nessesary in the case of Ubuntu, for eample), type: + +:: + + bash -l ./configure.sh ubuntu + + +Data and mesh files +------------------- + +The FESOM2 repository contains only very small example meshes and data (in the ``test`` directory, see the note below). However, if you want to run realistic simulations, you ether have to have them on your system, or download an archive with sample data. THere is a chance that your system already have some of the necesseary files, you can check it in the ``setups/paths.yml`` file. If not, the easiest way to start is to download example set from `DKRZ cloud`_ (12 Gb) by executing: + +:: + + curl https://swift.dkrz.de/v1/dkrz_035d8f6ff058403bb42f8302e6badfbc/FESOM2.0_tutorial/FESOM2_one_year_input.tar > FESOM2_one_year_input.tar + +and untar: + +:: + + tar -xvf FESOM2_one_year_input.tar + +You will have a folder named ``FESOM2_one_year_input`` that contains all the data you need to do initial run of the model. The `mesh` directory contains two meshes: ``pi`` and ``core2``. The ``pi`` mesh is very small global FESOM2 mesh, that can run relativelly fast even on a laptop. The ``CORE`` mesh is our 1 degree equivalent mesh and is used in many tuning and testing studies. Mesh folders already include several prepared partitionings (``dist_`` folders), so you don't have to worry about partitioning during your first steps with FESOM. + +The ``input`` folder contains files with initial conditions (``phc3.0``) and atmospheric forcing (``JRA55``) for one year (1958). + +.. note:: You can find more standard FESOM2 meshes in https://gitlab.awi.de/fesom . Download instructions are available in each mesh repository. + + +.. _DKRZ cloud: https://swiftbrowser.dkrz.de/download/FESOM2.0_tutorial/FESOM2_one_year_input.tar + +.. note:: The FESOM2 distribution contains minimal set of data to run the model in the ``test`` directory, namelly ``pi`` and ``soufflet`` (channel) meshes, WOA13 initial conditions and CORE2 forcing data for one day. Those are mainly used for testing, and require a bit more involved modification of namelists. For more details see instructions on `Docker based installation`_. + + +Preparing the run +------------------ + +You have to do several basic things in order to prepare the run. First, create a directory where results will be stored. Usually, it is created in the model root directory: + +:: + + mkdir results + +you might make a link to some other directory located on the part of the system where you have a lot of storage. In the results directory, you have to create ``fesom.clock`` file (NOTE, if you change ``runid`` in ``namelist.config`` to something like ``runid=mygreatrun``, the file will be named ``mygreatrun.clock``). Inside the file you have to put two identical lines: + +:: + + 0 1 1958 + 0 1 1958 + +This is initial date of the model run, or the time of the `cold start` of your model. More detailed explanation of the clock file will be given in the `The clock file`_ section. + +The next step is to make some changes in the model configuration. All runtime options can be set in the namelists that are located in the config directory: + +:: + + cd ../config/ + +There are several configuration files, but we are only interested in the ``namelist.config`` for now. The options that you might want to change for your first FESOM2 run are: + +- ``run_length`` length of the model run in run_length_unit (see below). +- ``run_length_unit`` units of the run_length. Can be ``y`` (year), ``m`` (month), ``d`` (days), ``s`` (model steps). +- ``MeshPath`` - path to the mesh you would like to use (e.g. ``/youdir/FESOM2_one_year_input/mesh/pi/``, slash at the end is important!) +- ``ClimateDataPath`` - path to the folder with the file with model temperature and salinity initial conditions (e.g. ``/youdir/FESOM2_one_year_input/input/phc3.0/``). The name of the file with initial conditions is defined in `namelist.oce`, but during first runs you probably don't want to change it. + +More detailed explination of options in the ``namelist.config`` is in the section :ref:`chap_general_configuration`. + +Running the model +----------------- + +Change to the ``work`` directory. You should find several batch scripts that are used to submit model jobs to different HPC machines. The scripts also link ``fesom.x`` executable to the ``work`` directory and copy namelists with configurations from config folder. + +.. note:: + Model executable, namelists and job script have to be located in the same directory (usually ``work``). + +If you are working on AWI's ``ollie`` supercomputer, you have to use ``job_ollie``, in other case use the job script for your specific platform, or try to modify one of the existing ones. + +.. note:: + One thing you might need to adjust in the job files is the number of cores, you would like to run the model on. For example, for SLURM it will be adjusting ``#SBATCH --ntasks=288`` value, and for simple ``mpirun`` command, that we have for ``job_ubuntu`` it will be argument for the ``-n`` option. It is necessary, that your mesh has the corresponding partitioning (``dist_xx`` folder, where ``xx`` is the number of cores). + +On ``ollie`` the submission of your job is done by executing the following command: + +:: + + sbatch job_ollie + +The job is then submitted. In order to check the status of your job on ollie you can execute: + +:: + + squeue -u yourusername + +Results of the model run should appear in the ``results`` directory that you have specified in the ``namelist.config``. After the run is finished the ``fesom.clock`` file (or if you change your runid, ``runid.clock``) will be updated with information about the time of your run's end, that allows running the next time portion of the model experiment by just resubmitting the job with ``sbatch job_ollie``. + +Other things you need to know earlier on +======================================== + +The clock file +-------------- + +The clock file is located in your output directory (specified in ``ResultPath`` option of ``namelist.config``) and controls the time. At the start of a new experiment that we want to initialize from climatology (a so-called cold start), the ``fesom.clock`` file would usually look like this: + +:: + + 0 1 1958 + 0 1 1958 + +In this example, ``1958`` is the first available year of the atmospheric ``JRA55`` forcing. The two identical lines tell the model that this is the start of the experiment and that there is no restart file to be read. Also make sure that the ``yearnew`` option of the ``namelist.config`` is set to the year you would like the cold start to begin (1958 in this case). + +Let's assume that we run the model with a timestep of 30 minutes (= 1800 seconds) for a full year (1948). After the run is successfully finished, the clock file will then automatically be updated and look like this: + +:: + + 84600.0 365 1958 + 0.0 1 1958 + +where the first row is the second of the day of the last time step of the model, and the second row gives the time when the simulation is to be continued. The first row indicates that the model ran for 365 days (in 1958) and 84600 seconds, which is ``1 day - 1`` FESOM timestep in seconds. In the next run, FESOM2 will look for restart files for the year 1958 and continue the simulation at the 1st of January in 1959. + + +Tricking FESOM2 into accepting existing restart files +----------------------------------------------------- +The simple time management of FESOM2 allows to easily trick FESOM2 to accept existing restart files. Let's assume that you have performed a full ``JRA55`` cycle until the year 2019 and you want to perform a second cycle, restarting from the last year of the first cycle. This can be done by (copying and) renaming the last year into: + +:: + + mv fesom.2019.ice.nc fesom.1957.ice.nc + mv fesom.2019.oce.nc fesom.1957.oce.nc + +by changing the clock file into: + +:: + + 84600.0 365 1957 + 0.0 1 1958 + +In case the second cycle starts again at the very first year (e.g. 1958 in ``JRA55``) of the forcing, namelist.config needs to be modified, otherwise the model will always perform a cold start in 1958 instead of restarting from the 1957 restart files: + +:: + + &clockinit + timenew=0.0 + daynew=1 + yearnew=1957 + + + +.. _partitioning: + +Build partitioner executable +---------------------------- + +First meshes you will use probably will come with several predefined partitionings (``dist_XXXX`` folders). However at some point you might need to create partitioning yourself. To do so you have to first compile the partitioner. First you change to the ``mesh_part`` directory: + +:: + + cd mesh_part + +if you work on the one of the supported systems, you shoule be able to execute: + +:: + + bash -l ./configure.sh + +or, in case of the Ubuntu, or other customly defined system: + +:: + + bash -l ./configure.sh ubuntu + +The ``cmake`` should build the partitioner for you. If your system is not supported yet, have a look on how to add custom system in `Adding new platform for compilation`_. The executable ``fesom_ini.x`` should now be available in ``bin`` directory. Now you can proceed with `Running mesh partitioner`_. + + +Running mesh partitioner +------------------------ + +You have to do this step only if your mesh does not have partitioning for the desired number of cores yet. You can understand if the partitioning exists by the presence of the ``dist_XXXX`` folder(s) in your mesh folder, where XXX is the number of CPUs. If the folder contains files with partitioning, you can just skip this step. + +Partitioning is going to split your mesh into pieces that correspond to the number of cores you going to request. Now FESOM2 scales until 300 vertices per core, further increase in the amount of cores will probably have relatively small effect. + +In order to tell the partitioner how many cores you need the partitioning for, one has to edit ``&machine`` section in the ``namelist.config`` file (see also :ref:`chap_general_configuration`). There are two options: ``n_levels`` and ``n_part``. FESOM mesh can be partitioned with use of several hierarchy levels and ``n_levels`` define the number of levels while ``n_part`` the number of partitions on each hierarchy level. The simplest case is to use one level and ``n_part`` just equal to the number of cores and we recoment to use it at the beggining: + +:: + + n_levels=1 + n_part= 288 + +This will prepear your mesh to run on 288 computational cores. + +In order to run the partitioner change to the ``work`` directory. You should find several batch scripts that are used to submit partitioner jobs to HPC machines (have ``_ini_`` in their names). The scripts also links ``fesom_ini.x`` executable to the ``work`` directory and copy namelists with configurations from ``config`` folder (for partitioner we actually need only ``namelist.config``, but scripts copy everything). + +.. note:: + For the partitioner to run, the ``fesom_ini.x`` executable, configuration namelists (in particular ``namelist.config``) and job script have to be located in the same directory (usually ``work``). + +If you are working on AWI's ``ollie`` supercomputer, you have to use ``job_ini_ollie``, in other case use the job script for your specific HPC platform, or try to modify one of the existing ones. For relativelly small meshes (up to 1M nodes) and small partitions it is usually fine just to run the partitioner on a login node (it is serial anyway), like this: + +:: + + ./fesom_ini.x + +.. note:: + Make sure that you have the same enviroment that was used during compilation of ``fesom_ini.x``. Usually the easiest way to do this is to first (example for ``ollie`` platform):: + + source ../env/ollie/shell + + + This file (``shell``) is used to setup the environment during the compilation of both ``fesom_ini.x`` and ``fesom.x``. + +If you trying to partition large mesh, then on ``ollie`` for example the submission of your partitioning job is done by executing the following command: + +:: + + sbatch job_ini_ollie + + +Model spinup / Cold start at higher resolutions +----------------------------------------------- + +Cold start of the model at high mesh resolutions with standard values for timestep and viscosity will lead to instabilities that cause the model to crash. If no restart files are available and a spinup has to be performed, the following changes should be made for the first month long simulation and then adjusted gradually over the next 6-8 months: + +- First thing to try, that usually helps, is to set in the ``namelist.oce``:: + + w_split=.true. + +- Try to reduce the timestep in ``namelist.config``, for example to: + + :: + + step_per_day=720 + + or even lower (e.g. value 1440 will lead to 1 minute timestep). + +.. note:: + Make sure that for the high resolution runs (with mesh resolution over considerable portions of the domain finer than 25-10 km) you don't use the combination of default "Easy Backscatter" vescosity (``visc_option=5``) and ``easy_bs_return= 1.5``. This is true not only for the spinup, but for the whole duration of the run. The "Easy Backscatter" option works very good on low resolution meshes, but for high resolution meshes (eddy resolving) it makes more harm than good. If you would like to use ``visc_option=5`` for high resolution runs, put ``easy_bs_return= 1.0``. + + +- In ``namelist.oce`` make sure that ``visc_option`` is set to 7 or 5 (see also the note above about option 5) and increase ``gamma1`` to something like: + + :: + + gamma1=0.8 + + +or even higher. After running for about a month try to reduce it. If you change the values of run lengh and restart output frequency (which you probably want to do during the spinup, to run for short periods), don't forget to change them back in the ``namelist.config``: + +:: + + run_length= 1 + run_length_unit='m' + ... + restart_length=1 + restart_length_unit='m' + +Increase the timestep gradually. Very highly resolved meshes may require an inital timestep of one-two minutes or even less. + +Adding new platform for compilation +----------------------------------- + +In order to add a new platform for compilation, you simply have to specify the computational environment. In a simplest case this requires: + +- To edit the ``env.sh`` file. +- To add a folder with the name of the platform to the ``env`` folder and put the ``shell`` file with enrionment setup. + +In the ``env.sh`` file you have to add one more ``elif`` statement in to the ``if`` control stucture, where the platform (let's call it ``mynewhost``) is selected:: + + elif [[ $LOGINHOST = mynewhost ]]; then + STRATEGY="mynewhost" + +As you can see in the ``env.sh`` file some host systems are authomatically identified by using regular expressions, but the simpliest way is just to explicitly provide the name of the host system. + +The next step is to create additional folder in the ``env`` folder:: + + mkdir ./env/mynewhost + +and add a file name with the name ``shell`` to it. This file will be sourced before the compilation, so you can setup the environment (bash syntax) in it. Please have a look at the ``shell`` file in other folders for examples. Now you should be able to do:: + + bash -l ./configure.sh mynewhost + +to do the compilation. + +If you are lucky this will be everything you need. However in more complicated cases one had to adjust CMake files (``CMakeLists.txt`` located in folders), so the knowlege of CMake is required. + +Change compiler options +----------------------- + +Compiler options for FESOM2 code can be changed in the ``./src/CMakeLists.txt`` file. Currently the defenition of compiler options for Intel compiler looks like:: + + if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip -init=zero) + +At present only Intel and GNU compilers are supported, but the user can realtivelly easy add options by following the same pattern. + + +Troubleshooting +=============== + +Error ``can not determine environment for host:`` +------------------------------------------------- + +If you on Ubuntu system, add ``ubuntu`` as input parameter for ``configure.sh``: + +:: + + ./configure.sh ubuntu + +Otherwise you have to add another system - have a look at `Adding new platform for compilation`_ section. + +Model blows up +-------------- + +There could by many reasons for this, but the first thing to try is to reduce time step or/and increase model viscosity for short period of time. Have a look at `Model spinup / Cold start at higher resolutions`_ for instructions. + + +Docker based installation +========================= + +The best way to run the model locally is to use Docker container. You obviously have to have Docker installed for your system. The Docker image we are going to use have all necessary libraries installed plus have the ``mkrun`` python script (`Docker file`_), that helps to create FESOM2 configurations. As a result of the steps below, you will run ``pi`` mesh for one day using data files that comes with the model. + +.. _Docker file: https://github.com/FESOM/FESOM2_Docker/tree/master/fesom2_test + +- Get the image:: + + docker pull koldunovn/fesom2_test:fesom2.1 + +- Go to the folder with your version of fesom2 folder (NOT inside fesom2 folder, one up, the one you run ``git clone https://github.com/FESOM/fesom2.git`` in). +- Run:: + + docker run -it -v "$(pwd)"/fesom2:/fesom/fesom2 koldunovn/fesom2_test:fesom2.1 /bin/bash + +- This should get you inside the container. You now can edit the files in your fesom2 folder (on host system), but run compule and run the model inside the container. +- When inside the container, to compile do: + + :: + + cd fesom2 + bash -l configure.sh ubuntu + +- To prepare the run (this will do the test with pi mesh):: + + mkrun pi test_pi -m docker + +- To run the model: + + :: + + cd work_pi/ + ./job_docker_new + +As a next step you can modify the setup in ``work_pi`` to try different parameters. You can also follow the steps described in `Detailed steps of compiling and runing the code`_. To make your life a bit easier place ``FESOM2_one_year_input`` in the ``fesom2`` folder, so that the data are available inside the container. You also can generate setup that would use ``JRA55`` forcing, and adjust it - this will save you some time on editing ``namelist.forcing``, since original setup in ``work_pi`` folder use old ``CORE2`` forcing. + + :: + + mkrun pi_jra55 test_pi -m docker -f JRA55 + +Necessary Ubuntu packages +========================= + +Here is the list of packages you need to install on ``Ubuntu`` to compile and run FESOM2. Should work (with adjustments for package managers and names) for other linux distributions. + + :: + + apt-get -y install make gfortran gcc g++ libblas-dev libopenmpi-dev + apt-get -y install cmake vim git libnetcdf-dev libnetcdff-dev libpmi2-pmix + + diff --git a/docs/icepack_in_fesom.rst b/docs/icepack_in_fesom.rst new file mode 100644 index 000000000..3c48191f4 --- /dev/null +++ b/docs/icepack_in_fesom.rst @@ -0,0 +1,161 @@ +.. _icepack_in_fesom: + +Icepack sea ice configuration +***************************** + +This section describes the implementation of the Icepack sie ice column physics package in the FESOM2 model. The scope of the following paragraphs is to provide a practical guide to users interested in detailed simulations of the sea ice system with FESOM2, and not to describe the scientific features of Icepack. A detailed description of the sea ice parameterizations here implemented can be found on the website of the `CICE Consortium `_, which maintains and continuously develops this package. + +.. attention:: + The Icepack implementation in FESOM2 is still in a testing phase and we cannot guarantee a bugfree code nor good scientific results. + +.. note:: + To get more information regardng the implementation of Icepack in FESOM2, to report bugs, or to get advice regarding the model setup do not hesitate to open an issue on the FESOM2 GitHub repository or to contact Lorenzo Zampieri at lorenzo(dot)zampieri(at)awi(dot)de. + + You are invited to update and develop further this documentation by pushing your changes to the `FESOM2 Documentation repository `_ on GitHub. + +General information +=================== + +Icepack–the column physics package of the sea-ice model CICE–is a collection of physical parameterizations that account for thermodynamic and mechanic sub-grid processes not explicitly resolved by the hosting sea-ice model, in our case FESOM2. The modular implementation of Icepack allows the users to vary substantially the complexity of the sea-ice model, with the possibility of choosing between several schemes and a broad set of active and passive tracers that describe the sea-ice state. Icepack v1.2.1 has been implemented in FESOM2 and can be used as an alternative to the standard FESIM thermodynamic module. As the standard FESIM implementation, the Icepack column-physics subroutines run every ocean time step. All the Icepack variables are defined directly on the nodes of the FESOM2 mesh, ensuring an optimal consistency between the ocean and the sea-ice components of the model. The inclusion of Icepack in FESOM2 required a revision of the calling sequence within the sea-ice model, which now follows that of the CICE model as illustrated in :numref:`call_seq`. + +.. _call_seq: +.. figure:: img/call_seq.png + + Schematic describing the calling sequences of the Standard FESOM2 and FESOM2-Icepack implementations. + +Icepack is licensed for use through the CICE Consortium. Therefore, we encourage the FESOM2 userbase interested in the Icepack features to be aware of the `License `_ when working with this model configuration. We report here a disclaimer from the `Icepack website `_. + +.. important:: + Icepack releases are “functional releases” in the sense that the code runs, does not crash, passes various tests, and requires further work to establish its scientific validity. In general, users are not encouraged to use any of the CICE Consortium’s model configurations to obtain “scientific” results. The test configurations are useful for model development, but sea ice models must be evaluated from a physical standpoint in a coupled system because simplified configurations do not necessarily represent what is actually happening in the fully coupled system that includes interactive ocean and atmosphere components. + +How to cite +""""""""""" + +The current Icepack version implemented in FESOM2 is Icepack 1.2.1. To acknowledge the development work behind the implementation of Icepack in FESOM2 please cite `Zampieri et al. (2021) `_, part of which used to compile this documentation, and `Hunke et al. (2020) `_, in addition to the usual FESOM2 papers. + +Implementation +============== + +The implementation of Icepack in FESOM2 is fully modular, meaning that the users are free to vary the configuration via namelist parameters. When Icepack is used, ``namelist.icepack`` controls all settings related to the sea ice subgrid parameterizations, thus overriding the content of ``namelist.ice``. The dynamics (EVP) and advection schemes are still controlled by the standard ``namelist.ice``. Below we describe some of the most important namelist parameters, while we recommend consulting the `official Icepack documentation `_ for a more comprehensive description. + +Namelist section &env_nml +""""""""""""""""""""""""" + +- **nicecat** Defines the number of sea ice thickness categories. +- **nfsdcat** Defines the number of categories of the floe size distribution. This parameter should be set to 1 as the floe size distribution has never been tested in FESOM2. +- **nicelyr** and **nsnwlyr** Defines the number of vertical layers in sea ice and snow. + +.. attention:: + Increasing substantially the number of thickness classes and vertical layers can lead to numerical instabilities (very thin vertical layers), memory issues, very large output files, and finally to a substantial slow down of the model because of the high number of tracers that need to be advected. + +Namelist section &grid_nml +"""""""""""""""""""""""""" + +- **kcatbound** Specifies which criteria is followed to discretize the Ice Thickness Distribution (ITD). Setting **kcatbound** equal to 0, 1, or 3 gives lower thickness boundaries for any number of thickness categories. Setting **kcatbound=2** corresponds to the World Meteorological Organization ITD classification, and it is compatible only with **nicecat=5,6,7**. + +Namelist section &tracer_nml +"""""""""""""""""""""""""""" + +Logical parameters to specify parameterizations and passive tracers. Only **tr_pond_cesm** has been tested extensively. + +Namelist section &nml_list_icepack +"""""""""""""""""""""""""""""""""" + +It regulates the type, frequency, and precision of the output for Icepack variables. Most of the Icepack variables can be defined as average over the grid cell (e.g. **aice**: average sea ice area fraction – 2D variable), or separately for each thickness class (e.g. **aicen**: sea ice area fraction in each thickness class – 3D variable), with the ITD information saved as a vertical dimension in the netCDF file. At the moment, variables defined over multiple vertical layers are output in separated files. For example, in a model configuration with **n** sea ice vertical layers, activating the **qice** output stream will lead to **n** files where ``qice_i.fesom.yyyy.nc`` contains the sea ice enthalpy of the **i**-*th* vertical layer averaged over the ITD. Similarly, activating the **qicen** output stream will lead to **n** files where ``qicen_i.fesom.yyyy.nc`` contains the sea ice enthalpy of the **i**-*th* sea ice vertical layer for each thickness class. + +Compatibility with FESOM2 configurations +"""""""""""""""""""""""""""""""""""""""" + +In `Zampieri et al. (2020) `_ the model was run with linear free surfaces (**which_ALE=’linfs’**), and other ALE coordinates have not been tested. In principle, Icepack should be independent of the scheme used to solve the sea ice dynamics. However, at the moment only the standard EVP is supported, while the mEVP and aEVP still show some strange behaviors. We are working on solving this issue as well as on testing further setups, and we will update this document as soon as progress is made. + +Compilation +=========== + +Compiling FESOM2 with Icepack is very easy if you are already used to the FESOM2 workflow. After cloning fesom2 from the GitHub repository, download the Icepack single column package: +:: + + cd src/icepack_drivers/ + bash -l download_icepack.sh +The next step is to activate the Icepack flag in ``CMakeLists.txt`` by setting **USE_ICEPACK** from **OFF** to **ON**. At this point, you can proceed with the usual compilation via +:: + + bash -l configure.sh +The compilation of this FESOM2 version with the ESM Tools is not yet supported. + +Running the model +================= + +Running FESOM2 with Icepack is not different from the standard case. Make sure to add the ``namelist.icepack`` file to your ``work`` directory. Two diagnostic files are generated in addition to the standard ``fesom2.0.out``. ``icepack.diagnostics`` contains information about the Icepack configuration such as the value of some parameters, the tracers employed, and the boundaries of the ITD. ``icepack.errors`` possibly contains diagnostic information about errors in Icepack that can occur during the model run. Information about the running time are given in ``fesom2.0.out`` with the usual division in **dynamics**, **thermodynamics**, and **advection**. + +The model output is saved in the result folder together with the standard ocean output. Note that outputting sea ice information using the standard FESIM variables (**a_ice**, **m_ice**, **m_snow**, etc.) is still possible also when using Icepack. These variables are consistent with the Icepack sea ice description (**a_ice** = **aice**, **m_ice** = **vice**, **m_snow** = **vsno**). An additional restart file is generated for Icepack, ``fesom.yyyy.icepack.restart.nc``, and it is written with the same frequency as ``fesom.yyyy.oce.restart.nc`` and ``fesom.yyyy.ice.restart.nc``. + +.. attention:: + Restarting the model after changing the number of ice thickness classes, the vertical discretization of ice and/or snow, and the number of passive tracers is currently not possible. Also, changing the thermodynamic and melt pond schemes during the run is not recommended. In these cases consider a cold start and repeat your spinup run. + +Code structure +============== + +Icepack is a single column model and therefore its subroutines act on one grid cell. The Icepack code is downloaded from a separate repository (see instructions on how to compile the model) and is located in ``src/icepack_drivers/Icepack/columnphysics/``. To integrate this code in a host General Circulation Model (GCM), in our case FESOM2, additional instructions are needed to define an interface between the two systems and to drive the Icepack subroutines. This interface is contained in the ``src/icepack_drivers/icedrv_*.F90`` files, which are part of the FESOM2 repository, and will be briefly described in the following section. + +Icepack drivers +""""""""""""""" + +- ``icedrv_main.F90`` This file contains the main module of the Icepack drivers. All the variables are declared here, together with the interface of the subroutines contained in various submodules. If new variables or subroutines need to be added to the code, this is a good place to start. Try to maintain all the variables private to increase the modularity of the code, and use the transfer interface to exchange variables with FESOM2. + +- ``icedrv_set.F90`` This file contains few subroutines that initialize the model parameters by reading the Icepack namelists or alternatively by extracting default values from the Icepack package. Furthermore, ``icepack.diagnostics`` is written here, and the sea ice state is initialized in case of a cold start of the model. + +- ``icedrv_allocate.F90`` This file contains subroutines that allocate the Icepack variables declared in ``icedrv_main.F90``. + +- ``icedrv_init.F90`` This file contains subroutines that initialize the Icepack variables declared in ``icedrv_main.F90`` and allocated in ``icedrv_allocate.F90``. + +- ``icedrv_step.F90`` This file contains few subroutines that describe the calling sequence of the sea ice model when Icepack is used in FESOM2. + +- ``icedrv_advection.F90`` This file contains few subroutines that advect the Icepack tracers. If new parameterization or options are explored, you should check if the relative tracers are advected properly. + +- ``icedrv_transfer.F90`` This file contains subroutines that describe the procedure to pass information between FESOM2 and Icepack. + +- ``icedrv_io.F90`` This file contains subroutines that describe the I/O streams for the Icepack variables, including restart procedures. If new parameterization or options are explored, you should check if the relative tracers are restarted properly. + +- ``icedrv_kinds.F90`` This file declares some standard types for variable declarations. + +- ``icedrv_system.F90`` This file contains subroutines that handle model errors inside Icepack, possibly stopping the model run, and that output warning messages when appropriate. + +- ``icedrv_constants.F90`` This file defines some constants that are used in the Icepack drivers. + +Communication between Icepack and FESOM2 +"""""""""""""""""""""""""""""""""""""""" + +The Icepack environment is separated from the rest of FESOM2 and consists of a single big module with multiple submodules. Almost all the variables are private and are not visible by the FESOM2 code. The variables exchange between Icepack and FESOM2 takes place through the passing subroutines ``fesom_to_icepack`` and ``icepack_to_fesom``. + +Frequently asked questions +========================== + +Should I use Icepack for my simulations? +"""""""""""""""""""""""""""""""""""""""" + +It depends on your scientific questions. Icepack might be a good option if you are interested in sea ice processes in polar regions. In principle, the employment of Icepack should not negatively affect the ocean state but could make FESOM2 slower. + +Is FESOM2 slower when run with Icepack? +""""""""""""""""""""""""""""""""""""""" + +Yes, the model integration is slower for two reasons: 1. The sea ice subgrid parameterizations are more complex compared to the standard FESIM. 2. Much more sea-ice tracers need to be advected. Overall, the sea ice component of FESOM2 becomes approximately four times slower with Icepack. Including additional output related to a more complex sea ice description can also contribute to deteriorating the model performances. + +Which EVP scheme should I use with Icepack? +"""""""""""""""""""""""""""""""""""""""""" + +In principle, Icepack should be independent of the scheme used to solve the sea ice dynamics. However, at the moment only the standard EVP is supported, while the mEVP and aEVP still exhibit some strange behaviors. We are working on solving this issue and we will update this document as soon as progress is made. + +Can Icepack be configured as the standard FESIM? +"""""""""""""""""""""""""""""""""""""""""""""""" + +Yes, in principle it is possible to run Icepack with a single thickness class and with the 0-layer thermodynamics. However, the results obtained during the testing phase with this configuration were not very convincing and they seemed not compatible with the standard FESOM2 results. More investigations are needed to understand the cause of this behavior, which is likely related to a different implementation of the thermodynamic processes in the model. + +Can Icepack be used in coupled configurations? +"""""""""""""""""""""""""""""""""""""""""""""" + +No, at the moment FESOM2 with Icepack has not been coupled with atmospheric models. A coupling with OpenIFS is planned and might be released in the upcoming months. + +Can Icepack be used with data assimilation? +""""""""""""""""""""""""""""""""""""""""""" + +No, at the moment FESOM2 with Icepack has not been equipped with data assimilation capabilities. diff --git a/docs/img/call_seq.png b/docs/img/call_seq.png new file mode 100644 index 0000000000000000000000000000000000000000..8cb28e0c3c361cd92f0f5bc5e06970430a9f4572 GIT binary patch literal 309998 zcmeFacUTnJ6E55+3Q9JBND>pGhac&oDj zcqB5bD>LgNb4yf-%tq!^h}+FW@8>nz_Sz!M&F0HW7FwH~b%BoAZxRg z_AFK=MrVURVci!#dP)-y2%`(m9vOLym0z{Hu)0esd}_x=%(F+RelKlm@dz6=&|vSw zzRspjU3(8e%s8Ad!~$*|F=tH>eharc*Ar5WP0J*UV7N4t0w&VsZDQ|dR@!eLI^Ryb z;(wTRka1E@28)j5k&6%6F`c+$S7TZ~Hhxc4G*C-?)%xK3M9D|@Vqe9eF`hdNA-gJZ zu=tPl9KIgU=!s>|6*8QRMb~m+UV7dEs3P#=eK{;UOH!7oG4T8>o{xR4W2b}L{t?E$$2+%ALhai zEavmCsXnZwL)4J_(BWEY_4bV|^K0w#!}m8{L^WP5#08p zX`sWI_Sq!~O7l%ge%Dg+p`>dg&oW(@r9k-cXb#=o#5(d=NE0WI;US^lH^S@*bPBr10)GTePZj z@|Z=PClGVu^`aCza2bB6qM6eD+9I~=Ib4q?;>36-Zd?t=O=IxbQazpsBiCfQax_7c zN0Fe%{hsEvzGJJfomgthBXm_t7ROYNa8@f?#ra?QzPq60bLPbb_U9Yo^tmULyiIPTzR-A4({N|(&_~}vQR)j(4Ju2OwQjS`Iuzh`sRQ>mHqH}fcabx98_rA8o;uldY~YwTu0Ept(+zv+izmo$ zQM@H9Ba?mBUprNMsg|I&y!P^=YdKMUBIjS=ixXaSd%*DQ?$vXrgL_`u21`B9e8DST zZZvI_Xq0KBWn^gdsKCMKn^Ebfz%(^kr_3I)p7Sa%TpDw91Rc&gkULP6ym0m$yfGhd zAZv1E_th$*${PafRm5pfW9NXpDK%bbrFysh?5P^|o>OU$OjO8qoV3a^~g)i=D+ zSCg0$=3}qREIb(QG~h5+CT0wf7gmu-ON@Fe@OH(hgENwY`>b<7|JjhUHfMV|)Qw+q zW^gFIKFR6F(acF?px!zA@w~B;aZW+8a$f$`0z%_2J=2|u#-#@JMiYk2okIyOH?}+p z@6;>P$$4kx3G1h|(MQ?x-Y$yGo8sSe-n_m^?&!@V(|bEC{!NBTYN=R}XpxGvSi2gt zl6k6n!Vqiu^`jrM^(@Ym`0M)b_^bF!*Z6zqcpdX%q?YHK{hUp-S+nwV`N-y_O{W#} zX*V{_vmR&1&ZM&>bJ-jCP<*BcWQ}}O+_~tl)obzPt80xz&0Vhr8rODQ&Ri~EuGePy z-IKX8K{?MJ7w6R4XMCq0Wjg6~=lPw&I}weKgQaBO8^Tf=@>`ZYrtT(E2iLXe%Cktb zm$v%1oNpP^Q)y9d*=#Xw32vE)Sm1OsxRJA1*pPoE&!BL!kfVUol&2>5!;k3G{Y?WB z1_d9cBfD{oDP~U1c=oLHFq!Y5aZm7?TU9K)9X&C{)}GM$EhDY`T6@9A6)%cb#un2;m8Sy~b~twKTesKv zsI7xKsqq9}3sl%UI!Fi@3-FaQlz%Mmar&~_vl6(ZKH@UujW{{WJ}{BoE~X(pd}w&5 z!flgh^V|B|4=2_py;qu_3T5VY$9Glt75c8!xf|_D zw&^B4W<4{S6PkvLaR?$4ja5loSzOW+)DKR4Jdt|x^{Mwv%QE2(it>v06zz(|)|VF( zac|y|tM&C6pwuP|l5L#|T=tD5ci{Cs8$w*bG<_%EC7tpS!_56Nz&tgd2nB1dT=(QB8w`vi!y3f{vO@hbnv^KtO z91Ttj7JBFLE?v@)g3ofcB`53wfvji>f#JLSvHUqhfzg}w8PvV3L)y;Y*HuoMT$YTo z(0co|cJNvKWkabEDcv=Y$Bc@~CwEO(zo4gB2LR^>Z%%hdn9f!1X0=dPU z9djGUQi4@35x#tMN2S7icY52*@RfBb{)>ChIZsVi2W>W6s%%LAsQ1M34Dl?NeVIfV zo+|K=f8__qmZ4vg0Q2!{o4UM$QC#H?Uj6c-zI!xw!-w7OqIq{e3{ zaD&2_i`@L`mu15|gQY0OuPW22>1jP$n(dF?&7T6blQv3e?qeb_Gk@ie-&uH(Vtf(?-%bi(*wt%}_+Gd4A67Ov$ zR@Vni?bhwo2V2Xx#?`-;S8Q2P6H~w9|KzB++b}89yC{Ttme>G`6i z&l&t$~pX*vQ5ju=0Js)3NC7OZU%cfy9IMIPu-60IFvY< z*ws@^z1c0gJ49YK@I7oxqgCLsK+w=c`NG!ykH#IPOYv*Erb>Ejr@q2wfg`lKcL?iAKz;Z{ z91?I1a!y;LPzwG@*A zeXVRC@Z4-PdcWL-U9%#^R4)r{^c6CGfrdJKCY05-?6ZrSUD;U+s#9>DfPO!^{kyu(arJ;H!|WQ2ZDV_Y3Y7y5T1Gywex z%=gIszghaLK$mn6nP(Objx>aCegV17uC0^J6)86~68Lhpu z;0^1Shlw|NEE9vc&TuW>!=W*YRzep8&oYBrD!h*EHt*N^pgsM;UQR{OC-PYvx7zR@ zEGXSxq@#0LiqeU#pq({%8R1%0O!F8{X}YT=X+^d~X|@-tsx^S_?y&#x^*haHraUT3 zvy6GMU?r4G+dqIq72^!Np0Z^>C}$BgAtc+~&&-x&C6{h(@jR7nSvJ1BdqF_#LZ2Vc z{5J+Vl_UWOiqy+FrYi5z#|PL#Sw9|={%at5%B53FgIM4|uLsFB90;vi7MgIWx%Q%5 zKNz-Errpq|oYYXW8j%h7@%lnv9lsU-3h_k$honfyc66~Y{Vz}h5NG?pkh1U8-*8du zq6NL-Q`=TgmlsL?;~|+%uf?<_sU;G!$aRy$euxpp|{e?Jb!JeTv zm0=Oy#r~`t>v}`w3lK8AKvGP8YV7LqaG3!`8}>G5Lik3lBerlb|X?6X%(G6%@x$ z_pHCt#dkWPHQ+XLYJg2^EWvE-E+Z+rvP=n3Q!X+!p8?VK9aS~T0s9Y+Q5jwDf41(` z@uB;l!;BNAmdL zx3>CW4L_EgLY-Den$g`F=89o=XiMdTI9;u(!yG7RG5uPln4PjyQUN90ma4^T>1<`B z^eVc)0=y+qg*2tXDT3C8omsf`__oD)ukNQj72{UJ^E0mrOMG?lOI_ZVa&i}R&2^WS zJVl>jYyfnF32ug*A=nds420?_k83c2$FUMtx8_3hyM>7`ILNFvXhar2UH`53iPv0L zf50xQ;sJ1>*}ne3QHjhW_eB=PNPHH^;O*~;DjG6nMG|rD_-ipndf&7QDR^2HI}2zU z%oA5EJ9sbRzp;Py*h!PyFYI%K%gdWC+TTSW3DK%p0Uc}rb~xW>8h5*ncI(HC_iRLr z+c{A2YgXPYSa0mp#RR;IZE{w!i%D5)vmv>1S2qp1Hy2|>(oM3i=EtqAA+@Op$|C(L z_6^%)`cLd~NVrfAufm5Ay#T$g^Ip5oV82-Dd2FeKGJRPsZjX4#oJK#V zLc#?urhLl=`*@!nc(?kuqOr+Y#F10vsocm^gL1%+7fz!zATJgxNM)$phx<=~=e%PR z)8O5@uU>0UU1^KWLhr13U&RK!4X~H>OAbV~;=`$`!Mp2DZSx!9i9LRe-c$1yhP#oS zV3o!oit_Jf(eD7@UomMM2x^SR^n-{E9r$>S> z451yn)>%;Jf7;I;wDX+w`vwjeEbv&ujkqs#cDP+mPJZ44O{A>fWf(djSkO7B)OVw90FC6xA+*uH)QqE_SIgkt@mQ|Zm-R!w%t{p9p-J@(gNJ2las+% z5L?A#3U{p82nE3eQPo8-2g8o2Ep5%S1vBas&Jz#yEGf4Q_0V%!8#pDFuX{|FOEjZ< z58#JMkF3NEN=;e$;>8u34YU4KE^&;ExwQhe{^Tv#!mN0E)<^9j3Pk^&#X z(oFrPm3Y~^I_DwE(O}}ScIDbS=|Og8*O6clzC7?IVIDhNjoT$6#jee}2@{q!AsI1@ z@gkq&o3zI@*sr~REsl;YToFUS!<$!IronmyfCGcH{MzlamG?zFBK@DRd%XOLfR-@} z{ZNh-wnK^#8erdhZRN8FiemAmb@f_v_U-_~+FHhY!scb6cfcF3M}k?wy9@P|AGUO* zvm80l)nmzYf)Xgg1vC^AbWMP2h!&Zj1VX=wIq4B4JQbR1X|`r7b)*nud>h<^E)Cer z#gIvmy^23aD^Y#DCvedmv~{M%y%aL~%VJYnhn%Ob;-gFHsLX&ad$wamRX2(SmYV93 zrW1SY&GOujYeqWWe0XvrqP^eSaWmRY&)sQ!89JSDOzG{XQf)}3fNyCABn4QSR8D#u zFZROBAe;?QVepNgO5+)lcuEhm1AdmNu zWU^@F=7wM7pio$eK4H!(?M+Co7r+qs0OAzmNPt^`K6SK5o%t)$p}T2=NdDXX!ctGJSvCNU(u+A-?S z$@q3V&S`vwoYd|^#olJb>T(t#bLnf1_Lj&YZ=_EC4;$&F*%#z|vwnzl1r(_mHHCx8>6{=VKInjcxb7zlylNe9}memwm}33cp30 z>%Hi5;SD_!Kz4dA?K8(}OX_Fabpho2t*FOjS2@%#JF<}3>p{@Ud_?z+-C^RDz2p7T zU({{nqKFn^ULFnC6cLh9T;jxYWnf=Aw1eLgTv#f)oXrW27FlH+NHiBg<}5ukMlp4_ zjj7q<2n06-1%`|%oCMZa9%mUJwWkx@UtcbAsmsh5N+Zl#rQPWgA$mpQ;TO`oO`@6d znkK<{VTgUCWmZQsx_q>tE2}J~^*o?g&@f=y&alS|84Xwrzz}*J@!%A-co;{%YuG%O zZq4zPcFx_-OznLbF1wP~Vq>(+!)`^GmFY6Cyps36Zr}t=)}gMYsf}8kmAn@WFvuvA zUoey-n;${m%8n#u)fgqnNgg_}8akonli{oqlQzThKw)#RfM}&MnR;a9`PYjs*M+@b z0K7WNgj((Cffi8-q)r0gcDnkt@3%t6x@?6ZQ*=a;=IE<0zRIJKnSU)I6}zVwZhnm~ zYvaO@4(Hze2^CwP4Z_dvdZw_pUR{sd#?-&w1;DtohJ%S57?MOm+B`+3WR$Z&o*)=M z=KSG`G5Mv5{?M}*?lP`AK*qQ6xRku)Q$_$mga(s%4W#&*E>X7v6D2+6 zbzx)jE7L)G%FP=0D&cDkilOvj{9SMLl=0y1{`Ef*2_1znF1WNGfTnXe(P~R|Qjt$} z(qZ-p^Jx5>x3o7abSh3)nJ1e3*dXN`_JtW1i@>};=fP1MqeKB3Hi|t7rSKL^Aq#lr zV{k*Pd>>Mos6%>rZ8^&N32+Db@Qc^dNnodoMS#FZ;qaxuiyRb1;q4geAr^m-VccIB zoS{x!@Gv9mGiBLra%H_%PLR!z1ye>^$9rggfA5eeu_M>?{)Z`T^l93j+! z0W9Tp3&is3d{zTsAuB2V&>^S|;+8TP>S|3*#c z#|Xc&{awnTs)RW(rTdW;w5%UkkJAIbD=f#H8h|pLCT%`&f*(eZ6BH7OkGZ&f+Z;_A z07zr-J@`fCTYkw>kNuG~PIJWgI-m+~!G?9VT6+t(4=zUD^9)LehDgF_Ou3(uu$xDaxG~QHQx0#ISG8%2u(|e+XZJi)0w{x6&hf|<~>QK7LvP{?Er-5sel<32p zX~Tehda~%CSrKvsBI;u_hDp5>wx<%W_V18(PTc;qnRvN!MWGS(~U1Srn!!P0yU)d&`ABCQRW=8pKA(l^V!#BY;L)AG>j| z^Fo4snCZO<(JfUvyvXrlg6Zz-_S-HhwmH&LX^ecn5y$jMIx0zJel$go9}xhDD~}k* zWntA0z%<8B409~gZZ`p=k8nw`8EK^pV>i zcgmew9GF$XCnDtg#zBidr^P}HMYyvOvete3GXpKY7*@7!civVAnGmQsLfvqrpuh#^ zeR_EF{OgEDcDX)=L-qrM!L3w>SVBwmf222QaD0aB`C!~a*-rmE!f?%V?Y8}f&NSC; z73d3&?-@nnRV0aku22EGLaOR&U5cox#Rr#T??i^o(A8&ok3oG(O{DssV4BC`{uq*H zT*2e8BhHZW)tLf^W_s>^xF`!{Woj`adyh3A;Jsvtp7#JjO!bF0vgpk0I1gs}-* z^rileKQ3mlU7@f$`jGM6I$Vd8+<2ZC7R|`!tN&nEs*@d}syilC_1Xv7(_~fSKtlq8 z57^R~TqkvcQ&)F#8wLm}Ce96}`f1|98o?r`&7xnEXUl*Bx_ua_g(3amAoDsX7P&Hj z^O2IvCMjD0MiivI6C^Btz~$@)9WavN-c7?7{K|6C5zutRu<^@11d_zp^T?`n@wLs- zPm;X-1yYaTD;#?wbT*S0kHX!1B1}L8=+s!4dLqiLuDCrlID-yCIHrO}DhCXqVsYd~ zI)9ooM+go=#P<0S!!5%8Q<{P^lGp8UE!>-L`<^5~mxghJ+aeSsfof4?MFueCwha(W zMkryo=tnh<6JlK{YHNrR+3*vofrygCKwcJa1+3cBr5^uurS+=wqXvRhW~$yV5EtL? z-1SbFWH|x%2CITWf85=)G8$3`PIO9#^J0?v9;x7Iq=JZ;P4Zhi7aiU_#_^kURtE_- zZ+iQP^(kgH?rz=SS|vy%X)<82OmTH|iN1Uiw;B?`?YiSj-hPHxV{!CISgwIZM=n!1 z{x$qQ!&jB`)_CaCZqdi!JH7qvU_EeZ0{L_Y7-!v4IsE?H@{R;2AOZTJ;NyPa@pGO# z4#xNPNYV9!abj4>AdHYjwH0!s1OOJ^><*j-4oB=}j1gkNghDE*65aODcc9?87+|}v z`7>00avnGh*xs3CaK{x}lY~t2X-WMQM;RIVo&eU> zREHK*o4s!P6_4MrAYfKS7Zl!yf%tvrBEWh1j{cliJ^P@hS-kT#c0VBW8#u9&kV=w7 z=XUgo0U8)4*5Ub=mt`Xf3mh6OnoJXEzJQ^$p~`|i{w8vX-i^%Q1VpJJ3`+6l>Vtbj z$cHkTS7mp5Y`r_;kWzO?bLrzYHX@lI3Ncw=*t7W3&u(6%;+ITl2_yZkNa*(_J_ajO zOYaa1apihbi3o6+rJi{7q`)%F4zg5EM__os!3*;GZpV3}VCQ5}=M2wh0Yk}v0vkJ4 z=(l)h=LuUf{ zun}BN$ZGmp{v!Btsj*znmC0;#i^umcDzF%DfqKU>v!2eTO2s0BDK;Rcr?- z$Z1>{`Dy^#(e~Gd6nrm7+pg4fspYKj&he5fhu*s$vQpR}#MiOo60$M5!+x>%Cef3( z0W_9g7yM8p;HQg$MR0k+>^aGs%vk(wE!YT!`7HD24OEBjcN`IEyilBYy!}Esy7~Z~ zV>4u18mO5e0Ug_%kq`H9mk&yE&I%#66#lJ4uq4f_PjbuJ(jJh_rU!!Ho|6}Sd)`8s4Tc5@wiQv17uYJ!zEy+ZPDnABor_7YWHBAF_0L1fw%Euh zHowORQ0wR&0<5gOEz)mj$JCmo-}$m3l1pe05Wqh=*6?*L;WJ^iSKGp?gaq9 zrUe_22Uq{{_vgZ$29)o4`YP~2H>y{Lav{RuEsnL|!!VU*!kfo6150Zad|skUjJbB|8;c zi2W3A1~miI%pdosS0uVJAB=vl}kbA)*I;Lt3pKlSY?szj(7D_FTu{Ba98*3U~sn@?WoAZAjJkLCGrs%`-fT@`yyXP zG+Kb={R4YICXzIx5wF+is@x#tX(8ach`~^>Ys)^pZjD2qXDYE{1=&H;V@yAd@y8zbuLg8;_*DO{c}Ou7rki=rwJ>j zAwjf<0U{v0en{C2M*Vm%nBwr|=dO_o&np`BF0UleoCMB<)n6HOi9F){=+SB4nQsu) z>}xTo2B>#~zW7(H7c2f$GspK;#wyj;v9=>(DIMZosPhA_7Gp7R5x_h}(oX_-fq5H9 z*G6Lh%A#LsvD&8m_*1Db=$s9!H|LHX*7!5hv_Xar)C`BrhPfZZY!;J2uEDyZ1=_Y6Xp!W81MX5@ zNp#b@S1F8;AART437L#K1_y?9BRK-`&=)r^)#1HyqT}G?##cv9>3gkmKo`3!eF7LC z;Z@iJ+*K#0(^&RSL%sM5hB!seDO@9{VRN#)AU#NMp!V+JX>m#fvU&djm&CxK0RKB= zO#VzUXP!nJOJef_w>e+6XVr>sG!cZ|q0wlAlmr&9Fob|b3KAg1C9?LM@`7tZ^ZE(Z z@DY%Cz!VzCP4vwW-=)WkP{?CBIJ0)YQpj_-I|juTiJXkrg!*qtvCg~)xFf}m-gj5R zJ~crYS3&(^eadR~tJ=J>D&$Y)axSd$K?=1x|$!a0w8i27> z7M!a(z`8phHg()Jn9RHOH0l===o$jy?HF*cC@5M@tE6e;z{5hwMUx31(IdGZ3CiKB z(v!-@-bJQ4e(jDv5pkh$U&v}OQVT=f*50QAI;9kAG!jcq@n=L-?R*IEN!%i(9d0|% z0%2VBTx|#A3+jrSN3_fWdC=8l0kxZb7Uo07yRKK;qy$;*u_5HN96QKvHa_( z4!My(R=Ae`oma#CoS!zNj+8XPprgis3g5Oy59_0GoS$KoRP1Uky`2q2gCd<9sj4k} z2dVuFinJ5H9TRP<8}-8DM;bjHT`zbF2BTZR$_v1{;FRjUPG-HDuT8Ru+5o8|JV77= z=k0K}4PWV9d6@@J0Tz)EW33O_UFIIZU?sx5BCyts8X3;Lx$5((U6)XVlBpQZsbBSs zBI>y1^*mf;73*qSMRhaE*mtD$K0kGHs+9+i?`z2VjSK_L!K+q#z_LNsInA<=gs~vz znV*_b47n&szZRr)^Tc|Wd%-QB+Y^NDM4WHpY#8BqiaXbZ$G`!Q=Ti zT91kgk-d6LqxbeTx?Fe&CR=)fn#ZIERqR@YS$-ny&H7Y#Y{nedZ-%4gO_|6nYG8k7cgaY;SB6 z75hgt8nv8<<&}tVPg(gbwCL9yag~z4%V1V}va0J7B(*3uVHhyk0GaSu!3^vnDc$?n zJY&5l>V?JYLhs|0$q!m2Llb1&-9NeEZB=nBT`$5Gc=S3d@@9F z0U+gF+ZsSfAPbJeEbKgBVg(aIlWREx1wJ|`Fj@@74lEB`p5o%&>7PS#$MH&V-3*p3 z2zH`=)zkCDS9K6iEz#uwfVUF{W`o~d?U@%^_MzJ)lZ+KJFa(#Amek;DO6n61%C%V) zjECh4i}VuF%M4*E3{eG>UG^E29#*-FpPg8_y4_BFpAc3F?r{J#mJ7tYBNDV~DXxl2 zRe7;ogbEpugb3(kQjanVWIjcipH=F2HwjPoyoY?SZP|jCA;)WqQ#r2#{NUc2+egUK z5`a5k2v2wwvaAA;(F8abh>a4J-J>ROK0}HjSTBYm3I?u}XK|19sB)hA3J{7lhQIu9 z6eeQ-PQ5*v?A;Xz+LC%0z@|(=)*a$`v-qmEU+;J2idpu19`;XLpJbvoXS0Q7;9@ui zZs(Q+iFR>MuGuPpQ-R>(S;tv?D(dy%QGAw2+S_Zyv~-#=8+7*|L=LCL0LWqim~^#R zZ1E9=LoBMMpWXNQN5ZI=IirJ)F z9i32|qD+Ie_e{;%$RB43_Cfn1%d1_XJAGs#1!a{&ae9$gW5w5)bkz>Uh*l3^OU?ZJToKzOj@t)K>}s-p4Cw!t)Q!jpDWKi@{7 z8CRzgdQ-f4l9@nLqHB=0CC68F6|$KLG~{Af&Bh>OTQs16T{}UbBONcsMy*A^Nj=I^ z#x9S^w8!RqF};&!lW3I;J)Vt0 zf43PDkk~|ziI~?FH)?QnRqHF`&4kCBPbk)1&{Lk0H2!L1wOm7)1|)JV`z_@p4*O1# z*`J~~qRaCJeE_%-2F!mhOK2mg&h%mz%#fT+f1(>OR31OEmvOAeSno$dXX&HmZMczS zhK;ReRES{J+5`1>Zs>vmZcM3W-IMxpDfks(xldK+Di)6|`_MMoeeep(U1h%4<(_WR zLD!I*)ZU^a4`$Vnqfzg(R4mMxRc2@y`i^!{Y%)Ur6a|>aq`k4gTQQxa;s$5GD09&u z3hS>=KX@?*bhm@n9*k|K;kZslktY2?0#B5iJAq;4#QRT=gjAe2Xl4(Kb&9v#phIUX zVR{TyjJP8(Vp|HXyKs1#UEK4gNly08<7ltU4c=?W^$Hh~8jgx+tZg~p#(vALYURVG z`gd%Rt(xV6-KyiJ2-bRMYsmIkHm2oI*}I>+q&VQ)S1}=KqerTmGIyR{jvUCv+q+*@ zb4~xQjGKtB-6z^a4Nc#XC|Mo~{3=?ufo^a9cCxEw3>bYL${P_a(q`zebY=V;Nj(@h11&In>c<&?rTcY8>NlBTCcGlHeG3j zQ~@*V3p+akeCWmpB7fE@MM&bc%$}itYANpI~5;=|%x0z6dXh?N+Po*{KrsmzXN>-g&m~?Ls%oz3Io4w)2r9|h? z9eZ3N>jF9miwS!}!^3%y2hea)EHp@54M&h3nJge2W9yoM{$s3}`D0 zW~+r-INV$dLT}tP)RSkaznaSG#~s9uM`Y=p9eBQA_vO*`)5E;c26|g8-ylmf;ML0T z)|QhkWZOYJyU4a*f5oR+&{(3L)HvNTvNKtG4!CFuc*pBV-qAiRhalAARFL0hhE#yy zETgQT{kY2YtLH0Xgi+MQLI5ilfjXO71u^w>4YGl|F-{oLUTX<)`7P-1h*A3b4 z2jrJ9tBMZh-`9x+P$K=0u9|unDE&YB>J=V{1r9&}z2pBP5O(Y5DW8KXp97xk2ASO^ zelHAeU+ODCV+^y?+lSg<{s7W?AoKuJh*Mx{IB+RUs9d zAfgT;3ZrA7Hy%V3IwJpZeGpLx5%nLgf!cf!QK)$Q=hZ<(9YoZBI>-JG4kBtl8oxP5 zkEo_hs;=wAfH3hZDex5;tmhwyRmp?XgvCz+UZ-%UD&4-!a6Hw-V)%{3=8w?r)PK6e zncv1^d>24d=`-YE-9LAso-o|yLv8raE10g~pT9bvFJV|6`eV-7=w5ZC+bQi~7s*7H{i&^P(&mh1?cw z55XCvtKI4TimSfyJKT#Gfynw`3k34}$H1e|;RGT7KhF}ABo4yrFI@iD{}00I|HZI6 zNC5xK@BjY%AgdmfDu3uI2c^nE{r6|Ca!^zstW^HYQx2AYzgb_SKzLB6{{OX3-R+M$ zjQkn|Y~hZ*&Q{!;WRR~5;BV$dvE_eX!1L1oG|3zY{cjjiJ~_bjz@<`8 zL2EyVsDp??a|z6s2N8uC7f9)vF#3?W}97Gf*R4}w1MAShp*(?LW*F~QjK z+lW$1nz)4VT>#(!UyNu%8-I&0YWM$ZFny)Q_u+;1L>s`%w0x-S;>7mJESSYuneg&? zk0+L5M->pYAx?LOs;pQ(2zQ7(3Vca_e{qHqm*`^+{>B$4rEV%>O})F;sB{1CE4{*7 zf4c8_AQpu20E7b&{)#3|y`o8!xY`bo`!On1X1-E<5v3jVLR#_dpB5P`c`B%J?0PL^ zdoXKOMc~|TcEI86W}PT+lC68n{g|n&FfWjSe5297s=+20QSbWqBgk2JKjXf=S(a2e zOKT6I{-U!~Sm5u-WmFg^ymVoP{F(W`QmVh~M_}~MB{8veCpW?p^OgMH0S5qNc7FTj z?SCf? zangU@I0ApzQY>X`t#Vc+0Q$`z`|o9Wzu$VA_$gzPxU7Q5hl|d>fqcow{`)lV@7EI0 z7Qa-vr#oz9astpaqZ!@@{QjGMo+sw)=kha>S2uQ~t%(7c*v9m4%u%bn-yHr|Ln0*t zrBECQeV~rgOZ(J-_dRF?=Z4`2Yxdo@gEC0Vxjz^r9%T^zUxNq@{B*Q^vQbZq^HJ_I zK^hr=xzDKt1fq6Cx$5XUvCqPqH&ezyr-uHN*S>pTObq)2CNV@p>XKN}z7Eh+T$EfJ zu6mV+_cwL{e>CtN%2c+0P6aCV-QYQ4J*4lx26#m1{**)2U-aI5W$d18Bi+Jp?PZa;-;uzdfXtXz z5wNP4`w;>874&_h0YIeTel#WL`BFAN>l(G1tf2CqL*4 z^!>L9byoO#!_%EA?_LHC{WgXX94IP&N2+)>>bJLO;-lOmGp8b8_FeABW53^o7P_ri zvR%?Z-uvC*ScH--1}eIr%1d3M$!GiBO=u>n87Ok>Xi6D-Di8Uhko{7)TT$>n3MSkO z`#*=ATcgAW%QKfWF@`r1f{&CSJ@+-hx=`qE-zU)1a8wVV20cLWdWdsvF0|{umI;*F zO%lw?*1t{VunH87^zzILHeRQUjT`^g{*l`Iqi)~u&{KE|O09P}-<&gK(6&|XM@4s` zaWzSW^?KCwc(m(AxTrUE%s9trsb(W(lh}ik;4d*6T!xDK)e?hJp#e2_FL7`lYkf6@>t#Z8n4{KYvS@}~#93kjTA zhnBmX6NGR$C#J;Jq9U0icEbfmTD*iFy^aGliMl@MnrK_~Qe&)UGq0~`M3S-l4#3@3 zxidN(e_{Ds+Y5kXbCgF+aqwTmwzU3rVL@qk!gWOIFRQa~?YjWKoi+}|gg!4M zTwOd}((YATPuA^Hc1HG!U4P`YMnxxR=+;qF>o#?vufyt?`9dR>qx~x;MYdK?p%U{v z$_!WDjq9%L|DEIQJNDP?@Rrq##FrFI;SxhdpDn7$Rzj0P3T0=i{DP00H#ouQi3aq) zesLX482=Ifm?k;-MO`TWGKovMO}rgK&71i{ka$f&(SROd@89py{{ngczZKK_b1X2a z4BEVU&q+GhoRhbKc5`Y;p;aKPkT=5e>PEi9%Kh96y!q6D9Weyfa9;Cq;i%u3mIs9% z8J~xqNsYOK@QKdH|ddH@8?<`r))Bmw99RO^r@M?ZWWWV zYf{?+z+=Aja{g1+vC!Hf-*Q1Yyz=(*F0lDhA~10Axs2uRgR2gc+ionzYSJ@hv|_bj zta5N{&g7(Rt9fpuJ`O$%R9b2D{!^I2Jq97@pJ_(nSyv>_Zr`ktOBs{D>uLP(Zj6Xq ztzsADwYgHcA3iyiI-7@RIGWNayvj5i^Eq1$9Iw3d#%F#Viwz*?)@%a)sh1@iAm)I% zqc9tc>D^1&#!mVQlk^tt1eZFSt2XHUD&#u{&XBq#qhS9t^fAJR%zy7s0-k)gT?8x&78l&fd*NY>ERQzMw#D`S;(~{E`YTkfl+g=$tSz^l9}n{Mzo>F76gaHcq>| z$vL0-f&3`oW&1x|ViJY7fGQ+!y!Gxs9^A-ZO>v%2H-FBsRMU@W7o@ewzX1I5cO;)T6#Uam`gxt4J| ziM#v4%I;VFM4=PjuM3|YT#Q;Q+K9x-L|1R3Q${X^qLP>{<|8~ z8~vO6|HVjH0fqKqOJF6bXgh3#n6Dh1tP&qJQbg205F;|ED531k5mUBqi(ht7msz3C z=cJXQ_%>?(rd^1q$H6$daKmpiUz!}yJOr8JnvIK-lNBR+PKd=pS-9}ihm@HYG(Omp zmuTz;F z=Si3g`Q|Guz|jJ2#+S#G(OFiQ2xZ3S6p>y*em1lO9R0YrtzgKuW1DNFYV+g6ul1Os z`0vy4ODyoFtj*QFByS^G2;FU05o#Yl>9jJa2%PJ2QLA*>qyDFk4u1}f_OB>wC2MdE zY>oY)V=;MF#b$ALwp>(3jV&s^;OzY4V2Mj^+eOdhWrCO@E>QLk#19AOWQ|yP$gPPh zOqlTji#Y!~lu$QORGTZhCvm;+f;(Hd7M_jZV(V?os4~~G41e-~bge?udz6LW7-`9DaPye*OexwzLZX11v)~u-rpAcZ4~AlYpa8$_u~M|9U6o~T*?;01m3zj6s|PrG;C z?ApUz78rG$l&^&QxrR6o+_#>Qhi^Q8!k6K&*kF~kmpPZGC7T_(ZQ$Z89QBs^I#N-c zsoZ@lW((4RTs;MZKcUJQc|>9Qgfs4hN%z~s>fig-dkD-UGzhxRL`Q55Ntdc|&GjZt z(;f!Q$CdN0{hBeE&SiAhJtrEb_UmKti^}lMCbE`R#}(?fW@d*yHD}zm%wVW9Rg$jo zl~T_-<}FaORF+OL#8%JUNYS1_^2434F?c(vh-hR($Pa*5{g1H__G-5K#;-67|1f*p zbP3pdq|#%ml&8Y6pb?`b($14TLEz??QT9#v-tZ5Cuu0cyn(>rSc@d}etk0%|z+jiE zvF)$#w?#Z@fp9QodNb3%Q-VJ;X@0HdE-S8jY)#2yAa`g&&wPHJEUF^k%)Xg&G?IBU zG6MiI^)oq3e}$LD4Xca~kUT`#rMvGp)QY1(jjvH^A*X;-GK6=pGQRK7)ZOpB1O1_# zJG5^1;tQ;F1@dddY-&`-r61WL*egfezVx-LQUkHHRvG=L|JD#@%oh5uvVWaf_?!3t znjqN19cHTJZP;S=*yGLMxV|Yko3Bs!y9eHXO{@IMXyt!7w8w=|1^}LoE7JtkuZ4xp zp8s1Qyt}2~HKd~HQ$ewK^G}Pggr5%yMS>< z2(RcNHK0l7LhnMK6JeE+DyYMoq14Oq(}X!_>gq>(=IjM~5erwGaR8elXGq2rdTVD7ajsKt@8~Ph3XX zxT;wh!YfO^F7b3T#-xH@aJm=_&Kq9R(~2Z+uy?(;SPg~ zI7koH>^TF#Jy#NHkG2(jIQlD47^m;DLcLcHMO7&yq_EkBt#9PCGy35lCPbu0yo8cL z5&;0>G;}3j<@^HSZ?yqf*Yti*g2#Cowq(NTkmkqQts_vF0(mcJDa!OoFE$W)OGCm0 zp*o{#YyvLD$G)p{qr3~y@mAo&iATSD;MV}adJTs(B% zBdGMNbTnSJ85UsnmN@}b+G;PT* zp|c=1a8BZ<*2X61{#L2uJT#v4QP>AIir=;F+)$X_?yY0^oVOOyM}V`233esCfP(m#kf?q^J; z|JVTl>{um7j~q8?-gL%=SIw%y%@b(a0l>98>;wgO?^%jp8?yNZK@r&65Xy<^-XCp< zS!Cz}z%iu1bi7Qi|AzQZ;x66Divvg{qt8o5!x!p!swjTUUCk1JWbL*F)?)y7++WcT z#=qp>`p1eo`CqF2Cp3P|N>vesRVc`jtRnlDYQ*N@A9~si0r<*t!g4~b9!hAj{k`Gw zOAr7&M`1wW!+!~hK{VHWo6BkNHpFko2J9dFfiKa&pZyiVp!DogWXuBqhi3R~96A7D z@+WpvN12t=&smWn=?x2DYU^EY199k+Yp*4K!5(3DgeYgq~V0Kwa%8y z0|>c4QR=~drGQmV$;dh-mUBGB8_o+VX%tyEI52MXB_pr^_fJHKM4N7lb;GC#N z)9Cm0@0(}Cd3rZ@zFlJZC(VGN1f|0$=lv85z{!vtiv^SI3+0@NUv&Lqsw4{*6m~sf zKSozLSu8SFwwmv1p8FdDOmxVH%3ZQG9M>N|GgXQqau=@QsMyoB!}1w>v-NHnXL0}= zcsKdKoluhq#oTA}e%32dT4#s%a!UaVb$#xn4NWYde&&CRI;V=_hp=Ia#P4&=rMN&V zINj1}w+#<0KnxNW`Sp@dARL5fLkZQvUX}nlqG6=b*V94Oc60rJj(4(Dn$Dd7tOleI z)a-mb=8pXth$ESyl*htOKWU_w1L=(YjDW#VVR{m8_68iBD+IPrVUHO074SdpbMDsu z*&@`v4F3;fUmXzD^8S4Q1yK+SK~T`k1qCUkq*uA<6(kfvx}{56a=8jzl~5@GfmI}= zyJH0b1qtb95s{LT?sv`-?y~&e@0~x0vu9_XdFF}F^O-rjK2JM?`PdHrEpST@vkrV@ z<_t}rn0<)@gihJCa{$D)X;tLO@k_x+b|U-MibGazc&ibXwe5V)AFuZ?t@d#H3a=f3 z{B({BXM*Cy@m;zBgw&Y(FF|zb9bU#kyKY+ISQorB^~KR0`IU5yCJ&$-_N=322a?Ya+|Z`z$&_n0Y- zEiOb{1hQmaR1dAHk%a>^1Z@B$-%%$b76_qbcgHcXx&hh*CX<(gH(N@e`_0+5U-s}QrKLstb}$DF#|cowlmo6UKu}}`$5@w^{ZS}|Wnty=ExpzlB4o{f zh-#QgJRRq90~LFoF|VO}Y2y`l!j5aopiQwDblv9`0$pi5B}=>-%yk&Dx=E022}`?G zJPrcVrGS&edcB*eHOvb*93(@E3$BxVN0LE56LurFJ#ei!UT&BYggrBly*_xRE4by_ z{g3p$9*}v6$k9vYHKULAL%hjYE!u$^=J=L z3s05k4H)MUzXo*&K*E^l`4uz4KZqf+VQJu{wg01n#mM012`G|pbGm4Fx_3`Nr}fm7 z`nVvJ@qe%v5e9$U0D=wzwAZT~4pZ$;hPu8fxlZ#sPnBRaWkqDf-&s}nzWjoq!~Y@9 zA@JmLaE!f6q_G+|f7*BjOa+i( zh+XO+WGq23O&G_!Rd8j(guwy#xgH#YqFQwgAtcKcBO*wKzfD?2aYdl*vykaer>z+K|EK5^6uPcC4p87kY zglPJ&G51I!u75$Ln*kiyq2qrljk=}q6lDTH2xWILAOFb@-dY6LYgq>lLC z)e+hiFWqt8hZ#OBedS4I9CJQbSB3bBJP~B2`X3Bc0FVF4^V6X#=gGZKS#S16Vby;!MR=tkuzSUN1w78oJ1buVzBT-6bF+X-ux2 ze*pE!>k{xqW(3)GPgBB(;7N23n+j@vnZeL-yE4Y%#r7zPKwXHNVtGL!EUKG(zlUl5 z@0Ff1+4|Ga6{;v_P;95})(;^;gn5Z4jX8bn1OIk&m4$x?BhCY&4noiTS0kKK!Lw^G z7#jAU6fuO1DzFg`!WjK0s*(net1)T;XqDNrra^n6~(TBOL-RW~p6rLbOeFJqUL z8ct#eef2-AWXe0-m28OAq4PHGW_3|(jd=NR=v+C^d>2II)-oJ9Q{U;^TgD)A5W@WW z58ZM`<8}KACIh-(RMok@cFB$^Vz45G)8{8p9;B+p)PipT7yA!=ro6%H(+4Z++_A2V ziLj_Y=3^t>_lLNAvhpcjKp^`}jAo4E!MEV8{72ivBMXrH6is;x%mC7^9Fq~oP($wh z0tZ~w7vIM|;fR0oGTBC(6jJ^AUt(HA7G6r}plrHwYoPZYoP?$SEST@PnnyDKx?V_! z898(1M_#*-Kfn&*yR!jdUPjvE@vC*FuPSxusPY8NNFhPlw74G9rLWf7OSr9`6jc zD;*!?csMDO@pXLqmG{g-A_+8=SQ5yXn(6^)u0AbI;%&woF17=v97TMXoNaf~e05EBih?a&Ml7U^$s zPP>Z|P_7UFRwU0#FwVv!<~lr@l|rNl4fm}9*^3Y1gh145UgLyTnsIw_-l0Z%;^}Rw zp#d6V2s62S$E~P@6p$Ta4E~c`Tpr+CT0xw5Z!gq4T@^G=fiAknh3?XktQ-7D@AwbX zc`mjC>W|NI3s@0JTy%UARQh*kUDt;zM$vuBJWWl>V*2R`B~-^XY&V1y|4ft|RR<(5 z!Z+ARx`CjU`s*Ppj?4(Ilp*PnW5^)&`jEBhBMd8Hr}AF0-~JOV$C1Uo-duiY&A#KO z$t4$hv!7~Bl0yxJMi5F-PpX`|+cFpZg>|S&glwmDbleJ z%_*e06W(M7u#<5bIwJpZ&ZUEq>RHK0iFM()<;Eb$<>SFaO!XbV9!y_93=MyH)wpJV zh~l-iSgp+tM<>rDx|@$jlNJZ!?8Bn&x6Q=Zk|Vh0tIC(ToJ6&r7IDh6J|m6#R?G`E z`2Sa_lRJWNJC$jh8FVTMOwo=0LJ?uKe%I0si9WWhF0_1ljwX~BLJ8O;ns?i4HYVAM3Y-qkOVi3mg&~I_}wId z9C>1SV^4$&4xZj&0Gs~?$pgR1H>B9&u#SoakzBATQ6pp$GThZt2vo=enCJ`Fwg() zHLV2Ko{&QDS}~sh!9a5(i1V?WO>=wRc+^`Zan!$3G*Nf>?g?pFnxry9+A;*TA3S`s zfncMfNDpttYJq8Ch2B4ND>tX8&Mb!;b(}i+nws*LT={at7G)j?@qnP`GUsEY1a^i_ z#ft#(mDs=%lc)>~m{o&#D)L8L)VNQb(*d)Ot-)1f^y(Y}VtSVRSL`5&`jkRJpM?i- z-MEK`DYhNLgg2I#;t`>L?00urnNPN*+_oEzl?C^uwnk>N8uyKQ_aIIpB(yT{#+F&_u5jw)Y}Bni2rd$Q@)ro-`6fWH+ueJ*TW5! znm`M~%HHP7cF>V;;_Kt$(jjRa?n^VNG15lWAQ0Az)Y>?*q#YI|r`irJ=1l=p2)S!{ zQ#t!Y$>q6ry@IjaSacrQ3CF7QRk@aZQf=F^H(b^d*g&_{l4R+FD9_n1iVd4CvY1mX z^IzSa-qO7amaQ!LHA=@P7{%etL>3beFzW2J`k?ESHHpN(cc#`1ivv_vWt@u@$RMr$ zu^)`9gY~7soNul!baC)c@z$q%poM1t6xmL0Su%2EB% z2jz=1W zM?NNVU#aSo4%yLOhIonVic-P$vlGbRrGh205{rN&ns3`@0T`ggpdEp=O-tN-^MsH} zlEu$xomAOK&W|gK1JyC&s7~d#RE>mE)c~o6Z#w>eg$luqx6qU`2sd#XuI?f(tKLLm zv+t>Yt!a%^3G-<8LJ-^KSliMbVi0WQW=_*?Z+Y(#7So>Z>a_?CwEK|08P~+Biin`* z`?p5i3ipGA74<`KXMZmKMdY?OZR<>%94{T9)5ICBS%ZxA_hI;sW?h05bxf4JbH{$S zdO_K6;pQmpRm?*@DWE!TtLi1*dJhk9tt(xeK`Nvn8!IV%kC@IaV_L$DHlt2JYqL<^P>} zrn2zX0h5TuM)13}Z3wa~$JYQ9!O>W!G(y!qqg8lH`P$lV=fD$)>+cBOxtN4E_HgsM z;{^?Dbjq^at+qgdoPbflQyY>CMoiefP^1h!cKAqZzuIkAFv{) z(Ga~hEnk3r%kZaH@DlvM#aVkGV6t&3oFju!ec_G_CO*5NBq98F!ABEVk&^UxPkj%I z$;F#GN*J5_;I}uVUW#c)vkv_x=k}-z7Vw3Z18{3gacnsF_hX}88tY-$BN64>c@HZg z_Qxci$zd-cvvJ;yy~4LAdd||p^O)rfIAUP$rzqzp&>AS42msBh5nd0D0CoVh0e5kC z({LUFzzsG&&OVlXJg(Ds|tvlBL4kXy| z_NwE!De&9?J%k0I@a>39f*?Q$wegMd(2g;py$M!xPlLFtkGGsyF>~(kc7wg@rr}n+ zc~6mJ6%BXL;JMoo?hs})IlO5%lAthE=H=`x7%iz*{}z17xQ+@Pw2>~i+J*5^_FHwfZPe37a@+fV%Ybgi2uX;&f~og zb$?~x9NVZ<$zPIKQ>#C4jIbV8=&R`>^%^&IUwI9a`C$)33G*>Sm)SFvN$T zV&J@`YoTLH8P{NL&31w*H=e3HeG^NA3k^B=SPkx0KI&484(&v)n)Z*t8RJNj-!Q_L z74{-bCSXdM#N9=;-qrvOq8y@%Z@Vr^E+oKj#u_)c!ZG$u<>hP;h^UM7#yW4uR6$bK%oscboxx8~A~o$IBatM`qxp z9B3K@>rQCws=w~wkOq)r{Z7Q5a^4Lg;Xb!n9K;Mz5zTQIxufva&DQ*lFy>}y?+Jy! z0w7z?ZM2CB=EnWK?Q~OzYL0PjqXN*O%U3)K4in;%$pMe^e}L<+uNt{2HX{k@I*4>X zpDq7^^H_GfAD$UBpvIPdRalnIiRQq4%D>+x!9*y^fkEt?k%F0)El_&$SHJ1od!dI|_Hasw`OeaV*`KSgUV9)KsV>l~nzDbA2I%FZ!0 zYE|%Cz2=U%L@9QBsFrRQKcOuwmOW>N`H^u>t+&L}%)G5DD7SEZeK3cgx+}N)gS9$C zl8weJ&$!a$1?i~Jwb#HGXC*d^ZL<&6XQcWy#};l1d9;q~qko;dIR-q>>^nWbWbiW}H^2Z)Qdfs|cik)_T^@nT|A=x#xK2u(%j2o^GJ(TvUDyFzj6!HDZJv|9bp)LCZ(K zd;mXdq!{2}Lp5LZs7t8#c7-B}{C8Yx$492vp@%E5S#k!{Brt|dR(*J}dr;HDUF2?h z^5hB~ZCTu1-rlEDYwt%npN}5YjB0Uqr49nYp9*Ys{7bnH#h6;JEMfp=N7qzuE-TJ7 zsE|8NTK(y4tPWVk&i7m1h#AY8XsX zT#ReK-0~#>v?>JAdR>(xMsO@YT%20|m>o3@9F1~pyfrp=`j^^Vp%*WBuUd!WWc{j?WW;qDbu`F=3uDf#q ze`pITkt+MvP-31ftN*=nnoryW6In*+IkA3TLo0l6b(K5(y$lB5VbaDCDcBD!J)+EbcXP&mrw4Vwqd|_#f zjhxs45iNVqJF+fNEzCS58-8EHNvWFB3!)^q_NsYi!Uc2x30HHU zV&B^!{EUMrk@nz>+ex^`Y@?OP;&Gf$XzI$v!>HI%H8zWDY;BLat~g1jW;d;Mdj-8v zB{44HSk>y4lkF7TP)_UKTp8a*TM=IAMFJr#H;HKkyT2CDH-}CAuWFImSZ}7@a@rkl zL)PN$S8YzQn0~jnan3(WZd-Q*%zI3t<8>;-9(zAos?0SLVVe>^em^t%XfQyq4=ZIk zpem3Y&aSb|E#(O9+pWi8voPuni$aR0$1fuzJ!fr9=32vlyPR1S9=ODgzg3UQ304^Z zE0*`{T=B-aB(2a6{M@%ix%|&lOu$BQ^87Au&_W z@IU|r<}KH>HrO4^BZkKj3;e%74M>}b&`gjzv|f0Q$DcAUs%x4mE~POee=j?G%qGW~ z){aUKi2t-H07B0{ln0Ec?KkX34+mLeoS5YoVE&s*1V80l@n8=2Iwop&o`EjB9Xf+r zsCra8kefQB08TKOyaBd8KEr(kIVdu%;#DdI^{7r9pOW0jEAryHSEQDcGuOS?-wnpz zYD)8jNo3J|_dxBjk+~JVzjAUdravd$kM8?+JiFwMw?X#%^`FB+B4lORm%OF@zdadC z@qGfIgcU$B?Io&_&q?G%hQzKou5q}(f)1p2G6uHM{CXo8_3iQWmEYN18GloCbE*Kh z;qPgx>8W-5uY8)wpIr5M@1~&7wk*OcO*MW|P0D?v@L6leXlol~RIb)S1yf1qXHWpN zfPx)8pS>({1bL@q@ur9PEvQ)K8`F=ujX?E!ny8?etFqMFI)IQtgy{=%)7^zpk>hC` z6PEiY%+^k3&t^^fd(R1cU(ai)EAW>q^B3=^j7cB9pj)5bmNj^=cTsZsF~96r9S@Jk zjKeL#77!Hg33%R|)Z!rzbSkLV(cZbihaJ6ZC{clTObvckZ&cT+I=5QJEMi!*+;n6u zcVyG%BjjU9CaP2-H8A zckQef4h%LNp?!Sr0@?mIC%$L>kl-naG-p)vH<^EA`XNtiDER5xA01{m55)veD+%w! zi>(OYP|JR$*TtDAvz3@IniL=BmHcD18Fg%>%$)h^jB`ez zS7T~7c)yGmuX^rV4?cGgLRZVMn)B0F9fOQIgcd&QcydnP+TC>{Xgu0?3WaqZj=H=w0qv z{LMavmS28%!_;toM-b$6QH$)(e_Ma7^T%X$n2Dvdp#u04MG$WSyw(KYsCPEKPRNm6g}V46w?EK8pZt03@C(GrUv>^Tv-(nnec-bhwf zL#f<7kd&$U^sLP5)Ps~>p5Dc`DYwQ0@O&)U{`WF&UbaeQjqxAjyfq{^8vZp4HuMsJG^X{-L8Fl}L3D7e47_6o zLPI+e+>;gitRS;5py+hHhW@%nobkqm90r>jj+r4xQ^BQFr_TY-!!m$0dDw4wjNgwL zO1l14W39c`1lYhamq9~ORr>M%rS^X}2ZMhe>}Vx0<4E3^6ZBSfg8XN}4dX`1#1mYc zRWog+og+t*o{1#~eRn!2cq4m?PBrNnD_9%~W2w*Jp|D5;twZ2`)#J{Mm4)Kqug!*T z%E>;y{m<5KY^z-`-$QGk0~>+fzHI5Oxy9_LdTVUz8%M#& zQkHJ%U%yU2Q37kC3fv!(={Pf=tpGYht|tw7c0L5gxYbauU7fImxDA18JzIQuQdiEh1!3fra=RcBj+M>YxMe$n4@tDwp41JPpO)CzSi&OE}7} zQB&LBO*%px+}!3kcCpLT7<`g1iVxeRFnk8dwZL=6FmADq5U|(%NJ4?{FL(srmEpN+;_S zvSwOc*wfqdyBP{d!CncTt&D6KXoXQe1=Ey~%#F##^!GwId%7Px39as*O`Gte1l!4I zVxa_N&6Lmeyr?D6BX7#b(0w4c;Wy|(Z@ZQ&KN@4EG#IfH9uvL$6VJ_%m1I-KwHMhWfq~v9Sj}(@d z*dI*#hk2~2Gpy_n;0whUAF!VTenXO(52DA^1iv>rl>cHtT zFuSuK2OU!)h#$3|nLqcy=VqRFC|D}n+RX=R7f;xYvM7bC+z#W>PXEHR)AiYE!uh8? z#<3|#`Fu=a+pQ1Qpa_4o^)MRmrw_lSLtQ1bO7t}xxiHk#Ro5Ksz1POBep%}sxeC0H z0(P16YC2dgo|ey_Huy~(LaAMZ#u&_lgUcg3jcvb}i z!dE!{rSm?V3;(y-9`DiPNj{t&h4Pu}>g^I^L;)dFt6} zyv)a`(+|KpZ#C5!LfEL>8Q54gM@MT_x3p$^kOKA_i4!8E!!KG^5_@z~;fd3N9Dqpb z(b?$U_!`+o%rJUJ^x6Sa;w;QaXx^n$!u=<~r|&u}h%h@{0Smg>CiVcR@!C=&15T6_ zSB8o@%zEYG(6FUKXs7O{Cof7?VH>0p231( zl9Id5lYUgrAxlj}yz|UF2ZdKVwGXw44lPOZ+;DAk`MIs3AuIn}!f<;=K`OlV2N>Q* zaA?&{Xg_Rw5Ut|h{WjEU@5cbt^ZA}??_KNxP>~+cBlz^7iK8W-?KkQ>o)s#*5nHB6 zWLlq=f8^ih)VeP?>(|J9A@lhG@zU&2af!&Kt4i6V zVx3}#cx97sR6Wmb#-<&tgj3Obh$lk;lL3gzz4LMO2nmFS0K!0SSaMJutySI0i2~4b>}VT9z$LJL5qah=J6YL9}<+0Pqdk-dCs`L z3+Asa>YI=*ID%JxcrJody*O3-dBg666^HsYH6IC>iL_hTuagY&cOwBIp4M& zn1IOKqo9!4DJV*#WIbTi3L!}~{D$7{e`5K;o81T))fkLR$L`wpCB@rWRP(iHjs5(m zpO3GSK^JlP9Yfr^&x=p(oz%s1QfFzV8Vh72FL)85qmy25>j7Q1M;ukUg#|#^WCuY> zO%rLQYb~TZkJFyGhTcrcF{NQbp12m z$}TD}`TK4_X$N@WV&=3oq;2-dyemaVIAx8q>&(2K!-w`xMr|sU@-TG!^{|&e21My5 zA4IfgFqb09*j{9wtNvRkqB9K{Bi+tQ%W=^efFWs)~}FrAkob@uh&682DqF? z9z^6FJ7jD7{+md@$6Bs9e5MVf@rg-S%lLY2&$l_VFvJs_A^}x zAIO{^?E4GG_?+LI8L_Yx)hXihNcR=J*mKsc`Cgpb-7Z_fNIkMU`F<0o=%%C6Ol;;h5AhuDPn-WyFzG1&X8pMzx{IhhUMfRZfY3gSe)mn%klrTtR z+&*(ZrF?B;)-Z7)^sn`!xNl(M%1AQq?eS14 zglGw+fm6buCoT5333C+oas}n#qW?60grIg%-#9`45 z50&F6KGX1^#DeyXMmr|kh}HPAKH)Ow{O&a4?-9?2l;xmx{r5XbC1H<@7{-s25m)sb z+TGV%>h^2fVet?D1khB&A)xo+HWP;^r_!n3nbK1OY^0f;vMS@AO(k}R>+Ce=;)poi zkY~3o;>l4j&>7IR@2;&~cra!?C-syXB6H`3R=xNqFH2Z31va;~4 z+sGjfgaXAU>KybzuF#a?%evC;D;A>?;)rt|rV-xHCx$O?kBK@%h>H-6IJijLTq-1t z6hj>{AeiPiH`Wc9Rn&b2DG+RY%-=gcV{F+61Z!PovkbMsKp1npCS zL=*36PWz&ZqZ-0B)+6a(>w<{WeBy zph2}~u8 zrF08=3yz=pc3(3!{#+^uQfCp*U_tvHUF?}Mzr4Eta?T2w)ySLPPmlH`Ut*(G!yH&Z zG}c(Zf#DyvAzSxgfQ0r6C|>V4SrDbbtW9ecIifV5N&sNsbPHyCpanCvwO@T%IW%NN zMuVo;9!1esPj`G1%)b6&PCzMkBuxLd#@o1U8#rnI`BayeS$t?a&a_-Q`6wpDQzgp!(pfBLCIJWQUIDVtgh;*9)LjU19%b%RO)l}_ATTK zb-y7QXVVps5Sc+?!&?SWrPABklxD-#R{QTK)6mS~ZLX#hC)V|47A3>`Xo#0&zh+GJ zsyJRgJb#Y8=qrQy>y)-Tnhyjc%Z1mfM)JQj&>5>hj@K{=V(po&5=zefT#Xi+3kU$L zLFU6flxDVDX(&^vS0oGaaXB|~HBC(-N}2Vp0Hq1TN<;iyuh&)+IHV<;e-W2_Nztnh=Z)xo*x)Il|FA~fH9A+Pfm-QwX;96qnLgb+ zpym89v;~bK))uXgsu34Wi8J30B}kfLLY{G%0M#|HRP9-zPLa8)OIR8Y<5u0aH~6G54j}_7*^4?^t>F__ zf!77HGK2En=|o9YWAoug8@dj%i!UTh)l3F|v35Sy4#k7DI^5D=(DkbFc%EyP*A^s3e5*FNr)A; z;8pf}OBo?Tv7#prlU3TN`6hDRC@j~M)CY{I)Dt~&kmLj9b@k%x{?8#_YX$4tzS5J0Ru|bQRmVS)Ee6mFY*h3d-jop^Oa?Am+#7*1TeJ^m3^WsMX}EZije3Z?V=t6si5; zd%%b7QK7w35^WRCym@2P6}4kxiLPg?CZt%1g3$H}Oxqp+%D2L+o_~}MJL|5w`3&=p zGkQ{0T&IhB!Mo_mN3@(>cd7KHzgCN|0KZ4jtg1r-Rp#Wf>j5HKw9`bJM6an=(S>ap zQ1|x;vpR%fRwnco^0sTrRLzO3-ZS$PtiV6%!1NQ;3=Csc=AU`B-KEj;8w#q=o9E-x z*47?Vr|8)qsjXsgNv2kwi4h|x!|_8h`TON43Ki+~XW z7qeXMI}Dc%I@&9Ms*W1aGAIXoJEN;KkJ6s~Nu(>Nik?)RXbrX*uq!Ng>kpuJEf(nM z99m~_nk^+m#@X45!&@ZS=9NXrY9`g-7}xCI9$#AHpHtqw_pMd?0SSbx1kkF1Yh`un zhytm0gl^l0PKirC$DNw8fMKvh!`G>09Hn#e0*c8sIo&@aWd}iJ84y~ras&=Hsq7^c zMM+nf$DYRD$_v)@9B>;aeR}VE``j!?O)ty`aiA|M6*k*Yxk^zNmiNAo>oaE5uDz~7 za>HqYZbWr^LP*&?su5G%O_wvb&mKo^d}9Dcck%lzV#E!AyeMB^e{A~^OJpzbxL&9V zgK9Gt-DCLT-?mcL`BI~J{ZC$e6TM@mC-+#Xi?|#l#0|xOk8s!alzM&Bik;}}YghWP z#u4OCUUMW{QfRa8PfAddO&(q;Aiuk62MG<%PY03lY)=7ol(5)9JK;vzmF7?H6udau zS*>`CI|EbT=Zm#x~ohCY0p%@wD)#Z z@Q;vTCgG;7r`2mJ-JS?Scn^2Z-mDBoz;?-YRvo?#GBeoKY#p!`0nxEj1x@9|7+O3; z1yMa1(7TD<2!N)Fp3`^G%4zAT?-(z)4`JDjti?;lX~&-W6Gvy15J6}YV7T%xDBX44kgYk(-6 zg@x}!G5~d?fz2fsu#aO1EPKia@S(zOL0nHWcvQ#AjJ*m zQ=O+K-kAZ+0e1Zm#=u2kdfZsd`6{_+NzLpl9_WzOqrB;MiY+mrtUU@%3>gk-0u{;D z-UD|lYi`nZFpU9xrV~`fXP!9)PsyCjQ}}xC)mzgSAzVmO{UlWD>~&5x2mymch>W(VaS*G#W2m3qGV1f@Ua?A#U^N+XPxQ&F3` zs28lNgxA2x^mwY}-}4lHGiB+gq73f2+cp-)i{_E@vh z&^v&$@)g|NScEF0k1Xo)!bi65g0O|YjI~`_->dr}#J|>5!Qu=DSRJH39>2`~SRd`yXT28h3(Ia@)m?t4jIU|KiE)g zJ;_@7rNKI&&6Ealwtgq&XP$sN5#MgfI@vM2r!eW5KYP+~{v_rLqJJ_%gWsqI$ ze59d%jppVx$>96Id~N`c9^rJKC#_oS2^bn9d%K=3QWU9m!il56a4`RLy{B7`DGhWk zm)2oBf4a=Ks3 zKZem*obEhCnI9wot2zrEx=-7@#L|v4Q@UfZ60mk@$2@zHs*Zi{*EJi#&z8&&uJp2^qWzW(%LXFgiS?68z-dXO^YrU`gOjGzFRsb z^boun4=}9uAYfR=Q)j6<%E22(*=Ch~HLTqI8j{+pRd~{oXOO1YpK)toFtK_3a#lHY zZuNK;h@RD@4(ekLY7KS6Cg%hCTx)Hz%V9?my1aW$5L{YcHei-DHRjpzGbFZJG~*Nz zm-E41ml=SRX7Cv(WJxza;^{ee1Zqs$6$Zvk+=!{a*L& z@J^nQ@5H00kwJGVp|CkY(Y2K{AVfn3!GW3zUS)~S&(GZye-^%tY4xDb1R*02)L7VJ z8lXw!}wZoM<;Pf1`E_L5}wY+Jn>Gh9`&|L@|HQWO_HOzRewhSxIsOG&4`shj2V%vapwhOm2p9Lvku%p@ zH%hBD!AQ35gZ@?g*1P=8S>`LxT`>QhwtF{KzClP@CTS+21{{e9gFpT)Y!fp)1pCJ< z;NL?=L**92q5Ifwzb?%7GWqU^a8$^(y6rQxl4qh%y7nGmJMgiAfmgt%ZXD6R#LWM6 zX*B0^YmE}Ud>9`QBy{Xo!QI)2n&bNsOed9}OSqNEp4}D&O^lz3l|AD&ie)#h50&vQ zy5BB>jS%S%a`s~PoAEcz*dH>w} z;_`=yaOR^nBBHBNDJh(ik)zAb3okMPWUE*-N@EHQlhf{!l_^^F0?sd zNwl+io<6sNe$qXrssGR;pJ&t{FLKkF9)o)CFKg>R_YW%h&QotV%)1=T0{KsmQ?LHL z=*mAu_r*ytbqU$8Lptn#9)SL>R%R}ZNAvTR*N1|a5ZQ1<=A79)kI`N27S**zQyuE2 zl;c=REr+=$SWr-Au>L7zNucMg-?h6ZI=0UOw1{*h$N*!DkK!YTCV#mqy=F4YwqT6* zZ8gC-H1GSw*3@*UA%2S9!tupt)oePaUiso}NC-|n^yb*^*gTM8^+P}vvw$tP?Z1Yd zWBQ}QzWndORTMRxgF+r|lD4V3r3svKt|^l*qVzFhpR*oEch0ZZ!TVJ~vX3{uMIL=` zg)ylCd)P*PutWf;nj85~hn}B$0bDM*X14Ca7(Hh_L(h!YnbDB!6W;so-P}?Q1huAf z;O$s8FoC=r_&F)a07>n#oP>I*eIZhlHdbcTbQQd}5d zOQuW~qO*xoVp7%bBI~P>bn)bn{9ixUa(gEIM7<%A*q&%%IyfJuOTBh!5}!wZvBGRr|d^1+dk} z>U5S58T(MT1;xsL{t7r9B~Qfoo*bYe>!^m8a~>a?cJe0#b?d1K{ay_4^%p-s^F&t( zW-o;X{}6jscyHhfY&S-q;L_%PB=%g{)NMbmqa}Bh zAu5*q9ww61@wX(e{F(FzFr>|v7G^#Id)RhE=CrtAu7Lk2u#oa{J@!D~Cyu_5d{C}^ zw|v^R)Y77MUc9gV_5&xyH1}Su&S8rC*;fU!ff(KF(oi1x#>c#HIlBVm14pjOK*-3X zOlt#AnqU8xrM+d@FlpHy2pDCfH2%;(#t%DJF@ubZFpmxFg$cA85pdeGZR@nB0NB5% z;u7uCC3TA$8;ZBQ?AsP1R@}gNBQx`oYI0(q7W3G$cinYwL;Wyom7!NHS7CZbFqm06 z0Ln6|mGcQl2_XHO_d)Sj3mv|9TjrHfRU3hCNp4qW+yU)rDItxZn%-%%T;I{P=jEOq z7N;nx(P$z8%}rl#J{ZH6+H@xod( zb>%aM{mxw%GGoJr!X;siOagVJO+T47$b7%j#G&nK0=|FX!h&;3!r|)p^EeLjX*32e zWQY&!)46^`tp45#c|gM9)4=bcUK!S6pa2jtwiS?8(o!O|eqmu!KlOcFPP3e)#MNN< z$Jru4ZW^v>whPiAv5>Ql@JC9l${`q zUvFI~k;KFS6F3{tt)=PgLzJg*_v&HxA`{r)u?_JIKT1HY_VD38!GDC`p9D7~U~^*V z=84Sj>qH~}IGL-ajwb~4#*+Rx2ecakR`%aDOneZBYVvpTBncTLq9lOWL_tEMI^vP{ zsjX4KFXh@@#;$?KxC9(Dx^njnxMQ&;u(1n1^-V=kQY%no8v1KZb@wp(W0-8iyr&%=iJP51;EqU%|;; z+E#0>9$i|`-1x513$*ZNzT`DQUHnwN4~WCS4F@CcjCQ%ZY<(1Y22Q!-mW&%p&c+y7 z6XFJyN`3&++u`I|Wj`ND2Wq9(oXs=%AJ z`BuR|>e#Hyq6jeq8cxswRJdi`8DCFIdeS2nIDd|s_b(6uClnpI>LTuBTUJ4A zM-{zI<8!Hde{7EpT~EVLh2k^c1ta-ROVW*no)kPWCtUysr%LSE&@H=TGF|TKiVbkT z?oOGSF9}MYAG5=1sW$cZ!JYdu0Nr+PDd)nnLB^!5b}<-I9vD9>!v;&!ZTWRiLrfjQhd3wh2N}2a)aD$$i>ZG2j{$vJ30~zybWOvMs~bo zzLHrsGcqE!G1@U^~vO~0Y#0CkmU*GAns0bl#Yh-C|nF;&Kh))^50Mh#_%xlx+pmgIe* z?WQ+AoCSdebu2&X4=15_`>#3L#vL2w*&Ytjv`f=$)YgYHj>8{HjEB0o^T3JP@%j#> zaAium(bwRK=(S~|ib6FTCzIP%s;A$dv;nR{7o;ci&wX-+e41mCk&r( z^@T_ltIV7|AsezTn;OL}FFzKdHny~*d0umT9Kz6ltN#250tkuJf+;-pADc(NflVc( z7Ma$>$p$SnaukR0xFE(jMy=&ZzgDBOUjlb*2w-KdIR-fn4PZyl0Htz_0;DW4asDYd zfB2SI|1Ek4Zc-TG4|;HT;6kIA>TSW&`|v%{@cHB|$TzI`$-&0g?Zbjgr!W8NDIkVp zB4b^D|CeV1OE+^F5)-Ji2al(eiEU`Aktg={#Xr1|2t#7p$FMS%SB|h)323=TkdWLt zj+FWXL|KM zTT+eUlhmO3!4c}?auQB^*oJw??FS^#>`D-k!(u|E! zJnrxPi>f0ls1%x;_7$dwm759kc?jShY=~CHV z_4BPH3j_50W^?rA@ylc=OHQjTQ6S{uqCi|%hT5*SA)cGt#6@!A(3TPynfMOJ#Q40t`jN&p!YPt9-YZUEZV$t zQ7_+I&Mh-Pu^zwva^+$h%(q}0;sF<49f5fu7kN|Y$LCTKemK2jP7zA*?_@3geayke zr4u^v;Vh;!7NX%^pmpH^CN9`{9W|RD1wY+a{L7D`H~*wjS%jfZX#Li35aim97quX_ zFC+1_mtYE^Aqyv7VeeNB#6|{0s+9TH3_c2Ab0At+3r0%EO=X)Le>53duqvx&6P*ml zoz3QyjUS8~prOyeEzua9RFA#W==NgRW8eGa2pO}Qx8#7h0bj1`^5M}w^bA{440Y^2 zd2&Yi?M@+|0bKBipmMYIiBPyoqzk;q(C>K9nRyAtA8+Wu^9SI&ucSuC!2#pEFEeAC z&n#_@_7$d7(%FeH{@1&x4nTj6OtO9pN7OSk_VbWZ;AT$A#e*WeEUS41z#c&0SLNHuson^sE^)I{Om=ZZ16#Gc~?u~?(`|Bon<_4=JMVwmp0Ho+j-1bz-BRM z3C1!O_5i`IgsnBQpr00Ow|LxzYL~@H^n9eYy*`tTI>bS8oPKW|~G4u`f3i z6WFo73^7{Z=O-@*PBJ9pNVM7f*Mq1ekQ#;dcMg#5OMH;OW57PzxakFD4}xXw%*bm` zvZ+_+I;eC*Dhq}$-Eoz1nwHaR4Ja829*P}Rfy%Q!2w+#gTym=2ayjxib-a1rmc@D zFTP)AKsS3Fb>v|vtGLGa1$Jd@Lnh!Yesdz`==1VOG4d&jAvZWx!jFsW5CqtFaD?5- zU?+l!!Qs!0$*q#rTw^?AMq%W?7rPj5v8AZRpQktyL(wCJQ$iQP z<3-R}Ea_e&vX%I-0~p=*yASVvP3VNH4R-e27wl{{ad>C+EcAL+XGE_@sK5Qf;|t>_ zpId^{TXrVbLW%`YN|j$_0{*f75~{=r4m)hMiCo+;gSHghPlfdF2Cx}?q$Jp_X8^ej z`w$F#0bx@Xf-TzXyb6_X3ZfgM<0pZ;okb`Xut1IJ7%-cm&zd z@8_4=u?K`J9?~+oTi+2er%&9lFV5ZVW-i=+l7W;lSg;>>3kJ-(A*Fi3=Uybkq|aRe zHpc4eri*{q&wey2yK*{j`o>tuUy^xtbN?Fhm&nZwwGP?CuQqHU z8Z74tUw~ABe?cB*8mkeVPO&*I!$F~>cGl+CJeTvhYjWQDi=7Vce`J@}x|}2Ylab+@ z;W`Lan@#=+t`R}&Up^Q@3t$H+Mj`&$;rEX{z+Fh}nVW4W3BRr34^Vuo-tG6AKB(|b z`g(#i^}OKENu&6Zbh7#J zhmrX8yJE5ZBXg=BQGwN_pC+4w?ai00N;@l4a$L@SoH^cgT-&qx;^zE8EX(Y`^HM7s zvUnn&FB&LP7|KA7U-tG<6vv#|oy7A2)zp*m-^iaUiVmpBu`B_vX=FXZR3)$EOnx_< zHtRy4FDhBu&Y`_6)GGVQ^o=OkHRZs;yv{$pn`toE(h#~0-;E8 zN;j;`yw!lEluW1ZGds5N-p`r6Dzf@c4-B>#SQB((*8&;F?s%4mF)@2nAy}wepKI|= zJF2*QCD+(8#p__oY^<98eEj5Nh{=WUZ4DqO@jMJdar!00!1^qHiH^NJ>q1oq{0wK& z#X^q<4((l6mymP9wcNgd7$F0bKdqBka5_PXwBW^ir|!FM9ZHeCHz;E|<2fy(V<}yj z#>V}nE+S_9h0Kd61XhZ}2%$Y_GQJN9-G6Q%5MJ(|?p2 zKXqC^RX4WB#dN;+EK>_FA5n1|cw9=W9Ai-Z!OP)O@=Lm3HzQPBQ!8F~%+4u~#r{9a zzB(YPpzC`P6+r2{^ozC>{jhW%bCuAfM zwM@U%B~!ZmOBZSiD)102g+t=k5FR+~RjG*|4V`s`woGiN?4biu!p)ap#9$@#dA<46 zK9A~uPu#){Im!pX>McGm2glhYnG?CT#p2FP``kyQxUZE-H(#x1D6AxMJV2~6`$l<&k0DFO4eP0(_C=Y@vh(PQ>M2I}_uWi4Je|Whn=|f0M9Khyh zT$mDqNW@@yE0`n7tFXj8vy!AiZB^HuS;XDNG-U!FQunbu@%b^r45@m1=hw(8i*S9d zB;Hkn0g+hmG%|b9fTGZoBX`-E>I_UnIBwz(O%pnaRYQW0D)AlI3B7okVw%(_rkcl4 zZfho3)~*0Nz2MsHs@>LoY)~Z#u7hp50Op?V7_h0y=o}|!H4}{Hf65vBW95TN^-%>v z3`oDp@>3%VMBbg{Y52hDouEB#pfyuRlX(_u0xD5X*iqfEi0;di&zdQnDl6*2X@mie zs2f$W?&Hy`q~5$-&|X2}~sof5L|B4N7Nu3`scM=anW&%t)L5 ziWD<5@?>iKsFB=BwqMb~#IP{f4TFW@_LsGcCOH)fgxsuc%1~E=H=tdOyVd+q<60`% zf!})OuY&r*?K(&N5%;|1cU|z6ya$tDFZ|R8 z%;6BkWQoTGR)jzzLY>D=J>N(*&?|o=SKoNV<{T>XUHa36BNS|%rs;`OXYXFo-x#hX zC+0OM;|QA!Zb$bgi5l%-L&-K zY^X~lcy%XuIuPo%oZb9<^}AcKcwKc)RGJt(YJs0Uj&#lIo-@<(rK1w|=d=~C8#n)n zYH6VT9MLx8B@psQgdxANp>D1*!1T!PU)PDbazSpv9Neat8Y`(MI9xk;#GK5t?(W?u z;OSIb<;yM!O3bbyMc!|oOedw(i)(;}dwx&ESnuZxblLc9=ldqTHX0znB&ttKnkm}t z);q_v;CGVd2Ht8j$F3ZZ4POl19rory;AiT_kD|M(`&t7v$!6BOm|F;drPoFG*3iV zQTE}{>pvB;Rl_Mi3EKbu<73rKxkx2f>&`l7Ehw~Kjjw{*tFSEChnlymE5k%;v-I9o{ z`)(nUpuB=S$ntaMWRAaYznjUUrhTF#aFp_fOOqE5U8|S=za%k}+wQbUA!(IU!Kd%5 z7aZu17JDN~6VB~N{xaG34~b@Ce+og&H!0WvI-%M&FN?A69z`ydfNv9V@$8xR!O$Wj zhy(uU97)^mg~NewcWX#Ab49K!TNzhXlqZ$f-_yA1`oZatHc{;A~be6 zxsX$`zMN!>&nX+>~DhUY+(G)Tr8A;9t_M-Nf5=;F7K%`T%%n=d)& zNbd6y1vy1Ym#X8abFL1{?f1!JX2Rj5%C<=w@}#S56GZmw+J+=5Z=LMVZHT#>`jp1g zvONm_O8;BQMcqVDwyb&aZNb9(ce*`4ji=Oz+52NV6J{i6vfD?Sn^JgaSEl`GztsMd#Bc%H*XriOusmP1pEmZd*;I-HmvDF{j9sA@E%dZ3TW&iwfx%!|Gtu zy7qyt1<^(~R;jpo9=FLzLZ~sU)v-BunWwmTODIiJDhS9CaTul z|7DTs7XGZJG<2(>sph=6zIF^RmdGsnEZ^M{_Ug_PTa`d+)*cHHCR%{U2MOjpm8^NS z|B#YGcpSG$HMmyD?LV8d`)|CAjITfFfAaQbmU;vXDYtX45)2kv{R9*9Mcu72-XBFa zsgeW&Cp!wza(wfiL;_f?L6N5e9IFo($sNe8qYAdw7m6|9dXnl+30u50%GY33di=X2 zv@#Lj{U00LY6iHMN~{!m5!m3Fo<>p!?ie-VBu@APZXtx$C4?Ao`VXj;qdL!8Uw(Va zcznCbT}#Y~ilc?xLmhZR;C2A8DV+y;XaqbGV+jydFG`rZ`4ZU8fL%PXIc5#Afh$V; zdqluq;{iO|AK-=)X2|Dn-d&D`>tg-D#w6@wgS}XQ4@PRh{H4th??|ePUHi=wiRJ}| z#}q)FDALX7{sCAUrQK?y?jvOtbTcvZS126xe~*#?JW*q-V6U=95i^AG583d-l))XC z>x)V}(B2NXmYi5GKmy?D4cnFIP&Q(_jm>YwIjl&R4cvkN_Cs}QM_#>w3S?!Pc+11x zH1paaO7KHu+l)A%)vsSWt6xw%P}~Nr5LDnR+(u~22M@*4D=#2

e|9@b$wYPAjInV^gz zq~eaPMd|8G5ixZwKVIeMfuB$>8~kT^>!64iDmN<8dW5&`Uw3No9o7NVmt2ZGsVtUu zgskO5U$uhd&Rx`&`{ZjHR6Va1e%#ak^*tG=n*qt$EQ%=HsHBLnQAsd-^p0_`t7^B* z#bbl9g~dg6LX7k|3x>L`3!v)c3y`_%BcU#)J~%#!r34CtXq7<*;oGExJG#>te*17; zyC@D6>C=~*$+C;FIulzXB<@dR%)mWUUg@0%4M#Br&i!+;2VX-%2UvI3Bvy$e~w;6Dtr%=P>3EvEWjX$4tKhW}B9rhdM{#Dw}11uj(uMIRMzl?dQPn@$m#BfQ5?C9M}=U3WMRomW@&aU zWB!!Uf&%Ad5j&X+A~ufR;=p^dH`NZ#I3f=m&$d|`KPrQ>jZ_1chrt>)$jxsGv+1Oh1#|g5H>^HU{{ClT?AHn@@LHas*%PqbPL*_heHIJW z(m|&OPlVq>9wQ4J)&C?5Q_vh}=Zm!sCP@8<8>owJUxE)wKM(h1ywo)?F(u+^? z(v2$SVKw=CA2#p1P-}YY+!lvQx1{F#b8>gLs^56`GemH|*fXx{Uynr zHEvE;bFz_&I#(3(Yc~un*IO^l*x(yETFjXXV#cImsLCj?)$*qCXDJzljM@82Dj%!&C>}2nDC5!W1~Cf%E+|_{ zx1w&ZCj81vF0k-RDK3-edplZjPpiT~WkflN^ID9@?&;fS2J@ah5i{=4-HMpjf@}fO z+MJNt=PV}iW@)#XN!YbLa?|-o08xFpY7jbjEa!Ea-|TUez$@y&x}v(vMC@G7YAP$% z)R+dA2g7YCzel(QB&w^bmaFAOc=QIM_yk+dw1tcfnpWkz*+*I_QJ%1ibDj=P$amfs zoU-I=VgPOvg+$xf)k&8eZ+dE4`y%|v`v>KD0nCxc;d$r0ScAUD)oFAob|mU&EguLp z`k`=1bNAY(g4ZC)?>zY!&gSsDBIxbd+S~`r!LkWom07_zpg6w7`c*uTEZd;*@KA9n z=@66RYPx}M9a2lxMI%9VwF(u~x8oNTDyn%FWAd-6SFTHqrgD z%DedE9!KlStn+u*tjeyyB%Rjif>>MUxgl3y@gJ<5>ie4bj{8!Wm{cy4x%+Of8k(YR z5lfW%67$Q#kL`BVHn5qA5tNgP<0U(@nN1puCR)rclu4qk$#vD}bHAKwv#0q7PQ9m! zhkX?To-3Udtt*_+Sd-7l+$~iWRF+Y*SwZq~XAxP3!jUNdp`4n~xS2dM)>! zH{4}(Wb&%32j9EVt;#36mM5s(J+Adp4o{VI2|Om9UVfBQ6GnI>+f(3oVY8Wq{1OkC z#p-(RS~~9b`HB*J&4A@8)CUYbnUz}9Aecrl;7FoT*s$q+dt>(>|2Y=dQ^@oocqMVj z{PeLtlIJ?XbIZ<=RzHgEM5^@^#||C2e)PSXdreQyC)WdROZM_3zc9brU#;3xq-Jd0 zBt4}eI}!avv#;_h8FE+#l;#{RJ6NHA!<818)oYb?u>mJ^4+*a1o?D(PDelh%F z+9^6vdDv`Gf!xiM=W3I>#D*8hZ7`SzDOGtva3eBUzn(jhSe}#fsw=rHUA4+n^wsE^ zW7D^9MhOKL4n?-YZ%bO{Sbubd>Rd^gjTL@ww)ufT-!A1^X>GCn-2O}mi!=~LeTI8% zta(#%psD^@!&o#T#q3kbx=4FMi+FM}_@0N`N?_ojG!>UDbD z1x+{zKU2KZgY9?EUs{;s_;^;1CwNQQiO>7|`2sCd(%ffv&M)YF;S|V@AQSx250upS zImlgQkc&ulT#Ix;eZ}H)(UWVVr;T{ zXJhTv5Sdq2kzY=qre^@QXeUTF0C%i5uuCFmlX8#|B=S7WY2zDp=K%von^8yF4;?omy@^8dv)1Nx2S%yWt}u^KI-M7-%ezv%C*DTSz$=FWLV->ra!abHnS4JvcnMp_Z#=! zu(Wvt(!fY*5wx_pQ&hU!bH$y#@eZ&D9+6$M(wF60K;(f)n1VC~zS$BB*VcP#086<& zJN=q1Er5=$g&AtD+g{X}ic8}y?#?y4O>tmekm_+X3~HZ}XURDtY1Xm7ED&8Mz|z9p z4ZdxHX%Kw-ujfmS%#Ig*NzvgO@=h17UdVXvX3jL%lcHWN_c5&LdcjAVVph-N9Z%sN z5!^{Qby(RnRffR@!&~f=_}o5*0$*dcdT4gs&0(c+)<#M-FJTnifbAvLIv)P5vMw30 zbs$)1KjWOyQ}pI-;9TtI=PH-=4PzbLKI(|ogl+-cw?k*jfU#Midg`Ob)KMdTyf%{J zwXu)0av$I?L)^NDfFE6Zwq@7q;tWIze2YW7TGp5$YYjC$;8qlMwg2&4HlM@-Q=|m5 z(adi3Xs>TKa|%hTR_r+26XfwBO1bKK8OPcEbkx;SmAkcMt!`3O31 z*d`-yvp4PC`%iG7J$;+6i0v_junnh0`>G&G^t%_V}q#JribUuCXHQZrO^`0P|g>M*6kWB+wdaxil zYx85OGAGo{&1GWht8%f+mXUFQxZ3R=qe+RWt4&hc| zqQf_Af}>)T?aeq*24_U#wNHMeZKOXbXFuF*f;9iRb849XFikP(}h!BYezKw1*{**I(6YI%OEUXtjy_#fs-s*#p4 zejTA;5e@C&$Y|D6!tgV2Fwgym!2Sd|Sofl+rJ~ic2PMTt4sI)#6Hcg1d~A(f zdG$S8Z>vde8Ap~ZxO*SWh9W|O-&+&9k68LHC3s(2k^F2B+#*>~5gLv{4({rI2fS#{ zNG|b+k>QijGPVFD@LvOg+~?8fjtZRz78n}dRZBV!WJi=ybo}gk(pxNl3r!nqLom>% z5bQz3o{xWX7Mm}(oY1}#-a2;!KU#-nw5&k+Gt))I9@V0@ocUv7>5+#(x1ZLn&pHJc zK!A#GKl@<#X?y&Y!_RxMpMTuCH4;Gom9;=yekq5tuPEp zTmgKoIE%^1yuRjRz-Fc_E6fsEptJaEu6U_;d{pxeRKka7Pka4LO@bElZ$v!7>nsnx z1}l~WBwu4{0*}@TMh~QconSa&sDy7P(0Gi$Q|4ZPeawX%J~tocu1dOZfir?l z+dvP)Umz;PZI#K~{7o|YK=t|k_;rxsGx%|QQXvK6jmr@toDn1b`vk%gv0JqZi*7Yf zK6k^FsCHiOziVTm3N%m&)xa&H@z;v@cZM% z7r~+--poK!H)XA_P~++D%7)1$e9NDLq1Uvdn~Oiy)AXp#)qB-Xr*0L!8L!*>nUh&Z zY|!xD?x5^a2EeP*XqXWcu#u`lLOKvsMKIEM!-?*WnITza#|oyEw?8pJ?mOJYiazzY$#%puG2XfUbxwIja#h zAV@Lspn|_aauiGte_2XjNCh@+SrBzpiz(l11H zji2-F_N%XVu%Ouea}aNaC0{tPQqNuhWOuKx0^&!|{hy+H>c*qEF5yt-XSS5)W@{~7 zPvkF-e59y5oG&8X4*7He)e3{CYU8(8&qYXQ<_0%DP(&9V`#k?UqQQ4^18z>iH)ohz z!<+@u)s+okvrja}n$nJ{aW%yu^st}cNlUkHZ$HcxzJ}3*A)J2fc_0^*OeL!cKVE-l zT(mm6SftZ4JGYL}_`zWg&%!_w*P4%CqD6bM21~&BE?Ha~Uk+l(O;Z6j+K$=pBU%pp~C$D(1GI$+XJKSF)}Bsw}R{23TRnU1?v zvV;6jHGwTW{clo(d-m0K3c;6<^+jN+0Bsm_;?R#+=FV0&i%MV=eP$Y1#81ag+U2Xu zF9oZzU$2Tz@#NUwY4mE$uKwflZg)7KVaS3zbvZ@F%5r6m3| z5_TrrvyJ*e6fUu5?M4mfQLeKOUg7pb%*IBoF5O;CZ#ejEdR2N=+m`$>NC+_|Ie+nk z_?_Jws{4&6JnSl+cGjJ0O=)}1adEM)eXkV5gRRD8o5jIfQ^{v%OPN|I89_EnGLGYm zUj&e9(DZLyr~Q#E*C)!fyN?t7i2jTWCZgB)HF1%gQysFFiQbHC-hN|u8uiShOz9&D zPu%isTbw1|3A!JTUw)r=$u*+0G8QC)`!t)yx))2qxA$+1lW$MB!xf`VI1lIE z9l1z5sv80~1D(pKv&DdwFg=Bm2IO|}#m{(+iVuv>+Bx=-o1IXzyjEl>%!*)czTER8 zS+|e;(cqV3$0-~(0*+g1!PYX0o5%osQYXvhZiquzWQ3Q*by`tS8=OHQ`aP2!6?la} zp=pFE)6iX^Gq(B21(Bu%^J2f@UcclY);H+G6SGG-0Aek0+fg~JPOL5F4AabsLiYjV zphrFC-%TU>Yb^x7yu*%K2ZpPDhQ4CwUMmXGG`{&1!v>Wul4#jPezR=QPt-r&JEvN# zTT7;Qrz~Mb&f)#C`hDh-W`)81ovy5%#*EEvJDED3_7<~%$5fz*$b#LxvhIYA;yng9 zE#%GXD|GM+SbdcM0W-c~nITVbH7I;=2hEiP9RkIufMjUZ5Oau)>jnbc!lJUVfeXDq z!!mMR9)OwMuk5t(R8Eu_^c%}gqFvN_iM~exe#iHVzdSmVS01W9+Hl>(Of(DSzq|D( zia+RM0@kBK@CCzzKN$&p#4;1nM!W%3D8)DT-_{0yJ511!80Y;Y9zs~}Nf>cEx@Lxd zhR6Tg8%3|r=pwG4oSFXba2>q}_lW4Z6Jevz2SPc%dHpss8q}(lefo@l>vyav{YQ|1-ca94l=`i|@%H~G z^}j8m=&uLX@Tvdk@t|D~1=KpBf5G|@ z^Avw0zrHXJf;M!o(E4ZR2ExL{|MED{t3fS_;q}{)VRTCo`VFgPnX>2c(T%_SCz#={ zAt6d&+zI0B7-r%3AK`EQPtODY>nBI(AEOQXi3n~!E$b;n^e@(M6DGd>rl|~r#MOV^ zzt>b4v7xQr6YTZDH=Ha{4o%PfNeDPP?jGoAmN*`Fx=mYwv2oDnjzpzTsf@1gSJ24RwqKd?au%Iw%sXi>yJgG zuMwvDumB;I<)uY%#At;G67r^VgT_f#UBW{1S(m1}Phq5wwoeQMNo4uNw~-oC5hBnz zDT+@9f~&FdMI)suglx#w?F5$7^lu!vgbZIENAuv|dPgtf5akJ)z-9`ZCbVG#iY|E& zyg6omVxN}uVE-}xnxk-!2igMSfK|n}Q**`?1iBrouOC?d9XGE<$n@oJ2U`p9je?$F zGHKK{*kesgKc9AK?+_6C3Z z1)mXjMU72@^1hN#j3*#u7UdlMO4Lx7zCqWq8_ zBcN62Tf%KbK|d@=fR8^5`UQfZ<5FhO1QsN7xKo=?mcU+5__83{ej~ZkJfAdCgirfDu(79opwE;SgyZ@aLu$Csy#>yokt`d605V4KCBa)qf>wRnl z_`pWKC7_P&3xe1X)sN4sQYXS z8%Kj7x@8s{M$Vd>IFC9mV_QZxu+f^pNQAj{czO-8ihhfdF|GX~@HC&)-QBiP1VNg`zY)#0%EBTFw4S(-(hMY?gtJ>Fou48Ad3 zB?u>$p=85H!*Od9v@xxcpv3jh3EmM=STNyxy(ea zh7wI3QQ&9{30!5kN>q$JSN}mVh(zOhiHgWQ^>=vsr}`;veQ~%@~VX28=JVZpBf3g_J0iv)e5ryqL?p+_2_<1^x?92?8GXp1{+;5gmfF zfwA@zw?jfC8~wVzfcPuq9c{`hG6vnMwHoGQWZ`X|trd`G~GjM#tU*7S)Hr=o^9 ziGNi>LiB^s5+O>mhTtw_1QV71kA~E%NU=0vBi2L?5c3S#MBe<3xDg^-EGJQU=l+FG z3=!u~_#ba02zQ(zVI~%i{41{}?iyj{7k2*LoqtZi#1UN#$AEue>0c5s3u#1E+(~rZ z{y72DM?!SLa()XG{viQ_+e>s6djEw}<3zN-y?^Ur5Y>qoq=-Ht4lw2K#FQ97|7K)N zhyk>#@84+#F-71(ND*LnMG&lv)DXdZ|3|jykM2+uQKyuNS*`m-rXD0_wMYqBEu0xK z2Xo+G=!GCrz*o!v$;1#bIGCtTpNYf!t9R1(=uco1)l1O)NveM*;eE73)~pe=@&5tC zV#J6~`ZpLRI8whQG5(Tw!88+z{Vi<&58q_fa6&goyX*-mR)9`7xsZf6VXH&mj`Ebe ztW;*v%DQoXo}IFXc8PLcub=8{Q^Fz(@*Mc^gdEg&T8~szs+wSW9vhezIc zHyNGLFYuEszWz15jz8oSA)F$GC1B8oC5L52kiK07b(^cd#LRXJd=8`{L;4(*pTRVV zx6^RekEJ>nr5%wY;5!YDAjdc@H+YmL{wWAzb&-%vK2#3CpTcf^73^^x}(-q0hsiC2G)*zteXy$BI2?bt3US3BoXDwy`9J zYjh>V#1C6U&QIIKss8 z-y_mmb%7AVF~|}_KOb1(7;P9*aB>iAS{eN5=@V4pcXT2>2w z!``8b*vRyIz}jK>b|wIyM8rh^SvoB}2V{vJY=L1m&Fm7slgkdQzwBf++ z(YcA<5-vh^cqdw+4fLBhHsFer1}~!c5C@bpiD$l3_uAfXGgapLj9W$km*UcjUr z3%Me{sr^~>%-oD>ZMKn4==Fg8AmygwLFk3W1C-0cHx}96Oj6io#^Z~}qk4}FZpSIx z#N>(F(0a)zmVkN!lW;B%zowr>MSPWSxy7(a@Nx&Z?^{fJ*|ds8ZpwSLPqL_E+hp&WM6H=fRHsJ~VlVWv$5Y zi26K~r~vjbFvMHo4;O(KAbs$xIef#Qti#>dMaQ+)PRbrlk|^>noRZuS=)2ZCsIw|^ zKl^F@uikhtcHjg$j-Bi3`&0cy?}W3MrWMfH$4!6LW1=lU=Lzdz#U8D02loYGkD!ui zy@wA?&c@N>*SPtg9}n(mwXkEnb_I~f|G%MXA>?H(q@F((N)mRhj8Y7K)!$T%uDA3d zO9DS)^mHZ4-YJSNb5`a8-ewl#3*E!(5UfZKdV&f11#Iuof$3cL8I6`p1}4a;4L!hy zg<&W(eS*}-O2p%oZ6)BPKk=txfc5*QI)h_YU??9m;D9KOes5nCXzoV1j96FnLhN9EFQ2x zAtG^w&m=1vB2QczI(RF`&GUhu;U!ouynumf9V8$JJACSYV9w>_akNU!St3kSWAJVy z6@U@u&q_hYls)5?>q=ent$wPlJE!8?;YY4n!Xr zUG%#Ul^xAn4-!yEntx#Mpqz9mD6~u|E=2z*`tA z|3oj%kh56Iq{2aOZiLjy-HC~5lhM#+I_wuEXqerG+x%K**>+30zR{{NP zjpuqpv$YK#g7cLm2+)jIOO$j>KrwpwCM+sdDtJq<^ywi9QG}li5N&HquoNo(>*lS& zRgN4%a_sno_*^yy-%f;+Qd>|Ib}9$_Md93C69n4`6y@HsHYh(egU;W?xJ$#r^7QBq zGzL2Z3!4MY5-8qk$?Es^iTe8C1P6bM%}=CvmWRDz zgx1BybvEW5Lm=NvQy@jIjsF|F{H0p8{SJ>TKRHwfc8j~^N4bm@!Rp`g5dP} zE1e|ZfL@gDyZ4U3>o8_L+~rGn4sGA}b}4?$_F-e=QHtQM>=Px0qd=m4>i-#n24^)| z#4;o@VQ2h9R;To?vz5VK$XcKs= z>{pZpvfX1j;on`JBwInES$y#Ft&o0fvt{wNeZ0Iuwe!3^fDYb2`$fYs!7RMsPOW+X zvMQECi=+2Z%|44dG!9etu;vsZ8ndo8I;85LAMD7iqE0(Jmj~b0e)IBB(T;mI6skFZ z0a)klG%bkbhriE9)K&VF|^E2Qvdia)Y2~VfNH?P-l z#S|0uV|R=R7cc7n?nZIGyDH^4yrd6VpBvH<)ts}ZfTVE;5I}Rop#;~Fj;zKjo3pEY*zlm#Dfay8%XVjSl zPAok22Vs15HQK~3&tiB1pe=qc0Hj&}F)H!dmJ{5kU7BEb7`}BDFg`o6bODqG2QT~) zP2I`=^j|J?j!8o4L9U{V<@?LesYYjVQ03)j#Rx z<%_}xZb!{#-?6qTr{Ws(A|6zV?yS357B-836;J;+h`^^HaUUV7W+a1oYB=~fi5iYN zPG^4GG+rK&g(r-%Ksl3L=1!IMqId-^Xl5svN1u8TPJ`eO@MDH%x@sMmmZ}N(4CIFc zG%=I>6D{x?_9))0W0m)Do4yL~YfJeKj7>(|?X}e>f07`g8)W+Ij4*rx9Xib!`EHRk zE~kMDfgq38-ImYeRRdR9?rle~6tHG9zWrC2g*jEnmp)!{rUr!y(ROs$X-D~h0}Yxz zEO-l=K95rZc!6E%k1mkc5A<|0cpVtgHR!E-=jqnYuVwI3-?0E;6r+68>6Bv+vs`j( zk7;{;A68$XIGq{$Mp4(Ii1smYuudtap_>l^g_MYm&f>%TfT)yG+py{|HXbc;y}{P zuIc$T2|1Hmp~2gB1+N9O9{&hEEVk$L%)HKEh&|&Yv9d2z4KlZ6VqL%)Fj=Vi9hN^w z&;Q}jgwPj$)xCgr*1w`)S2irFk8h>WaES(WP9Wd)sZBYqp5Dd~e{>~`=%??7_? zlCmQ24yezv771%z+b+DHB&na2}{Kh=Y@p7|pUABG?+VBv{`U zv|Z(CQ~)E~^S}H0Rj;O*J;oWAJeOn6^Ra_d*1L@6 zEprF!QuZ0;kjRfps=KP}t!I=CW<)wCvBA`(!!nFJO9U-VHsK;r@|^<#`Nj)N&y<5o zlsxD=5|iLB zRp7YmRLilQ%pKHC+n+9bCZ)G@?&nfB2?-0*I2HF106R-Pi3uMkul}pXy%aBgtY}>c zYS3~%cia48S#4Wzy=Kta3C_gH2-O``TPA+0%}Rr_cu;ZK1H^?SsHAXGX%`@C4n4IL zrOzksug2MWdsZ(=zKgq)_<@B?^9c$ze1?$B0!sT;JzV}0h{dew6z7 zJpD1uNn~>J3Bx42$M@Rv2jupg0}6-k$u9!auQTcTkVr1MP}qm z4=Wx}Vy*j7sF#O*d!*scqKxtMuHwAId-#_>=mRrzJurWr zFoZM^S{JmT8tM8RO!|D4UWK%iqfx+ZhTG~#^%vA*F6i7;Y(J>Hf6vI|;x{pCxzb#j z7^P=XmsyiEGI^&JM%fH-uYd!Bt=$?U9Q7KB2e9i$bw}=7UjK^R^yH0ldZ#Or8f@Dt zgpW<^q03v0Jo6+leC>@v?2-Lj5Jof`n!&O65+*Z0xah{W?mBK`U;-`@)1o`!+tlb; ziI7s%H=BF{z@owUos{1zGrZr7j7E%%?_(QsW#TmHy?>p2D2t85(J!2T)$8D_ z9d0>QRG?~$YnM?C2}0EmBMGghNzVJ=wo~e^w|>gaF8B)1lLsCEz6+zmDC#h6(1)@V z2sNO3vSspq7P#l|!S~lMy$-sahxOF}B`{S_xVPKXeaO6%RsSWk$|=F--0V>&#{_k^ zZx{X7yrUFBDJL{L5%x%s-n!6+TR9IK?)zmA+sv~?-tW1}C2t)EuU4K&6!X}*@V=wBRc28xSh)~nnr zeg9Wp889Us7b!X9D;bDXC;aM?5wP~7QjNbVPfEe0C_Kn%UR(L{K;`JvX5@Rn8sy=n zn4x&5LnO}O7P`uiI>5%R&>E@Z;)n5tzdEPuPo)7Z^cIWP$Irq$W&Ybz4xieDW z?eT9MC1@!TNhq1U{Lad&IWZZn9g`7We`R?jYA$NBL#0w&q zBn7*V+B%$L#1Q+_4L&Hrgv6SS$FK%_Q|jYt4YpvRm?Xokq~u32UM!wpDPOWSdx^z} z;UU3@gI7n0Cyv29jiNsMN2e$+zn3lG=gVVgw!0Hrj8g+P__FC#;IV=fCfRH;)72A!~$|wCjY8q7SUCG>trqt7gX5HkLjYsH6N>G9zY)@RTk&n ze}N;m*Nls`PU}?9Li{k_tIHQ^zdEy0hvpX&;vv0Cm0N;-uq(`#0R`p_-+IrrTJ9Kb zuKFper!|`@rL1dz4;@}4qVJO~YdHBnz`i_N8K~>c+Px zifv7v&4kW>=yN)&9TnK}{!635Hg&YQpjx@^d+)A!Oz1c&G~^vpk%^FODsFO@zv^=6 z70~PEl|3bu4+4^sw_}D1Do}9%=aa=lZ`Ijv=eCxo>>!cMgbIbKfgCmLHwLsJnKk7QPae9?pa}~nc2o@ zNw;L>8l{q~u;jBgdiPdxdZaZX@A@(Mp7YI(*RiL<(G56XAOE2d4rx-M=1GE>Sg{Wi zrEZ>0cJ@2}8DL!=c{nO6!D$=8%ntn#8G?g%DrIB$aW2EZX-ewYtyq71l}(2&GqMfa z`N7nUGo4V{W;sN@u*I}N6mWC&i{MF$={B;K+yfO(9%re5_B91!z@7?iLj z)xQLrv!MyCkkmC%W{*s4}pT@Tvp#>gH9PIeYsWPda|gVlqa_WD_OVA=-H>$f;u}a+P3%2bw4E`KR%kbgR^oiqZ--nc01K`%YGWJ_7Rhm zP_PFw5)5bc6$!(38IEAo)%4froESU@m>=oChc@JRhEAw%$%&I~3_Fy8!?o~%4*Nx; zrS5WQfaJHC!EpM8mqIxD&e>`%w{+(uJc&JfQaIqy3MR-O=QAhWh`Oe%{ z@i#3PRDzn07lh1)Fv&;Qg}%`?G2sUe7cTvt9$20yz_vS(w&lAw2Jr;gw=KW(0q@GB zy@hA^BhSO8^N+SZhL+A0@NdVk%zl37&KVEj3_Ov#m8zz$i#XjoOjA+rbS0)bG`8mM#!!NC&7{P0m0yjo3g8j3>ooauNwx}T%tvU3-8yz7OeK&-V zCHlR3rxw=NS05CG9}$geUarXM;x!6!z~Qo8iAs;Z8Gg>$aWhuM%#6D9I_`0#GdMEp zb-X=hScR`Y!;h08Piu-TDj_`Q5RY^m0ea zmUhWWcv>e8d1id!`;ji(wdHyJfexRVgH1Em)RJ8yxr(w_a3_)gfO@4}*u-4SPFBq0 zZqpqnXsdcv`ObF;U#5I>`o1P-@_N60D-PJ90>$--a`)Wz?i~<+@M9!#;0~CX5F!uH zLZyGQ?njXU4v4_uh@n>o9X1vq2&RjNg2(wPPDhvWDJ))Q=em|JQ;112q2O|9+3=L~ zENI0-WGlj}ZaqwZ%)1hD1f~LXa~=nrrB+_H;TowuQF&Y;@*1Q3ddGV`2KG)1DEb_7 zgaw;?K1{62xL$nc4Lv=8RZrz#tlobeCy1HJpk%GlCMBpFGjsz$tD4V>KiDCRXjIrU zLMV}jp^3-aJiFIHmyAzMo7wrK{uy@(LC}!UYR^K=NqO2FxDwPy?8@q;3zSbBgFn|JdEa)U?|xj6sET-GG0o=5LI0wnh;dQ*Ybdi#wf?O)Fzs4~ zF9Y7-wZq)x&|EU$l;->zF?X4osR(gY5v%|o!jOYXLX9aW=r`akd zd`OuAoMr&)>NMwvv0iko2#woeQ(ynqumDbVcS=F>f}#>9G|IPh6LQiY14TlA$Hr+r z{*GFs1K7U7I!xLD`EDofOwOFIc$j*WO8p5vc4>%5DIZpPD9R`ZTi?SNCJ4__exYt# ztY(dCsEeC@(RD*5@m{MD3pDo)O+gCSzx{mMM)Ju@jgHOQdiMvw7Vz)dP^fq>*p+`c zBgCUb#^|L0Edk9=DW<(Y5mN_3hiiC_xh+aL?wVN$)93iO5Dv|KH zh4ju%Y4bbWTI>tC4KuP9)Z}8hn21L|`ASajW?;1c!JnW1xt;)M0qrZ+16u^^qi4zk z-e4^y6|HQ*OF!V)C!jjcJ(Y7!Z@avl2EJh#S8*DgRAq(XW^dFEWMM#WhSAZL%F^d$ z*h~Cl@yC$tCyHC?aCT)7;1O>EdmKl0itbE&1#vl47->q^e!uD*OZ>KC#a zuH%X$hX(;YUK^Ojg<6gCrR^7WJ8jb?YcEwjb5?|xb3HS-&Lv^Wl+*k_2)+h{GwfOFTLYF4?uu9CX zXE_b*{b?syPkQ=S9smA6FE13vM^@$SUS-co6?r8E(}y*9;sIk9M~jox2?BOxZw-!W zqDZH8Jfs-Z;K+2F*AkyltP~QyYr8#TmZyiLvcVb&L9Iiyk0riIKIh7O59|-~lYiIp z^V^+}(^(KsI&ie9zUI@3b!0{oF+CMWFlDB3R~U{Lvig0v9)1K&j}YSMp4ev9uRf;U zwuPspm9u^QWXbu{Y1XwGr}T<8xeL%LB??>{KKAzfsn_6`fitKO{O=VbpA~rq z1EE#05K*;c<+8ys5cEk56(aagEpRU-QZV*`v|5E6s}O1`88jxnHKDbq%0peSuGi>j zkNk=^tpsl`O)e64f!%0aH#ugG@S#-rp90Ze=TZZ=9vhn;QDRepJ0fj78GuUqJ>kaV zvLawpFhWt{wK{gchJLU&K60e3|Dr|%#!r>=#|J~kZ-ifiHC=T581)zxV-ox#E&&l+ zIcQ(@5c<7Du*q^_H8VKfl5zbUfSu>Ii*Q3ff@mXJ@vMTHH3f+WDPvv>R!gXxeP zi`NOH2I*YbwX1VAyMR5FXOxz3ppIJwv2*Fif@_fvk?M7lAJNs=Q+0=vg>IVQQMc`D z<4JA;gKfsq3=}SDpW<{wLqh}-k{(Fj@wa3F#mmFUDyi$C3hToboK!>>1Ux69c%%bP zmA8f2>hrM7+IrP>s#_>@GCz<^UdXvx=+NsgrDOhH;Vk2pyf<5q1uSzqFbH{2mspD0 ztqBHPmpJB1fl%qiGgBB~d`dVlOZ>(E{QcI>*vN%s|5Hgtm+#Hny?!`@LbWEh&k7R+ zofsqFifKppqkl`NDOe7}XOC)ft7txAYnEGGzah;F%^fS0L`n%i z*A5P1vCg1KIVC}e%=mBen*&U%@IWdo(`RWaCQ>N@5u-6_IzFiyTog<7_yW$;zD9ni zlI62X^f{uVs!9D0;n!#zS}||NyCrkcAP>L==>(`_@$JI}?2Qv0ZA^zBHK7hd|8Gt3 z+DCViLQ1>w${!%tf5_9psXKX3lgL#811}VjZCe)l&dLAkFaz zWWWvI8DR)DBG;Szo}9oLMSX-gU~ND|<4YJ^F5(^zB+;zlmx+(76xcY0MJ{-s&nVUk z+4|luFxz$RXLEtoVIU#wzyln+)1<5Uoh?TOZ`i?gtn>HcK@LSP=%w}pqWBQ)C-r>N z5mTbc;bRpsU*Lx4QZeBQX)BTuD`{yAoP3Wd^A%J5=JY;=3zrTlu=Y6IyU-d{s+ClV zxxnozw~aCZi1JtL(I@VQF2-(`znLm{vY-&TTcrr;qAeoG!ipBVSzA>;1jDl+pZ5f_ zi#v3*KHdhBmw%%Eu7s&Rc_}B$v?MDkt3@~E{-@aLBktPDRH))spHkfw)obN)D;lfX zt$auTv;$LYajQIRc|>L>~ah%*ylFxq3P#?8SSoN4Qi_nxFvVu`t(qYdk=jkavc zWt(|BG4&tFG7$t*l^E3@xUROTOiNa4ueSY|N>qF%_#A{U26{Zd4^SeRB5jx-<9P_e$w{GEqjI#*5UD%27zgSG+l8T22Q zPxk*>3vjryCX{iv${3xV6)FJJ4T# z*E%ESf$`AfEQ&e;4Otb65Pu@=jKblgtMn&=R8?@Gv*SbtUs$a8Lk#tpj0T}3ya+3@$5K1Rv!Qdu=x6^=ow#KNIxsTdlU+&< zn-4@F&VtnER?!+MVDaKZHBcMGU}W>?{SQ9OPW^Wix968~?tH<{Sg7_0EvQEKU17Z& zyN*}DO7ey)s=msI@WNzS{?tR`br@Q%Ty5Oku_->PQSsNSv6?8I4G{yhs`iJ9AM>M_}hR>J;XqEjh3u7_3=!uzmRI^OoqTUP_ZUu55ZDjt~IvdC|38(kmtUiHuHAwBVq5t~S2 z+z6qiE+@(bfB`Cepz(N(-%ndV*R!p=YH~5jkczG0>XI+X{LeBo(i4z;!fuI^4B3^3 z3FZdQHAKPSvM5z?<@5yd59-EQ9-6!LQd-XYmb6U1;zzS2&j{9rtH}i8KYK7_^3LeO z&_*QBqNbL|9k2BRTNoV{3oz;t;Ob*#UY7l9);jw6ylx zd-eokrz13{eBls6deYR0>a(Q-ua=G4d&tAm4A}OgpCgZT^xq8k;ZywBLo*@hi1nVS zq;xQTaZS0-Ik6A>grFe0f`mQ08ZZ|`U}1|rFc*s!P+y_-BxqWb7jAT~UaSQdVO82U z<6p1$NfJj7SPxEl}&TH-3SzsQ$Thb)^_S!>s%}RKR;V!FU-_a5`+3lt6 z_U+- z_K$uP#LuhEzOEK}xqjPyOG%bC8YMh)&wDP0G;u^$Ep|Qqk}Y@S>a_`us^$VpbUD@; z?C8nUZ^a@gP%1twoO5Kh>R1xv$a5=Ae98=7@9VveC)+mS#De0!-9l_&lhvZyQ}$5# zQF#aL&=-B5gtEoHx&{A22|`d|nP^&7Kse`%NiN-Q)u1v=oIZgi3=S&^w_TBl{{H^X zfB{UC7zIPO%?JFm9~2$sNO$^*tFT>S&AiL{d6uNk$V873$2Z?Ym$<$+X2d)M&poUR z(-uUwEIvy!O;Hbx#t!k0ssZC8f3UGXc6}Ghr0KV9l-@av;TyL2*`IAu`ezgGE-(Zw z+%cD)1auv|95^{oz?&Qw_>p~*EbzRDpbyE=l1q?>5am>inz6($tWdXdk&9oyv2BCB z)=r!`v^!SdFU{@5o##PG!d6r1DKBSujy9q6%iilL#DYl&xtuWm#;nA7R3phB?*zeK z(xLK2Zz5X|18TM|6d~G44H|*rQ)-%Y7{hbEr2;E9FC8LJFN`_P_i(1*jkaj9N0o*U zYO&#-g+^zvk6sB9j{8nt54_K0b~R^+HV=g|i|xo`yn+InmS(K_KFfZm3j7PTE3YSA z-A)Le&`0AI9um)fuK2EsP|k$$;XOvWFQWv8&`5vk?GOwckMmUzWmu+{86>b4$GX5; z<7nK+M^nz%;FZqJz+I|-+KN47)g+!YmD)LGAdblyz;0eg{P|EI^OW-wLV{@F8>dCZ zQNXgKqE5ZL*(uHfyBmC0oN6q{z)s<(r*u4>0gHm7eD}g>pFb3SEK(QINWXNvQcOny z$O=}^qSWxTR{eut3dToW!G~(vVtVsDI=`&qTz5^&kdAb%viUgycr zu*%B&@fWa(M6FrkG&wHNW}u2zAoe`{u@!@buCfV__>lJ#Jod35^wey3DM7ABZciY4 z@t`J9ssH4SQ0XlDjOVtG8wOyNr!AclO|cAzk!scCYTaSfVmM-1DG5pxV);plXr5vT zn`BSj9i`yxCz0<$qiUF(n6r7+B)mF`rQ`wmgalK_0>NPr0t%O8(>R7@%p81%S)}Xm z2BT@n@nwx6-U0uAyvS!xx*k ze$Fd#YqUv!s{xfEdXK!AqIWR4UO24o_^s22%Ww&$5a6K}uq+yi~E6RED_IPD?iznN;bphuc?6bLMHpV3#8xU*;5&nE2%~1c5i9KP;2Lf za=XC@7-38$3u7BMHlvIvHqj-*i;o~lWmu^I(u^?Iq`SY6S$U)AK@$Jy2g`EO`Bwiz7CQ6SUj#vhYtV*+>(Ew7s8f8n4=)Ih#TPfIoDoaJOG3nCuuCOw z*Cq7ktjEijKqTmK=T%w|Byh{RucMpx<0p8`iBz5&hVhx+#pp8SpI*B0tMq^Whbsf` zdn@k8D|g0IHW_$03Zmk~=>WK$=>wA*lNQ=uFkPvaJsX;L-KU=?y#6Q)n~|j;svQJ` z0Ti9bKA0tRo5lGVU75y5N=^5IZn3soS(ANuveU5{Fm*P|FSWg^&S5W#X0n3d=yqHU zxO8Jc5%rqGztw>JMyBxbh-C%e<971;dM~-Hi;;_V)yb{*^E)E0(qx&E#9bo7NgVJ*|=sz z8_*AaMRQNDD5!{rJpA?m&H|>@A+9i7>`764GSP7?AGjO|ugt*P+mr<>E_d3)sxLt% zR*$u$Pegm8OvT$7pCzwjrQcHo92^p~90BY0*4_y?{U{=X>G_nSoLi~vpv}1CgrH~P z^MaOy&!+nChzH{dICH=>7rZc}Pw_qBg>c)Ev`HrRSYbF->*e&Z^+gspf@$8*e809 z&~aYl(Ae{B1|4f>*0uEcoPHn45{LseO;GZ+^Il%8Ipbo>dd#DK9OnO4M*=&_(p#fx zf-8nhwEDrqN>uf{K(|kE7;$%)VgOHE1$u@BddXKS{9w%A;5GlVy(cOflZEV7 z13hP4ZH*20^&(H0~hD1$W#vu)d~e4dGaAs7FR>rQ_2Ylx{Re9rJ{j3)6eZZyvyA`2cjg_S*YBC`BRY> zmwwjyxbb^-1NW<^>>l|r>IE!xR!qCb5a zLoS5X$j*Ons4+1~y6#1)u7dRC%elliID3R%*-0YYQHIGu?~0P_X2weztz!SP-V%~8 zr6gl~D>KndZlAQ|b4jWv;2pHx66doYMGPW6ROb_48Cyxle3o=b?kxPW_Ez2OO%QMY zOXNg8CNkM9C)7vfv(F56@OPAtZ+hq=gO|pW0m)TA7y3lX>_?>ow<7^fFH%XO)7yPM zF0d+T1wC7nSK5>a>~G`3im4M$F}TU(e5A{9=(1hda|RcBsB6vAF3K#NV8i3>wbxy)Q?~ z08EEOrB)U|4w-J;Y1$)vrukZ`h;Ei(tSVcmf=(w8unVDw!#v_fqA@&U73SYwAyukb zS;}dy9ne~jEmmjNe_rURO!adfi8+o*As)vs7GRyE>C&Q><%}((mx9Bh-5M2qGk~)$ z!o@h)_s35L?iz`8_8EO8v^c;0Jsvto*{?NY_t_xGESUKDL;CY^kHj#PSK)MeBy%$u z9#}ZurQA?sA zr%mjG%U=?aWf*3*s5C1)>$#KTLh4WU`Her~{6ZErRku;n%3;x~DE4dXJb%iRgKKkC z*y_ea>bZzAQrA)pr?{1jcuC{#tVdiP#+qI?r8%w*Z&oJNq5FJv3~kL-0+BMsvkR7l}M`|mdc;x^^VQ3i4kZFTxnG17_OHN z_ZwTWY&L%)oIjBGVQHS$;aTQKdGjicdrHT&@Ba)*iek5NWd<7?83l)ZwWlxj-`ze# zG~;6eEv___T{qiJ%h=!Y)_r&gol#aJ zaksg`=J{g)3^*RV!aNEZ4_?y_hAKA4^vdz$XEP5&6`O-#nO3R}^Q0>{$D)Pb{myn> z@yrBr%&Hk z4<(rot9&ly`6TiNKD-vWxux{>Nr>VU22J!Z49>s3q_J%`c~pDt{m$m=Rys&?<@3c) zz*x4IhU&+9JU6qs!aOuZ9M$2iH{b215c&o*9d6mo6DC^@$X`e9^KnOX^T9uS_nZ3? zi3PWslCsy9L&J=d+mSf3-4Ciixj`A(J_;cizj;oy{P}O#>f_v*B0Hz0|-q;2fxe=q%b zxJENKQ!4qbTCcqmI44T=t7gx}^q5eF=jM%@TG`PRVl#z6FR|>p!s8F1=g_R7d<$Bz z0>2AJj)fu_WiTh%|G|l(3NYDkMetr=w1Er4S!IgB8HS=)_9Gq_T0ATicL6~n*F(M* z^w4}CfP|TTa^-(0B?Hx>5d86eg#`SVF3Jyu<~o35zB9eFT}g^F$M?UL&iN7P<$*)K z(XpC82=(g{Z-i{(Ve7Q{S&BGP_c_{0Bpy~4Ap;;U*WFJp-6{S;KJ+GJ$^VrDMo zP;PflZuy)gf^bLnTki;nkN|Bs+{-BQxi{1f&Yed$puN7>@%_M<6-A$`KP5i_Vg2lF zd?+|TIRAs%1Wk!*_YhN z0PMI(Mo-{U&YK(E^nCTSx`R2Su@LEbO%Zv0PL7CZnikfPHSxYgCSYhzW~p?c)X z1&}sfA13m;h3>;zC8n)ZfHf4`eQoc7+FpokY^_{mC1HiGAeSkC^f0)|9Ziz27EiU8 zm`)x#Dsgk*@)KXeBroD{I1VjWLXox zpyq#@moAgeI$N=dCL$t)^>qPoD_p>$)xCEgcbaT`m$lDgq9lhi1=%8HtI1%murXzV zcrtzxoyg2Rl1A>dfk3}yr12fd-UoH}9zXm?HaqeOi@I~7>Npvpf&kQA8pC;b#X(zp zMBM(qd9^7gYTDhL%aI>ibu~g-`*g~->eH3RR`Tpl{3-~R3HgkFusnWA{P9-(C5F&#e zt>?XJ{5ya1lc`9M9$(u1kAr^d88G;5^o_aJKlatKPF*=sH5ouZT|Xj&WTC!PX(TZA zk$Q5cM{7zg)xxOE3p`-Z&M+E`SBawM9QX0S@7tFbme%spmJUIEYABHrnVriEnWIVu z@~d=Hzt)`YDTpY*hO3KU+N6#;7UWco>QQhurcLeT-HD8U6x1jAeR+XiLm+L^R0In$ zv)*qbi<^R80KrFn_laQ$^4q&`#@mkB%&^-ucw$F&2k z9${pqHRJ!KT}5b2eS_gaoCKJh>N}BkOp&wfjo{-+#}(-_S7#Zyyx;yZ&~|`TtC(bDa^G$n~&TqV+osUvb4w zqR0_M=DYo3Ot1#|7R`RrL7@ja-49Nn14!*lO<>LJK{wr}rVb#rPemO->cB|v3n~Xa gYF}zX8flC~ri|!;EJm=^H3UgX$X-bk(|q)Q0J*kwxc~qF literal 0 HcmV?d00001 diff --git a/docs/img/fig_geometry.pdf b/docs/img/fig_geometry.pdf new file mode 100755 index 0000000000000000000000000000000000000000..077c92a94cf7aecf14487c74062cc400b54ae652 GIT binary patch literal 42566 zcma&M19)9sw+6b?7&~Ze+qP{dZPPeAwynmt8rwD+H)`0}w(Z>X`~L5o|D1F0xo1Cn zt!J(=-+`&I<``>|D~U$Sl=ls zj1p#+jv#wR2}^xPkQm6&#t0-J0O#On57M`Sb4@?dmX9Xmw(i)g3b`Vf%CUiELU09J z>RO#$@z2#X8iRh_oA>JoC=2uF?eBit$dfjl<2k+qua}*Mw~prr z7tE)jx3@c=@BTgPdkKx55D6(09&rv1zGa2E8DN z7V^cGZ*slhqcwB`Rk&*E&X6?Aai-J4O~7zw>Qc{=Zqh69tGA(JSa;k-mfZ-E`_G_4%>>@>&|`%MlI@6w746f1m*--y)=ADT7c*RYyD?9&JY$i(j%4hO; zy0%^I4=CP`-07~|-3Dk?F?-Y>UjgEKKYB#MB%q{6TJI~Ci;Hw(C=+Tj>xQLzg&r`O zUCYjP{`{)6cF^dqgG+N!!7LQrKVIW>9y4FiWQ3*4EDkm=J%CO35*cQAbSj{sfyZi#^%kmJ5q+~ zG1xa3)3879nO9p%r(#%fg}An!0RbhM<};h+{JJyiY^LoH$va2FG z)h!jzZBHTRjI2-Kp&QWj9g;Cs16JCh)jbw+N}&_;9f@M_L+Q8|T}@F+`jzj!ANET; z+|HWL_7M|Z#anU2C^|3bpbA||CUJSe);x$*FAl$hq>qNX_H;&*7Y!FI^uu$#A8Ap4yu!lh|wFs<;8}cT#&zns|`}Qi6YY zzVv%mh?CJxIYPw&6>z^K4C3h{0I#1aFMRgG7YZTcEz=_PesxoeA%#_aW?Wz{=}O3B zwa2)p8MmOB@*?O#hrFZ;EBABhM)n>qi^?hGkIBgcU$quRIh-up#hBeVaIq*@l;>ke z|KM9U3Qsm-&1EpbCo*q&KI1i8Q!nJV=oF=pdu6NT%V{-jDwbnw@nMOfz4qyH;N-@-(})o(7&X3l(^j+#X4i)0pJ&`v zC~)~I;Ip>6tTFE4KFm#E~8aG|)pDl&H z-tXL9PTKTK-uC6M?8exg?Qa@OJ1(NsFO15W4RE;-PAtp77H}LyKtAF(AmhRDvei$O zqtmrpI09BM@%zII>fO9>WqFGt5XV7AgWryo5Y=)*P|t(>zW@`=z)pm4i=xCib-HaO zZ-SfSej~3@lj?mGUCzH?V~?85L3;Dsl(vEJ-Uj))Z@frWzx-Vh{6?y)2BRF-|a9p@j!=*(XZ zxtEYDvYF3MMvzFlL;rDgWjn#k%y|klL>EQwc8-muIuk;n|5=X5&v=lliuRo0P_y1q zz{<`h0pc(spE2MipndINT5<6f^v$^z*z((0UZcF$L93Uci6>?RH#p1MMd#g#U0j~+ z-Bt0I$h|JnWB7`(k=6mRNeJ%cdi=Y%^Bfu(i0hIwlTlG532UuUQN4EGZ|UdWO5V6~K| zxVCNj>x4#rREOmw(xg}y!+Q-ZAa1oDvo0HG|3n?!xP@Hpuo*#W`u6PVeSf)Zv*cj$ zKq6k49Yyxh&zO`Kd;*pl3C{0|nBZ8Ew|CC-8XcmkN3Z(3cHp2CfsW~SbFFAn23SVR zt;pBV%NTxnyiirkP0<04c?%7RNNF16%C(AyLyVioZt#sw?W!Si4O=iocw2aFQA&hg z3jWaW6E!WX+g<&P_JtWyh_* z=4tzNSUd|Dv*PdP!C^|gY~WtsGOe_QHx2aCR0Z#uUi?0?#ETH8mY_ySd-27erfLK6 zYfZk0@-kZK5H=!HewJp@;~!eZk6%jrbO3oMbe8_?+;+A3jXWy3vopIh)l@P0?Kyem ztdsb{ZupGD=jC>7N$|OhT-I{!^*P$(zR1R><9Vd1iQZl!+|yiNszTngNFJ`;q}=D4 z1B3NdN+E183X4ODYa60o`m8=ws;pe^B_B(Rok0t7k1&7xE=>-;KW#09>TE{R9Yp8M z_(rwGAD$XvcA!iBZG7Zu_6yQejq-@Uhf?Y_61rPD57^gx{vMdWB6w=!HSQ4Bd99=vHt@i$=Oa6`ZRY+R7w z3a(w!rc3=Mt~hP$@SXysU!qlhdPLi%;=>1>j<4$Eq&?~@DthFzoSCQV@qhI1EA*wS zFku&lLU)Pbrwv>nEO>*fG9AVLUX|JSDOKB^o>KBhNAIadx~lkB!#%_*wVKT!mD0h6 zg%kW(nnt-}SQ|ezt{V}ELcK{JNzV`A$j3G|n6wM%%UX~&BP+%_3NGbxu4d~pb5B%U zRlfVwASM=>1u#*=T)1`7daw&(a}=h`eD7?^qBKqFnI0~>FVp?d9fkLt9kjgr8?NSa zT%@LkmFM9}#x(5OL)P&2_O!}SCz1>GWyJO#s3m-{$(>_A-u=a;FxuPB72}~pxp6SV z**w(G9{hBwMBfyuSp~RR#)!sqN#T47gO2)^F ztu6VdgKIp%nT&DQs}F-&Wc2UmauX$JrNG65YbYE=(bg#duNc|N!EQ4Oq?d^3pd-Y} zZWI(qtDF+yD#;?kFE@J;!0kEZzfydt7gJ@na7@RFg7sPAJYc8ZVtJn9meL#zNmSYM zn<29sz=_M3FhIkxeEz6l36AbQIVD?=PrE=*tWz74A3%;fOGeXFmy`H1FvJ)%6%Xk0Qio1NCqTm*(?mXdso7e=xcXkdsj znTYM61J}%Qq)CS!tLT)a0B1^6XKV|R_9aOYoPt0^7k=VeyiL;y^hM@IU-)7J!|`&j zg)m`+e&Oc4M9ChNvnD)AgHBnuF&lqy=tLB|9uM?8vu`_zqx`aq%We03;biRFIA15D zt`EL$r$daC#$fB@Bpy$~%PB&^)~L3dN=zcpZN&VOfMEssp1?TGmGS4YwYyVqP@EXB9Me7?Y6_jh4NQ2 zQn4Z5=Q{nBWvsz7f|@Wzh&7(MnYAFNw}%y5{M*jPy>FG+MSAnHCvM*tY!`bLFVJkg zu`0xPWP~mcesiem`Sh1hp>mAOKdg=EyAMAovTCu(KF>7?J{$a2sp5k$O0DAW3MP+6 z^M>Tn`cl)EM_6$V!L!?XCdpO#vR**w>|<1(^2n@VmPkif>Z_2+jLuK*Gtfg5ZP=a5 z%BdNgGoyACN&3j572Z$Gni&g0qX(PJb~{0gS<6v9jI^_z)$7p2D=0Ab)5Gog$n7h(m}55n7)NepGSFi*Ld;m|o7(qD zJ!UpyH)O5hk9Qr8T;HY9GAM^nt~9CUq#=k-*!t%m?N0HuC>0zA(Wyxi5=)WEdO@(% zOs-2I9I5QZ;+sFiVl}~@&Xwl8KJz8!f4P;qF-q_TMs*?Dl!(@`%e{=#y(o$6`of$Gm9-IGh&3;~LmKl>VtH3kVA`!jF>Zw5rt zL(P^$ve)ZE$M#jf5(G|Qis6bJekQk`SaZnwO0Ce%Do5gsLAO9m#R4ZMJgFp#n`=;` zKRF@qIB_;)g~G=Q!SU_|qH96FbEHF@W$75NcgpJ4-uDrn?YKKTXH~m4<4?3<*@SL- zT<3IlfxuMN!8Yba)@G_y$!zleUHZ8Yi)cQo(L2%&riRFtzWfh7Qd6;=!(X~v0?IvW z>)i;jkiPfV2fK=>V_S~>N+=Q=vuIjP#Q5^MOL)D7pt z<0|ZdkBsx{lF{HX#Vn5$**_>G69X58FM3$8{(d`LZfsC~r0;D?> zC|1sLExrRCn$G^~d~;hg&+A>FG2Cps9p|O%ne#) z#F+0$So#I@zID0k#GZ?|hz_i3c&h(_3U z)2%Ex4iiNQXZf856F!5TR^W5g%jM^qasoasJxVr+J_H^l2-KlSNE$uS${F!!wCIz| z?eKF-qm2j_gZ<;Ikcy{=px7%y^C^5|es3F{CObrR_HmoBrB-8U2#$@e&qaO7mf=v$ z@!Q5g^jvSnsr0XUNP7yrsd5Xa^T9SxU@KEM)o+(0CtqDXJ$K$Cw~_B>goM#wllxc^ z7edH?CPol^Cwv*A=ZwXsbSuiEL{Hj@?qvo?#0(ExUDz;GNJN*8z((nCs;1;VcNDh& zIWKV;mC?hD)IaGPt92dHF=6gB8F7rS`B)lFp41OZQHb#KluD~iuP4D33U7iUw<5ep zxFGkc^n2eV5=saCtNseVwJgbHUe{BtH?P~y`P@n6^?s_?!^6@2gXc3r=Xp)|E{f$R z?7dji3>@m5HbZoF(%|&q0MK^RP0r^Jo10wjbmzX>mdK82MdGg12QvLArju?!K{i|! zsDUHCo=8?if*B|^u56%5rph-zLq#Vtt+vNl%K>yT)@gs}yjY~myTMMn638roEniZm zLOP@~h4&(>vU1$^_7T&Cmjlas>_)h`?IOD*SpKTeY=1tTfBk;Lmu zog@6tiGznz13>P~Q;|~YCwR~hWa#P&gLV<@m%{XLW1^)kt0&N(SrPX>6Du4A2q$8F zGYQX~Z}rLj>3UIpyjzPq9=I9o+^SLP*JSsrZ+!ngpEmRsJ6d;Rz+`^&7c6v!N|1o_ zZw~K1OX4003H^TiR7d2KJ<3`%C-N$!&6s35aqN0xkfs;gdNp^=6z+5m!wL<%!{fzX ztj_A|m(^Y9*z=lXtZQk!DX^pA_*Q#NFQ^m3nws(Y^(zKgW-DWpO};SX7ERi9DY!Sa zs27Zs0E)DtME8;dI6=;M3`PbPS(!y`j@@UG#1HMn?J1H{@a287yp_Yc&!k+LjOuxg$P8U zHuicn#ZwY`K^^Q|RPA5f2{4#@z+)#gmao^EB(CNoVOmGb^CoRvVObDAJ{3+`Lp}Y- zusAe++PZyXdgvK}16dpWE0_9S`jdWz`;&G3Q($LfWByb97m@9MBxl+GBxnCK6RRW( z4uJ*$zyJWO>LJou;0%4V007|6^-cmi-1y?5AXNUbtoVJfG934Cam_JInL0HP}9U8ID-<~ z&Y=s1H-fkGZJ4P2(e)9xKDO%{+r1KMMOau;%z|E?_(Vz!7W+MsguEP^9i@Z_6O2Sp zXKCP|#Ex)WEt{F}ml}BCB4nb|MkIKK$gLG_R+I777Kgp(r;N_dad0qalhd>Gr)2*vsD z06+Gw-ixD|A@PF1tGK5bTF$+4ods}rZrVPjpT6&v_kmux(bl;9)f;`453cz|5nmy7bC8dWH6P*iNgL)2$2_&k@l)q{VS|j#RKLD#Y#pGXylmG%~ zLs}m#a0!*$lYSCSf}d^~AQ3^Ac|&aUDT7PZXOuj$62xG^S=dz9pxWX&K>5yg>#do@ zZ@^suuwnBL75QUqqoI9C#NOY+dHg~oxwxhFB#yVL+dcu7zG8vI-~e_%?%^N0FTXFg z_88YZO5a|t-->~7e1JtM=vCO|s!ZOi%k&@7wN9t-r?-IqZO%ia*V_^|-A@-+E6IWj zSBDm70&9ZdFZ#j~MnW0g?%CC2D+6ZW+hK)7S#CR+YpM4K;ZL2HuC+I*P3CX1amS-w z5mLPPN6U6{SlL!1l=Du`Xs15b%a0Q#1hR3#dkR0=4USk(yy*?W4S^0Z832M_BTUr~^;VtI=)*sCkDNK6=D*uzf4*M9l$5aemERO~IV4;To|YOQ-On=Us&l z(q7PT56MN8q^Y+LBQ+PG-Ob~+_CSf&`TtIthqFC=wQ0<=rMi8Z2rNBT#pPsy2rPIP~ z((t1xxAz9TI@;X~$O}Mn#cWRj`^k8`i zVyuT+lnZ^|IfiSkxxEgaQto|^2t#ECzb;>@JzVbyT?%#~T=Ow2R=ij|M!@Ne2uPC- zfl93O6)6_z?`ws2@2ca{s`>dA6<tegVlhNa`_sbCy2?F$?B*^nRzYgwPMZaDUGM0NB^YegPh)082vMXDPLr=&xl13;91wgjFK0hY! zRndW!2`Y06&8qd$kN2*%DXUycw3BV*IDr8g(YGK?aeRSaJ^!K;0*9!uXTFeGA>g z!fpYs&n!O?<~O!$Q{}5cE4g3S^QWBHd?$<7URB}3#nfv1V8)Gdp_W!~S>ww_rSm)= zVt#g!K8p(`Ev3vC3i=6o?UOuzzK|0$BQ96>&n-5Uk_9k`Lf<_Ip4Kzhz(`O{tQ2nT z6;eflZF5{({9Hv|MmRxTK8OtMnb62%0l04wxu-Nn8Nl_wS(VlaVVLY6Gl1g?z>+k5 zbu=XLVP(r(x>kC@Hs*_%o$kmHuI6z~3thi%N~cTQCWgt*`53aBqpwLlz1K-ue#ox+KnkE6Wig#CqxRZ%82uy8~3K z#?(?^TnJbb-`8x4AeO(EUPhg%Qk_uhoewC+LRc$Y41PImE)>~x8#1vF5~scY8}U6Y zvL`-KT?{o)F_xusy#e}z(2X7pWQ1kV78E&Efb?&F9jSK$X^J-NDqB`&Ib*^HY0XrN znH7}ITyi6KR}SxjZ^RLL(|}`mP$a%>S=|&ixF!vHk##;V+B640R!>H0T_a}5m5~>H zV!;S_DyN7=XRHFXC~t+hLr%LZO#p3kHdH*9ijF805u>uv2KQH8LwxQOuOPU$LbZJ< zS0;S`&ylWAw}xTY^o6V4iM#qWz|FC)?A1IX-+H|srZp+x8*`CM0f19Tg2WQitv<&$ zXt6a_X9w5~q1Gv`2h7$V_^v_5{IJH>(A08K$wtcP5d;_p5EO8>3^wfdGbSJ*+_H~18pcBR0Yrc%yUC+`(%PQr z;*jwojcd+CGNv591~is($R~W+w26B|A?+G(XnU&)X@-DSApywqt#$WF zGeAqw@2)Zpt_?WR?Ng}tv=O)7hkHBQ6H`{-)(~J*NUDM_t#?V=cMI^d?{5^TDk*Mw zUrF5&lo*m{;*GETi3o^f?fEXsmbGW0z-;oj-0+sEud#gTE=rfx+<(;$sSzjU#W>BJ zS;vGT!!K{G3vtz8GsK6Hor~yK_1I|ByKts#Q)1rQ**@+4wAWh$U<(T&;W=nJbBixL zRtvsBjgDC*4|=jJ7&G5WWy#rbmzNb|szsHL6rMH-%A5FHjk1Q~~LaAFBI`w@Sj z9xOY~GK!VrzFUlnfKS11-q6LN@35=V(J}TZg&zlHJwN{0>eq1F)C@0}S|?@-tFN@M z=|W;us1gDei?2TU!D1rA37BIPqbna2Ro7{yqgtnA3X`v!u+c4jnQy4lyf1E7lKU53 z6$<>I80ZOl?+q`w=?f*;czLeUHQst`B25zp7?Ba$MqDAd%4V`2n;r^6-ix2_v99Ob}f7J>nqIyr+iV zF-*24lCLY`*oRPM+%HS&HTV#@NNm1LCz;~jT;3f#EyeY=I`*CJJG<#Kv>uC|V4KJ| z2wiX_p@te{O%Yp|w^n?uR@YrYsBKAjUwlrmaJrzcuK~;i7$JKJ(Iyoi#>+i(0mddm9^pz`YDwI#>;pTX1rZ*=v42JF7QAa-2Lmv&^o z$CUtSaRsqT5k3F^8ndE`EBp9D004ikzk*NlYCbZFv|OU0hrkTv!=w>`fS%xw*L+ z|EeYVmntK}|G?Qg*<1dFGBRWYS%UtA>kf>}49txGVELh4_TNpiwFf!83;ZiL z@PC!@?>zr5%f`m$KaBpA!c&`;k0H8+!p& zki7}WjY#ahk(M^je2o9EpubuFVtHQ$^zUs039v9RbI>z!(lfECv9RzkGxM-A{fqm* z;`odVdH$1unT4K-OYI%a_Rhe~$M{d>-x@$h@5Wi1&>K0~|JC6FN@}c9%*^tvT&n+) z{XY=@mC?}6&=TYz!1mwJ|04g5H33YAG+e^W~!2Fe}`{Td<`60X)96*kLI?UmZ zr&{@<<*>y?}9Y0KbaY~SjAS#f=mgrvf;p26L8Do71XZl|L`;c zmy#tW1QPd6Fc0NYT`DoP(rM}l_>Cd@Asj;-B_!!uiw>%FWp+_}g~jMQB_qV|qQ<5K z7HjpgvYA8A89o!f^4Y4&%F^8vXR}|zR8(YZTp~ zc_}FE%4OPWoNqRo;V|*JU)`qY&%`#D>ULZ3(}9tXV(QPpbs?H z*WEB(8yz;ruuV-8G)*SpI;1}xX89%3$g%S4(406~&k{nf7kz5^D0AU{TDGCxZdQsCvlGq!8 zJ{&Z)LiXU?!`k`?m?lz2T|{cDn8GnTMQF1q8xc{fQQ)kNn+Tq7;9OwtO;f5`zW1mep;`*Jdqu0@8X7H^%U22+!tyYU2CFBQ^Lp zPxGZA77Y}Y+_x|`(p^E}^G2$frW}G#nx*H0E_S|1<>N3*m^JNnf)o(Vy`FiZH<*b* zb)oh{&8<91cg7#YAlYEp?xp>LdzyRr;qg)osNA{aw({dATzfw{1$p_u6j8SiE&&LC zU4ZJDAYRCGiTP^fmo|kmV7tLWn9n5}hXuEK3iK%2Y3ugEGo_r+LQMF72RO>!Er*#uzsGzE*#~t&m(mGP-GZdo zds&i&XA`SY9>#;oK17jiol;r*vZ&7P#3$?{=yLyP)7)yqWe@66n@ZbFIuw}@FDjnI zM?3am#_%fO21)w$rCr&&tlI87@$?Mp5(!}tYFH7FEgK!*$yK!4$<4BYJwr)5K8EE6 z7pORvYU|%}@h~YrJS2<3o?FN$p_^R_U|L!al)_;}si6!#(M_R#;zvrS8IGTgFEnqO z{nbR;A#7kaiv7vH=L5MI&0^@UD$>KqP2q#{W9zPaJZP|qxh-13A>0xfj-dlG6)ms^ z{wJ|3LZJ+Kx}%SM%!}Vw3W_0HrwIa54r`Lwd+32tr;NCJt7FPkU2wk^`$c(2)`$<5 z*2)U3YM(!(I|uSy*@<|iHk&sE^&l9#NARQAvf`|g?-Rcb4N=(#Z|%plv4su?T0^Bn zdHL?t(B$!TXN2aa^^Qt?axlGy*37f&W$K4mUNj5K7_1^KhRdJb=u1k@WzW3!@P|qI zT$hXukNs&~G?Duz_=tuwYQo(|p#FxUM+04bpC!(eT~>K_`EV0bEQ}c})rhwLZfu!y zANz-uz{s1-QvfWci_<3Q2WxkIaIgCKCWy>_zhOlha0%5A1A8*fjw*e2po!Dj)5jUn zwXTC_a3xu2nCbB}Ml?tOpqCT?`2Y5%@bBK#(8k*F&sU9iXL|BgD?YCo-Fv*swkDO& z2tIcBi<_{KnWxCi#_|iKB1@>#mt_k%I?X zz-x~LpZNaX4D-H@5{FU;ZBmjv+Zr6F zuogFv+H*{ZSMKz3*24@p*d;dJbx3<3f~`e%rOi~dSLG-w`Nv3QBBAsYW@nQ-#V?B@ z$K+de<68g9tNFL#RHmwqkc_<_!*Uo>J2T13x;xsNBO@@MkT?_$*_Dnx8J|ydz7!qo zS_}AiduwEde(W#fuC?;WY|!vsk+H6ee33KX7%Nd+=#S%8nP^V{njT*F4;Z;gZE(2e z+u4|>Pn&&qLj4UPmfiM?`6(l04(0jCY-Xt~Qv;u^&4Tju#3Z$PpT=l%O?}XR4w&w^ zqOWW53W-zaqbo4yjSlTq)o%n@x;(#heQL}wdL*f=CJcget2VK%DK)=(=A?Y_RC?yV zRl#xCJQcQ^yI2XkpqiJ&8O-698oUl|@kAGk2+0=(8^79>;ri^D`Ix0E;NvvMW^1m0 z)?9rYD18{f&MLkB&`|-X#ee_ksAHq4S6eX=n#$_PQaNte8oF7}GEw_R1kmh8mpo!S zZ&SsxpcmJ?FsRW)TaV$+XO*J3jF-&vi3ylIs5V>#4*XJiVNR3_A%sVyA#xfsDkZup z)-?{0sn3o$M&ro=S;)a!i*=2wBwrrZOT%G$hE{~py9kj zw?%rv)B!$Q4RC`y_< z4=nQJ!UAL4!}3hrx{{Q*+bUZz+K;c!#e znf?NWenm3(T4;uhKlp95kp~55C3J#oRPV6?>o~yg7(ZI;=Tb@;&R3dvx?~)Uaefr8 zoV?vn`0DDtwksn})9$@dyZRamShoa3Z3nFM>69`sm~zUEsu#(X-?$W}4fZXL>_t{ZSW$r=hD+PG7>X|~iP zU9Y+tKD*%ZU3Xg3!q-=Q&(Lk(W-(ycc|kN0nx#lQG?d%Yx$I(VfY+F+jdxY}bjXIg z5Lc?`BJS|UP*1YH&5#l%kc}FMv6c?`d2+Qt$LV)aPWTJ<;8bYXXa{9ke)#X3q5}2H z+oFQ$d+J*}T|i>fAT7?+yu(=m7hJkh%)Dy}e9rT>@2Me*<+o-A2K8z;1R3f%uwu(f z{fzNa$e3Xj$sCP1Z9Q5)>t;hYV z;HN{KUH7dCZ-N)Pez&m+&lZw5=%M1{viGUC2N z^x7HAvW>(265gvPcXosP&J=ex_Rdc@hf);OX zA;$2WmhGCz1IDmnkiEyZ^#a2d2lL93rh1Pgh!gPpQlV>kRY92B&|8c2JNY6*H zm)x~zEy8w(B+lBcbu^V%R-3p5(UrTYx7VjE8XH^6lDdKRGl7Jv5>y{ASfXedFP12* zA8D_|M~^vU=@aUF#bqrDwRT^*sI?`SL0Z7Nj+D>vOIRno(b=k2#dHL1EdoJ<3s@p> z-pL4NCPstq$p{A~hA^~?ZAOD!xT4$Imxh_qSuSZs6?M${9l!akvSM@YQj0#fxKe~& zAIVIAbzsiObos%ZZ|WL>!8jAWiB60}SO={;A1-n!sdst6`4pLdS<{+)2$p@UP1?`O z1xt&ZsyBJd+3MIRP&UKrcR5{-W12_F-vwO~nn>O+T$Vrbdn8)-2R`=k6={TYm}Wn;`vYdnBbFKLw;o+4!Yds0qxE zY8xZt&Es&2btzpA%MFsykIDXq!_;U!8>Q}=lF0P!1(I7IjqI8aFDX1aW-+q?8?IMO3f<6RN6HmEjCovHi^;a3A5EPfB6mlRJ2`r1UgI{og1=6?{&!c|5Gg1=}Z&Vsh zs&3A1>pApi7szVNE&GYSZsazwZD$Tg3>B>^2j_Yg0YVT6rN5_ChOwqlB3>Pdih%nK zsQXSLXZihiFDyrWKs`inBt>%(VgNk&M~#S!6;)Mavrj9T1jAbzSx^d4N9V;uxp18A z4(I!?#_tpv1ku73U+SL|Y_talCoWahP@<>G#zox?% z_&!ceWpig7^;dRcmNG+_J3%AO{myyBk6G^4{>e615TR+vxQw^817@*K`)nRvU>g4u zK3?S)qrL^)s%}u!hm=nhPxd|C`0(uU`~H$xkE;m0=s}_Whltw%%Z`6{?OYeAYe~t2&h4-nhC`u><>pnkEhV#GSvsG zS`Fa_-vO8OS{X3a>|qp$k3_|2E8&t`i@82mvs5ODoHmskOb9}U`s=L{F7EB% zYg>$0Y2B$4Q->tWNgZMkx;(i^F}CYDPoUkf zGXJACM+7n1uBXL)2a=Zir*yQ~mlqWC(4mX6?;1$n>Kn%C8s$WCv1{F6KHZPLRwWk8 zRh{c6X9T{X{%WESo-IHWuP|`*pO$vtz$SI-kPb#sdh3^vExsE3D161y*HY98aPX~| zOK3KP$QVPyX*9r&Uwwx5y`$U+-3(M-ST{s;S(*I~r`IDmna*0?X>76xLqLpd*=1xl zY)f23Z0Etmj93`*JU6L%hu~BBkruoqI;NZWzr}lh4xKqTy1i!$|H_?d zy?=~q{P^Qc6^6$C)aYgYd*zC2_$?YFoTCHc7{weUy)p)_G#Zt8$C&K(W&4_29INPI za4mUNx@+AKg^=@TDvF7~7?MeO`@;dTY=sAn%+ITalX1SKT(?2eB%8V0Cznp{xm!if zwT=zDUm~qxuAe|`$qLkW)b#YoV_JEi!xQk z!Sd*uzY6Q~k?c#EE^m#7j339s780T$7Ua<-bh@U|q94>Yf?1plP6PO8LyVxm@a5CJ zRX3tihbJDLI=3xY3v3x2J&x>5sZVQcRPWHz#L`q!Ihx7X)lJ=){8&0SR*NfnB(5q^ z(E;JoP>70>ar4ZGOnm&*_4Rth42cEg{_513ajbo}gp$%55HjRlnDSJ~5?$Cf%*O&^ zC5ex{>TtI4D}HTx$-APBm0y8(^H2)(M~@$!UM}R@Ygb2ybgZUz0w33Eeyr>6;2rvb zDyO1f9Zb0(8W%*r$%p#^c<+hHh36j4u(p^{<5)Y>#k)yHW)hzxxk{>u4)alU!1Z&OK{(p_3(VK* zM*fky594Vb?)W`CSNfwDcJ#3A(TyHg5_@$aKl5dd%*_rOQhc9|=WaD(Nc3}7&S$eS zM0}}5Sc)r|bx-tWHUh`|xn0g3?wLdUM>MU0W`c!j4}G?Zn~#v6!g9qXqJYw6u#>8*erID>j?|u)gvn(U&HDif;DKmwZlX0w?zALa-8u1 z9Ot!y)i-c<8?5@*2GVH?w)$vk(Gn{&m^V+}e4N6O1d;1@QrIq6nWrQtwYMb0W&b45 zL{L&%wJ7bEsWY3CF;Wk|<>p-M86NF~xNQ`aSTFDLvV3rqpMvWSuEQi5I@ce$Tmq5- zjKADR8nS%ON(380`OFjOr$iO&mQlF_*T|cwRpvZY9tgh~_z%sc_~3#10F)b74p)J~?J1 zC#dltd67Q#y%5zfA1wBE+BTQANqt^WyoavTuPv~Jm1eK2E161=kwvw`r%9|3G>`k1 zJlyco`S!Z<7<&;jmbhG_=9U6|)&+KYyAV`#%WbB%*VsQpi%wV+!QDH(9xpe;Qrg3f z3+M7V4EhEKNk*9^iI;l8SMF#)>X`y1O&5W<8PFjWfPpEt|0qu4=)@f0zpyDza{Xuk zGpb7_AovwA3p1^#iZN(nGjR7{2NK0S)ot;P0cvulDX|!6zhcp1NdJ3?80Hx0HH-nA zjTBiSpAMIgpU|C&l=7)K5k3U54@(s%-SM9#eo-G}W;o<#ju26c z8y!H6I)Gc2-j2x>qb{8#OC`ll<*uR)H`>3y|4Qorqa&!&%vN zYZy?4?I;AVFsUq*qR;@Zi|0*RQGZBIi8G~>%e~xBTjcDiK7;7p_$8+3`C2T3fpQeJ zy|YD{J%Xq#A!m88B@tCk#hLv%7WqW6Fr3OxSXE*RY;EICw34+?X+RBA_b1<8EH~oX zv$$xB;m6BL0svB!LmD;>%86#~m~zuw3u;!XkJITyQHE9)7W9j)J6?X?(Kg#Y>clkm z2K7d{>KH$5s{H|F>b0l;PRyVk!fim|QOkF@;GxGC*@HWi^_Nhd&GGcD&U?CNSS^=u zEVam76x#5FWBe;-41wO^mcS{Dkj=Bg3DltIu@6}h;1p3dq9efQ+wqcvgKz^cZdc&U zre)b{sSFGX4-fB~{cNN*)s3b(^^(Ypgw^ME!8JKA>1X#yzOC8i&TEc}J~6Hoxh!^O zIlJxtO*%=5_~*T9v+EHI#kn$83q#vd4g!>Di>+&_siw%10hDwfyzbw40~73{M?b6& zGlcPiH|!srGP)Cp=GyQ$23URa`K@8W_dIlhmj$mMKKGO5kh!1I4v_6zuwiy%rTYou z=Usis>)!vYGIQlOP57COH3+Dfca`IjTYott(u9?A%LFS-;LVVWO%5XeA=aKIez|PS z&1IRCgH4cd#6BckMRcuDFGpmd$sO-h#D7ENpCjagvBa0x9WeG)+Xz8BbJS32+^teSyIw! zhC3oj&bl$c!AQzP7kYb*__av0?TN=s_SZt_oOUYwwXRd8!OZnGf z27eYt`5R#?ODlV;4_0sgKggl|ot%xO;Rlnq)zP;3A0&Sb#{bvu+F4qf{m)W=*Uw*4 zR@xT&X8*^%{W`S&d2fH$M*}m<59IvEk^LPECH$Ks)7I7f82IoH)Jv&rZ)5Y%NBck@ z6aeJc5RXs4!VG@3|Jx<{1$OQKbfW*RJMbS`#rfs9$%Q2ZD9u0Ex`Dp!Ke$3^Os7RR z);Iri_uxQRtz%;zVb_8h!UK#S(1hH8J{@wW%anyP1j{tWr8d=_Z|#^=vu`J6*4~Ms z{mHFYhzg%f){#z3?yTJ?*B3nM3sZTfd2pP;gMUIzvzID|6SayE2+Ki60?BV4Il|>Kyv0AlKIerEkS1CL}zN34q3+GK6v`LVF4JmYK?I=D8 z%-2noGwW9?h>~Gw-)&cU5xT11=+i%E$E+bpX6`qILiVjNf@dEogSBhCQXE+jJ{@A- zcH^Bb>_X(_DZBZ@fJj4lz-Loufu|VYxxZI5h?RAuHx72!yi(An8Kft(=SZ_yA{L!725x+hE5{>``j^-BE~{`BdW z*ZAGc{zloKecgZ7^UK@)ZaIIa=P$nRKWqBsS$?;*ztQyD7yf50e?2ZZs((L8fA)?4 zSubum|JNV^Ge>cGoJ%8(2r9?k}`Sr2yA1{WFL44^nzn1 zYC0-!2i1K9sku0-rOwX2H{Ls^I~!;& z_~Q?<#en)!nE`rzrxxqd`L@VQ)-yrl1A79qSx^2(z({0Zh_fuWZy0|K` zhd~4uzyO_47@U^#Q@&Aux-k6&GhIPkP?CB%@A|zR0Be@9meI0c%De_(0K=@@)Z)|_ z2Fk%4gz5}v!qWKC88zkRne5XYazS>*XV4S@$Dsv{kW*xGRPR;Uo^{FYJB2w52s$U- zQ2_S3N=`hB3x|46t_WE(&QSB*(dN$j`}mvIg*RNTmOAzcWMDcttm|*v^^Olqnw9ET z-?-wxW-|Yv^M-mxf;Le~0n5sXs%2q+Ie!u2jqF(miX_Y9#^Ktrd}tdzlJ?wj??Z*) zd*gZc?&_amd~!y@@H5r^4!JQ6rGUeOd(%PThT-FwiFnr$wnJpsrC$McDSQRK1ZheY zb20%L!Ak*+L)r0ag-`AEMP~z<2?DV@EDBYK3<(Q#C=*EHt0ee4x^sqHG(%=2;BEy> zIQV(@FkcR$P>_rv9ys1aXN*-cg+R0UcPM@3NXPl zuVZaWHc^XAjwSwkP)X5ZTt_}DxS1S3x&mNM5&JHK2B6o69|yu@&Y>;Dc)^b09&}I6 zzWQ5YL_Z#-0Mrjg%Qu*vKCk(X5135xt=6a*A>vmuM}KDyW{~t_Q}P{$QG&R&`mE*$ z-3;sTt-GyhyYW$*=$#)a@Tj%+P{R6EVa0U$RAntw)%oLxBFe5WhcvQfEVH zxXU4ii;5Mfsnv-)PT=mAO{J`xHJreHs0Qm2=aX9x8gd{TyfY<@_oSWHh%9xMvJ?;p z+M`JWv_(HORJYIfF2k>HpFDlrb$9F@l!VbLW03U_2632kBFP9z2T-QOymJCx0@Wm+ z22OV*U~qhlMsasQuSlFz+=Mu~2DB0jy;KT(B=VE!LJX#JYmzMT-ETZxLODXm!Ef$w z^q)YU2wp*6)ha-(AH)!+eEqs`v`fE#Z}P*?4zWVoeoVdaC)#>Z?y;kmeW`5F{1|T# z_E7fFs25q5U=i;@(n{P~2oNT3>t)cG$A1kgu5=wtHbA;V;2h!>_k#P_fKWqm`bzbq zI2)5a?c%5DCdLE0Gq46Oh%b{%B3VAjU}+B-UYEWL2xe+7J1jQinT#k zzqs}q9NzCp_K3%ekSBU#wPCB_(M>A3T$!Ry`dGm!b{Q*?u|b#)iZ?t-$}+0)0a+zn zfL}Q>iIja$@k6MoN(t6OE%STUmQo@@(E>n&5AVX=iM)WZ?)K4PG{$8VBoR2;`0yT= zffJPQ!Lm5x;crnVgp_>#2b`HiS0xoLHAySP1e;AQfQptN0t24{D&Nx@bkRPMXUVBW z%5~VZp~|CZ0_Hj){n> zTdguekj-5_J%j>k7{_JcS6rn5nmZfNLpR^$r(AM+h11FEC50&yDaWi#kTYXTtzVS` zf}hfca~LGz(uW@;PwW+4B9_~g>noAa980BKo%Q9?ez!~opKjz5CcKw!jY_-wum?kg zUq`eN2Ip3UzraRT)RNzw`RJJOU|3C`!(D;k<{08DL?DlYqAO?RI4+u$YwwB&mq{^oGH*!sBS?TmGyEy}We<5WbQ}wTE;5-P7h+Z@tVnVVA(T`NY z8I3^Av2P=?t@Hk+6=iGYW_I*0^K-tymgK12dwtf!ym!N6c0J@)vjF;zgkZ;R~b?bX5)$P>FM98 znok_LsvK#;l&+~whik4LXBqA%!`an zM!7?iDpX^QVL3iFv;vpsSLRUc4et*XKd)84)X0ovn;$cJR8|2pr;S@<&o5q6+R+pX zaIw>1%3G6eEfHqDrARwAM?L0r?}C_jF73e|3S~Uq=Seo_yAT}Th#h@FxzI+`ZW32) z(wmHun^mt7W-fo5qY%q!n3|s@Wu_b!$s^Av3p~~1us0|Bkb?8m`kXVAOQ0{ z$UsZDA4Y9}OZCW}Fb1(ZrQdF(DML9r7wH&)1y{sczGqWJNEARL=T1D|P*IjuxXcoVaAzWkg~?^;jQ-V>h*Vp&ky51kd__oEF43z@jv zthYpewR&>Ep%lv4#AGKgTe911>%zUB4tYQ9gl@KGDC0R!t9bsI8=outGU&~W_E>8e zw5XsZSCMF(6qU-7t3*@-*YA`^k*FZbh@_&E5`8Rtlusw7&Uk9$(l{aN>X#yWXWHex z`tt)4rO3n3$wt#>9Z(TlM{jXaMzD4kxNTKI{!C!FiUsjhPAa1m7jrmei$2gWdDR=F zBAAMdV>B;+yO~1A2T?Nc@UL`2^US90yk}eF?v8+<<1dL*$tCDUXz4YnVF?lVa*YoY zMMjX8BVAMVRY0~7sF_1d1CTaJ@=B>FP2adLbz^USQ`KyEoRhM?Y%FMwH>!Go*c6x~ zQaMQUSm-V&N-n|5=;;}#UD<6ITr(2t3nbdZ>A~fsc{V41Z(3A+FR(Sn8-io0peEVF zcCxI+bYRft0BZWIf+}~VM?zSc_@!$3#I2I$yhw6`Z-a6ZjSR184a)GhYZ9QOsE~G^ z+sgIxUZenS37d`8h;kSa4mv_2nVREM!-~z5%Z5@V8nS^{K&kilus*jQ4y8lS~=J?l5m1Vnu02F z^?mW`)skHja0!F(s%AuS#@XOxkA%k1M4O|kyY1(yr9J!I&<)y_bWVD82m7Ze>xjIZ z`(q?2Py%$FkudY4lxYC{bJ^kQX{|=L8PzSu?C=QRPBzBAoR7nNp?PZ>HwY-N4mvi8tih0QK+4}Ydb>uWgaY7pxk zcunP+KEH^6znMx=ZOTAYh>uTqKDqW<4%(dNY;`WFljW<1WMy%d-2^}JDkeJ>`=qEW%Ih}zHZ=nk;=GLhs>cgXk*NVy+ zu09KeH&~KIkGorzNv{!gP~}l`W66Oj=B`NAH3J0|F%wfWGYe&39GM`4?3U<^s>bbR z^9i1HNB&;5pv|)>QL*|`T=})CW@@7<={upbL5Zvbn2B0p;G^&Hi9Z$j@r@j&ajW%o z)iK4xW5z+bI&QmZN`2|p#BhCR_)61mf<_giL7qb=J>nP5WvfT>5Jc32XAl8Lef`$E zg%1O6ZQVW;VBeEPw*Xv^@xM5sk19+D$HQedVl#3Xc*wadk%kEi38fRHi1WmIU3j3K z2Fi=c+ZR*esnwcE)N9SjR%z9v8q}go4%GsS2_)qHz75RH=bbYT zSRGE>hIke-YX7px`l$e8oQ+vb+p0Kz!}xXRhXh==+|7YK4C{_POREMuM3?|NuvJ#K z-RKW~LM_aQPH)BpylQC4zBIxnoI3YHH-?Y9OgcZMmr1MzFD7YE#pyG~RwJl7sys)9># z%cI17y23QeR@s+CVU4y2(#&c#f?aRjR*GY^SW;O4B*X;J4RU!auiQ@{Bq0oPC1N<- z6}QUpsHqRT^hmlZa|ok->9D6On4Mj#zKS8pL~{pXGYrcmYWJdYe%8Ssh7Qz=ON>r6 zJ`Pz#=hfBW>(!1uZOG{NJrqxmW#UiSOD$hmEB=4hsu6vWz+>@rEtA>m+>^~`Erlte-Xl^f=95h5;0x6Zpk z81XXuvMabX=HHZ^O>0EISjq7~blGp_YS|60h>4B3_PSP{P-chsx>Ghhpf4NBYOwYK^(mVtZ>yk-Bqu47-EE|f zFx{*2j1Dm73KVW)b#D=Ah!U7JuLH%sj$1zJPJ8>pU}7Zf0v!Mq?c!j5A>VsBvQ9dj zA|)W%0Xz6%s-8B_EhX^A9a{L~%!It+1-|oapl#?R@=|$f0O2O-UO2j`QWAtrsV3Q8 zC(|emxk_M!Lwb1A4l;;hC4ZJfaClQ}Y1wQYbUC9Xc{7}1`hF=|=+9`wC9}m!zIx_3 z{V@6ag^82wXr@YTHKuZ*)RPR>WVTNT@U$+J{VEdc**XH8U6tXzn$J1G)Nf5{@YyXE zWxwV`;B_@ufBc#=ln=d}Z!OgcZP$>a?$dk^{8;&P|FM$M&UWNZzS_Adcv8@I<_pl& z_DG0aDCzQxdw3XG_K2z+Sr^>(Em}%c8U1>`4)n9YX^qXRWKrVbJ9~d+>RVed@~de0 zcH((FFno4dy=`jG?ahZkW85|zg(qLH*qe5EzRhET z+{n&*SEbMNd!bH_Dm~h`mP^UBT&PP)5}KivOKvc7Yw|W(UjI^_jvC|KFw)%BLxe6t zyN~W!f_+3VGJ8)2HOYI)$KLhBA6t|-WvItO!n#^BxiEINWCIIOR5}7~Vc1faE+BZI z&Im$f{7WHwN^Wpg$MLJ8XffwKNaskS=0ab6CI(eGO5BNBL5BrmQzI54+*LPmVzp~A zQQ8=$RzPi}$ww#r7DD9oWhCzuq4U~hYx%w0(018~xMIUylEgr4ST4=GJgNzd(+duZ zT@11@5TeXN#qNDIlr<|`T%rXy@0UX?Bl`=WYb`b$$^}uXH`gO5rTpuwzGgsUDQeRp_LhKm3A_7 ztvK6qMhKI(gG_EH<(2@~S{yh$b!$n#6e1Qn++g@Ti&T55B(SVeR+tvi=GE^;c68hj zUWsfh|7wO>kF%sZ5*`j(b;^c`I=S_QLx2t=rE!%EXZ?h?W>gQ=s)hn1N}%nk4Fz;A zJ1*NUHp7p8xWm3^b-kqd^u6oeb_unAzs+ZVenBlO*knQEP&&#o)(?!bCYn!VlmZl2v^z@IYZX)qjG!>A7%14$)pfxV6 zv~jx{a`PMaB*iW2=3xh6HX4_yElXSpRh`S(cJ@ybI5(NZ69{tN0J)Rvi)+E(=tPXq zFK{aXm^n&X(?pufb$vJo#5Cu-(L0GAmJ-4AsL;_G%`Jvo_ei@`^ey0;OcF++>VwO!bo$@>XtSlijZy##Ft%q#FF6}d)6=Q z-3CDxkM4n1OI^`eO21*AiUw>sW1kv+=?rT3dAZqJB-;`eoOY<@L1mdxkO=kumEsm| z#rIfmYmgzSI=!C4XR~}ze6J*9;`mrb+GVzWKr9oy*)OK+wJXz#=OBZKHt6krn%v;z z?lMDl#M6TMy|MRYAL07_0pU7r+%L%0`A2{A@$%h`^;=__5pJE^dR0BY^RC|KI`0_0 zKBLK5jy%_@Fj(e&vO8_L_C>_LN67strn64xYmaQX*zT8y>ZUG0&1YUOBFz(F2feD5Z%Zg?hBgWf?d6;XPMp-L*5}%LS zLd(I<9mk5#k=i{)*BwWhH%;He7!Id3(_Ew1K!Dk9Cb+EI2*scvRJN~`*s-ejT7CKw zTZ;FV4*;CLuM1wje$s@tsZ{enhq{2Tc=qx_O?>h4!n`oCIt*?pEzxMfi>PM@&#qmA zE48=HwzuGjS!Pt499??;a=j<8jJ(gkOepm=!&(n7YY4@pT* za*xiyLW#M%>(er_sZUIqa7oi4N+Icg=?sLR0Cm;p*JSBZ$y87HT614RB`(@9T|l;*i>glrjfP zaL-BCv`SaC3fx3Rj6R@rl30#wt1YD04Zr0I(zeAV5U>q>QhGt~g9i$Vscir(CIkfS z-OjgGuj8^2S1gEd?lNgRbX&GxxW7M~KirT8l2sye1|2B80+%dn<}8$o{iKq~fmtET z+7N9uTtP6UOSx2{gzbwUkb_E0WuJ7m?;?>XJjak9q@3-FwAaX0C>3@;I!3>Qpa;Tv za(@$sW9t5al^*MZRnp|90l&GZ;jDGP7@8RiW^PyIQV#85@Y5U7J$m#rCPTQl+ z`lF5J&lN`1v*EaBn;nk&l?0M|@wn|8YL()-5_*$cl~RRl_*yyeC8j$>OvYyc4hdcT zI03!ffy%h5Xo2@c?x6TCoUgrL@--&2fHLqbv!CRTVbli*Rd=j3HX9-Bs?WU+Pu7nye91x1xfXun4|0vggE9v#alp*KD? zEvX1C$R#VG(2gspDZfGRx2iOayTh!;aL5n4N^FqLe;W!1*E1{YGQc#wt2Ee}nJASn zHsQG!FNN5p{+^_w3g6@$S5>zM%*=CEPQ=NLp(SrV3W^QSZVoR2k5;j|5Y(`R@tH!AD?mR?q=J{Hc%>-lDyo{Aj*5QA4&k%0)tV4MGf zkm(CA|$Gb0$K ztew8O0v6MU$gdrI)OpgU!TJSFWxzfV=pUI!si}S+#Xsvp(J(Om4MhDjHdT)jH|@dt z?tlIU7O~u&MUax?_g$%uY-a*0`3?jrEr%ytX9WAb&BILffZMq*$il~y4_6OhkG+K0D#O9)Z*k*clBmz!jxUmXKm zm_Q{Ckufu+p@n)>${{cpRVl(pCSm7$eV&c9}_*p@9AHc|G7>5zii;Qbz=Q8UKub-Im?d>8o!EuVg17p z{%RWkUNVgOKh+HT6Jn=hqWiP0oVvKG$<5ckFFmI^UtnN>1+svEprL*L&Mz$;kRTrf z=xE6FC3GE<7m*klkaK7rQ&V_oenL50hk-?Hv}BaLvS+`i%A!?`ZUO<;|M&eja&FdA)UR$+W!oj^p_R$hX-Q2u{>h2-{LL zpqFcr~@snGY69j`} z&F7cWcN159FCuL?4upDctKEe#hMMuLwAnf6wxWzzj^u$T@48m@nJrBsf1G|XwG#BV z;wDH^Rr#9gi8+>6bT7j3Te>WD;aHbE(ivC+V3F8rp@ZfoAa%^tm85V7AnB9cNb2ZK zTwEQ6#kcntU1kB~Xb&#rBefx{;8cwK%Cx9y9>zHg=F#^bI-_(0FKXhH)QqCpwq!w& z)yg+;tCHD7B0TZabUxzQEW#!*F(2a=eRJa zv$`t;P4R7h(u!5EFBQ3Q22&&|yvIhQ_7hMC{cg>>WOHEojWgj3nE8wg5sQ|wjIr%S z4!7V(qwyz|@<_Ijx zMD?Z@3>+BS<2k$6r9F)v<#&*k$g?s#u$;-$}j<~oL@v9R37>Yd$)sNBbvhJPO#)o@@*Ba<7szdvz z_9log`b#TfJXiJ5)h;l6g1YCLj9sw(=X79` z@2TE=#d%f0g6j!T`r!VB493=bhrLJWyF^mi3^Bw|+z?}2bF3y2WkHZhqe|&oDTcJC zd77nr_Z}uI)!p-ezmV=u)4)6~6u$?CGxilEzcd<-l{Cg@>0{EUc?-EK!W*hK$%*!< z_Q~=?eU>nJL9%%L3Z1&}*=q5UH2Z4RMUFcN2aspNJ4&`>!Yktz|M@HQ7ne6+PXXm3 zGDU$|v|6E3nfe0O*$S7|+^_4m@hRl@%8)9M%3~^Hsxc)Xr+S`GxZ{H5;k9A6VXZJh z;XQ%|f?&9v1I$$uW+Ukt4w(e&0^(=AM-R|xnf(>wABW9nM>$P;L-L9M{pL4T4abm{ zyP48?RZ+)KL0MJ2V4v8_x=_D(zX;7R_lO?W$wnP1~)fI`=a~?~dGqr{Rz?fkA4c7kZ9)EN4>pfT66weaa$8f-lG zyioDV{>brx<7_j)H`l(f2)yhi(n20dQVJw)d4cf&fK4iXT5(ZiskbdW>Fx7z*U?aR zbL{pIlICayd4;PoXlp*35Bnj%oGG+w2Z!MC1ME?|5=OfY<@lDS)_a~uCra*{x5YkG z;iyUFkkqn=*%>2I{e+H-;Gwiezxw1)$gS`e|H2w?%6v}{Pf*i=&MIe)QXSJcJB4C1 zL)ysvgJ&FvAS}$S;sfeCh&I_lZA~41yYN1vo-3kNBec*54G{=V*zIjm2 zy)i%G&P_N!!9o<*lh*TT$U}zBUS`O|?}E8tuqMYHJy72vx&S;Mb}PX-HD3v z!yV@ful}||*{>c5NqQ0fG*zYFZ{=QdC$%#15~gzqgN1aM4^LqzEgq zg=GH7Ks;l)q}W#7bZ!LEJOLB%9gn-ZjJw9AD6EWLl-9f`7cqXBQ~QvftQ9tXoQ0^fisrnMpvdsKfH__bT{6j-L4a!OuMTV`3fe}8aQj*eAnQ7%}h1~KpQ`7>J zGT-)}i3ebc{8%m?7!dsZpZY!>7M7J^$2Ua_cgPmULs(b!W0Sc17gvc$I?4xsx5IB9 zS}&TPqgRkO&@*`2+dnMH>a7(gRBTL-Bo!~F`LAjThGav$zH|7?n1Nm}i@)Xq6H@GBb zefzipY)To@eTXUp&Glu6gN8+d?R45_RTu-`mJ0 zGHr{Xb$p_9E(9H26V0tWGDSwa&7Kq(gxLwE@V&{QEdBMhKU$CgeZUs1*mqP$WMBe2 z7-3tlB1VbWn4my4)5>plrTn~jVH9{WI22wmC`QCy$Q&I08ZM-H*ad>}bWlw3pp6IR zs#WL|<{N0*OX`MiO!8Zd_;Y6c%rH`9;X$9jyfV$%+S*45mVm2%CV-(Xi#^?Xbau~n zt1ce?>Gji1ZaL7RP5*O&vq4_sY>Gf@4FrU{q?RWv~jtG3$+G3K0mM@M-Ix{h(C~@D;Xupq3<&csslM=9n z2i*#sufjbpJ3a-3_Dren?ft|%4Wh=_+}?hA2I%B zw6h?N{B)G!E|e8U-aWJ@$FZZsI0NCl2yv^562WqzV9(3DO(l^DKf23-sd3tn$ev2t z6Os@na=w4UU2Gnkah(^%?=JPzDCG$&+pc-`3QO^(V7okTmt~FgwZP72dtG$iQ%uoiotX3x%4CyB^RfTKwteM z$*kgN4S9q{d>vV~waV!Ud$lJQNIs{8XC~v;dVV{RXIS_>-@e##+zh>HTAoq0Gnye8 zhlRj5$-0XfCj4}19&dzknJq6RyQ*av+HQ2+5Xnp@5OY1BP~(!2Q|%#kF~b%A>r{VR zC-UlwMJMoD)|2Gd67|}Q^j?|$G_hfR8`W_<^3|r>p)+sVMx0i8!wd3N)4}mq&hUOE z9hoB;5iynEjMmHWseS~wD$;t3+|r~Z(GrFE)W+TrBW-CD8v@a!whAL^3@cl8+lC_y z)|9$URdEmrn$rw!SLUuZ+ekyt2aWsmG)fQ`3v}0y zzWzMsa3U=u{&|zizDf~6v|i5I_OLyjqAdG4|Ll@c+iH2J5<-qzJ2uEU?PXH|sjxlY zLu>n1?Piw_9Pf4~6R<@#>WRX#OAgGq^$G#Ge2-5vR+0ZMB%YlAE?Ghwmb51Ai&)8) z`9?{Q^;2JSo>ysXFPOMlwr)vhj|0ihB|&%=K-*N0^$V7#4$iQSXnhc}GTHllPpS?X zCru>}I5||ZQZC=HDnXr;_bxB2D)AtSxels%?HFBU+EmHwhd9ZxIYGE?N(p5UP-&4? z$>NkI9*X(pU>KDFNEu9(8BXQVnL%Xmb>GuOt@z5v1b6V;nOm>}*EqUEN z1)d|TA0O9Ug%J!{+0LM)wu$gm+(H_thM5vCkXgC~K&%%YHpcEPP|LED9fo30)?RHHG~C$OR4Pe%KG1jI$VTBo^riYtr78&e!}=+k6Bh)K?VewWyuPf^W_UP`F&aU_Kim@Y%Dkt$|Lj*Ny~4&#VI9U(`;DU552>J-x8Ka*3!X>PW0pTL9O}9WERC|U5@u1k zg!WIe57vp6O(^iK;Pg`+Rf_hJlh|ych1OM=cN(=;;gnu^yy)TcMK}c6XmLWRm2{XJ ztRUX4KCTbE>YkUv+OCt&&p)Vzm#2g4Q#qSUhM7L*FvBDvHTv#4Z68;uzCl)Lj#+%A zhf50BQFkni+sL3f2Dp}Z+tFYy zj$;NH?B4NnO|X=sV!<*dq(94bZPlHFxvrI$t!Fd;K-4NbWVRKdBYOKW{y zSi4s{xg_6uQ^f{QdUC6l+esH1d5ag@*cV_X_DoTEm#K#QwM115R@Yj2ZU5BYH|h;k7ik3UR5k5KS5*roc~3!A<_ZMGwPpn5MdU9&KzB=9BZ_mCrY|r*o6B=>2 zlDtjl&-js30$Bo``*>7BA513%0;8coC;lSk$6Ob~~Jyop-LM zRGE~PKt^8>rt2YAquj6Pavbx(vW`22A_i}7&86fT*z5u(z7BFwm{kgxJnzIj$&Bxa zF3#$>QM`!FNrVE zu=i6brN9?Y0|-(i_bD-|Dj(Nc)k^o%$QDgY{OHgVXW_Nq%`8tpLg1=5yhKTNn(UMM zNS?}>kUAH(bG?6}=Gu5#M!{C@3CpfAp^zoAE1GD|Ix)owf6TMy$R17OHps38vH<*x zP2IZ!YxlZQ+{@_JQk0iS?I0NP_MDjZrbBghi5I(1XO(qpU><@@g*($rasqkin3@vDioVdbZE2#?CHFq zs-X8HHy5UbMMg#8X1L1kjf4gMDq#@3=rg@f`sXyjw9kuyR_MU1KvSFC>NH9W0q?X= z5JTcJO$ zse!7&v_sR&?{1(mX0({ry9@tOva``^1TU)DTR|+K9V~5(tt-tooH%IuZR^Hhl(@{Vq!+i*Wk-}1u4ilwG9#d$8{k+{hkhs2Y!7l~|9OZZC&tx`#+4lwFv zVrN5`)`|d?1ujxObm;QeK6Bu025)QB|9!RcPq9vFaOK`H=@BnIN#qk@ANI{Kfb#(o z`!?dGlWPsa2l`A7DOw`T9e_seq%HwvdbeY?ia6Zl;YB%w+>*@iMvO+7Lh|w9MoAKc zciAvx%P?d!Fl4JAGp`L8U(i8_VOh%HFum5@i$$nCo}hiN+P>-Y2@F6Yn^ku*Ad_`B z1H1rDsAleSH*=+B4s$1Xk#Lc6^=9ISMoUyyhY|+0n?lj}Q3vaw)pMDUIPB-}HQ!@% zsJ|RWuQQ>E`M-ns+d5zYIdJY4gr=JL(hHZ#N?q~p7%XmpbTaz9Z&&O$AJkzML!DTZ zP*rqTjgeK%ZK6Sx=riSiAK^@uB{*U`c`K_X3s#q~XtrudVmqO@5bsV${b@m^^jeV* zIAZMdaf@KvTutUbnuWy)y!_2ZG+hG2!)W2r%T-Rld#t@l@bM?^*gZS_SUp>|gG0|_ zUjF){&KQy58t^{lwIoZ%INPBcU4F;oH5OSb^%!ht%vgXnM=U=2$}V>;cH6Y%dA?cb=jQqn}6S$!{91rm_6<=9A=QHhF~dvi7{r z$zzsC`ZE)O@)Rs?eEKBCg zjM#>zA8v|nb;cV=4ruK{b;6z#3R;RQCmAyMDKI(8(;F7OVoIYT8 zj}6NDsfTM-h_Z4?81IX*Vx7tnoP5-0m*0)d6GlapmRbT7 zr;lW23yG5ESdl4ez=_;haYNU}G`c$J?ognprZd(y5_@@zICUcj`;vTFf5XLwW?cL+ zk^X1ZNo^-aT~&9OR?Z4XZZ16*5kFjpaKKhnIMnU~#g$8%E1!V#ke2Ls+KKDmrYS~M_*dP$L=15!#rXrLjt zFi|>LD_v~mZ1hrhhJZ96N?KB4tZJ~;w>lZQUiP%*lc}WdpGECAzQUaMZ`)#DZ^>=; zMs z($@={VMXx@@{oU?_ILd2j|ztj3^a7VIpd#79R4>I`^}?%(^9@a(^4KwGrj+Xn9?(U zU{OW}ENVI?<`0NPMg9L1nSO|Vu(iKqrqsVU*k8%m9}%f+Z2tsbSTr=W{~I)oP9}_aDF;?nHQ`gN^a@~1QwqQeI(;+Yc}}PB849$eb7nS7{mpyaIn8bUmT5P!(t`=0Hq!c&JkY6 z`*=il$hYS6^=p_;Gn?IHnp=2yc2l1P6*7M(1T^_>TxEJL+U)M-Fr~Yn%I( zBZ+TA4~^@LtnVkKa%q}rwnsC}`A2JVwe!=&T}lgpXbVTGsi&a6pEw8sgxuT{6c-*X z9mn}!pnsT1^SbJa8oZFR{U883ZVnqgMxk(t_y*X66EVXOB!PHRF#qYmk{h);IkK}* zwq{{H;I&Yke65!6j_4KZu6ES?cC7FMj^|a{Y=(TS_Oz%Vcj#fhg0hB1wI_i=KVRn^ zCYZ9O-rflS{d1y~OjNyV&)tJ%-Ky6s<-`i|4z$*pE+5xS$@}|@^lF4XbE4d^7K|e& z9>9QShYNQh@36E2R1h!sBRK_P9HpSiLlUS)!4FX?@N}QYj&2Ml$i@y>+OzoniiJUl z3N#Bp!ws-Og6&Rz4erv%g7@(YEVY0dFdknYodI7mtM2IwYIYLOPtH_`Dwz;$M(1>5 zpJ>TCUhm>wYTN*CE&|;kiNfi)=;PzApdV?({M>46U>mr4tQ|X3s6g3?{bihx*Zd?m zVS=!UcRn$HLkSc#W$ad90s+#?Z2@5y=|l$4{HDN#UIN4^bfJ2{|A-$5fEwEYP%{OU zAPkelM^mWkq1=WwY(xy2$3>FUX@|y%N{`qjqvW6NsJ#gAys4;^PUssu0Vgru+9kO) zEmwHu@(A>ZTj1q#BR9Dcv8z3zK-uyck&8fDkBk}yHr_5;5=;ox7&}5HlS89ig#xWqN+FKOg!d`M#aNDsweTuWXUL?9{fNZSn_cor0 za?x4~`8?xwmA#nNfKD60=eL^7ebud?PI_mb@Y#ZQjc=|T54N3eE&k6~&5u&?OVDTqz(#V7<+x%Py8RXsh4w`u$FgUC<76raTYU7tTnOhTVk;!`KeuL$n z-Zrvpv%6woLF!1kzYl2>&>;Dc|G@YV^MXHXBj0KgX5w=MKl|A}_Q~t#ATI>3BgCuA z!ODa_vj9bO!a)D7;|}QqL@)?IS-;pmH}nEE4&&Z}kL#Ah!l&iRmCvlyMe2#Stqr1( zTO7_X+brM8dA_bgBLUW-O6CV_(NMb|bQ62^ZOyh~nakvSmwm zWGSv9Dq?V@vK1*=61wDnhWg*q{e6D_|NHrznfE>KInQ&>^E~f)Kl42EJiUp)p}CG``j_RsE5j!7<2YF0zUOS?iNsV~R#p2fLxs-qc%yvt}6C`D-of zI`hrC`!7uEqCbYO%3=gw4%KFU=+@!>_!}=&$y!Eo3{nid2p4ZgE{nIfd4%dWUq*^5 zQ3D)E0j6(jrVXSGx(u|!z2<+PPDwo~cvLJwhoA3oQDS^|yfC+LVPCP)2{ktRfv!T| z@HG8nbCR!F*b(WCgE1U$BuLl9pd}w&gf)uoT<-T}p?h7I!>-GeIv?y73$T&x?CRY2 z&H=AzWP2doex=8>btx@VI2`qGHlAZavb3Njp*=!$XMNrc)g<+6))|`hSwWxZTs8el zA6Aqt_^w^q*6>;9^+{z?r8K_M#~^iEOukJVfTXB%r+%cj^q6K%Od(J+v{ajCoJ)!FT#!}pN_YQcWH7jNB^4K983&ijSFl-HSG z5kzx3>Yn)#ZE{HWY_K2y;D-cOWqh8hOyQYsq#ZQ zcOvUPH!|nkEt2&fQsiC>C0@DIsomJ;o4FvCyAxA}WJyVJZCDP|sbX(@Yg(;Qbu?S} zamsX##xF0mr*p13aUZpmY44nrcg?Z8pSI|5z~R7gT0;9R1+Ddk$FTJ!wFj^UV`YNr zg=gxF=+Wr&A|w&E#qEl^)HdDubW-PTt6<#CkuhlI&N?0=^Ec0)ob|q0HQ5#Zff{7= zw)3Kud6yRn>vHJ}Rrj~50nvlICR)gvIvG7Zo?TwSs^&*&ZaBNn&d$k{l&<#yc+ED$ zdt99b&)`bz3+@QX)3=qJE*cFa-);qG*ZIJGnnxE2Jf2b> z#JN2k=ln9PR|nd=PLZ@*1a+B1!>?*$3 z+%QNv!PYc!6z7T6l?rJ!_&xi1RP+;;NuLXX;e$3(Mov4lu|bWVoPHyExpIR=cPB?K zBsFsi+-4PJ?fj*Y9bZLZ<935*r|?D%+N1N#61c9q>}^zTIf!EQ2~liWAUXQu0+bRd z1E2jmZoPkFe`nT_a%b`{0a;S+r8JI=)iH!?)A5!<s1$E|Wf-wGP%EyMm=Ta%zaF-d0|=_h}B3+3_)b03sy6^%v5 zn?%>8#1QmN@+tHD?j0!0Cq8Wcf-(~>cXpLu8oDR^>cXGbCU6s9vY&Z9PjgyGD@~pH z{D&Bo`1v{QuDaRo`p4p3N>}V}T{u~+r>*ny_Yis69G7D}`-1!1OL?_WqS?=PNx5gI zW28Ku2O*FcBGL!fF6t5=5JVW=qfkA66pGT(xT0{nC}G7X?VNl24(bKuN6Y-Q%bC~zNDZpK{L)FdG5Bwt=e%SA zr6np?5oJxyGP9ntJP=uzx~+3{Epw$fc}P)To+DoIs&klWxnh}~Z&*d2|IWC2r~<+^R*m}Ca)b=WS&BbhCMW3O3X&-Jc2 z?UY(7dP%w^_y)^$Pw?)s91c5Ij#;;z-2A6UMKcLJSA^~fO|~y`aMvg&j1zC|>S@nc zM`7ir;;i?Kj}4~5p7>{;vIH0QNq+pVd4n~Nkatu&`CIaS`bfj?q(XmXAWBJJ{NG-( zu2HEMV8GV6_s1>tRC7?%&;Q}Yp(b+XRS!Qy^4y)3N%oQR9}T;V;H9Li%?kVsDB4qo zd4K#i^2Dw=)C@lA!I3kgmNw*yQgOeW5v+rn1NM*?hjPM*SFG@hR?qPKmN)RL(f$al z$Fr)f+dnw?b3Gd@w3b;&T{mV)jCc5)ctWI8LgL|LX^DN1p`mPI-kq|-hG$X%5s_t! z&sd0A$|~e{?rsa^RKCdj2t5BGxJvO^GWd7&HA_D;>VqZGBRxm;B;2Tr5aP8%-Gl9| zeKQ9t3yRAI#ic(gW)Clx%Ej4s?w`NzSnm*xe9fvbh^#_ZFCZPq*VLu*Tb8dtLTcpw zc3VoFuq~+H(X!7Xynl=qBXj17U|yLGO)RpjOLM1N{2|A{)Ot$^Uaev(6Prwoxx^B{0=d~mU-1iJf4Jcw(^tj-Gl>^Vhf7)mRydttk8Wi zHRb8ICvb*W)vrYVAdyxNk^|3lok<*^ThjV5F=b8<7VK2`@oKvTsoZaec&KhRpBg5k zN7aH;>t#~*C32^m&m7L4!2Z<13D*V( zb+F&`0;^_H3mWXZ`DV&>?mQ!!&~lI3sxN{LfE5tn)uTdZjz^8!S7OzUcqy!t&RX2h z(vyvjWq%n6MR2xW=HunOJM;QFu5+gL9gosU?87OSmXm&b?&a$GeAw)I!KV)=C9{UL zAImysPRw|F+_egtd^MyCIdbPzsQgp5L#xg7yi0R;$%rH?T&&ejx2K&oeEGdG1A7*i zMLyUEVM??gez>prS3ZKVhce(}Yjx_xiO=ZX{c2*u$>)c3i*nXj1#;?S{x<*QH<;_# zloKreclW!-cVX|0s{#{({Fa4SA2c}1e!>OM!+LL3Zs$qtJWfy%ODQQRIb|vO>3Q;% zG%_xswWsaa{L0<<+y@y8EX!V5U3_*G`_3!ue<3e@{7Pvly`wypIJ^B}9;a94ccpf_e7L)xC8QicB{R3^?IlY!%PcNcZ(3?ytykM`KJe^5C zLq_jRZ%%Jx?-#@LzM=Q;=RTePyWu#sSgDu$E&JlHhK#V8u$-{QurJ%vt={)Pox{_W zbRD!<^$(|b;pyxeK}j>JTztR#iFEIt+ACIxrweOnCAq8Qv-c<^$L;AUQ*YZjC9Ix* zk;a}<3|(X4a!Yb2<+Jr@tM}sR`(uu)W1u4!X(zd4)%!*A@bm+0Y&}ECr;^SM+9q?I zU{M_j36XJfi?nl+U*VSCCA^YU8((TMU;igP@xb(1Sw@4pJvshrKRK{S#-VnmrH_s| zJDpI@a5F8F>P^=9jZ0jv@)HXK!*K4`I0*m>eW9 zYpIIh=O3U*$BE{){E}lHH*Qe;nm1tEPR2Svp>!P&rLp!s*+uAhwX`G8Lj8-VwMoSI zaA=`PBQGqso8`ioZNpQ5K8OtkEp>%cMS@%N|u-HCA$;dcaUph ztE8;wpmjKFlSuH7ZtAsB9T|IsvOCH2BF(wEv^9%s_6**Z&Hq`F)*gQy5e-O0N0OEy zVZ{%zw`x*k2s?Hdp20_mZ%fo$4lC^O$4RMkvc$cqN*ifg>cZvShC*|U{ zvG!i5&i1zc-QdRxbw}L!6Iuyu>aKQi6;a3I)v9C#bJfho6~>U6yw1OLWcl7Jl6J)b%Cr6D zUuJo*K)58H;Zo}&Q&G75>DbPRL|5kyxH7)XI6BMmO-R(Dp|kVyb|#uApRgn6NiFRd zLzrA=GaT#7&gddzY%LOfWf-!hxRnCO^zMdUHOwK#uOlQn?!LK>pX$@SbaXhQM>cBK zV6hZ&tf%=%sO73d`_PBvUL~sdk>r!@T-MVti6&*SqvuTaFqT)mSPI4ao!%aC`@EDm z5^@r{wwqJZs;V5es|izi2SS<`roOaxJ3+O}?c-N|7Lop_gvUbp*&=fDqbYy-FhgD| zWpA2spQg?kZ*>Jo(lLdO!W$iOtqnC%XNOJ)Od%q%E`c#oi~$e*WmSXGQ?;7cqjJjy ziDe{_Vv=J^Xnxe{pqATtvdqUFwZr>tT}0^FH-sT2Y+8G6in^?t?90q*&Nb?t)n=P6 z?4iWw7>g7;7DpIf&u}SLZ8&^%cK!4IOBc$o4_g?ev7RjnTXV`2kh;-DOle!iwE3;h zYGmZUlZ~k!yRdTjc0z!_zxf-#fpky`7|cI@=l?58M`hFRZ0HOU{mT5~cm9Uc!F+|$ zY~pl~aNu=DA>^TOG=O`+5g7UZ>~#h>jqiav%&UKZI-o1+KX{#gg6bgt1*(HU{8z7Y zgFArgLdzDzenzqQ7IO?dFJ|rpX3a3N2+tPsE z!_Vmj)9@Dlhe%m3%CE3jJnBY#v-c)c(ucdyk}|GMG6HWB+Q|KyO)+)$FJ66oH5Tfp z*>p>7FY4Y@!F%EoRWlbWN`c*UZXS~W+roTwb?l8)^s{`IR96iR30sm8F^?^8EV(o3 zrNIy}%hK&@L+M&o#1rP%?_T65`Bh7|Yo@e4uM-cvI@60|=p6KvszymxXF&cQ3!7aE z)YnonV(>)>y2bsA5g`7b;G6%f_5bE_#$ccbQ2xc`{1dchLvcR@5D?!%Ye>L=n4{L0 zBO#z0;|FI*U|CzE4v24o6%ch`B8NcCVbi&->+NA@K%tq*>8g_C3@QFJiyz$Epb-lf04@WR2z-v=85IJUQW4s*=&&<5U@=H%i1y|jRYFhE3T%%-$3Kyplp zp_tMG;V?826s9zpQUwympn)V+HnUKFCBtmGK>{?#V8Cr)iuS&Cly7WM%x-`rQyENh z%xg6IYhjXRD(#!104)S)1FAN^nTp$B2WGDdb`zISRb-~Hh9h8DI2;8>DoDZL$6&By zlHX0(-<#q9fk6>i2%z%yfTHE)k@8Rn=w=-XrhoxP$vmL$U+cg*SkbrCfnGp=%;TSS z^78Uq`@$7KpmR$*6dZvC2Ju5*&<+a}^g|s2t$^Oz4%A_{@25-^qaHZ37Up-ppmvz&U^dOh``O z!vm~D8*3y)PQ%^71FZPWB??@qLd~Ej;CMU%jzOW33NTf;Dx4q>Lz3_$6c&TUs;J@> zP&mkcZ^B$0R6XoeoG5lwA73}998sP?1VoUk2qXawG@^in5lI+eCU_MR9Hy!Q0KoRl zr0X8;Hm(LV@HD#lK+WWAJUs!dcq0Y7D1vm(-Ch(y^sw=9_F;a5HRM~0a&YPBZR6?W gZ07^rTrD=<6<}XyZ`#JQgMwoaaEQ1#QHKQiA3DAr!vFvP literal 0 HcmV?d00001 diff --git a/docs/img/fig_geometry.png b/docs/img/fig_geometry.png new file mode 100644 index 0000000000000000000000000000000000000000..8436f3e1e22af0321cc0944a28f5567c1ee6e8fe GIT binary patch literal 189273 zcmeGEbyQUA`#%mNA*CQK(vnJd!>FWmiL}y4cY`A$9Rkvg0s_+AN;lFC(mlWcL-X5X zzYpj8SYTzdi(QEK++k)rgYN0f?o*2ZQQMhFN}0q!d1!La2Wdnd5)NH2zHH2TUlu4Y+q769q)NS3;nQ~_{6hcM_8l|6Gy>?;gZb++G+)p~ zUb4x(#`m}BLe7%29YxNPEwbRCr|Ts821A(ZV2WnAiLlqK8LaJeXa12fr3Lle4hot= z-p1$UB0e;0@i7iYM)y|=)E4o3L@A{Wu9@w^8hKp&3R_0XE~52MV>>hty=Ou%Kjq1; z?ccfHP6-uzpw#|O#CDdW_|YPh6na>!v4{zWcZBOe>X?~vkY{9OT+<%r+rFL~*h$N( zUI;4N)X!UGOBe(nZ}$Kg}COC&S^^#}@^)n@6^GmTp zw8CrHhE(rZuQ4`iDk20nJ}JbD@_AfHL5e8(*|z_Nybp8^xAMBxOToEo;ot{5xWO;6 zpWS#xNh$`;4!`~$llTE)IN9P2SI~tb13n7v!w$teG%w%xW0(r7BEJJIDH5n+o+Bo` z>V2f1O1?@ygnjdobs8%h23-@^QxcvxCQZGCyrhV2jhNrK_)vR&px=6Ih_!E0fD`7R zJKRTDOg?&FG8kUoO_ zC@0R@|HPEn;g$elkHkuoW<_)5^PCV-^b^GoDTwTqw!f(SZmV?k5QJoZB0y!E`_jsY_5E1b{oZatX0 z6^&DV3+t99`Y5{3YxZ=Gudi3-gQ+s{Msd6R51NCT{hFO=aHUfgAI3}0M~;MdO0`MF zNT0v&GbAYHACIYh$wX^SM?q8Z7V>shi2>grkcc+mZkk2rkTQ!Rxr)d=MS?tnSPW9% zVGIHcISgI`T}E8|5d1mZW?>`I% zno62Ym*S`q4D;vM=E$fqs`h1ft};}!CrS6cw@#zTf(SoWRdP$xKU4PW1NBzhkL87FUTCOy+YFVg>0EDCx%Z;nx|#er4nV1rATGI6E$V5rVRsW8!l__H#YnEb2;;5h7bCeHn#hPvrb>7 zb?z7&a&+D%3&s&zGsQXU0VkWjez$@=U;QaczNc@2~Kcgh>Ju`ut12&orK$MB-U(JJYH7mR$sU}I6rc_nm%Sf;Ob%j#8YSahTEQ} zi*3@}sfm=wnQbLbJyx&cB{M1W5urP?8xwmpOEhBFi!Kqy;&&Bl`D#gO4cyZLLvBfq zB?lYhZ%QJKtrMpCPisPaeYEoqrX;Xc&Vyq#~ zfZb2fWHzw|OVy9f?4wP z^67HxKAJI}S(}lp)jJ(K^7pvoDRNSMlD}^WeRPa+LU7`EntRlC5_2kw)sHQOnT>fH z-2qLTe2nllItKa#;ZWO7nc=@z1==SimB5b5>rMaaupyw-)MA6E|=MO%=n(?K> zjA5xQmaoOk--}HZke-XAo1Re$$NE!i;-o;K)H*?t`e*slBirqn#*ofR>&9 z$35lG1NlbFT|6u#otW*XuKaHN?yjd8bp+<>`vv>D8f6%o?}~Fg%RaB44f~iQj*@E1 zXeSgMRygu(JRYgjqiCq-))VxWyJ*A)iw+Sm;QM2o@Kjo6pW&QSU$$+B9FjO&pXs*W ztJO4AQPtcpi`DUPjNHmBmRm?r;dQpQohr6wbjp2Z4&mvu^m`m7v-kLVxu2!=#OE=k z%7ydZ7Quq3-g}>>#HVPdX&Et@&BrY(-8Z%imTLC|2}B5Px|beWOhIi~O&~p-Pmi}oML{+4OUH5?5*QvK z9}%eifR!IPa+5;CX0cVNA%pqg$${F<{Ws|bbFuqlL3$D1Yr^Dqwdb#)0Vk0X^e`@E z63m3uOd~n!o?2rK8GV_)k8>(ppR7KGG-zJB?Uc@}1#;$?lqb?A_lOF5u|TFw6AHJ6 zk4vFQTS1c)+ESG^H3qtpu$J(k7*-VR>i)H9*X`odr9pM=g2d`|kFw+Vg|bW*zx>u& zQZMsOr%kKsZHKz0H(enk$yLH8)^0OG-d&K950l-KquZ}%*VaO|>Lx2v6`W%1VaIjd z?$+BYGZgNH%T&wHzfpgX{18?~u6}8C=z364={QE0$g7R?Z1 zBpV>h@w9@S?b|f&gb+dXZq(z~le(+z`0Y`+>n7BT9A}ND=uo3m`&f%vx6Tqa47xb) zoF~GzGTvr{i5hy|K7D#5y)c;8*P9~P(CkfoM{AD&R<@ZiUNHipnxzOe2?@j0xI1FV z`10ZP#CVLxNgo77+-s>b2rv1(bM%t&xrP@MaE}y(hE)_gaS)m@n#-@TeU08qS$z;0ndt<}`s-&w7uSeWQJ(8|(X9Y57Trcl!yW;FkQV;;nzS;_4g$ip z6f@PQ4o~G|1q`e$S)LnO>l?ASSlR%uMnDj95da=q8aX_tbg{IsvKMd>zWeESV zuUYR>et(IBx$xbma*C9XtnG{_xmmba*zSs8P*PF~*%=xOD2qw_d^_+z;k%{|4mJX; ztj^BPEY6%P)^;YW?EL)vtZW>t930HRE12zFtsI`aFk9JE|2W96I}01@?_&dR6@vd(K+(*_ z$l|G(nWd4HJ#Y>Y4mNH+q3>_F^3+i)E!GJ`j!}o*KOiV!)_=yPr5AfqD@DIa}pTOg5?p%izk`@REAOvZ# z2dXaDHm5yn)zp&sF0ixXXkxSF9vWefvOfD5+{!G&MScTPAxkbBSpfCLkw}!tKSC;v zPklwF@(iH~!_Pb(8?VJ6(?bp9PlwqGdFe&KmiHW=o&wibObI+YecKItWyZeOQ)Fcm zcD#~UcQ|2Q(C2Q+x8evhDHwjPx5ZeCc8e1965$#m^8f1rSQX77oFP2WmXPNEc-8l( z##kwp8h!uo!vQZ;rlC}#6!&3P{ePeI$C-emasPL;-y8Gax?Ks>|03jnQun_I`7c8L z?;`zQg!~sF|3%0TQ1M?b{Fe*=<-&it@c*D8|6$Aju;p*=^{)^9i;(|6jgZtSltatF zjZ;{N`4iz3;x*~W`ENH><`*Av1|<}iIiD6+ZblM`_bdoJ-&%6AO@%tn_K(VCE_WJX(*ASzB@37-^|v^df*2xKKLP7SoShP6}I#pVVU~;X^Lx z{4(|yvRYhydILKYY;ddm`DMfjlJX3C|C3HAuusa~bT1PNWY?6qhQ|%ByF6>sTwv2SOUxQ20rSPRD z{eyVlAqqlrL$_DFSe>?W@x1)7+lFCzAsfBInexvKL`Ad&wLYg#jAM+LkdN)YuRPf% zD6?$fEvc0)k08MwmA$ZH(q6; ztMLVt=CY0hUsNzDV#f_p(Yz|^?Hb61%Q|+L_)6eK(P<&IIo`4Sl=slg=mlxP<=U>x zXznGMyY4i(rzmv%wr5B9Z4N`J&?j4Wf5pY0EmHIXHl)*9R$PoyW-f%@DwxwfBxJT; zmscfK#>4BdU?G1!%y^i!^cie{x3w`$y{*8>xR=i6jL14jazzhU(wU+3!$5Y0{Yr1F z+g5Na^qiuZisCe30vQaQiRVtehqLMKslGYoFnaMC3>ATiW*BeZjHWTgcy9m?T*?E) zAR^Y#-BH;<6J0mY%NwGQlbx5f(jLC-R$k-(ypmNXqy)j?2+)%3%;2*0(pPJliMzr7_>pKD}*Mvbo{nNKcc`6;kHn z&>MaV7-Kn?j$RHkXQ)J0NXw5Q{~fb6)b0jojV<^XN-wW1=n_%}FF_L_gI~ z>phjgIH{_NL(#7;kk^$*U(&u_ZYs1$(18b@!20#}#t>xqUeo z?{cT|R}acc2=e-b$`_<<52hJkOnO-xS214!f8ZfZpq`x5B2bcj|2V6S+2cn+m8J7L z5^S3RmZHSO6jeuu#VI4@pCXa5ZhYC8yyQ$y@cstwDRzO37U(%_O_Zq_xWZ~?$+8md z9_J=go1F!7_L?5p?{AKm9E`ME9n1(`1C~%zVXo2H-ft$R?X9rLs2hEbVljmczhQ@ z6)hQ1)_@F>IJ}9)P`>ahQI~3n3}jii;u-^Z8GYy!Sqm?-db-9_L;J8JD1CW1DUR%Z ziJ4{Oy%5pEQIlp&h;W5k3`x6~hdZOjo4F5aDh9LLK8dsW^LSum^hCGyk4j2PU-Nh{ z_>b~Rdhfs2ELr#1&!7ll(W&Oj4e>yAhRg_8J8nc5#GfOhm8Lg>X~5<2OZ4cp(le7) zn}N|XgkzrG!2)Ubt-OpC-l+cnkmyMqU6$XA?*GKzt!#pDhB*$(a9prk%Cij{4{f(o zA0^JS*N6nVq1>|q^t~s`K5+-|xKoM=3!3yk_TxL;EHG)Ec%#KCqKa2wb>g}??b?rv zZ8?UTbr+OsiSp(q$;djrY=l^-zP6xKp}T`W)ld zk>C#GP=A#MA<-v&{H&ob91@6iN*-LH;fk5?F5vLYSDaENb%uT{)Sr`%wV>|!@*^M3 z#>>rDrLyKVR@2pd=5s3xtE?AX0;D5e3g5PX(k@hTG%!}Np-==FjwwbJLx(R{X*EcT zDQWLBwilbo4j8-i1(UR0JCJHa@Q-Bu>4S>=vCFJW`hw)bbXM4lgWKzZ4~lPm-e^e1 zoxFfL_h{cSc4s0xUArx0Vz6u$S|>CjbbQVSO$w>ay=={}^&IB6?b-8*z)<*A>JR$6 zp=1Bj+jfYq1t7AZAe!Nr&Y_oo8O`@BKg6E&d2v}-C!&&fBdhFO^%6S&m9K7iRHXNH zp7%zX2}KamcG7KkJ+J%=jK}D3s|pl{>EJQyp8hCidsls>&vH~MMels;X!3$0l%h9o zJlLz2Fszptmx9Dy!I;DkD>y!+!C!_EtBcC)S-nR2CK$JgCbzr`08k&oay8 zjPZ`TS6Bm_vFqyO6Sq!x!$i-OH$A_>wkFX{=)X7mEe4w@anX>b(2Hk+hm`t#`785r z6y7Z5mXn0j)sE(NdQLOyWYc};x|?~YkvR)y>|39Q+0*vgUrqi`rncc9O|n2KwXQmL zW_W;6SB^tu9f+cvucJSg$hVPzZF~oxW7sk-V_Ep?(qa%t(uA$~D8-YO z(An;QI@WV_!NV^f(ceFsi&Bvv7yWcJs31yK0QiL$B(K)|wWE*&RZ<^i8O-nC^@Ze{ zXlvk;d3^gkZu_~drqvQ^WoqX=<1*jMS28+ww6)>Lfg(Uk5i@h~Z0Lq)TB8%mk2krwa{fU}&8y|ya!&6@Pns)WP3i;C`BbrLQU~uVLiVnaiIA$#mXsfMlo+g=xEfe`ES1{NS4%n> zZnHL-Swa#U*e(vWL_-bwF5Has#0%f$1)W8D-yweV=7w3>3{4kH>EXu5tmHuY6pvyx z1t@@iTYy@mc|aV-l4wOwuX=kx zga*pRu1r?jXoH;^)o{ti=AVLOp!bdUI3?tR0cy(}{|0AhSg%jI@pO7+spKw=ps`aw z;s2IZc%HEXu$m$BYd;a8P#xf#e4N>ECQ+h9haj zsoj@ae+fzvNbT;CZ&fP3>)J*QH5kg8At=eee4?~3Nk1V$8Mky-1G5Gg%W>NDm!$A4 zBzT$zJ~M32BUY309bTH5gXnyluS+^i zRig1PS%bU|q>W-qAP3^o{r<*H1OmN!dh(j0`&fbu+jWyMEk-G|3@^xMv z7*9fA-bWclHG2u-oOQN3g`>%yO!5;&Y)@t{q17$$czcDYRc;YrHkfHt#^=S3N-$1L|HtRA(a^WP$em;KbGRbuY zufSieA&%I@S(vM2>gwJifUg5+fD0ib7Nmi}wSzqQ&7>!|O+6$C+we@M8Z4?Gvs+qD zCg9+26;r_6nWlh?G6U;YSP=DcbXGc4$U=+qX`CoC&&bTAV~h&Gqn8jD7k^UqY97Zl z=z|Km`(}|;G>=(N%m3t*n>YcB>^3)vH%`nG(&U>(4TcIr_Os1L zV#D2In$C9FSZ%GxB~O~atgFn2Uk_E4z0w*%bI*V-f*LeNr#OOmbLyyq#%OLlsU&G7?mA?QM1OTFi5=LrS@DfkfoX}N_4lSVO3E+#HYu@wHT zcE&K~%hJg793PVG9qRFh5MD3V_J=zRPViy|}^qCqm6 z#DiJPPp!CK`LJ=G(FdX9>$vK`{Fp&u|T<_CJF;i|G zoS;+4NQO1Ewo)P0#}M1&SPGvvJq|z==OV9G_ClWYCD4}C4tg>G*es}MC*hfH? zt~E8_CZT#&I4TS^kLY>6(AoDopLx3L9nJI6_YXmT%1c%9n<|bBF8@x@xRP5ALih6P z-Nh~KKD%%5OW<5ddIazrfQ)fj7+`~o@v~v*#gVp;)X0+Khr7r(KVdOO@jSOPa-d;K#J%$)k z36iSEak9&1YYyLLs>s%KJrm36qPPb1)}eJziCE3zUq({0)j{c? zIwlPCr~*)(LZPD&bEa^m0G%p1anWB{k9IG1q&zl;T*^wy!E;_uA9|u^6dD9zd`;Q8 zoZS3Cof;>j($QS?g4?`S+7fX$fhY7*H}O#J9Mw4cy=sx8bT5%XSqyWLFO z0;%VRSo^|;?R$eVI3h4KowZBlujGQF2plGwFo{1*WrG9Y)2{eRgRmT6!vKJI`gQQ6 zwS?jh8N{Ve+E=6e`6p59dchNlQ@^KYgIbe2dGg|v!ibaQmO=MaG7V9LeiW+CpI1W@ zfT(tPnrnMxl5aVU$6a(l7Sk`^G-<1q6AWooFahLeuyCfqsQ>)no~_gdme0slSlIa#*m+{?{C>0{R>ey^3*n0k zKigr<(bgqs#Rxx>&frA;)1tfeY-e4&+i4Y}>SsAh-zA8$xG-nR*m#mcw~zJ8OiqS=~Bsi_N1!iVZS4Ut}~cy=UXaMn#4l5qWR&0Ytb8 zl>R4Gi15^zQ#tPo>Vf10I@cFeMl2p~y0Ii5@2=?2)_aJ)-@9E`#8YoK!EZZFEjW`u z_N~eGK-het+CD76&4hH-MOoVu7C3PZ@jShqay7fqxE=tDjMq>4Fm(v!FL23!(U>$~ zX4!B)9k;6>>dH`^tR$pzM&E^X<8rY`3EHIb!E0?z6r4^F%WeL|Vycoo*BfZOJ@-|B z9GgVLUvRd=Jx<}BB{LyEKdp5C7M6gj1PQ*TGY>8CCUXuEzHe23)P4IM`=Cnmu3jxO zd6Ks@>2#WFmJ`?2S_!nF{A-A2_>v9`rqC*hfRLO%;^k^UoLtdrjN{)Tp00tf$sS}FUv zfGd}?x5oz$5RO&c%ZJSJ=4%{yoZG(l(41UEZ~Z8$OVE~lesJPBQRn~%Z#I3S^X0C* zrQbA?0zoIpt--{R1P%qdb^244wi-(qXtzuS!G_1+5C|WmfafZwrSkx($;5%5+U|;X z0*)5%SZioN$JIQsFjS1`Y7& z{8#=*^9BZxj?l)^@>97dc~5atC@a1duFr>l!Rn6s)x4ui987ikgcv<%!T@nQ22`TovoNZelJDYIU64}I0{{8ubrCRq&G1yyP$GDkneagvZRv$ICeQ=07K)uJDgU2HOtHo9}`(8}L49dA@af#kc8_$LpfYFJq(Xa>}>^kyVIK zP+rKv{A>~lCbrU>G|BFkMHh|*HtLFQiJ+A!ewD;$tFNl6YWSUkY@y^7#NV1tY2cWF zikg&w!gg5Tww}_NL`0K$P2c@JGuTx-ZHM3PQ324Jw*&TRqS&) zCi5O%=cPYn<=xrv4$W|~3Zy4Lg=NkyEihq6e|xo-s!6v6tug5qYe->!o>}x2N1T7-`_@okSQp88;Dme;7OfI3il@#vI7G^ z_t0y1O||tL<+V%B8aUlOe-p6HaE+N(Ej&6|e_~GKO;K~drW!Ga)mxH@XCL_1qk^<7 z))(OTDTX2^0L!nI`mp|N^TM81fe)LQ<9$emqX6O>{!y=UJsz+WJ!tjn1-k53Q{P9FQw9j^7mjpHD z6`_~&mbV7U=Bx-kVhU)WN_&uj8hF zQo?qiP8f6O*r~*(=UM|tJe<5UJuRgC!49a|jJpBvg%-o`Edyo$GNOX$`Tj-Lek9CC z4>bZd<9Maq4$jlA!IL7?D8N76D46S=98DV3NCg<=gUGw z!$Kh4qwh)deaNgRxyu|l5fGwb z0JigMK2BC06xG9k6|w4eJnjR$)TSm0295>o;6B!ZBGYDG6D$k4BsxQCL+i zMc-!E7kg9_lzSF3+m?W+eVnMGbO}j#6RPqrtYQitBkmsi zp`bX7T3qbDDu(-QGY88QnQ(24=!VL7q^ZdqZNyaCGk?$UNojvZkSdBiVKBVAW;b)wcOmL}@1i+1*K)s3ygg zOV@of%eK8sp>-IqAb{~Q%_;g-ZxZKx4|mOj=|JgMvw#&5DDu@Ba;VoLigm2-;e0@` z;neWCkBsqx&Z~UWk6pDGzILeGG}cz-4d+tLrZ`=;ElwOp$8ABx(=* zR$!M}zbp)~-v&O}gw<$isyZDZoS}*b)N9uTjsNw)3O|PR;-VEgGD-6px5xU%5DkVbJxS0 zCpwf2-)DvNDC8{@CmU70?ZMcQehP1l%HwG2%H5R9d-aFsdiC6sAa*89wb!xLV*g~E z3>pA*q!n}!kWa-&Ic&CXIJWZmRuVa*jRb2iSvWcMBE;+CW6?c_eYD-9ugB3k9LW!H zA9(NL(iFv4b-stJzdrPl-F0s6zSC% zmq$dt!(0f2#M5F!=@HwGhD(o*hZ?AGDP$Z`(0Mx;5OrDsd33M*9nH!9+F;g>oztpj+lVJKO+#>`KAe1I|DMjhRmg|ae$QxT($xP z=jU^}48B4xv<2Ta{_q%Mh+M2K`P}s<6(m|ES?WD6Rm^|Oi{@=0sPI;bP)nudWV!J{ zVK_ZqTNniez!0rRTuEhLf|?L{ryW#;_WK`c5G?m5C8%Y>=`0ZgjS}mcxI1%SBn<~m z3E5&i&gx6$e6cr3juU5|vnsaG7L)p_A(gF;KRr5av72|KRb5_?KHuF?(sGMs_jtS^ zv?fP%VtLx>;T7r#jwS-I>t3cN&{N65e`%l^2{b#R^AM|3*Ry9XVSI+((_3XKb6(X> z7*ioQWT9a)*4}I{-lQlhFj}gV>bC@*4`QzfXvs{cu1?oD&t%a#Z;rjSzrUz`UJ9iA z6k>^SPhbd%`6c8d6bKBslvE-g5C)V+Ul7%7gS2SG$MYB#oe8VikPckbckkZ0N=MMX z9?8=fv^vkL@Tq}#yMN4jwu#p%u=&ioM(sfz4@RJY2N~jg>Ls(`j3-0W*fYPb{dgfPisAn_W(x|G~?Ft zlxl%j6Nkp0FypPr74P-28oV}!CVJs9po2}!WAJaQ*1T^#nV2ERsjJ`jITEc1@y8}D z#oF#Y&B-{enhi=iz554M9CqOOKTO`yD>YTR0brmOiBx72Hq#<`bAJ#q8o<^pF9lXm z{WyMfMS;pO9zb8DQEYd%wp)MR^rqz%3Z~{qq7Z8*=yK0jAdzcPoi;|8BQ$OaIBzFb zSh1_M$KvFa8%V-Zh_R;ySwtNAA(i^;CG}S-JQ_X=AVAqLas#?tRAY+%a78`-ikH3X zX1?(DYW-~RlFS9W{D#|(UX0#vKe?>zhaEe8p}H80T%lBFYOyp^=iUaCBRlePgSJq@ za|3TGa}FZtPz!(2CXHhAZ#&??W++<;<&$W#V=5q^++hx%vQS9wRxsZ0ICyMTUZ7RM z5&_e9d-o#Z5ld6uZK*#TqkmT?hV4055fXV0z)EKvlbT@i1vp* zA99f&OU}I;J1S5Y8>Z!L8<`kJo^NKT=uLP#$cMK7}ikN+f02fxy6x?1;2auK5rn)b7e5i2fdwnkEJ|FR0_GcG`+ZyhepHq9|_V)5;b0-?oHDILW zCHmg|3-ue6B1)i6xC{?}Ddr_^Jiuk;aljKW4@6aRL}Mab?>-6?v~-z^QVSaE@+a*a z52bs&&QDr8-!W=sV9Hr@s=r#P5h2q%?_g#>fD&w#GpP;_UFiQ%>Nb$LD**hH=lHs3x2}cLUT8c8Ase#PcL!cPCYK^_9HA#3WzY zhD0V&MT-qiUV7s;TvG+`37}&OZdV>1Ry0f}Pup1`8nK4ctQ>4j^72@Wh4YS-nU6}O zhBaC9bSQpzdt>g1Rqs19GY% zE7UZAH2w^bIQ{{+J}?3i;?O4FhaLrPJbWr}ILPiIzlB%#r?d34*b)@i+nCS3XMJPn z>-5=K8F7o|JG0<~tq(H-u0+gt6*U^~wy>MS(9qDLWnmfK{+?6nTY}De*XGOUxBY{1 zS;E8{PN81D3k@z>LIRf&eAPFNU0=}DtX~aUIeefKit+j3Q1&AtTIYonBs$4!!`egr z|6l{H)NifaO6{eWO>b@npC7%+5Wf6ljJhM^=~WH za!!oB3ZRUD_E(yhH3C$c*NwTCBHZ#9&Aj?uGoD|QRVEI%rv+C8Iy!C!8IFU?T?qDQ z=8tw43;?)i`IT&iQn4H{OlVK4JC2i{v7Rx%@~*td@#6R9#-*1UZsZkFGG*7r%&y<; zznoG?rnUO3!@2iA3GjCnRMtZYKs@-!K>ZKQj}{dIXpP0hkHNPXxc>-uAGePFW`cad z4>Lhl8zfL)8Wwz*LyM)5zr18BIwG)&L9{Z_hGhJ<^Uc}W**}C9D01iWy;DTuJDlzh zBWPl2XbC474VJSWp-S|$>NS5L@FUXVJ@C7_JOKa^i$_~wLqv|h_m{c7C6KfQU=_Ik zYQokbcU+!$Q5l|aPy8HNUtUfS9p4YMXT3V(4(VC%o~p7h>BI*FHz9CO0#2>`0cI5WmO3mZP8ONP!r9P*F?tC%LV%J z$_7MY_t?iyUyVmvU|c5E@XcdMbimypF<)TJO2QizE~ZZZEtXCk7WBczm_3OI; z0K0L9Wo!BZ_~sZYy1vOFV&%t;Bx;?1aDwL%)919bklQ2>ApCNr9oa zkqJ6|7c+%QTY`w%`qLiSCs$}&=PcfskVj;9R~J|x4p3w>(pZ817YCx+pbfjjeSf(pVbs`D;7F`v52-2{Bs*d!Z%nWQd=Ohi z*4mwi9tXL4Jp!gXbDx#e)ZTqB?yZ>kZISyS5NC6;uNRz*n*l3c(CMQuCiHbl$l3*l%F|kM@SkQm!V}B77KfDtwLY8X_|l+w(840Y_G%H*>=M)1vaKfjkIE zqK}Qy%v$;rrDhs+BqAOziJph!KT2XPh)QOa=2LI7uwYJZE{XRmT*152iF}u5P-w4V z)a&8+22m%`BCb{)0XnavE)zQp^1Zj13#iV$0S&oAEGEK&SC!JoaC3VtG$|RF9k~Ja zP}ra!qy%_-J+X=DMcN0pjR(+-Aw+OF(bF0yOQ5G2Cb|6W7`CDV^5-XZyT$fe)SDzc zOoU8N!z*;FR2dfCg#)>SyjHHsPsWD^^(Au$-ZDWn#aro+;FBv1N0f}in8o25%0yvAe7%NQsY#hNI8JVyR7@byy@>&pvMdL5OTJtpb3bkScqCsnL z$GY=uz2`!N3`@s%yTJ==c$ly_tC$ZNMfwESzkp6||BQ1Ok|7xu8Xq5Dd;O;*Q+x0) z8&BDP8`y!6ugWn9;0Qv7MYA^jx^KEtubP0Jf9&SrkPOm7`z0Yd=6v=+l>|V{cb|F> z6Q2VrHvnK$75#zt(#IsS(l^lw?-83mzcRTKs0t+FTdZI!0PgJ}`cAbojL?&1$tT^3 zq*v$-P_~evTinb`LY|O@QxL0y8;V{-G@?>$rxiO z1Htwr4o;g1D-TAr3>Mqp-cA}^Ur5SG0%>j3jMcaA(**DUJj^M)~foyZ-I1X9HkX!!3eZW~rv^f?+ai z$PR03VLR`i>j~z<0Pb2?$PS7Fb^cJF@0~3Mv;oLj*@brSruvg7%bV*zbM*T@h9DNy z?mezUzvgrOW?K@n5E)-N?Pnq&jz4DA1M`HwS>`AbXIg(%#GUTkIz|nfvUM zk&|;jR~xt`A`a1)CYM%{rfUxHDO+KRbb6n)e9xSu?`#0c!K70iW&gSO{*|Of8-lBR z>P;qy$er@f_E*0usi;J9<}173EePih{#oHWh%1ti2k3U%sl%ER`R$9p)XxVp&M!tP z^ulWs&?Z7$2c~iPtZTRrRMI$kqcn>RRD?+-|JerMWdRpOb4E1?84W+65k4i|QH)fj z+cD_EsGi@g_F?t#ow>F+>ZeCW^uOfmC)$4}Y%Q?F^E%4zKcMRQAy|Z4no_!Zb+W>G zhWK?vE3GkCz}SN;+xzRLl$W4mj3{U%%y+8rURfM&C+so4MiJVS*Xh=2)+5nMQDE3b zxUwCP%N&sjS#?Pt`C)xhQNCIO#n}Xu*2C4aJ!)+|5Es0 zHeLgw7L}$Jk5bVI5!l1tQ<@oI10U~UZ}{fvSqRJ**Stp=lKq{Zns2`kB{ymzb};dH zp6mxK3Xd{YY19F?DM-!KY0AUR9Djfbb6ETQbnuowWg%XH8VFz{V1couTd-^0T+0mZ|qLl_ZcOY{Oz9B zGWR!lUjjxN>I{qN+#qws6fU(yu<5gc`DL`*KFh28(f?DMzuUc3z~{c%!2q29Jqn#{ z>@7~??snow9}Ct59oL6qgfWSwBS&q{PL7XV%7UQ@8aO`?vbhPu$ z_Hf+91Q>n@HwM#u%eS7bF9JAY?Trs2f(kPTLD^q^vQg`>&Lv*?8Ian)6v1eKRU7Z} z$0C6B@hbt~rvL4WFMrJw7T!Kg>A*rOhhCY_y-{d+x^)8Ewb0%?Q+oNt+XMszaLoN) zBQ^YkC2q=3yg48{oLNM~VK4y3qAd{u@_nH()A^rk4D7205J>PLUrNPJ#111HfLebE+VX)@UbAO+v=F^?N{g84w+z7b$NY!x0gUmC>w*4#3H z|0`!Xv4poP7v?8tFPz}Bux5k|hHJ^qCO;}ul5vhxSe^If#cm@K_Tp9>V9gmczP#qT z@h?*a;UYlJtdZE`;4x@{xn|i|X&&5-Q%eOx+eq*Nkvd%F_wcRi+HkIVFbOXU&_}_g z7{7_jx0t~_VOJmKd5oWPLCaguBPiESMOh=SE~qMeL6-_K<3X+%!Q{dMfRAc-T|HlA zXZDvS6Zh1e+oqT7{Yd|eroRS@kY{kEM$`bP-=#Hq;JFGkraZuRsn?;^i=LWV1AKs9 zJ|UF&$hKY%!rud_U;culW_WBNo@=2lX5w0(=ZOvav-S2%cg7 zN%=~Ae^b7qNT7-_X_YfYGigK+j5G(}Wu&Ly{l2(pG+k);5lr^$AG2EHxy?II1WTjZ zJX!(?*lp$-YpJ53-H%;;)UR?4N*Ul12m=6w_gIcnL#xW9FPXn4=?y(qWXQ5c?}8pwsqFGMGU7@gwODh zBVNA@A>lP9i_bAq$S@)pz8+|&x=@>%>OU(0_%HoR?pOEkam_E4E19fuhSaJ^L&l1lGza+$?g(~CKLcDA zjc@yZL~~FTCQa8M;HA@>ywcI{2Us~UH(&&m*NiFY&inNwR+UuwwR@el>>r*>XV#+K z|EGA5tPIp2$|a(gJLpl>jlbMQ;#MGD4`+AUH9iJA&HU6U5^<(~k#tAEAH(_!igqR# zs$%U1fg3*~`g&N?wH+n%JzOO$f}fyVc}m`A1#=@pcns+x&g!S=jaJR#~(Bbwj*TNH-mC{ zIY$px@*0o$8&|`un($0@hup&AX&O*7T{m(RYY~0BS)%rJm8Et= z(~_^4ubRO9B_MOaJVO9K!TZh14+lL;41VU>b<6*cudj}ZYW?1K#z6;AL6Da2Mg%Fr z0i;u;TPf-88c;+Oq(d47r8}fiyo98rfP~WBUB7pTd++D!_vasXEthNEGv}QB?)~iN zdGKXi(IQHfET%CD3|b2il0o zK&XG?SE6--1tKyKlmU)XGanM&=8jT{Wv)f^`zbbE5pAfgYS8BT?ahjS(|Us`jSK=6 zqYk-C*`Fp89e*fg<&v#eD`b*jH~QwTa7-%`2r;vs^cWs%-CU-v>kOv#sFh^JoVVta*@pUY(9pT3-HVF4%^BitF` zlb6Gc;8pO5cPH~CpZWhaPm=rtmi<7_ih8ivCG1(~ddIzc%ga8gn`f^%)`?2uw~&HI zL;!>qM$DE#i=en}vtGF}k5Cf(scIILgb0I$-=%b4F+U|?9c>CM!Q($SN~||}| zHzwfp1bps0kh)!;O5$ARu^hY+mE0tg_8ZVoo718fW=q5_7oD_S~98M$Cfl5$e+uC^(?^i$z)5;W%nGv}4w-^=Fw47db|M%SV^=~g~ zszh>#qmx`*oU5R03~nb zA|<(F@r1*{9EV5JKmr{TEQ*j@?G(i_65}|lZeN9yL%!tG$RAHf*FWJAe10wx-01}D zv}73C@5_Ua@3}i)Bv)~yKRU+LwCqe4@78HP5;NxtV5MWG>tnhU25(3Up+}^Lv4GF9 z^ul;P_4%j9s^^5<5cPa3r%r|cati%Eg9RB+=^({1j)t64_h?jU$~?Ptf2xSe7jk<3 znC8|W?)2*p%dIB5?0Vtl)1c>b9hD-S_UPEAz>bH)FksW3M#^wl8bVmur>C#a>~&6F zUU09q{IkGp4qCJlT;6LMnqvXDhlmTmi|ytp^1TsDN67%lp1*f3a9v={|K`nk`dUW+RvXOIMCCT%gu%$ zIhUD7wdcpC2cL(H;e8x`EpYP^0RaJhmTHb2Z#x8*&8DcNLK0V{<~$(a|erBmDjScX71FNx${<&}e9A5Od#GH_dDoP4Rva#XdKYkNYmkBj9a0LAJfq?-fJw5BcD7#GV!8f+IUt!>P zV$diL$K0`uO&^onY`yr~`^84o?SZgWKPKaZ!hALBB&Cg1KaqH2rZ-=Re5*Mx?DmM2 z^lLU%=lAanfHSh4S;AGgOZn*0qicB@qhzF=Fy`RV^WM-Y>u$UE-o%HQfcf> zJI%gOCW_>;vi_85kST0<8$jeT=ucRe6BB4W&USIW*lki;01p&U6Z`#IP#~frk|)a9 zpm0JQ4$kYGyu6ac+M=;ZDx8I8A=TN#7t%5r(a1bE9`HHLgy@U{@W8e`>Rmg0OP4}l z%;zj@7Ts)Z3qW%-hYB0%I!S!`xQMSx*Ku(T+?#LzDUQ&FuDTP?P zo)i(0I%HJBs-UJuF!0DBEuBL(xuis;tE;Pl%xst4G-FZ1~ETb1YH_vC2wC)T6qtXy5oBch^W)2AKK{uD~25GdM@Z*Rd<-%+8U zq!e~@bJNzf2P)>u7DN{$r)^M#Yp^8z8}SWw%5h$KJgWt30+lorFrT z{v(#(^FRG0yo{aii;$Op&BetvG3`{Ld~x0XG*G9sL3Q6G2z(-RGD=vKOvgyH?2Go6msPMzQh*S3%O+I!{OKz3dyR z`M#VYfd))iHbq^g74wg@u|?_u7T@g=PR`%o8o0p7ML2~%zt6L0u-Vz!Zwvh^D;)bj z%`k|VxeBMjRZDK*;}e^QXu@L4 zg@S$!`#uk-{(D1+6*x8n_TBBUPzvlDP;ojjN&n?)t(;QV7ePUIIi7jraX44;`*^#!V=gB1bRe7gd{K#(22{)yh?rl-mEXB^CR-QJzYwmyVC7^ zs99Fv+M1oeyy3sjq3YkMDABDpIx6ZU6D7h4(OHU1M*I8eZ{5088h!Tu@a0ib!P(;H(W^wKP_3Lp)c6Lv;C?(Vwn~;z&1juRp4n|BDnEUtd^WL1k05!(tShG%8Bfn@ zRlKis4EkU6(mq<5o5zje@m_@-!|$$s!R;Cvnztzcq7?7V@ssP~6sPUA*7_S)+Nbm; zoh2mFVvL1_`FU!R(A*n;h%BEvR9F)_=f1oE9fzG|l=0VtwT5Dq)&6CAcsFiHT3Y59 z7a2ML_|^%7wnNdVsHrDICCF|GFV8@sxoABT=F-wH)9YAs{9`>--2V#Kk&p@C)WgK? z`DOlSP~!KZ>cI{Q_;L^!zJ?QGOG-+HBqowYb8AFSg*BL?o!0ZT0o~IQ0fHOb2Y~@* zi%GA6)mN{=RkBq1P98IkqA)boo~N5Mh20H6!)CCRBz$6OYK~ii?@uK)36#{8+q}^L zP)PXn^{Gn9$k?+G+NXZ}sLsU9+<5b+gdRM7k-S*7C=+=#QU!S#YK&)t&%j|la(9@5 zu;{hFm)~<_Xg0qz)xCRqfT|U_==UI27vZTZDJ$D^@I#;E*`O~49bE&e8vo${t{+eZg+@#m=@ zJ_wxLHMZgqoQ9Tm)`?a~NVlS*;`_3W+DnY18yq+b8G9hz+%z>eUpMLhi2tW&@EVjJ ze#8wBWY%cn;^Ky2S|OI;K{)m0Yd1TyMN^z%J!As~1qHD|1#eZt{Xbv8xOvw_G0|g9 z%Af3qW4{ikLBkakLIxhvxzRe1$xN-P4dxA;owsM^EQib!uXDUYh9VVu)k@hpTS(uL zpO%4tW7IkzZepy+BQE$LF}S$7XWzq2thJ6usHpc1!C5ZK?5!>H%Ob))2d6#Sr$zVV z)_w397#KnZ2Q`PS&X69e-eJQbP+e$BJNF-sbqDH|{|Lbc)%_xbI=t)Y)2E!^L}TTC z&RBx?^!7^W>LyMx!mOXHDH2lnG&G0<0xP(_H_7D}Iyxq%J*vflG9tciA?EBYtugb# zB26(L8PAVjh36woY8|RSp?fbtY}c{3YQrF~cyutzi;A0~ zw+|aNH8p9txi@>vy;g#ep@+}+_x56{);bo)m@c7_d8`N%5fc|?k)HN$g{bNZWeo`r zr@ngiYQ->x`zug%Wa;#29!AU+e!UR>=8b9LNH^*4VN6jB`Wtx8e%q7l9FYiR<%lOa z@0FPudrL3gJz&?Tp>;ZixqG$t+SefjQ0E`E{-Wq7uneA${HD@>+Eh30eZGXWB0Vf==lh}M!o9FXUK)YM(9JLJH{KQdl|4(Sng9&6Z z1y|FX5STXljnh<__3PWXnMg>aaqN8|R<1&BSjJONH_RsosYp%RicOOa=@##WQ0Gwr zs(|SPHiCrJfP?M`_Mp9k-I>O<#$fsI(W>nNk6oD$Qp!EY^v5=?@u0bX9fiwKH`K%e zrKYAXwD~DwYHi)Wh%>Y_gY)3i1XNsTI)gI(Lf+@kBH_W$mni=WT`oXBF@iS6W<*Wg z51E11lc`^_i0m_HkLT;^>wB-_$4QP!Sgw$LX=7Cm{&R}NyFiT0ZA@r)Z zt9L9_v|myR_cgX#F9p1Q-FG)w1a?5FPqQPv`P72{ z=p%`CeLs|mgYM29RcB0SFIHp%14xejI&VFH53374wjeD8Ui?W5@V+i+9W_-m_m6}K z%SBF+=Mz=h^`~@LepVR8RaFm<7IP!*ngbqbIQ%49%SmUnDqYdojJ_27)u6hbYT8Sz z&pJCCwCLz1^-^#bQG`*0q_lLGCDU2D>=Fa%QWg7&rDbGsG1qWz=2++AP}zD-f8^Ug zbo667pb@E>mof(!6LHpvaG75Nu*0w5eTX+k+-VoE{y6n&AE=JWNKH@8jl8mE01S*4 z16R;5!vkyMF$mQ?ZSLn*FS)tebsLx7Cc^q+o`jiSI5_=9yY36ru#020Ed^ZRKd%jY z4Lp=*2U_5pzz59whAKv9hpWs$yDbA5iq2L7-2#A9zGIpKDh}J*-#2RK$qOxB!I~`C zS)W|!rYGTRvq`8Wy+^4~E68$u6fITgY5j_Zj*eI;`X$_jsZMt>5yP$~60K(%Wj>U4 zcduOH0C}Y8=xx@YFiM>}_&qKiVNoERA`wzj0br4XY2U)qk};LS!=_q4@g*X?rTy(h zSa&Zm2%*AgFReRz_BH*5@Q-UmZEP!3qZ1iPmJ!)nQRSHpwy-KXeZwqL$6JwZ)u4wp z?@yzIwbCT}etK+R;8@88()26%n_KG%0M;%v9ArROVjQr7RVFp}PnNt+gmUtDE zF$~3rFFqGQ%^o`le`Rr<9kib;T)EwVzk6VU$C5u*@@QaWP-+V&$=rVlO-V`VN}86n zg+)YbYip<9;|WUY^SnGhdbulm!JKO_+!0t!Wnm2D&$1vUYG9Pzw5Q~jW8_J|5uA~&Ul>9wdC;A13FoQl# zWd!k^Bu3wrsoONaB?pcKP+Z?v(mKF>#%_Q~gpk5Q&bChlR661GV}598Xi8dISp&f} zHA1Qv%rG8&9~@-Lied@T({)aa_uQL6`W`IqpqE!H>I{7A`wJfismA?ezZx2P{SCKy z)RHQz*_Ywr;i_fd>Au@IJg`8IM8!6hQx=JY^&#Hxr5;iD4>KXFj<61PFDX>A6sW7O zf|~jAu(HY(J^i~ZeeT4M@$8LMvT68qujWNR{3Gi*dpf;Z zri6MFEC__$2?6_C35>!4WnZc6RXdfw9E2LT^^{jMZG${l%V;Lv~L1-0j3o67%>hoUk1BdO~W`hb5YP~tE zYuCaWh8>*Q@=2AnRJBP-SgUz-vA@6m_;Ig1$UE%k&qrNal`B8<&B5bu&nN$twwIL`xr3j(l`QL)aXm<#-i^m#416%*e* zaZBeBunHO)8JTXq8pD!H3}I2my&F7{`qf@7&U^?m)V9-SUa!=9Kpqwo^F-jwZD<8l zyf8amJZ7RNEmZ`LiEV-VnI|dbx9%T-000`!aF0NO;v-t1Y;yi`_U6b%@zQV<3lAKU z>eeKS<^mQiVsP>}|vycD>%cI>%C3W>D$#+z+u`^E7gboJu7mArerc-?R{#|{T z6@EiSPcP}qmoLfBe|xwfi>81dKnAtrhr)RYIM)2wv%m%&ji2e#)F=*hK!vW6%T*-T zX82;()QS`nOCC1YzBi2`xa~HVwa?BFJ?x!G-79ELPEJ@<)cc`dfTJ}8jNO6m ztn>p=F#g*ab^^5mG32iV!;VQthI%fvw6vrZ7d!Z>jTIv>&rz*ax?p*4<~_P5Z5%&k zh!2#~vGS=~J`=VTm?4k#KDM}Es9GSk5{SG7p(c8zEaUMUOmegSb}0k`#RCEs9vh+m z87jUNOM3I>4emFs^|4U1j$Pn-X=~@XbEj~+^1b5B4IBn+{%!v65}KzpZ$*7}Zmuyi zrVcF=g+4oi;&t`qy#L+`pjGq$k1{5s12&`HN4rKY!~(X##lGukZ;#sBa|1j|tQKDkbBky*dWJ6LV9>B6d7v4ms!cg?WUy_7vLtZhMoT! z;q6`frJ-TYDW+BI&p7-ab%~CuPI^5cU1R+wr-b@?4cgm&v*p>%vcp$*ofp-hwywkG zUi&4^lY!<|VMErnI>sG|tU!gRm{z1LhAG=91DZPB1n?MDKgz7hiR_C*7(@{gbxkHx zz4y%%a{zO`W=Lt(i=CY|j%Or%t;yt~?U0>B&I=dMfrh+A-_kk?+ z$T^-Mg}!cXsj!32gh&$N|2ilbo7*AJ{y(@`g_N#aKs% z;@8Mf_mQqHDv+}Kh;9gO2GrFWAH1SDob4}Mcx-Qkr{29^+jWaiy2fkxs(d4=Uj+*` zaPV%_Q*Xe&vkPu4@loFa{cWpQW|{p|YiMcdopp!!#Q!2^0HpDOU9t|!6Tn8LkLQD` z_xz5T#?Bf#xy=U6BGlne3`6rLayHFA1mEPNG9!7}?|l?l>3BWc(Jk(7H@qWvbh{vu`DPkSRqJ9Aq|Q)Tvqa&z3s@!&5Z&**#7|= zcfcwP$Ot+}G~7NF75vRDE!`Hu)p6XeELt%kA;|)vckbS0`T287Q_4!i2GjB2dx1gM zw{Mg{;En{Owe;bXoRW82ISS9dC;Ut1sl?>B+IU8dvcs$^SFg@)nS!|xPUj`I5fLLMK1)byDou4}gqHF-G3=hFl!zjR5ezzv zOcWfpKEUNjwCA41sRkJQ)qP%Dv-aO4Wgl#Si71{~-4PY5i-wAKbxfa%AncXGLl7K} z!bRBr<;ycuu(8667cZ`T#$;~-(*g0ihtcd7q08Qz4jN9j`nP58g3?jTw~S%Bu(O%r zHONVkj)YJLgGDSYU|kh0KmVYlc1t}6|kET zg0;d6aAOTN$J!+8K9BcLyuf=z1EMhzb)Q_OYYDqUsU=ctYIL`jN00v!roWeQ<2Y|& zkn;G=8eTcaf&)Fbhxg24Xt3cY^ZKru*7u;oZt&hqrs;BPq?Qb-D`Rxv^ z;udc3ct>V`-Z$($#4^*>GLraSZvSCpYs;*!2(LYkM*;9Dv@9&GPA^Pep%V^Bi(O?D z5OC+>Xn0)zt`e>ewBbw4+=$KchiLTA{V(6K4@xVTz5CnGgrQh9VeeN&6wn%ibJ7R% zmT-16E|(uH5Z0E%bvLBFUH*aTUND(praO>3Qo)+8u^ypr4f}_a;m3QCsFd%}v1#mg zdDKPW5_^Rvv6jG99)KiR!NP9r$g9H3w~W5tXa>^%#DpOT$T(SqoXGps)F+o}GT5@D zBuh*x+@^dkK#%Y!Vj62lw6FeFzkSt^2%oX)vcM~lT699GZ`p~ za=CZPtRqmg)z!V0+12K#^HUuIGPAaq+UirU>phAl49BW=pC*5x>1(EH{ z)p4*wW(}BAh~)pe2W)TOZY<_1t!C-_sFdY}lnFRB2j^XhZu#7?fI!4I2IVm_ajPIYn@Y2d~%YD&-dp#zdF)@Tm+X0w=$nB~q9Bk$sctoYo37R41Yzm$MxpiCT!0aa4 z`e(t`zF4u_n-eaMlbWto2bY!H2Ht?;vb@AtJ&8rFw7TVAxK`;bGhh(xE@wg|jLS3X zQ3RwLmmDmv3GkAF5TH4=GFV+*eFvdhaL<(yLxXv+b%MUq)YP=1pHg21^|=50tFYgq zxSE+DlkadBnR9F&M?yxn^<*>Gc>G4Jd~!yHRAFHu*S?C54y&J^AE<_0!N|(lt@X7v z8jxoeA?eM72U>O$Kd?q3{m0v*vZ&e}DuW8>tGHa0$(G4O!DmrIOGwNl)RJDaZufQI z4m^JZ9PYoET71j)!~_~(%Bq-=yCTb@oV}XP&Sf1V5)E!L00a^;pLLlASbT@&RaM&3 zGBOEKw!nw;U;GAF0T4!Hv;DRetcpV45Y70MZ*u!-#`Opf!aa)*YQ~#u>N#HpZN`u) z&uTNe%V@Tc_QQNrA?yS!^}yq4^)=oy`H!3`g5RC zTy?j2OE3P##pRpf;5$V$;qB!Jvb~=MVU8`FqSJ1gQgT$*!xbv8!HFJHK0VQOolH){ z4e4W{XI7}W(ogr_p;Xe9$_qY1Ui8zxzrT+AC>-*w&5!}UqgB_$#HOmZx1Sp`?pDRq zYD-Qxh*=_nnivB5nR$8uaIQ)LyF$NOQ&w?T$MHmC;fHlRzT*vr$)t`w^%|$w>ns;* z$@VG_l5iibjrh|~M2YdOvCBkB0Sivbk$X|t;MHqt%6H4_v0^&fPjDkX3QQ;XV&pwzYKTRCcO%-L<9`vy8o>~7ywrVpn;8Jx1&_` z7)2?R|DnM;{(m%>h%iWfiXU47uS3Y;`B>FhichnffePOySlc_H#Vr;{e7*|+^js)* zVW268=jIU84j95BmX7OSav9(L&N4t0xXtTemPrx){Q0F}i1lExl%2i3<2#V?s7WW*eLcGas+qY;3%vJW}m-!Jfb-C zbm4n}C@N3|ZiAe!QQ?q*QqlU-e5;X_?ZB-}I^6!L22a;jCOJ1(er;{dA+F`LZ#lW9 zPaS4@>+QMqj8GOzruFMd4=#Fl%s_sxS*qZd)a}_j!dMuSYmh;}S+O9Hp~LMe^0u>S z-w+V)?-;8^pCs zsimc=_Em<7ofE5WKLHdGSCd>GtsPYy0;>|j;;d^lsHmu}^nM7dK@QFdr&TS_&2{B@ z6pCwW$G0Zk(|h~w-Bs|9<|sU^@zEwFWn({Pl4D9UDU0q55WxBB?(|r)& z=T8M1yP|=tJjvs=^e~J;m!)6o6S0KJLmkr0JC7t{bJ56UVsDQG&#PD`|)p z3uO&n>iqv18z@oEF3Dl5ORtdA=k59cHZDu-T}rid0(Bg6a&{n#!ubfiuc0t*snswO zSmN7Toh|KLtN#fq1w@4$8MI~4SW)P@{6s_9zpS2GTuhOfDD)i+|IxIsc_^neK=!%5 zUZ+$Hgh~uZMAItV&=Rz+t;L~TPoC|?ROjE`;21S{gB~!MA~-Ay8S0Rm-Hy>$d9T1W z^3?J%)ttK1H}S?MB^EgX*kKg%FMSk;lvhyb2R~*M{t>q`WvL&`?_ZVk*neNvXBLUq z&{v_ife*2UhX{Wmd07nL%zrNW3FZ6b7}aTbj*Ld@x!(fZ?A%%^%teEPgIx29gqqO9 z!^3xs$P)2{q$GB*lvY!P!Cq9Kig6LpSSXkml%?2904-&uLctV4Y4gs(xIAD;>*S{R5oZpzMQz`y% zjAZsrMRZFvd-RJJaOz&s%-5mbl{er%R+O8i`uUVp`ZVef9)O+ua?0W?r8m(-c`nue z-fuhrZ^nfhm^>eyn~kzN4D;v|n(&F=q*FJ2DJt{Fz~-pH5ai^vh(|OM49)733j&27EWiJt}RhpF86#3vHe zyy${!cA^fO!`VW5BL6C+SbALkbBHet+qS}!@0=f_pg>`fX9=&U^jRU)dfBR{>9ORJ zxb-1hNJt0;AQRZ~vgj6jq+Q8gL(S|y0x$uN5;Mp)j@cYbO=stclAG`DYl<1Y%;eDN zME7SED78z&}`L01(SC=g=0ORv0!kT zwF|)y|2A|8_=FR|biLHqqH=GMQRq9@G?_-TMVXY|Wh|o?d(DhejZg6SKaDL5Q}UL> zN%i$~aw_fV?>7f7A_#5WeslGN*>2`?sj2{XXg%&SZb?NL<6cEl|C+osGr>D%_3>W zJYQ=-4J=o5#e^wJG2Sz78uv0}zfba_QcD@Q0Hk$wbrmfhq^M#vHZ;7xL^E(#TX1`G z6XcP=tjDjZktgrlCKV2nU>cCgnz#wc&Xc{q_ShFaz0k{?1kJ(x$BjEuZrtpZGW@z) z^v%~l%=e}O1H(zNt^&mX87*vj%|{=Xqwqsjtj(hmmHc7X(V!1!4GcSWru)SqI@h?k zIBri1KU-T{PVb|Go1^%$1TzEIpGCP!|qvboqN~n3aeeWC~IyR3GX~VW2QP?zfP?rKCM?lWL-dQ z4V%^->*&3t?C`~l5=H<6W_s<`E5e(Rl`Axp_UHERI6MgrTCmKBwnvyC`&KEc}u zjFoO+`CSCq##_EAB%`2EW?rAo99~r3B%$3rk z_m}6L?U-mW8o~nR-d_eP8wLgs#jZe zUnDX+IXS%?9Q*}_Gb@5LXzkHv>m99W0ZEAvMOw;m*)WKlGm{Z@t2{<>dqHc}QMJ5o z@d+&ETQv+>d52U_kp^kEB)A6o*PlDCRlZiSyFP>!2pN|+{gEEbAe^l|O-Ugwz`y?J zFQJa8>gxA1E+Qhn8yg!>wv1d{)B!@{o>$LV0eowr`?eLz1D2ATRXq+iZqNV$#-VceBz0B6M-n(>zk-==u2&AQZqB zb63T>pHW5Vdh9OpAc5%pkcsSld&)anr@Kb>{+f!jjsjRnbkkF(S<)D%_j2Un`>ZVH z8Q<3)i!%H0XJH6)^Q_dgwApK}plc}vGXh76Gyf=9zT$N%myl8fO`qvC(^6KXx63eg zHaZGzXE(!Y7efTQXHAucDj%xePcrq6S3AR-{}dB-wYny+j@{i|rH2mO9K9e#YS- zIN%3|n;36w+prABzI@oKFJEArb+JsKk%+hQ{?VC*1p=RB^0~imNuFFX@^9U1#7%h9ry9H3jlSpE9@tJ|_~eOX*)SMVZvs%BmXT4aESH^$aCayrSL>8L zs+oaWQh|Jrj*Yxw$P@AN>|T%rFm2fx_b@sAVm0#SI0fUc{k6w6H&j@n@)GGdcYUp! zEM>wcZaR^!eD-{YetJX#F@Z7l$IDpwvq0n^rcTb56Kcr73>tA6nuzjfN~x+w8Q)gb zF8`oZu?#o}uHo{jF(LLq0JOmzW*l8wWvI~phimSXdyTuVvTh1E$@25_{}`mAW@lFb z9b@g2FOBzzb}LgVuvv(tJX)pJ;S?O=b&{&ADmtF{OY`#vn7+514$iZYFoR8%R*iRW z(Av(YNBf`TbXbNid$D0s(TB;^)iCOwxl$1NC}BGLo|;6R1F+)jXY<;c^FQHtkfw-% zKKzkjm>}d?0hU31&kN7~FqX6OG|K{-n60bNeqYpiB>NK^(+fFE>SVmu_m|S?RzWjn zGGIaT+zKZW^sw|NYBmrZ%lw0P^=%<)L)WxFJ}IgyUsUA_?mfn@t#>#r@Q~}E zF(3eOmH;}oc2#*aYaVso7!eBf(u#bgL8pg5TrMWqY^FT}xfvq3tZeJN9|)kE6hI~& zr5#Pp&7mmj;}cj~`VaMiZ9{if6x&}eyyiYNoug)o&OKtEC|7p3E;uh2lDDT~8vV7N zC%srWyVdCGcZTtzKo3}v7Tkr6u?RI$v-~3j2gX6N08)rCs~jJ!Z@_2M|nsl5d~#~VSyISX>5w8 z2bfIfRruck_cnUGi*uZzZmF!(!7_TKmDp&x3>ZSzsWEhpIG=IUN3pZnZqqY7P+jo3 zE9<1C%#PF9i;cBN(`%6H+}C+g_L!GDJ1%^Hpfx7dBXDwdZt2U>I;@Eu>>uaU+h6c| zQse&XJ9VVWyVF{*tUfc*1orvS($hCCW)p)wHA-N?R;t*(=xLrb^A15}Z{UTWTbDvj zMCQOZ$UHqgfiCqydYyy+bk^a3Bl}HR zJP?vf&fN*4Slsz%!HSYOUl*MYvpLLX^o)Xi8PWd?Uj4Yvfg_tW_z)5Vkoy6t-R0UIvbJ8%@G?70?s{ipVnPyoq1TLlq6PfKFlz{&ELfg??c~tZVx&KS zTm&VIZJv0ytl7yf+4qG-|1^LFzFNpux=)_oM5Jfe<;4hGV2wzR^P-|Qz{QZ=%luK9 zrA1ad>(PkI>-+nm)`3v(*B5PPI=7j&l_LP0nG%{nK~rwZnR%6Z+<~W_-`M`a+E!=(nzQau5QJoviIfanU6q2CR&4uK2k;<12rs^iy^>b9w>h7do-VnL6BHB_ z_U287Mq~JAAlI2%S>3tYTyEHm^X8VpbNc9sAGXj1Ki+F!eA!j%5Tc^>Pl&Ckhf#wb z1)uH!;piCX=#cO&qD{WzkWa5Vu)uUTHEiYZa*$sZtoM1T%-xWoaYzyHdg=C$=5Il!d ztB1O0`o<^}Dn1fi6H9oeb+nnAcC*%54zlqdiuhJz%emJU^S}3P)%92vD=S?_E&Zew z6x8V%y$C!cfe+pNalgDzyAN0$U}2}AJGHQ%BsM?E*j*FYGPweD6ZyU!1AZl7rc^9SQow!1}L-AeR7QF_? z!J@IJwn3}sGR6;7t&!u%q{g;@nkhKusKlA2y^oayJV7n&Y)o?9uD7=r__NYAB=T&i zGQNS$V{9PhyU9IVI(1#e?l?G?Zt=Qpq*tvrki0t%BFA}S)$nSC7*J5O$wEBzbaZB3 zUaFoCl2V2S2Z4#XK~HC#{~W_1w_!qy;oOWx6YiwTgJpcXOd9)UA0lWHEWNeYd;jjv z)_(4|JtSz^@=0p(mFz+f`~b?p$^YUAj!|4%Dh=E-#>-z%tXRrL*ZjcS z(+9(J4p{m*7otFZeUKGIzorO*l9G}ZIxp)!zzuc#mt{aRk7-gD$0qT02tagSNLGw3 zYM-{XwLRZko4$9f0VY}H(Q-0Ik58xAc^rfQ?6?5222RK;Ik|k`8*|_EA=Y|HTW3Y} z0%R1^!^ZBF&j|)~{Mh5lIB$5IsA3n8QtlZ<0YMMn5h#@|~BB+$*SgAmY0jkvVFU{h_sw!_qMG< z1wEHu}aIEq9eVBa)%?CO`k%bNp(15r9K-hpR-duwFSZ+buM zOC4nxHQ#;JZI$F?S{i{r)%01b(W`$mv8rMdZMT=XHso^BkwRn6B@0%$8f~l5D$zs@ zeCiJauUF6ApkBa(UAH`;jSaCspI|aJxpV4fjdc{MSlKxajZWGvxVu)@y*AkNp@BjE z-^|`?bxMAusa_7kFxmK<9gm9W1a4cqn>L!B^HcT zs3n^Zjx#LC=D3&}oyjh0CvJ1F+ntPV zvG(2{`nEuCQ29BE(SUpBydBP_UJ0lwaxT#huaUo&uwtAyU{@CxTYQRLk@Ntexx8&G zhpVgCk%Z^tlhyd^+S>muOeUh^>?f%fk0;l=HlKje?vvw-8kg$qO1a`gfoBud=joBD?A%pxnK69*Q9>(K zr+T4Cl!|kOeA3i`Y~wo<`j*&g_OEm1==e5YLcayn2UrBBbnSz^lPEQOE?cA0z|3e3 zd@)8`?cyLsmiQ*xfw|)>C3{3S@373JU?fgC6q*K8_K9 z>bHHs(iRze`y$c(WzZ4=)xPbg%c*~xA2nBaj*QHF#;sc#e#kbDuD$mkrje`@MaZ8) z`FYfPNr<>~oul?_`Xga3U3&8%$l9ZxO@&F%F!2}PW<6lLsIUy#!pI$uIx-B%j%@~X zx>g7IrCw764!$uuU1q}1%5cO!k{kr7eX{J#2T}6Dmb8 zHf*;~01bUM*j$n4d7$+mAKY>selZiN|aRw#>$EGN6>2D%#}IJ8odRBlkV~ zs@A;5IUIgi9bJ2w0IO;K+6zE^=1)09MuX%sPU!YK&{OwORI zT-tA$!M4FBBq9p^{{8zy)5KoG*}n+%JTgbXw_51H+L^G6m#ZR`7L>dDjy{=x>mDci z9&K6G^u52dx}}kf*kn%dSpHk_Ol z>5t!NJ0DVMMC^!VF5r55>XcfHux;McpdY#{WMf4nq+qtq{q@fZ=p7xj?RHAu2O?`Ug5qIcEofA-bci8jk{rg^`R$8$%)3R%gPhK~ZD6oa5 ze#S4)EEcOEJED7odGM*7`{aMZf|^yHw5Gg>nm-@%>NNA+Ogi38N=Q7IWcPOEQbeMw z{PnIVQ$tMrI9Zyk(z`}GwZt81EbhxuUK)5DV7q60*ERp`TM@9beBov9{`S1yafQdM zj9#I@(t_XMxCdbBQfZx^8~7Q-)(R|LK_T^TYeqUIwcYKxYNw^jNem3B6gtkUe$#Bu zDy3>V74RzUT~S)DtnwJDO}8BvZ>tr|Tm??66k{I7W+|w;z%d?$@Yjz}U(s!oJBu z-GrfLg^JaU-QC@Cf+&ak!Q`&we^q!rj~1`w{}tR)1xxyWMq)xD#q_tAh6_zP$OyQF1hR$fv(*b8 zb1{5tDwUec=Chw_0$fkRQ9^eWq2D=jyZ|+T$sc*wwP4ANbaA69Ce?=zQkKi|8rMCi zV)*~&rE)U!|r+d+JK?Yn=iyL4ABTZEdp{P+HH{WOQeS<~{Pmirjr>pixZXlaeO}Fvgc-QZ814Z8T^E9i4?E2kt|jW+ELuO(>JS1HQs1!m}UuN+NFtv zy=~T0o~=>OxBnBB4QI&AnXtEEqIZRusJ}VcCnC~3buM%WPRQ{K@2%sd%aNMH_+D?nvs*!Tm4eN-5CqCHsD){X>r%IEyWN{*LB=psSuXgJ%Lu=TkQI4tu=~% za*Jjf5{~;M?4j>{gwgdUR#ttaZD0X?yp{c1=sSSdCRfUbG@hQ>Sh8Oj{1X@QXg%A| zd(r#Hi@F)JZjHGOey{Gd#0^-g9`7{XlQuE=ki~RUqD}_7+?=IV*3|yqq%rjlR(Rgz4{GMy z1Yb{B-I@@aeAQ1vV8PY3#zu0*jE|sEY9R0P$WeM~{XJjU&E1`QK7)l1?DsC#+RSo= z`c2{4l-fOcTkYsfk`kcnzs^Fb?_iF*xeW#n`mJR0Rxd7~AoE+!qn(|8riWo)-~O`5 zid=R4r{+AXI1#swbv3C*Ju|y+?SFXITw=sZU+cX8ejn3agfwdPWTxm@HbC!`?(;-0 z>F2vkS{_M=i3Q33^4R(5FF*fm#7vd|cQ4TpmoV~Ba>Rr4Va?Iuc3S%50*_6u&}IUU zr_Ev?mMc200Y=L_S40R{8mji@OneA3t-!#BxcLoAjt4&B%=lY^)Mf1ZH5TMMbz;wIBQW@;FD@`1d(6pJ0v0shN4}=WI`)po=x=tTSGL{cSQvMw1!+2S}td?6uT# zUnR|9UyzfYB(JrA?62h?bO{PxuUer=7t>Cs;_P#8NvQN;M-DfH`+pPsOXYq1!U9UZ zU!SmxpSS)OT#U|ZUCxOXX6=ptDqZNhZah%yU29wm2G(g$GZgoH5PRzh+x>jipHvrE z{Y<%Jd_*Ca&x^H>BEineJsNMf3JZ_vaCRDbwh1YXa`8U=1WGJ#gYt!)(eQ5&CO&_orTA3B!z$)E=c04C zMCh97J?HGuGPS659PXHV+xJ;V_@HM{Ayvcf9oY}Wk{ ztST)v(jX!&pfp^N?vQSfknS#NK{}*CLAty7 z_I(83Z;bm7bX?Bad&OL9&e@x9w8=PlAC-hlktf}y9NC4*KK%J)=efTfWw(x9#@LXl zI=@CA_q1>uxw_`Tk_a^4??G`D!H(>AKyS~$gllzgZ|`f<%c>FXg35k#Lbxh5kv&1d z1(L-b-4n57hm1m}8?~>x&*yGllr(~^y9zEeO>r>N7e6*dKDwD@ z*s}tvPW4z2;42mlNSG0Bp0D2yDZ+m2`si`+r+f=h?O^iyu&U;I=!bOc?zZNL!R?fp zaAZw2<2!S6_DbDvf0fRdC(oaf-nS}?eyy|ztNOF_=eN#*SA8My-)pZcAlHKgXW|b% zBWdHFxqEQN$9D=?m%jQ-4iFhy?9m;hm9s}%cdqZo&teN8tErR0|NtL zRT^fEZvoXX(A6c7x0_C!YtsKyj%d*742>DS$e zn5SPY%lK5G@n89q!Lwwxyg<^asi!AM;ItAn_PSdwCsM%m#O!&l_hR$q+^%2832!2L z4r8T0waL$>?;1T#EH%AL+}BxihD~@&9KOle|vqb3E*|(W0^lu=8FmcCQUi@EA)iJqMc_#NV}Z;J z=#m$vzQziE{5@wCSb<{5?RE7ypMZk7t7C7_8i$pX6DlUmu~rS(Q|4|6 zxUd9^IGYvB)u6Ve1$bS3K9stU&;+&`K5V}F$2bMc{SCXjJ+K7%HgH}4HI4tHyPD#% zmXqes`{e6^_s#4MjDr&szHK)X-(IWOxzElWB!vqR+drfK(PeP9+zlXnpzRerT0JhS zUnQ!A(46P5_aNlL`x+`mG4mTLMX{w$x04@g_%a@ioQbXYQ?7GB9CLrKF!$BR-bME@08TKJLtKDcru1YX3MagV!6hsvk4` zW)wW`lT65TmX5`O-mz~zv$U}8gtWH3#%pt@vKdaCO4ubD3@b*gfy=DMs|wG!v}>mn zP<|LV9OddJGH3hplG+H!^VIO*W%Zl(ox@e1V2}J77z- zJPNtgt<+suT1*LF?x*GWv$^1@AH;y$r=he%`)_=oJT%MPK|`oI$iQj_tU5<#3Qh_? zTq{2f23zY__1i%01j-d{*cn@;c8dktg!lKrNk;e$NBMeJoBk@CByag(HZ zN@#I>b4xw)KTqnQK@Wb2-AUcsWjwCa%Yg=zUup zCsJIUc$ZT9!L61Q?9LO+XKn*b`wcK9xl65!RxmMv4pO?ITk97wSATjg%NL`f7o4xJ zZBF8NHu3eUn62SU#rXjH(i=l4(( ze54RO%@&U)Y#f|WA)~di*|*QcTZy*5Cii>_1qke*K4E{nmf4q5XU)^hPm>+&Ey|yC6B(9{YYA zOQ)<`r%vLz<&Sxt9scIs*i-gZ(zfL`)@`y`C=b|u*PTv@^g^1(H;ooepjr}8IdN-$ zg+F0Pb&DxuhV&9kurG6scm0Pi#@4Gg~0WRJ#tHJKfP zyh*nZfE=*+b;It4y>HP+F!5*7Z`J{2fu%p}iDk^THtfR=-@BQ8YXK~S%FCPI@^#A{ zrG*#f@j5u~SA3>D-|gk`&kIqk>SIz`aF59v8U6N|-e0C_cyRl_#V)Ec+MMLz|Bg!{ zI4(Xb?r+CLo}~+((Na^t3oHMtN5{2U%b=Qo1Q=dRl`rtU5yEb7=<5V`tk3(qjZaDb zE#xVh87(;PeUoWJPf=ABfdxfYG=Nfh+)2N(>}voS*vcOtZB0zXa}-m|UDuDMQntC0 zpSONP$F?n}z4(4Tq_R6tMp#h3OyMtqyDQV(Uelw1R@LJ#%Lq@)#`u>zeA@K;dkbHE zsB1>szdFY8`$y#C({*5~yyu#4H*+ku;%vzZNJZ+5*SXfQ-oPU#V8k2br^gn@*d62H^fo5l*;s)uzflS}zgyPMX*50?;E~evy zT%qf)K%9QzUD}-U?VYIC=toP$bE7$$Zxw7_ExjiVCD9?Yu0kawH1BKpDO+ZfH>so- z4;hSANK*N+-B+Ihdl`rGsIo`e^|6tyoLsGEXJ^Bn9JBpKSV%;aSYZrwI}Hoh#ZWof z%;1do{qI{eavur|dj>0UwW58aO0Q;p_0&%Xfn>VNd}Uiq`}J#TX6F70wYWieq5A(O zw*NgqPWXGBB@c%`Jmu`#u4TTG9M6JpK3@6E{ya=2ig##kJnK^zLU=UQ{!%W2-kx|J;H zUdGZ^u1?jCcNxEaJggR}Eka{{pwI*~$J)AN>#nAH;x^30r^1M`Rww$}L zzZY9_n=WJa#E%SGAdNd&XxzCp@G6_?7G*6+Q?j87{fkK?Loo@*zNfaOkDy9~irP*V z3m=<~_FY_mB%SI_DqVzbul}30ie2oNRapvIz#}LBph5RseZKB?@Y+rhDO*xSf5T>~ zyz`;SDgUq)BPJ#$CoMy+LK2O6CpjAYW8{!W*tNF9rq1>=MRutxyX%wVk0~-Rf#y+` zKYPjFf?e#&H2=#@=zkT_7HxHNerqkLh!Txj>|dvO|C%~~F0As#Vz@c*l z#EOaK2}z9WrQnf!Y3t;c@js|Dr-<-g{1rk7f>N!Qgc=awz1u-c)K!blF+Uh1M61fJ zm`H-A>}@X`O@np}!+TMm*4|2~*@T}&lj8-{zfciQkL@=|s=5^sUP@n)6fe%qYD z>V?p7z0`P+QIQp_dWoJu2FqdSR`@*71y4vwnCQ$}y}|l_C_wa2PG0(_Es^SdN`05}4E83q1 zS*{`+Xl)Y{0SfC3!tql*s{#|>A{_@rv3o7ny+GlhQ0Li z@{vDr)qmSv6UU?{me>EU-=0hB4 zez023vU@!^^|qwy=Y^Y|Mf?2QlG6(}(f3}jh=YmaEX3RXFj(2?Ry%t4>;G(uV&S*8 zgB@VR;=P6sZ>qx?eA1xPPXCjRN*yiqnWsHimX1)WQlO~-svnJ{A7Aw&p~ZZ^WfgE9 z>VLtrPA*Pjle1rB36JJV5<1`_Fy|Rf!tMX$A^{?fLC4H20rac$sVRU+eUGrXxR1F& z7Vb0cN+e)dI>9+eICz+tQVxq8@!-D?gdVA9w=$_1tvi_8p>vV(IQzptPE3pd|EN#>99*XSVxdC&%z)=8tQU_tLKNSjHnV) zG~PIb$L8Ojy4_4Xo=uJqhkGk96?y#%)eKpNKay|qXCx=TvqUNLCk$Iffpo`LBU(D? zd?;SuuB<=t02e^>)Y4JjPL+4ol^6SJb@Hm#lcg|(+}5hwui8?S;#oyhYICQN9{svH z*yTrU@{C?pS`5QM>fy+_68c(FqVa*2pPvX#=&JNgg*9GnN(T-4#qz1b6@P z0XE7W0~yRCj0iJWb4LEAlmcDGN;jYm@SR4nxwT#fSz23XGoyg&!LJt0pcdTRU*>-b z5lA-r@cVl@rYi;o3(o8oRKC(5`G{~^q&gUfd`T&MF>>VS={ma5wA?&#IP&W;YI9=P zrevHOp2U~K2R{~Gg{}|BX<`y+G{Cxur6sW^hItih*L+3fW$55Jr4N!sYZIHX5#9O1 zXRDQ)({eDBMoCHT!-D&S^JE|=;Kkc*?c*3TObKCe|D0gGKU;aD9zMabOp<4~_5T7uA}zXjl4w*ylu>XR)Dk-!7f7@UD# z5|@$=>fN<->!Se20S)V_B|WzxRfVP9Ml~`1#SgG9qwe5KA%cz46_@HU5v5ltU~$1> zf{5Kj+CB)cKd4X`mg?`v;1Mjn`a^(epPHCv(Vl)^91xv+)CdZLQm>YqWKLSPCB zNEQBsWRS+mG$6nOfDw!gpwZE4tbZkowS}CCdt%Q4}d+pl zKd|9E&dWpvv-slUvu4ua=m}32RFu}^temB<-88@E^HLQc&26z7xJz}j=L!XQ&F83^ zxs0qvhSahZHkBqwhk(-}Hqvco)f}Lc_2yGa-|>o)lC(5bRjQ<)!^U&<#7NKbG0DPJ zb^+S9?Cw6!bD_gWk6r6Cs}{oxB6Q9#_LJtNi79wv#!uAo7S;k|m+_zJ2YL}-??>Mq z8tGOic#mUc8rb>sMslZ!}Bwxn~HGSIx&0%$OMe1C^q9l=#xj1h*# zB}=E}e+Pt&l}LN3xj{)eaQP8wFx|tIUeDPPFF$qg)-Y<%mGW7wwTb=w5{ZD#x5?q* z+DiJ{_x?4bulF}$#P(;4A6_d|Id*2>kYHQn7r`HWya?>+kp>#i^wSd!jy64gN3E^G zoxDtBUZ$`?1>tjh?=X?6mah;B(wav(JP)nw_g2XTzFKt%VtfIPaRafQ`gVhi&z`A= zWd6n%D)5tWJX@U_v{63%p**{P6sz9;WhZqACqYn1m4Vj4T<6jVoSj=1IZR1Eq;EO@ht3tFKi>GbSo@;C5=>PCm;@_weWuq zjDDNuj(dE3Y)#+SH_v9=Rm;5ejTDJ)MfD~_wxB6OYQDo?4)eWX0p>1A#L$g7JuB2| zW*i3s#I@RhL8w~FQI(nGI6f{xQtNv7%g?bPKhzpxwk?B**q?dw;40LQM^V5Oo2ikw zI8sx5)&~0p8z#9(Ig;TdMNeTmbuT~i<+$BG5^izz435)xQ{^%m3{-kDKJK|sFW5t;6zxuAY(xXn1<)(9k zC;KH0s36j5m@_vb`^MDhy_4>Kc-@3yk-s2jW|l|3o%QlQ0lbC9MwxUzT5q660=qNB zKyOR=xO0K!NhqsVAxck=@2a50@R~@E&K&2jvEs%b&F=@Kz0n1*l8j2ekS|KYQl-la zD`^w*yl#V(XjMqokrT%DFsuWU0R!Mbmp?C8e2k1mL-yi@W>u>Dqrt_)8q>vMfSx2OLXN8>1Z)Rb#GuBgkL-&I)(ees{}lU#ii`{fUz1oXZ^o5YTn zngW}f*jRw$0Hn=c9_v$(0(3TS!jK$!G65nl+u7A6n(2^d#ZFJnQ%T?WUP;Dg{)6Pt z_k+6)S^5TZU*M^jPsS?bAGCRvVFPX5_VKRzlzfp=`GolZLg0NVqODCJ!Kp~8v>%K0 zcqi&_9(jOVi{9gD^iZ)G;L9ioE89Fl1zm$8fCCgfclEz4;tAf}ndf6O805+%NGkF$ z*mh{=50Kkg0j>vi#~SLPG7j*?`3_z74-xehx<6#eC5q9W*4N-SXoQQLDh_RT375b!pS-%U@hxHaQL5Gvk1EFUG)t4N8b- zz-aK-(D1dtFviZNrnDco9yg!%#YQ&cIM91iwJN--N1yyY=U2Uu`4}B=?x`nnIJ5$C zGX0{DxFz_-@6(`x2&^O?N@Hyc3`|1B<3o=T@3;rL6o12)*GEuUq|dE6Q6QhCK~m#< zB_EXmj;J??hc$cROk@R(u19OXs%JeATSWRf#NKz0zJ@3^MZ7FW@cruk*jL1g{|(n7 z5U03~QgH&fSWkby+M7SSpNaCj$|B&eqzwS!0oYoX=OBPS5|~6Lm{b<^aU1<#orpf53 zOlM>&SmKU|2y=M5ZZ82(PwqP25dkX<9|;`Ke!y9-7kCXs{(ZZxXz={Z*k_A-$VB`e z58FRATo1`0p3q^hJ>XB3y+7nM!Wob_mWd`8mL;{cR%d8r*ckjoJZgHY>6{F2{5S8g}ytX5JGmbAp5z+(N$BZwFzn?Mf@26Y7^}d1YoFdd8Ej zJ0Se-67uu+ObBz$a=G|X0Mc@1Y3Y!ar=$#J2CajOm>3)_?_3xK+H|C_u`#7dIhcPc zj=GONl+2Z z@#4&!x0q0OgvjW;cV<^)Y&FIytJ9>%c}9Yjb)6cUS^C?!o*wI08ccLosAgwC={H98 zLNioDx75fa;+4G*aK8SQat0Si>zaT8pL3h+G$Q_`!262D{ZEXB?bak)=LLHpugj5{ z?n@GELPIY!Bu-?!`Ag!Rb1l^Q%L^$#d7Nc9q}7HYLLOs4BEjeT$drYYOq=WJ-}daf zQ1iYN+!9W2Y$W=lqOyM_JjhH|SGcv%zh+dt7If^WGfp1TzF^VnU1g(UIZqz+?*TN5 zfv~=7`=)?uQ4NIkFvX{I7)+UD<%UVB-!%z_h#Z z9#>jeuNsfmHm3P?b?n3Zb654!qF3%Ho$LcwlaEqKQIVA=Eq^pTlc?~ZyM)dgI*UQn z+AAfTm!n>A8@HQ085>$Vsa8ew#BGFwWbQ}&jXx)`Ve;hoQBE7fLL!3sXF_GPqs`Qz z-F%fgJS!Nm0wz$Yxd3iHmO(!L2SFz4y92<75wYqWlGgnzh40y6{0wRag!2X47s8cU zC+CKl*lPUYvbi%5`TkxXrx@tf=fU=;L?5WfsT+l1I^oO%%{lY{l7v6Nv6Wo7;k*2>*QLJk|PpR5Dji(&E>}VF$ZF zbOD&UcK}~$bRr^4alEZ}qyHMaq^w&`v05X3^P6kfhq_zQZis%riZzZX=()PWNrCR7 z3x3x}4#IM?VG8+Feyvdi47+EqtK}<5{$HGk69(H$rL<*uE(lIEKfY8b3+G`kxbgTQ zI$Cg~Z?J?dorY={*>CSWPwT__m*^XnpeKUE6pd=}C9Jv*brDYQymY~s0B<})C|7e# z=%V4E7%pw*;jMwrjU7{>LqU=>jN-4bBu2QV9pesecrD3!%+{7F?S=C9>2nbS#*&e{Q$sSeksmA;>#nymjmjCG1w4%KzM`LO1Mo^O=J<{Mq{;3fty_2eXN9hCMSWE|P7|Z$&y=F=f1~}6twVIZF`F@Ax=eW(c~$EY<_c+fi~Ne-yP+Rkm3l@`F9JZs*Pp0-_CT3{ z0diRNED|E{@C}`v)lpMfo|aPt-iYm@n56_T2|BfU&XP(WceNCf5^T1jS8M*&mV8~VSBC%+*RZJ$!!hxyHeUYvT+r-lH zm}S8UE zAWFCac`ad#%@y@kg_3NIt1X|)@Bbk?^&MHZgPT1_>t|?URr?V#`E~ZI1lm+&ns0nl z)HzX#V|UgNdJg*~g-0es@Y%e7P=Wz8-8L**p)KJCXV z{vTOJJpzKW_)#7_tOb^d*-U#{I+T^!+@`9uW__ZCL`1YQJiV7=zc~bTPEFAXq^>GAERac1Zekq(K932_q zlz3SUw?^*&zfvSA^Mn%_9@HLz38N(9TOZ~YU#FzyFaK~f-D^{vcUVHiub z8YpQS0a-ymJk0!BgI)>@h|FjFB5e5I$>M>?Po8jPr*S6UXL#WR2tS&S-r<5%*@7>= z+na6*p?qY74)5PdEZqR73+f7)@xU&Nwqtd^cnpOp?#qfjA@IttgdSEx;2p3c)P~Jj zZZA%E!=Wx%+fS=9NqqiL69vVWMO<pawy5j%YTo1 zc2R)px~%KdL<4*eq0>}UytEU`Sj+FjL zB<_c^SEpF>GD2q$LUj2wNbGxV$sqL%@IoX>^E8U5gj;|zv( zxp6?3<<843WG9vFn-f>y%37ON;gkY@ zlUQdN_lsz1d~8AF-8A!8C~NUYLaruoB=vthusy~m`5Whq56g^~uytGoJz*b-t`f+D3^oJ=PLN(Pr4tiR29fLuegBVf<21HL!cln zZ5GCg4N~-r4y8{RLx-0l^0>p>fSK{wRG|MK-nj=4gYjtg@-WTNc9ms+(_>Ee6}(y1 z0&WJR5@-0@s3Tmqlj9NYK}ry=PVD~xdu2L<^l$0(RO#Zv7Ivek1zuxq3m-Qpo?xo$ zF>ZKc>WdLO%1}OV2zMsHu}Eu$kQBYWQF1FsRbeOT3iiCb;=|)TZ8x@21?gPKyh08r zRpfpB`rb7)03NK-pfJiml=iSe?k=j<(sIaJ;qEI`0JI!jC}!sn^|OVBqFpVoxVS4{ ztw41<>e56~@*&tDQr#>oWro)oMeyZ6pb^xG3V1X#;^Vauv)ih-mZ zXG2DpMv_IpnFvgJ{3_BO-r)-mkR6gCU&y5i6MVvJnm$QH#&I?2*Gp?;S>d4Q5MT$K z)>uPPz~U3F`zeleD?Vr`S-V~gpv-tDdC&V&`^;bY{@78vxpG3o}!E0n*>>h4(PhTgQ zee{(2wQ~G)Zd1|uF@MunriZ9R0iZ|MUexLuSi>Iv(W7VdXU6dD{`FZ%@P8rDAsgku zKTzAfmx~f3>AAJ)AUWe8++Dk<9?ir}<^?=?zW{A{Z5o!70$_`=S@yqqwllJ{gIC+? zBVaaaA7HK>XlGMWjBhvIX{~};DMd$S5K{`6= z@o9khWL2xL`jnf?tinVZIZ&X*g%5dv(=fi#J0v@m=n(jMB%f`!YNq`IU*9H)-iAo^x9GQ~2Pd`N#G6FZ zVMm2YtvmPr@*T8x6}>zV%!)`!vEt@Gio2_rETx1!7-O5uQTp_C zyf<#=d|iuCJD%Jk!0S#v9bU+#>6vlGwz*j~X_(wg$kd&5!n|v(&wN2^?eT3uQAU^) zMt~8r6|bV&0XryoS!fjSCIO!ipeceq4@*9Pie91|f z5Tii?i?mQYq$W2AmLEv7TP5J2_R4A_cNbJ1@5~Xl1rQ>~CqPmIIDeD+vYC>+IjtrY zbi-$9KFeI8Vio(|u$sGSw)A=miR6Tz3T{Zkad|1LU&Xdc#sL;n!eD#4D$!aN+Y6|Z z^}7@rxWTeN1#I7(i>;+S?>06>WRLqYK>(@8-h1};cH!0oLV0_E_;KetZ)#uZ=9vED z&ZTvW02MIzK*z)TnX2N9dOs6^pdl;MEs9NU1;J0t-i4)C0tz%7Z(faBCryZR>svGG zV`y(t`B$Z)SM>_ux0u`^sa&6h%+>ZwhZfAW5;ig0m0c+L4H^%d5SPBCZ zbL89nmcxb2mR@oQAx+cNvcm)G{vzRij+!c-^6-+tK!^!|l7r}ziB4Drd_VbA3D#K6 z-%U4=jsMeZ9jVzaDgk zP9f^Fg9~wL47jyH7$!uyDh8`$8B2$=xpV{qNC^c=BV->od_$s`>EySwmc(z>hLfU1 zJG?O^$jHcAJ-f=^mS7A~e*QcF*0?5!oO><&N)7*wVA`?P@|1;%zOc^iwadfZeOc(5 z1qB(*C(NNk3MF+E8d_SyF}Q_zYL@0lj~?OhC4nkjL&if%N{)(*5eX=KeC-)AM+liT ztis3^LWKnjB%<%$eO)T}olC6pldi4Q$?NLY6C_oKBC5B7F2s24>qTeBkPIhl4Y(m{ z5?@z6DajrQUd1DKoTryS0tzZ@UUdj`0=y!rRWY=3lm`#PU-b3J#y+*Q!FB9 zKpf;@j2iY1sqX%oy{FK2(l3!XQg{Z5)xX|D?+)RZ!CLODSzgCY{kQ;x^Ag3r*DsM# zcmdb=X>$ZaV9RV8grNAPzCqjRM-RMup{5t$EGS?}g%JgESxlILft3Y@a)UlEf6S%n z-}Fc1e>Uaax~>#!pnOKv=(#~O#Dd7TX-b|c7cBemET22E1CR$m1F89101n^4>B`hQ zHBZ(k+2>Jg9*38gmq$em@m+R*fP5AX$@PK#4YN>mWjWNc_=q zY;43O4-bz==|H@0zJhn(7yr_Y;Vgf)&{Hq9#J`>) z zb{CO^5s{p<>4d+zAU^z*dmp9npaDms)-n-(T1;5o+uZj%DZ2?0SOA+IZOk5w421gg zb;d2R|6;RWk(c8zTTcdk>4swRi{Xi1*~j9$27*no8aOS4k*z>_B@zgHztc7~rHN&J zoud_wvn68ivsjmofuRl52&%-23JRTo>#nvrnX~urF4O$x`?riXi@qa=?F9_z&8LC- zu+LqHwSvXeq6eZb94Mj6>1HF-KVh5h3k*l-*d{`7*?pz{1Mn*8A+{yC|B(>1mQY%p zVn(LJ7{WrSEG&Esbi8C0CNFAZHHAg#!=D02&kz$eLm;E|8$LoFUs6hH#@jbB8mwai zQ96~vHln5=Gfe|@*eFTi!G-cKBM~^uKe(+Tyf^}$0v>7?5W>!*E4PpT&VRBH{bK() z`z2N8+98MVkUN-8yqO)X2!@N+7d#FlrxZVX-x{xcveHr0WY zQYx$?k;{T8cP|?Lu^6}+kDxXxCtyLF3~)mL&74%mJn&wB$B+EJUZhU|o%3VHCm9IY9mb~Bm$0UGtnK<}M?&7dCX z(UbZ(-C;hARtI@K5~Sz%@AM%>`ssOk6U|{=X4A40$z7~)iUq|^>Ywiu(&=;XCR|FI zn)oIkDn@9|vn?zvVuGA!pTGbFxv(7Ty`-&eX%a`ja~>!`c@XRPHs8E@3*6ry^KCs0 zOGVc3oBaS6=6@wAyn_R_IP*^*vnlE|1Akv~PR=o#>T+Xy%!r7S0`;O-TzWfMU~OB{ z!J!fiL0tSG0WYW|IzY@&BV4WpPd5V=M&N7wvMR$wnNb;8Gu+Ej`g-fx^)Vf=+8(lcTE$nY&3@vnq8VZA zq_OzHkW@N+BQ|>>0*l!nrlI1mv;cd`=re)85E~m?1U54a3{05V)e$Sq)8K5mIH=-n z)gPb;7(&P$cIv7o%cM$yPDmK6ko24$Cr#+tU^2MUt1b5a_b6Sf3%naSHoCcdnR%0i zKgr)~79R{A@(KZqwuK?A3ynMeMdLLdWL#rM!( zjih>+oOvu5YpZFvo@@nzbqP%d+3@^!L8i}iuogdy$G6z%>5+j*d*ZRC7yM_WN^0#; zc@kFYXiOQB`+4Fa)?SRVu=GnGvztv{lD~&FJ`Vati21aw$zcy(DKcI)6^^~GFb(x> zJZWMCI?!r5IPX?Uuc8W^fc-*{Fs)*80Fe0Yw8M3if~uJJuM3SiDX$kx`oC!=OJC*1 z;fvoL#>+A+5nR?&QC0N;+W232Qxg+|Kp$5yVVEHZdmpCGwtkyQq#BL@xHOHL`1o7~ z)*PNHNE(>tXveIu-#i}{@AwDinZq_?qafvhN{szY>emu{g1;x&KpkB1_z%lwHQ7i% zQUwSBIFT>-Qnw&TN+Y8%=*QrohTE@a8PVM#PqPY`vs9z#u)zE;2#Y&PDz+}`I9%Pt zglc1BLsiMl%uEcroqH05hFCGv;ft4r@y7O5%tfeo!|h1 z`;m2Xj|L9fBR*+ipLd3ah7oyhwFxzD-(eW>++FCSwL1*SEm5J=i{h;d>WmjE22eeXQoI=mYr)zRwU9a$z&{DpS-#`&qLp2_&$b;E8AlV1@j14SRcg3?iZ}eag8bu%t55Xf&yS zVZv?-Vy4cUHPo;HV;6O6m0!hJGzHE+5R&~MhYp=#l5hQz(<15h^#tIStI2OtD)dsF z0ufsbV&b_ll0w%YaULjZ!PLa>E+!d0+SEhwJCgS(0E3wS_P#1hGBaU;rv87S;jUR~ zs8g*-W2oIk(Q6ZKw%@I&>YbqVMY{_z3BmoU4NYMG2Y-%VK3W3UkS?2$S zpHE;skpZn@lrUk1JtN~UR!c(*N|Nu05i3ba!azrM1tkTnE;id995 zMiwg^(g=nah`;<4=igt@zlmJcr|hIh4EuauT)2Y)nwK(hV5p-xqU1nn9tjabUVDV#Y=n zmcKST>+^Yd_l=XGr+`#A(MAszbHoEW9z$jQnae;Dj@!%711-SE-J=MdfI%q9wu+50 z<^T^6C7*!<5ebqX0_kk zA7DQC5Hf7^LAtQhfwOY8yAB8)T)aJAG8iF@pz0u3>7X8^8Y0xj1Z zY(!%(a6EkfdZp+S`!I#!UN|w|O7)}S7Y@Oll!(A9nM=bS_rLmYhVr@$1Jf2gf)m!I zL+HoU8d;YTCDvrAA>2frb{hUHrBq{V^Qt{toIGGAbJ&kMj=@@fu{?gz4_uX%af?-ud2!| zUyl!5?;?(&Wpx%g`*}gNwmFdT)9WYIdG#u@7WO60OMtTX4s{6DvL5?D(wNDdOF&kh@l#5n=w=tPXS*ao51XtcG6_mr0g-xCX!K;b#J7x0fVJXrpouLvA& z$a#3Qs!$&_DGAeXr~^W6HDy9aL`=&-CK`s9G^e*?0r%2YBunC8!PE(iQRYL=UB?{{ zYU{K02R$1i-%D~Tzn|FvGMZIcgU6iuvPd{aKuCz{6F3&*I-na1|L~zpq0(~`A=rak zu@R)6+zFbWuK|p&&J`OCF+}cZvg-`O33_QyIjQD%t@C&KbqUG~@OH4(*me0{_sh`Nz-yS5HdAx_IVZ zOgm|nz65p7fnfj7=cPh-5d^dpCi;B1@6kivxCj55%t$W9-ojl`(f= zeM_`8PZZ`C%dR0%YwunQ`&fs+PW#`BgZxAS_zG90Wjwq#R)q2~Smri^;#TUP<<8z& zcaf7tKLAjFKY0{zU-7;EKx(j?Iu}22+QPb$C}K;iVgb67xsCdz!a?c?hDp);c3 z*#t4Nw76T6pimedYO;84>yFb!&7tj*!f?bY7xN|6#kbn`OG8;wXlg>5(y;1eWS$n7 z#8Qk1US|x$@4-%E(c*LW`)Pk4mS&|H28~jxrIXOpl<_?=@jC?lu{?;raA>Fs6}T`0 z1(HweXXA~Z@u+2GWxaGe{DV(2+rKI4G3$gWbUPDw=6pcM#C)ORFUsUPr%CuBo%Vw? za3kx>S0VhBQD8;2c-E5nArVR0WZVukit1`0mcBvi3A(;R%ExmB-7m{!cgIuxzhOHE+1c59hsY-5h$Oqn-Ya{T%@Nrn5ke>#g(BJO z*kosKQT8f(KksAo`##U>`KLeXbk1kI?{(eReceF*?M>3zpv)WPg__7jM%3H;-%*-f z$JGm$)1y3W-20c8Rvcmb%EDCw?^yMnKDO~N8$X14G5zK>h`YywQhD#9{t<;*5@KU| zgcEYRAM`O?oH{xr_<28LG^z$`(bKx+bz4id+zfFTqLptA#-$>ehXOY15;S zqug-K=fCFWych!o1O%LR%3@lZ?ItZQ1_%4?=5+^vGZ&(#?@K^-3c$EbuGbx>8H8m$ z!nbkOdAFC%^uHJ};|{eX0s%dHZR?0d2KE09=yZ^RiFqPQMZ+Lh{u}=xpuL&KO(|Po z-`zgs_$k`MDNpOaTr) zjaX1_E_++fSj%y^jhnXR6KK`5wYv`=#*S;C>77;523DS?tI9&V&3LWvOH@w8!`}Le;GaGd$)C2#nET%r8zKb$s&%x{wO}aoY-qGN-wY}a%5p2OiN4o z1RWIiN*y-Iy}fFj<+c=bH@e>jDn~3B|1QuvoXpw5h%uR3ATwJy+xs|e{|9;S3-&FMx2G62V6HW0?lP2iR~^N?rJLD5eXH%Or}`y}N@Q9mf`)KA%I^$Q zPQ{-xE~m%p;rB1h#zt3-G**@tM_ zJ#B1sB@ruuQE@c$!Q2ltYUAaH+ELN|-y?~X5u-=agGMGpbjUfzRiC6IKQO7NfVnk$ zU}B6rl{H1irEg%p8Sud~nBkH+S}Ixzw6U>D@oKPrQL7G{Yh$QX8JE|3QW|sB_m>hG zjB`MfhFuIaX_BvIX6%_W_T25LQg*a=04H}g0D~6h<0IZr;Td_8>Vo-MNQEpngl4yg zb3k*8KQXl`^>}Bg(CV2cn3{TA*K=D|01{F}^f+U6_Dp(o-km07f9XE-vJdeMHn0k2 zuQWhjQ-jHCR9w@NGIbaYKTfZaVz?nC@#kZY`t@(yf16M6JkG*T+4J%pbecApjrE`x z(@}lf=?&J(vj5mhehe9VhU;{auv*@mT$Q`;$|GNAO>bjbIP592iWu zi6Mb}oGns*C(8~1(ubr8BF1);m9I^RXn6M@K*cb@EI8g?Q;V_pDygKZXzvg43RV20 zBSJ3y6Uy1yS=}33cd$kh;A!#7(4311Tl!nJ^{S^FiuTJBtP>vF$QFHsBfdWqs5H5} z*OvjgxO`0}fKd#}#h;P~!_z{ngV}^To!$z|YMw?@lDu$VFJC>row=$s_Autx@#9FC z2^`WFrzv}Pra-5-1!d?5nm>w`FrgHgd3Xd$ewnOBd>UQ3WHtZ zqEv^9$Fz|-aX$`y5%=rMit|5GtZV z>UeJM;!r(XR)953yhaXg+5J&&Fy)8J;;;)H>gwo>rgA>)u)wOQ2E0tU#6D=V3y~8e zy@{GD9d)4lHKTR6fwL+TN8f<1@s;u)?PoI5IJmN~4d%cgqR$^c{ygc0ncY{vto4A$ zkeG;z|K({6U@~$sH}>fq44^90zG|o4nyn0ehq3T9eP7(nl~%}K*I}=9)a%4R6>ROAff8_hxoIc^gJhqe_ zdA(OallKcv6JS+~|DdH>A_siadJ?p!kwtHMhKQZ-9_!WmhytvY@8>Eh4ao{7d zWDOg6qTAAH;^}KK(udRh_dg@%$0ijXd!(Y-DPHl%w(I^s*k2Wz4-3i6hUrE$lipQx zKoC60+&&WutZ$$O#9|C)c4X7%NoOU-zps8e-cj?d?x5rQsq#D6(I3xarEKy)B99aW z%1xWGk(vRAd@jG_0jWpsVn(B!=p|V~|Eplr6K?6x=+_Qfyi267J+~x}Mpi6HVv2R* z2m4o6cR(9H`RRXfC~xFty>#;dVz2fZw=&-_xG4+g)9`7tN}D%-n7|njdV?0bavz|2C|KX3;Gxc?J)UT; z4%OB6Y=^nKoA@dw_31`305FO1G8bkxriGI9zj>1X4+dTITzD7q>Uf<-%FoBuCIDzR z(g8Wx0+qSvpETt?u4v>wNGKxR*plrTh^S|lIyw}u;^NkVngwbfu^T796{c39eVB`3 zwX8z?xJ7+((YqG`aU=?M{{{4`6C)@MS+ZXK@b15a{~bb?c&>_aBvkK52c~aT*79^zGKpysd?UZeq^RdTN~4&=y<%L$G24metu%0C}ZoHG!1-#8O)AzjrOw;j$l=fFuhacfS_MV+Zy9YtV2ymAM5xb}ab=3&fSEcA)28@*=oo}Qur?A3kzd-lJ_vswil&o4}t7|$5! ziSJ2EFITu!Ft+S#HU5>V4x-uee<3Pt=pbTdG5d4;-IB*m$9F&NimRj*q+H%sY_gQ` zl+ES{4Y2Ltq1IO37pnWVMC3DGJvZP8y9z=A9v;Ak%K=y;NZ~G!mU2B zcCR&8kC2Pk9w=%RAN@9dGRY-5ERt3AHf1oVT9G98#{N zc@VKzo=ojVH?-zYvA4at$b9O&s_!H9o~@u2Yk^kGAb=2u^I@_t(=8`Shj%AHY@(~M zAFN@~Xihfn+@N-1`dUfLr*Rn26P+%%AKZhmZ7V11z2vyq7=wv1t)x#xd|K}?WdNmG z1D$a;R>|SA;jVxEhb|E>J0xuOk)$AL5B{pPJWk{EDN#!43D4O)o~W3>_t2}JhHWyd zXH4P+UFISRgFnU3`@M#qpC1!VZuxCg6qzYr8gxboczIszI-k0)Xnae{kN?dZJ4;Np zIQ~;(S=1Tkmmn`fl$e?;Nj^CW%=E)5qe$ zoYjBE=rU)L)yaG380*G&&j&Bw(|)n{3o(g2k)&L{w&>a3l}oUU+x6)CV2mXN$C&kF zs7v~-Wp7H5;!}Qa1|`kAzxRdbRBfku;|&CY(IQnq3&e}r0K$eJDf#|e%;lYY!(y*d zEeRPhG`XFQQ;M9W3rK$AP;3`uNalX<;`~Dh9)Nvq;ygQh=Jk)Z@1lu~_H+HYdy(YP z;WH}t*-VzuIu5E0fl9gmaL>jjzx`Czc!UtcP1H9mDd9#ViN5Pc`3f#e>S5Es^G9X% zv0s_$d#vi|8oKX7bt~?AR&cQZQcg{r@DSo+2@BpmN z!>+kpqhfMbht<%w!L92mAoU%2gA;q&+nDB<%KZx746@W{BD70h4wp*c`V}u}2aElr zlolyYbSSAvEb?z^_q&6axkBf<8#6hL3H*#{X6SbvwP8}g!si9U~%u4-8i7k_tp zhm#@FY+CfYerrpW_=N7j$Vg~L27Q@DbRfxfQTe1T^%jqFz5o;dt4Pt-B+ZmSc)sL~ zck^jYhKr=V%>;6Heq85uiDU>4luL?-X=>g8Q-Xp8JnZZ+pww{Qt+~n~g#s@0WlH#5 zHl}JY@$l@QPVix<{2PghH&`Z$X^{BB#ME@wr}8?=9P0wm9YxmlyM%TzC;wC3=@=!O zjE#vAQmr4fH$3nMnl6H_XQx~GTd$feWZ1S zGbclii=(%%60}tKkE+Lc_s#5lIH~Fz)9z=wTF%_qUVGVH_M+qXLfK&k17zd-kjk?v z@vzB=IOoO>!UoFLS|XWO^ZL(;y9=IHLYZ!8z85%>)1UE~Fg;TEr1eC&Tp{#` zmC0!VPDR+aAEv6s6)vB#y?JkBuLm{NPGEG0asK9uZrAdLiJj|2^9R4M?{7tta+i?( z7sxWAp#yRATDA&o-7$SDk7hL$E&nc@&U0yX~ zkl#~lm~}nkGI(Un0S~k&BBNe9qggWIi8Al_v1NNv(e21>x9ys^TgM6eac9!Si_Yrz)&c68hpPXn{`zI60;2-)y!V>6 z=BNMo(s0Ltk7Rrbr5U^>F)CW512J!peijMe{4-HVw`A9vI=CIw^lcTjKr|#@Mzf>i zI|sg{@k+~ZA^TsxFF_)wpnc?5QcU>P+bCpb(0{>PCPc2Yj*Sz%p3-pZ2dUQN-u|Ft zCl5MB0#XKi>I zRa|f>XS9%Kk+hEPj{6ae2(IVKnS7USGMm!?5f+m`f0}Jq-S08fOjz|xHCfVE>;{+i z%&d0@&qXs@qoeWI!4%qYwn=0uDMZ8Mo!0#2=qYj>(|si63eapmieWe#9ZllfYQJ9A ze(hKl#^IJ$Y|&}4rqq~p#CZ7fZ0_&J(srCk`|(og^Kx!K4K|vWw`moZ;doMzcmM{w zp>;p?qD|ZBB3}ND(3rl{Mf(784NM(a6XBAqjLw*5L|o)539K30X@lV$2LBt)eS2fG zgnTk$y{+|rO3uu(3ODxtRSQiV*+IZZ6;yWppIeP+KV&wIpe6rFAXdqS8SLUERptz4h?rS^II@U!M5+gsLU#>PO4E2-$n_W1bo z&!m$hs0v^5?k6kz@<)r}3&|kaZtL!TCfVd0NEixq?n!q?!g+Xbg-8TK&;+nVnxJ_O zl>?ly{;7Dmr?VoopY1=$BPg)<9v~FawnKTcR=@&vCZO!As9C7tYEGJ^FMm1tFYunSLQC>m#u-!I!x|5f zTP5XV4|~w)jWl%r^0YQ{=tvxoQ^XZnv|_aFZ+s4|y9pa^3d)CZoo$mF9?jy489if3 zsQLRuF3pL-Hj0Ol9V~cJHwaUajxB1{-MHBT_FZx5m3EGF_lgk#h=dSF$!GB$D}xbP zB)&CG?YB&FX6#-ZI^h}BkZzOT9bf2AKUduuW9|V7crSf#!lERA36xOZ*T*$q=Ap(K z(E9p%hv|3PoRpb&ZyGH~Tf2H0FT_<`@m=i!7zZ2o(8(=rkZC8Kd$_eh#=B_x`ps#7 z)omVUcG@rdov-gthm9N?N?zORRih|Rv-s}lrJ1C=!Y~xy_=`W2#eX@^UqAfEUzgRx z);g!2l0?*Lzo>C9L!YT&_m#Tlb``RjFCozkv&qIy_{xYyj8=7er?i)X?(ugQM7{MM zJ`5-?7g>;*5Jmy(-S6GMZ>!5_iY!<`)81yt0;%*cP4M#QuOqD|Cf%@o>l_0Fw5k5c zYcUeAA1n-Om!F9^)c6wI;Nh_(p`XTO^JW0Ta|(I5db1ROA1})Vh{2aH6XAn)(V7EU zj-Ji!guC%ZWI0urBm5O@zj^B}9U9;}f&vBaQ?`{lLuc%KVB3GxO5akixuMH(<;z;{ zMwKI$3Q666|1Qoa(i|_Z)QE~M=`bff04rSjdV6R7&SiV=tuKz1aGF2*14Nj=3ZZ$q z1LnpV8IV+zF*KYya4nznhyHb^CMFI0Y;|H2G<<>#}*uZ(Tj{sSBbGiPjHZQ4ym>=C&ftz4bx8VpE?7QV8pU%Yfs~IQ8NQHREO!-GLfp` zn&y!jFWkyJG;rtG|GRULJN7!b%}e#wJim?UPF5YUGdf)5ym_SxStUvEbUmbXD%XyP z?6|E%wF91xoC zf1Ej2#Yd5M-Xa^C-x$Qz-7QK5Z(ah@T5|wYM8LKYD_52Wc+7kAekX^5{)uO(6TomI z6=AAD!bNFIGm&?KzeQf!ay%w`jm)2phWviIOm=RHH+j!E-{fuVvjpDWD5x`T;bv32 z$X3)ORXNAL{C#s(1_e9={niYF5iXY>6{?exsrqjBB_+qi(Y30Agb%YHw2P3qi`O&{ z9w5+295`jZk%MZ@vySRaEa}&Id$Bo_~~==1r(g~;+`{_dGeJ@3Ss}qMTAkl z0DH+};8ouI{ZI3(kMcT!u zi5_RQABH%5-`=NRl*}JL<6B=EZ9ZV%yXvjjA}d!)U4DUl-h=6j@vep?&tFaV6$$*+ zg{G{KAzk^GB!IAj6N)gpm zHyr%PqcK4V9km5MPS-ecrJ$(wY*-)bihVw{sNFnfRQt_xJm2ovUFv8bDTs zwNfOPzxYoIZ>WE}mP2?2hup2mY}e@L&)lmzMT7%8b-ZY#HthlPJVn_^hI=m!Ho6#Q zuW63rA*3X6)DJ83$?cZ4r}tT@0NGqXHSzYV+}r=)bVL#wSbQ1^=uze5_us*BuKO`S z`T^E?q?G!utw%scLFV!SXS@L_XJ%WDZtfdul$4a!$0^4myVZPzVP@e@2+x|-m=-w> zmcp3IC&CvYR)`rCU3eIiqrV-?dgPkAR87uw*jUUGJxJC)X($-T1ht-0DQEPPUMdOA zLKm)aSJP2YgaM!b7{BJM@Y9$C4j!PqLjv(a)4TfOR1-~^&Txqu$&Nzqc(XwK37ptS z7W;GXExS{f@Lfj$UkQeR#cQvI0r~58RVA_qEcNG@0YYj){?@K{T(Qw-IhSdT>Sr2y zEKc=LnPQLfxIRb;;A#nf$-EPIxaQx|;OWY{$3s{8)nE&jW7x(%`n+L4GWqep#)J4h za6ziE0@imN=={X+5y#wvNwmILPV4F+m+)HNU5!aiA1J|TMAp8o^Vl9VAc1s@;x*#n ztxpFl;EG&)z8XmY;Rz5_F8-g)pH$Ms5}U_rSAmDa$Zsbq=Zs63>LA8Xq;=L?;il%3 zvNDIsaNwFi8d7#U06EbL^ehT6na>GlMT5??g&3fbHjGaz-X>ltSxLPeifD>QrnG=@ z)bSI~_m@Bw10)5BmCci(OqMZ2vciHAy1zFyfN3`mDMzeeWFG-}g@_Ea`1e&_D>Y%` zdou+iR$*w=Y}BpY&3jB!MBF(+hq;s`8r5E)%$!i;=CI16^0D+w>Q)DDD>W zNL|G)cfRzhwIc1Fo1|}KKLHsw)h+SX_eg5jBH~$Z!No9qZs}K_1D6k3Ss#Z?Oj@xP z52n`_U)<$IPC>&Du9|Hfp({&u$kmD%_vXy%~-)hZk?-I@J2>XNbj5^jLS zJ~@qZK105y!{sm?TebB5%02}aD4o>nmwJ~blM+QpVzhkb-PAmX9X2LjKPUt-9(nhP zNZ-X`FSEOIrLi?@Zh#zaMvOOd5_)c_kNEFS;9LAdqNJ#c_tVnCB)*7e=%(fkPG+hh zmYF6b&&*xKz%>4jId+W3ACG+A%L%o$KWA0vBq4Pqt45>op4kJKFvPPZJSBb^59v#Lyj#=t@LA8yOx{wtgIid1E_Avk=8NaNkbZ_8L_Y>aDSz8xUFOU+1D)CUK2+gu0HBMZVPK3CV)?Q{ItbK~m8_PGK`v1Q zxU1RpR{uFZz#67HzSoF@=>(Y@ReJKnl}qbI2O~w!*q;Xpl@QLm|0GjBF~VMcSNRC1 zS@?S;80dr4voTT=@}RpCXmP`)o*DX;aa8DV)={4Ggeom~LdZNV5Jc@CKGvyk-h_b_ zK5tq%beD;d^j^L4wq~dw@p4dGHGm6^v<+ zH%7s4NzcsOBo_UHn5dBBitw8}wI!VTA3B!|0azc5{-eKNZ*;bAkM)11TW~1qpV8Ha zRx7nNVb~cFRX>+fCq5lN{`S@%_K7VqzPq~3c?!( z^a^=|y1V5UR#PJ0xdDEKm5rXEbux?Qof_g7s^ZoIL>S7(jtXzN;w3MGl`SK_;o(#Y zwYWzw)08h#n?*N!+J4lfj(RmeNz*It%u&<-zkwfzAB^iv!3I~*% zV|P0~J^hD4M1Bns^40F+f3xs0S;Ft$ohkk2W zTE>^XQr;OK>Q!J3$^4-C^qK7iko*+D2}D+`83Kg$q0nj=HJV%$_#jWhqGwkdf^9g4GjB1dB*22T&NpxP`JY(& zZ5^z-%!~GT-kRqB>C}C8Ooy!ohmvH#yU}&a9Ps}1zBtGOJO@zz&o?hmk9)yYzLD?| z>2*vs&2dwgi+s*LL44L$tH$^k8#Ne{BFF)QApbul7OxtNNq)m&p6|Lv^K2T&MtcsS zIKk-AUtS?D`P!Zk#rUDRS1NB*)8z%r?QdncF+C4}&ObUmGUQ-1y!k3NHUNwxKL8|nH{LYnIyj&Vnvmk4F-7w-%t4rGyrZm-b~W6L@BSpoK{C( zJqr3Z%S$jD31$P@ua#JKQ8F_#w|#$1R_^t{P;l)IacG9u`+WKHOC>4fLcky$x452$ zAO?xV$Dg9i)8wm7>$M6{vvT4|#B+fP;wR z!z%P&Pn7JE&A~8~)p;5u~z&{Qbcre~i8P;O=X+ zd^v~|v2g}G{jHPA@CN#T<&9(j)q~`BanBG9JYRP3q8A^-mb?o1JA@1+YN;g}b7tZ= zKbOZkvP5j){R&=8ON)6wivO-ViqGNl`X<=Glit%QW+R3O8pSJJM}g{^+Ns?BIyauY z*1NmbmwDe-T|?LQHCb%_l2bYXkh#Vm`eMY>Xe5eRJp;=)imdxtQ{Fn+ zo2CG!8zh~mkc8do>}1dEc>j;2psGqRl%$-4fRyxwLDjQKF0~O;cs3L_;__sQ5vXCG zx>{05CtkxI@QE1A^k(GP3;LRt^4XmeAMTC3Ej7e6Iyw8yMmmaWm(k%pjzZ2GBpcE; z=4Pr+x)Kfui<$YZlqXJdnOLG++H2!W5L#=SKzZj|pr`s=K(`(i87D6;Ypq+x1Vl zOtzw?G}nJeORIa=+E!S}r3gE=03v68>AI2_XC9twjw40wO*aPhJfk0KMMuhxZ8@ zGvRA7y^sf8FET;OEa|)4xwIE&M!`PA#-^KT9Ze;C9ZV5u>JeKzH8wQl5B*dz2E&R; zVAqOrI6yq|bxXd3t|hQ8A63Mo3>et=!26_|dOC&ci7W&=2{Y7AX4YCpPbZ{F`gq;q zZmTfT(SZ|dfXOal$H0dkZd>}S&L-N}PlHFi_~_ef8LIc*UbPG|2$@{7tf43atvbYV zkT!FHlv`%o-{7}vx(-2EmtT>C22MN)9sV=<85^v$H8GT}>zXLnWzEF~a>4`Shbmst zeiw*;gy*0~tcFeE0n~1K^L6-bLtN77u#t)mfTg+INfRAiHulMJjidwf+^NND*A?dG zhOXd`$D5*K?`H!GfZXl}ZS4K|AW=?WX0ouZy<5&G{9{$)^eJwYj>kW~6 z3cQZJ_Lg_{6E0n-B;*=)c~7#$38T|ezml8R%)R)>!ziXS*(dG$o^!hSe)bSe(zn%y zoEf5rs;L23sw}3mvN&ZUA3d=Li+EBUC?xOny@5ZAt&f?5-aR>&i7ivG?;~Ha0hb zh?2RhpZ>FMRzl7zpLi>SWP3q-9@fv|QlOlzQWfiA?|UOHTBBUkcSrG8R`k8&9}=a3 zXtMVin2JhPmBpvte=c#Th`GGON60&jPmJn7SE}=woSJi%P31!De%<=}>Afr(u98l` zU_|h{sE9iLj};HteyCo< zE3cv=#53;=-cbzNzT-E#w{|M8q1aGdUJba(?Ta@Bnj#=Uuy^KQxStSNAo(_8WcQE9 zf_}-`D8}jay7U9G$dksnP#brND%j4bY7rQyv-7bi`q@&~An2gT9f7f^z~9^6JY3EF zw2SXD<#-6pNDk}NEe6(ArY%v)_R2*yccX`Zi&HA}Q1Uu42ZqPVR#ckARZQarDOy&sk-I#EdBXN?2pUK^M-(to-;|DtV!>V|50h-q!hbJ)F>%Z?@r3@tUJXZY_I^n@x_{^$ zb!N2Gk@bx#R+%1X+UPdfW3yv#vIa5YxoZ=KhKDx*Es^{I6zVWwIT?u?B#;|~goHQJ znO~|Xb4>#FTif*9BN7L7gerRcX;d0$0)i2p6O9-B*>HdCTpd4^HeT(1DJi;C!HamT zH3Nn{7kl3Y+QFkG>HozH1yUV!^BOTVf?gss*YH@H>7jr?Ps~e$y8}*;)syxF4NQ%7 zCAk&pFB!Y1bqtUD>27?f|H?7RZH`?YtnO{pGKZftiFw27$^}OiV~2i=V_;y=ygz-J z`Pk5`e}HSimRQk-r4nvBkLw$6&3BOt*(G@vwR>lG3;#r1#_s;RI2+VnpAdKNaJ@%C zuQq}$OEL9UBe z8$i*7f2VQ9zdV{6;2>qw`Yb#O*kVtXK&g2`iW>YIEtrb~k^Rft zwir`;UGtHVhErdqvW-LMpKEcy`I{bDK(bN@a7omJ=4fGPx57d_{=`@RhzL*=wiu8^ zCnqBd0SdWO;xF}peH-C`+Kd3q;qjqf!)}w;h5_1eA={l;?awfCj@gI{(}Y2KaM%rq z+uGV-y1L0HsMUw)nruys)+Y={F@m%v9=z*Ibp`i*MoG4bhY_z0yV%|@rfBsQ2eha* zaqP6ZT$;pl5YtJFwe#@<@>&x!pX`(PxHK1{Nl#jt)A^B^$*kCli3qB`vxi=5(A(hWn&g;;Hv0A zHLeg)AV*gz*y))}An3<($k94^C8ckoM%U=Zn1S=Yh|1$=vV}%KZ^7b3bO-)|xE}g+ zEUL+NrFhe=DJF?-5T|EVlA9`}2=VxhYt-5n+vNc(0JkupP?YxMe+fYsO90fyLBLr; zp(oYLapNgaegm?zBP&{)`_o-v^P-XRJce$q z%V_eg-K73*dig9`ptvNx~mM1wvhKC6P4+on(bWQt}Tm z@0QOeCmf!FbyY83H!fX^>OFn{QmPLit{{NKKOr&kT%7Ct2XKMoP@4oeU+L_~SZhn> zLEP0jDnN@*h##DX;K(2ldj73!_n(=yr`F8{ZP(T@h|KKqD9M_4IN|zw5sVEC+Q@8 zRnvW{a1gI`os^nI&QP4pwGFrZ32cU49RScK9MFsO|9Y9tM{=J=f*mvjblWg9JBw5tIl2 z#S60%7Q=XKDnirmlp%onRv?^MEnMVvcdBZp0w@e)z|mRlGc|C9WIf&euyeSx64KCc z*E+VxaqEFpx>R#2J^BRec}-<>SyTCOAUf~)d5(!%pTDO~(|~~i4KCep@YDZg@@yAVhw+Zh%uNha+D1!S}2j8FM zwMm1{?xYSwW4%tnhXy9ODs^%Tn=O`sMJrt;?eIr(85gBOUlfyx7D;aeV#k7i_ig3# zWjd*b*(?O7kyXk^9UlJQBH(Kmkd;X_gPt0LS_MBv0Y=U9@02nYkKaH(|NPaxYpuG`k)E>GA5Q`vUBrv zgdipwdQ`+2oOTrL{O1g)>r7XVNwAG{?6fNns*7e?dJ>q1eyXjL3zY24%ttYmOM{ai z2j1%OlAX8HXRR2X`#qZ+`@g~&9>JQG&z6ht@SfH`Y?IW~xwD-8Fyhasl=`M#R055f zYh&2bNAdlDKLMXXHjT>m5JO$;DZ5%dY|AIku}bf6Gmap6ENEJh5Hl@$nD%7H;aXv0rC z6F6oxA^n|Q%auct>cy3QI>57oWk4WOb-ekz>zvHRgd(seh;yK42phKid*|$NYZ;@( zDq8hvefvuB_0>>{ai9>}OOwpuw)6u%D=SNL95gOrK-p_hY~lfg1$+^Ed*KA_`PsJL zm-#*pdIyh}{3&KwPRyNBpd^K?yzcAzK&_Aqx@0^01x8y;0fSo96d?2RKy}DNZv~g z{U#;(BB(Fc%2eK4-FH_#-{?q-OVuCG+jf|pR|DHBI>6eI$z5}LOiXtqEJvA%iK#$H z(0)t+XsYy{$9X}d{q+BoEP*QcFDk_B?c|K?3#4z`xq=JtRe+zV*s`=gW1ks^?wSkU zSAg4&LOPR}k64-F2x1o^XHZG?1(!iSV9zLi*xq-*thJjsSK8&b}ca(EQcMW`|l zTR(683Kv?VMB%){PVYVQU^c~<``Xf`=x>Rd}&MgePw|oRL#Twb|&!k_% zjn%e-9DtOseX^}Vp)-LY2ZD;g&~wa9)Jjmb|H$SViC}=i><4FGV*@`oQFiP_UBr(% z9DTqAw7z z8F>sUTQI4kZLpBc@iCvu<%<|ZQJ6mIhxS_<+c((kC+|25en%`hG;gdJ)_9HTv&xA8 zWcko~MhKlfLk@YTv*n-y%N0$13T{Y>m|WO_8cc22cY9rA{iQ(1@$ybddJSRbP@Boh zl6;dtPSQX~G=IVy8&~ys!JFZ?epo7Z(0s=xiKQ@cKL--P$#m z$krJ$(o>)D6NCyu(f`3bpR6O#e-;^W($hRtRjZcwRm>g7!=GAq;D(YsknxuL*>04; z)a#XcRon}8MBLHzELYD$biUztULN3x8^a~x{W(MR8af;733i7Lj@6RSd|*9_fc<&R zhMEsF1wNpl3y=X?h%^|R%{REr(6GtT$PsNZkV|v9gBFZW7Y&r<+OMUgr^f&n0vaOk zGL}bNWZe~8LS7FJhuF3?jQh{xDtX)_WMb4ENC;!`bxpVh^O~P_eqWlrLv@rGQy12E zn>co_xo3FXZU9w%K}l|wE;v)s-l_f`_2iqV7q0WhkNetJo|X@tJjIYW@&8xWXwyEv z3dqJcF*{&fSt#g1-~{Z8ZgN5dIjZjkeGC=&9RcmeR00c$pNXKNn=^B%ab(PASMU^6 zw35`_lp+TR9kvKpAy}DZH~RJ7Gx77!fFSD!_!jY?reC3KKR7l_&6~0rEmU^-p4X-sy+AnRzuWUznRr;#s3dOEfjwqZ7rgf9H0_O zT)5i!2wN>f6XfN`^kBsI(IE#1?7{=HaL)>%Qxky`A!8>lFAeL8a{Y3WIc(Q$lMs5F zc2=>TutoF_$R%fhql7_7veiTvD$9z84gF?T^^Zt3{iO{L^&0lh3m|pL;M4&dgzhJwZ=h{Waz3 ztzfoOjEuef*|B~Ku_XzgTJdW;k!-_Uk9|7`SDe`0ag%i`{uo_m3<}OXl?SpUzFRlF zgbH2YMLef&MewDHgr_Z8jNq7oBgBOR>fNt6ioR+YPgjNrbkO4sYhB`$t^K&%zkpLZ zv9g9=b0AaLf0;aNxwSMmBggB7fJ!>jz%FB3Du`h$PTPwQLyaM}{(9j;P1A{{70_F@ zPZZ;iF7engY<3Ha)!{F6fVy9@)@f1jMzTHTAJQY_RUIj#Q&j%syLZV>Co1|U)dj{< zU-2nXExb*+uXu`YH-vBwUq)$KFJbB2yMe3(Zh@!me}=Z-e*C(YULj|L)4D3lb+&W? zjSC%n!vE}qT*|L{;aYQAjV`R~FUOP*s)4@S2B?_3T$yi`MzCO%uYXa#iOga1F_D@9 zfDG}Kvh$)YVmFqC*GJt@)4qTYNP1)8k2G5V&c}l_dnckm-OJc&$RtNY85O8gPMhysgx}uJ*7F@<$8B;9&C{rpsrD=3gC38ZPh4=I8`wF?+Z9Z zpNG~AVMtbfq+;T&dtfGODwvZlru{4G-$@%qC4g|M52mcEk29_;$so8E8-3Iu+_!~t z$VdO!ckOLTAm7^?@N&Tk*c1!^{QRJxAnmsa?pbMH#Yoo&!oWYdxiI?JkzaY{?(epz zi?2KHF5!DTcs&M|hukLni7Y4|0y_=Z-}eBMb*+7tp47l}??Y!dlw$GPhltRg2T3pa zZP(9eTp-3|05Qh7cXB9Y#_%n6(ly_>FM2O(Oc@~iU6r!g)&`XnFMwHQ zRR%}{mH_Sg_Z~SDwg01w2Gaz6Mtnst6?FeItxT zh3S<15|178Lw2?)SeJ?I{e>(sJ|1kvY_Oq`Sk)ljH>S=h>ueV>JOv?do7-u=lR|uN z!u6{(pBfAaI;tUp;bWch6ciNlii*X3lWqP?bVnlv*b~+Mgy*KGkWV+RHnuj zT5Z{}A=tcko=>D*bJSOFIxK!y-h6pWB7-jzAc!>NXEW^Pba~%WUm%#(V9k`fTKFb@ zohagQXEh9{0pW+Na586lTaxwkQG%3O4^Zjm+I~%ROeOqZm6?!iQi`c~lM6(|d?cWU zecj!&4xY^Bc-j2gNauQz!3ml2=^b3!tf=3=n;qYkB5Vd!>4bWs#)TImgIab{7(j-RxohC3FlAtoQLv?}-Uyff+ z%vYzX$bFMPR{ZoQ44F=jOuC)t{9cCgdc0*uTyi59UZ8~wB(5jeNBFLssVzkyKUZ5} zD3nBv0Jt2`0IM;ADJ$4I>UE4C^@RI(&dfGPfPK2L$_Y!67R+2uYA_Dpv~Lw)cX?Aw zi$a83G57n!<{y}|IVFkZNw@geL(_dgW=ztni|kBzc$7E$HA`*H!hVxP$et}m7J*B{_~ zD`0@30+xnb7;<^{Y>4m?A!82G@#?eEC(spcXWRwaMxTq1lek#u-6(eu0~P##5+4Ni zX76I1&^Kqjb-S>g@N#x*f^+851;xAkP7{kps`Tr38u!nJ&~N!BhpFIKhkmt@ zAqZI|eSSaDFVeoc-oUGuUFzk)twUk zUc3a%wVP*j?!85Ll629+r;5Y5weUYwSdb?Gk2?hVQ^v|6N7b6JuKrRWpEFYKoH^Vb zi#m)>d7Kd+GWPzK2Xj!s$OotlP=4f zXzWlHJW$tKgm|~sHQz`ZNbG7-Gj0>woCp?d^V#;YL{WU~kYLdCvy7F=veSkWf{EAX z+@$ESwIlUTk^i>_lvTBis%0}#i)`kjS4SWBsj|k4NC9&)r0Y?WJe^M}&r#sJlMpcVDc)-hTP)P#xvPu5s z|IJX4fEIa3G}wOaGCUy~pB@S2c0AeWvbct8+2yU$gu`^0v61vs({WL6mb3%+yevcZ zG=B3~ucG>gO!v}p*P@_^H9e4()~MsflA(=g4Gq1Ka6K;Gps|dKm0>AxdbTL-p@)mx z2a^iacH)g{Ywn^Smc@r|w@k-d=ia&|s4E4jc;Tn>#7IlnSDJBxBbko$!>TIZ>Xg-& zI@0^3C&Y<4DuDNlx(6`QF7VDXC|IbzGQ0#NGO)EGujKCqO(E~;M3H)zcC!c{AK%M_ zg!yQOB7E>HdeA|VhxrQGVXVN-Q3d4@Cq0z;j@zQKvxNCp=}x&u`~mqv+9f`^22~hl z_Q1P4$FH}=tr@c(_F;mqS3+X?CF>c(Kwmk|nf*HDcVXuDlZ(_EI(IRx!iq$j+oBz9 zt$C2}1L)%b95XifPW9sqz}D&O>(dO8`Y%9|2~`L{O1|6&5|No*e?_p{&;~yI1u9?- zVA+&;Yh^L4Ys2C;ZS1ZiK-6((T^8C+j7T=k0)QF~*l^ko?)@B$XP<$?A0?Y@Ws3nU z=%^}GdUc);pswdAX0Hm&`i<_NjZIL#maMc*>+xl`oT)NReH%I~c4ESu3;LZ?BJ}UWeRyi#&;OaPf&-0RVskQi&bQx6_oeTG^KK z(urm%vhU7jOkv%U#FaFNR%BV zRyp^^NVO+_Fdmg<$thSW0JeM^mqh110~<}LQ-tilj*1U2mYhSA@D}7_=c(By0w2qE zcC?EIq4_@K&)yOci?zG<6q~Asi|lvILYQmzIZS;<09M~|6A=?z#91KHBAe==4{UrS zC)YFH{WTGO&il!hOT(ruigpMZnT)!zQ$bw~S(L~l_o9nu40Eq3|UL z?M&c;1Qqh<1fdEXA3hLonqxcs_kxDsEIGH6IzQAuR=Kf)qU#+d)z(Z|JIiK-{Iv;= z!Y4hn`9^hyLo8R=K`M#y<_B;VK;;HL!JZL|xTsRNZMiSn0$*Kjj4-JEvzL9hDjK^_ zpQZ{WOe(xCh#WbU3GT)5$ar%F5q=?3+xj4PQSJYJyftfFqY6UKlzVrzZxa~F3yqF0 z6~lAS`Jql+&T_3&pz8ln_0>^Tb=}w8OLq!N3rYxrgn;Blkd&|~2|-dyQu5M`phycS z64DKl7fGeNTj}oR+Xvx!f8QU^7(NaMoOAZxYtJ>;Tys4Ku8CmBa#2jy3~<%I?g2mQ zA!6n2!&W3~vi=2ugW!XuEU{J^X2CWtL|Z3mq>Zx_%p%ptI=AC`VcaxgyVQ0yhehmQ6j~<4lMjNJHX|JB{#3%h-3t2+gm!E z?H6Bh^1cUvwVKC!(dx#ZrE0cKScJvanSNU_^h}D#n%HmOjj)uHuIRd#&O7qtNFeDZ zIv?kcykM}px54W6JmV6U|xT?UP_4As=BiUl?Vm7|?K?D|LKaeZPi<4{GeP&|B8pVRNGAgEh{( z37$~%{^;5j_+?fYdiNhU3KTc2m_-ouqzrshrw!YPh{cI6*abG!bzuBN_A_f!oqxC5 z>$`;>`Cb4w4yP>!ubo%DkQESmv#{Qkh2eolq&V_f;p3w|E$?%R1U%?4cu<`dgqV?w z<#?+hwTlt$ZCW#}?kQa#pU;Ss@gVCr4tT>DYNKuUq1XB^@=0FS1cjfbzAyM)_n8_B z;ZhF;UPmEgi!EOsRH(MCz2f--Mo}=`yEl~cBcB&ainB4fv@881A}G`gFNuJpFKFl- zck2zFTp%B8%!o+KKbsOnp8c1SGHgJL35tRiyhWiw3Sz!1Wlk>loP1SkSznkwbm*-% zGp;>iA~FbR*yfg*40%x3=^KE>S(XprdKmOj8MWrQ6slVkx0&x(ZT32?BmrDdoYuaC zox@TQm}$|?_bF$<-9-o#Q&Lc15V*mXVG=Uf8x!pCMz~gnyuPXc_;ldiDe|5mOb%oW zxAVz2qZa$pY)D7M>DP|8IfC2RW$axvo2aKB8Nxr@q-)*nttR|Wx6nAAq2EF}cKaH` zI1g^c9+ym_VK7CRM;#%!WWNHu#aWQo8HW*3Jt#jn8EW?<<-~fV23oR-xrvX3R4VCq z^PMmzq{2eqKV^W9NvYJ|uQg!0MdnGMuV%67vx;#GP!NzhS!aZvQUhNNkAg%oJ@^`* z%u>!EJ@nOqM9{q{5d9*b_^5vXRDDoL0{H|+1ih$D!(H+@ncvALnvLG}RgDv_CMTm6DZ0qpEbnTDrP z1re&!>k1{|;+}S}e#Mhc7PK^)n!5k8!^>;yHi&8a{Ri76L!eSuB#?6iII9GJhP!B={=_Xm&Yp{=aN5Y^CTe!;0i5*rtjLM*Nlx@>i2w_2!99c=_93uogpz%{BmcB2c-a;xKwe z?`c9B1hSF1*@k?lQm}t2kB$$1&YQp1AhpR#Pqz~835-p`=%V7Qz`?8r4=RTDPnZ{% zYHg5&r0<|;l7>xOxwxp!mBS>X1OhJeOWi1>S~bs&8`Aa!xq3j0F?s!eN{JMiF&s~9 zCTAV`MqkYDnH?I?4k7>#Hra$S0~W-Tr30SUlNON7btHW#+!Gm#-`>uJ&gh0se2rK7 zS-Do~$`heSoB%0^LY&e?GIR$A%%_2Ld;+&4xn2hT`D5^Zvt2vDvVXB2Vz6_|`l|t} zfMZhc)gFJg)OXsW%%P*u1pC898C)CU4hknm_Z1ete1lhpG4)NBRDkuJ)6yZ#aHqBwz1^DIy^e;Fm#Wn~D{ChePj8J;9Kv4W^ z4kb}1+gqF9<}kXfXGKBVN71O~{p@sLoQ_+4)Aw>Uu+6xz47V~ci-{GPJVP5Klpanz2T&9@s zPQh1DRD3OauiPLx>-r`CVG9w2pA)VUDXOEMEfjrn{zH#92*Kr}O0^L?xCU1b(=e#c z=apF`+G0(A-GL0&|HT=^+W$$E#{D`r0HY*+>w;0*Xl4`c>LMlFZ0E^pR2 z3~8&`#@ixcksG|Xlq0`bjEozvFZmCTFA1DpSrjOzi&Cb~a&Ti=X zhw;9vY+JLnh{dV^cno?fN5H*$QBG?FR4EEL_0``y&)6x}ouMdxB8#d(tB(zVWOdkW z5d)y`*+U2&JFr7lZ|A++G9;i8Xf-|gS2U8$4?TEdu5bl3kU*SU$`~hf`y`26l3W5@ zm+CYM53 z;{H*5M*W7K1$MN1m#neX6W#swsX=h}w0Y{3fK+^J^ChC62mkzABjFYh*rN#>^09|2 zm?V7@8!Vr?ncZTJS9Tv_3`a`LH;%VDD!z)8$ip(>rtmC{lUc5MR^|E{1@X$R=W@;2 zI4f4f)R+lPz;?|tg2UXO%{k{|=C?W^g+t^*d*malicFgF=WXFPp#aEQeMG7$v-9#6 z?6RML&H-i?7MU#?%;^e^S;e5d$&{gD7+guXDOp(+-_KCZ=C9{ zsLFZ*bRzl9bNB%zh$+~&L2I=yxEl|dap(StbKD@+goMzqYTgDgv|>8HiN20X=V~{H z#yilf!6+m&s6e>-71Uz@>EP|>#|(xe6w?*j;r++g3+`f3KkomkEcWz8#6W6c)!$t! z4C?;!s+5J7zKJ2GOE1OF&~r{*y4P#YrUS26trVZpBPO%WAGxm9huD9kVRChluHPvt z>waqitw3ktg<#Cg5@+$E$9JT*D zW;_3#Kexe1$r<_Du|5?T-OA^3)Hi2LG75j;IWih;vJ*3t{==5YBZNRjvlst$gudGa(*&$iv|(+aPc>r`0bQPr^5b{q}BqBtJe ztSGFk>E?6)%N$;PJO@15P;QYc{F0UvbCgT>m?)(;pT~G|AuUk`AjijdgZ?hr2u|&( z5Xj2aP<08t$!id9r_?^QXW(+VLvwARL;tYod%Hl>%*7Qb&aHczlvaxs#@=}cp7olvmi%XTDnPK zu)-nNn(DmPv@YxctZ$neTZ7>c-;CnMe1nN6_XzMo!9X__Ona}}uoHAwT&!1(JSLMO+tWRJ+SA`krxF!RpO!k zEd^pasj&*#_AeqJ)O`z>Azl@3h2%Qr!OYsD=-H_-TGC5{k|gq?iZ1 z8&?gV`VJN@Mw5Zb;n>6Bngjcl#~30{Hqv;!c-&Vxx5h46vLvK*5-5m*iqoeEtqBOv z*cry);a94l!XAJ}bUdCain|8<)4yKUSNe*d^&}sKfZri`#~4b*@YJ9+13X&!Bh>yk zvl;pxlF=z1KR8CKpPAOjji1$5qHC;hdpKj!H>wn&Jm|5uK+a0UZy5S0a8X=P7QT?p7h(l<%;ua!$) zRiqSu4xw4Tw-Rkha+`R_pAEEJb7Mke}`chkdt0Q2RWa+{s zv|VE#HuPTa?C1?}@!dfuPf)1$$EhRh@O=Sv`Qe4Oux`GyQ1aqF>aSbhw3dH45h&$I z6uF>yH6-{m`07Eqsfa-Q?F&@o>pQb zPF*4wr0!9CH&liPGYLZ!uIA{x@86FYCV#)JtDt#h&hHtxFI;g!#)m^pEUTapSdI0& z3*bQus!k?Vgz&tkr=_fyg}d}jUTycQQgfZtDVwJ&PFS+1Tin{Tc5%WYP}&7vjhrOL z5>UI0G$lBvBQ@?dS@D>_(Zb_D*=DtU^MCT>KRal!o_AUoT>j!xE&Q^X;(m?#`Ei`1;e?_*fD@};J1S^fpj6KnLP;a z;{j&^%Jt)gy#vy9U$2hDS7R#c;}%t>e7I%Iawva20!r}C$vDtC9%s4(=Agt2+x<2u z`=?;S+s{Tudca|lrC(O*_P#dMZHX?vTP=KJYqm+ZTX_~L+f>_r5x$%#FPa4mq(tBT z!2!+`27iG#^AwY78qv=ttvuv=i)XOW^^W`8Nc8yF<1iF=VK^2gMp6A>{40Z|(!Q0a zE4UP4%0EZEu%xYeM&c&wjxa0MC!QEtWosey-vQl z2H4*Hy@S2x`&I(}}(uaw!XXIe1y z7vra`_e7?xTZ}txuT4y>oD@nyXg?I^4tW@}si-gf^m-CVK1q)H7)p|r!#)M&Tj*vJ zu8s=OS4^})g$)`f{-v+8IJ2@lKlv(D;#TLQebG~{&3~(vGW|cgqatE%b)5m*PM3c? zN5X$J?n{+_3a5xqg-Ccg)&Ej#-!B5vd)qD)ZEiSrSwzQ`U)s^O6P{^la@ck|@4LJs zSX38lv=N~rqzDTg)bzxv7c1GJ5qPy)`9R3|^j6MEDz}A6Kam~HSg5AO8r$c`D_%uR zTf2V?b*8%A&q7-KCM5UV-oIL?wCy++)8!Se?(%}_QdX45ZwTbcpX9BcpwqwmFuk2{ z?`n6-wbSa;T({Krh4Lo=qp$WF>O6*7^Hg*oWl4GeSL z^R@>^?XQ+1=w4Hmh!bq~dg-8Nco$U9yAS5VCj!JlqKhiZ8MMjxwBR;*P^LHd4- zY>ndkkl+uTsm@Z=B|$k*h>LMqqb=ZBZV`&O66Bm(`9iaU0gm~N(TR|n#66jS=97Qo z;;4rc-E`;Vz%J%lAZjXTB64!G=EawhDm}HBVr0zYZ>;0QrGV4&V;%eWgo}+0#hjmP zJ#%m4)43yZqTZXU zGZB;O$cf2`M=NYA_k2A45%cJ~xzB)wS4D6@<5?Jumgq+XT&kWc36F-Vc~1VBzydW* zbfs8+>oPiesKXgW2iI{wx!ay7!M(J&TCo=SDIcK8b#9Ndqnfk2uMdLaV{H*9B)t`x z+_8Hx3r|Hf7&s$m<2L2eW#J+m4lKz#NiS5@nPZ-zUPf%ZJ_G?OXc&ulm7=hrH zh-m79RN1uf>X0jk)lmxXpS9*K1Y`cwsF4IH`(W)oDeBny5w~u;xm9MT7TSbaPloYA z*~L2+U&6_SlNPy#8_Xb~Ex>L~{tA#57oQP{rPTGU!-k%){zLl%Etj0bX@+Aljt5ha zWLD9GUd##gSQQ0YYwYf669s}#$K`=)beQa~HnC5`u-oBmc>Bm9qQ*z+?ABYm7X`by zmvW#hCf^V^^e+#2N^XkKe|z>{188ENqFT^=F~VDH3^lmY7zo+$@RyQnmiG@cv|17fX zGh2_PZ*&;H$w>e3-Kr0jb@?V3y`*{$VfPgWGwaVy>EZ_i%eU{0nE#Sbt)B`YB#`jm^0r!{IR-Z*bv*-O*Tz2|xGsA|1+wTFf4 zvO7h8uTFQs>g}AZC`sW})`uT(`K9ONe++)yxf(k-minu6UnUJM`<}kRf8u>z7r6xE z{He9FL^M-m#<9tn2EWkWz5cnZx?~=7TVWd_D3OjrHa{i1c8z^Wd6a~clk@!MbfQSV zZ0U@j3VM9mZ{}(#Z>WNeC)ZI$DT&U`r(I7&GwxO4i4i?aa96t@t!Cis6hW#3a91`n4tr`!6hF`rySyQ>|*3AG_|r zc2dagSg4+Q6ExX8gWvvWUvaabH(}!iZEJ&1x8S3bE%%js!FCsz2znX1xe+Dt?p8!L*=^>W$OY&^?~%03KuB?c8+o z-uuJxnkDYy<#g9|YC<{`_jp@AXx593;G$!SF0D~j1=+<9`%L&5Zmca@RG*FAA?ur~ z?jI`?VqCidW`(0%_r{EPJvd(EfCi#60lW?a!002GVO8SaS>Ofje`=76fp0?$e;Yys zjWM^?TGifkEjB~M-Q&}vwHe@>`T@d7;4^A?phn#Ml8D}$xRF0GKPy=b;B?MCdxyk+ z%&OD)B};9T@2^VIAkxL|y{-K1jlU6E{LifpLRryQV7s6T=Bdm;`7#|u{?$690 zl>OZe-KEPauy6Z>zi(o63u~_udx#@)gXxEY;PYf8H{M0IB2Wx${=cQ9{DQKTG2Cd* zuHH+><6LlLJG^3(AdzZOb)ndzSVSmRO=uYXYQH(@ z(|Fzt!P$<_fBW5rVve9>%+01|$-05Y{lV`|IRBFPGY1_w?>S! z-!biF<2UJ+7T5>+_^Boa_wzH*j)x&&;+5nqGrtQjQag$~rzM;y%T`K8O!?pe5{)U` zSh&EyLYaS}F0y8OqTsE;!rHum1DB)v0Y#$sJ5S? zP(C%cTKN#%nlNqeR%+jTE=o5ta>Wu2MYnTWFdc%;#E`H*bvw1anZeQan1Y&PofUJL zghY^9n)>?IfQrP4H!zqwe-IY9M~FpJ_KEhkvG@1l007QniS_^*g&-K)WDB4LRK+X) z*V}dDF=Nuri`x9A2a9t%aEPLT^j%5{QxB+$l(5?!N-%;=9LGp+=+feQVFp9h^|Svn z9vm>SlV0totN;NZKT?sLWd)CjAAS}A&71r<=B9j@$Ntl${kl75_<0%ge|B~WuEk=d zR)vhJyLQ#LfBN!V8VnC^wwE*>F8W@mwBZOPLR_RIG3f0H zVjN}_z$~qR_gu7olhI8O6yt?OO{%c*;*U6KxI-4^!-L^rIiH-R)Rb{ZJ;!eU49>X< z&V`Rn!H?@$hFHI_5Xl>q(MN_?P*~LRJ^o zHk?gcZ!wDTRaXGdlL@iX@|c)?%rS72TQoA$`R_k)Fga3-7dg=};VEUG41*xkc{g(C zm8EEPg-0xz&2T&+Mk}3;+Wp-Z%yu+vs(Uk3dDeYrcYcn0*tAH2FtzueCC36M8ygN7 z*e$?5LjyqU8Dz}u*DTi*^$4%*=2b$GrkVbukG27<{d6bBb)p?EDw+SgDFqfSH_wpS zPq1DYN2>px`>&gTWo0vD58u``NiekEc6-oWWm@uM#>Iw6Ajrm>x^_tASp*Zo0)Q zA7;9XQ4K*<;U7LQ(db_oew_cxPauCR`d}ae)~yDwHU1tspuTxy+|HG^14_3b!;Hx- zcsmvzPExtK^UUs5S8)@yVjDdBH;xTb9`K1*JM#&0zs2+p?=Akzpr6LU2W+FZ$5K@I zx)waB9zDN(qFH@iSmk#^LRM2HlURea3B)?XK)}HLS?P1Tj|W^>jE6Qd75++zSJ9o5 z0L)Sb9X~xq+~5lSjxb1+-FUV|THeNHm92|cQnl~}zwlZcG^pR@u>T3l7xGtF z_4mdb=4X7UN|acCIy{DLP*^UNtURij5Pg5v#(Y|FU^3#k>?YX62NQUF75e=ampy}7 z6aTDS6rQ(368tK7H9Z?+pRGeD>PIEd@hP(yOuPh~9|n;xPVq#5BXE2!#F7mJBy<;j zKgFw|D~~395zEaO%Qga&5rss8ls{qUwoZ|xLt~ArlKTJO+F)| zBRtL^Gq%0X7)f~ovPuK}@BeyRA<7^Ld4zEc67h`1&?H#+vxOgedeH~Vi778_Mr5Iy z05j|3j-F1NB&3>t&d`RSmF=0I6U(2$N(8FA$6sq=5MMc-nBb9?kTd&QdQ<;mk<9!7 zYhPAK=n({D(7+@hwLD#qVBi>b!G}X{ozCFtr}vzm{Weo^d=1E zpx5Yp6NtVpwEorZA~>*BE6 za7>MoHvtSB8J+wVbc-0potU0wUGFSrr$*J+K;m zYp%V!uT1DF*xEZ#n(+YmJQ~pL{PS75{4C%W{ceYxvV4EI!XVRWN&i;Z7)d-DJA61S z0yxFtYkgBL3A9nc4%Kgb8}+>G1j_@xeq1H*BFVko-a9r3@Ga?i|7N)8>$yL_K6ZV7 z!nziNBfD4(zg{}#frYLuS(WOrXiD@@F(tF7wic`eC5HX-pvP^^lIC3?4>)(+NkJxi zz^7-AfF+7q)SZQi8VN&D34F!UV@*MI%=(Gi@Q@lrtwf2_ZsjclY@E;A^x``7UQUL| znc@}bGYqso%le;KoHat#{G@*9ck-jYMO(TcCa12eywd5bD%Uhr(&8k(^%6<``fLOC zS{WpDc`q)@40KRuvmtf(_dJv)rnG9X+T*%7Z6MJb=hrF?eHN4iASZ=W=a^R$8!=hKYj2EO29DQ>l{G)sC7Mzp9gbk zFF~eo^~uYw$~yDs7Cc&mBUPW=*+e*-Au+-WeBhom1<4$;XijOk&J8^tqY@CXtw}WY z=!EVxsQ7k8Gm-Zjp(_<|o9WgocdwY$%%UG7m;b@~@Gxh>nJ08bS(4@>&Dq=|erGhN zj#~oXgZ{fj|K&^5zfh6#6{M;i2U{GxCR+E7iVeBaw#f3G_bmKxVjsK>L6%7Z71;Fw z{`Ti$EMS5#EMCN+UH`9Lg`59>2n@XY4J1byY4LK*8_g%qtd53oFbk3>zOXU8_9?u1^3t`)`$%8 z`!AG$(wgn+=ry36a-9F-$HLTODL`oB3>?EHK|NWVU~cbByJ-QO942=^0AAdfRwnjk#N z{o*3UG#t_{`KhS%zpK-6@9d%je^=2Ac5!p|JS2OhtKqd)sRo(JM3?QmwiTrEsG@=- z2xx&A2a;z1Fie$4$Mvj@k*9E0UyG@}`413bSpEHQ&U;O!XLavSw~#W}>gGOE2Uqi>yM7r3G1e%BD@q&#WzF0B4 zndZEP0xk)2n)iZ{ZLbAjvS{xh;fjC<0L>b$w3n)1bJ}GfD(%cgs>!HuZh#~sb%=3b z&fKm4Iip$9fj|{)o<}!=&@@+KaYkpk2s<%k$Y7d%d2ANf#KljDNr)(wyvEotyyGlR_ z3d5osa>NET)zULD#+{@88GDuusr|=e6H@XK{p&#UJ#iQn_0<%I1WSn`De)wi8&sG) zkCK*=ZV@22!4nH2CMHpvDs^E>3o~x;FISX?zFvWC6q${Cw3O|f-ry+Ks>qzyyu zudNY3Ko!&F+a*%v73JhdZ!25Y-R}(oLBFu*MT`g4O<=ma^cbI7c$uEcfQ?)Q;&s?FsKJ3og`Q1%Xjyd z793c<8kJUT)M69Vzr`080Y@aBY%pIRyX4IflbQbE_5v8v_UVG4>W4#p0(1w?T4sV2 zN~?6)3wr{jjAx*vvVQoxFM56Zi3ah@y506<<6Wqz$7xb{y*zDh=A;R)^DmU?ytJ{= zSw|SGwbTvX*>h>C8saG(y`5*jqVZ7cxZ1EK<0U9E$G0om?(q+vD`n61iR)#^s#;h# zERkJ1ko~>)tJ6{S?pGE^VIVI9)|pN%PlHvI%WRZn^lVmsvL6@yWd~*XPfLc*Eq)K0 zW=1c(OKGl0(Lpj;rAy}-%WcZReIg8uQd`A56ok&-NXAYDyNIKZOM? zy%?lLhLo^`Gy=UdVfR=7j^5^$ZjwM<52&w=Sz0uW zSmtbcq8(h?7pUI6SEJ|dLgy~NMaD9l5EdcuA6a}S4q4wID7Xg_YZ&vXw+v6q-55r6 zOn$xIZ{a{$qBN|>d`Mch)h^PNszh@_&`ijD&wlx8BzM%A8*l}=YuFk+fuD16U?^gBvO(w1ByyZ}ZGnUgb{}yJSRE+E{Kwnr`Akit`|B4jBtp9)x zY>2|_*gY|Io7t>J>bNVG9kLxYR*uswuSSaCq!W^nc#*i<`r|uSA2<0C#6gM3061&N zIzQr;#wsvTS^e6%SglM0;o_j+Do&obdW6yEK!gAY_5O*@SoSSr#3)+Tx)M*udkdMP z-I}^(s#8zkR!k7RD}5tDC=Z*$eS#VA_K6FacRMIl*gEnsG)nDt5G{{azZ3 z3?) z(XcMIbVf4xjSy~GMGmuDOmb_b>rV=elJ4R~Uei#l^TN!|v~372xkX}V zb2@PMosy6up6J00c;WITrh9xsZ`XB>tM#QfKyE~ghIV$gS6t3=Ioo=<%2}7!X| z;p9zYD?}lOp^OYJxo2G%)eUV%CK!uBRhSiNfKV+4Zjd|<3rv#i$8DVJACZziHwm(? zQ~RJX8KA6w)sy}y;BF>{U!5(@^7LY3yQZfqH=cv|n3I#Sy0{AdTeLir9I#pZz7@aq zE>?Nx$=p5G^k18Bog2DnaK*DNm1thOTW%Id9HXNv*D%g_sU-!ApP8%obWm9@WQy%C zG#|}(&N9t*#QA&+uY0P~5qcxZ7X`ve*r!i77r>C2zcmbk-Xqd<}=?j1LR@kOO( zLVtN)8lv6+5elS;++bLP<@{6Od5}bxSWe{F#-2|WUB~q}Jahv7zjBSra&ibTnOs^K z29182SJDOc*g$b-C)}DK=GyE(D1d6UI2s#u&gU-vO6XyYebIt8xT^@SDLqV4kcD zQyMf3#2LfW)3Z`AI!Xr5)P}?2o$l4))5S-lhl*>*SY=KdoNj8slsc;F*PNMltdR~e zc|AZoDzK(fR#okM%3Lanna)#{&fU|dt_*$d(a}@4qVfZQqa5So1=(ZUQ%fA-E%z?f z8oDv(od%8{>c{YpVP6p(@Si{9N8t(UDEsaCE)7B{u^W>SC}>!8V&YoS1dvpKe&vO{ zL=qvZsh^lb;-HA9D z7q)SS#+tRX*z~s%cbs{H>AL&ojfUS>u3NK>X$C~#?8M=GQ()ULg5O7O=dLb!#vQgb zwVidhrRCtzJgs(i#}FSX`Q;505r5E9E040o0e8_ai^24qoJOE-J8mSVR)8Dc(2WE< zG@X5K`Xk5VjV2!RMi&?B6mnrvCPLM2eIg&{&kQBAlSA%HUfrcZJ6Ao03FSaejulT4 zYa4=G*SG5aCcobk``&}&48!8Y=c>}ol~FrQ8mKYmy##e%Wje?5+rL2=+czM5K0K`P ziv(Qc^W#+&&<2^8M6p#Ii2X86!lqih0gVmLX$a6b!lHUV0CLpF(1lt(;OxhUU zN8f=*?ldryA@TzoS$1oU9IlnplHO}`qKZfTrgbyNw{pB#$% zM3h+-*-PbSdbrg2;-~e8GPilvNk019hmZqtRYt`wV?AQ zqPt}+wNK*;sEUAiIC!WvvUR}Kn^_^lraA@Y<@U2X)6Qz91{;XVwz{HT{zk3bI!Wv} zBX%1+CX->k$s>n)=IAz_=mB>V_b?RHVgLNG&ob4n;KO7^DGp982NkzA1>r!6xhCk( z3@~^Dy;`q$Je9>AA0#~G4VuP~QA84E_VE{bGDZ&+n1jW3$8ucM7BDDx7&{fg(@2rv zEPp_+{_NSfl{oc&=1JJmDXE|D=(Ud_4li1NCRrKYwZc)D`$)VUTzgjHinCJwzW3dY zcRN3)4~7DbtblLYn7jTjO$0l5;apq-&^HaLPoJV60XMn&dg=dOxC!C0yu3WS=~q$< z+wJu(o|O2Os=pOFX1q7Z|B{#UWM+`%_;|wdOZueUx2k>5D%>}5@c<7$6t6v9r3bE# zu2^la&j2T_`uH)Eb>M648**&GC28393>2xQL0vR7vMBwz*~eI3oZA}jq@jT)hoovH zZ?5fvW|nKZz=DqK8YVP|C5Z}F42!z29svfTExndOESig+;c6?zn>nKPwR@_aUo*~b)Axl@n~T!0W(?8(0j&@ z@ws%?`ov3cj!F%tss*S=r0gt{LU#=H`vJcwnj$P(wEZDIym3;1i19pOk*N)0quCP1>3>-&Jm&?kbqJ zl30Du(>;Fws^E#`T^5xD&j9X_*Qk$AOlMa>HoVoI%`OVs5!h3^_?4SK(UHWH(+fa0 z!;LD!uO7mGzAA`4EFQmgPgg~ry?ne{=j`+(-}T7enP&uA@JTSF-Mt4$*TO%2`uj{o z=qi{`Hc6%$7yiAxT;pjPzg2+A--BoIL$N`A(?M1Rk2om$3wl@q!E!xwcbUkJt!V^zPNzFT^azzAeU}GT% z(>HJEV!?lU{m9F^qy9oIRGJds=f)&CTn3+_^Og{TDD|#xQr0$M(Mg||v7M#F=wa?y zTp>a8Yi%+GwwEUxoTu+*Z6^N^*_LG!b~lE+Mj~lP(IR+M8PQnB614#Ol4y(kVSdHp z*oRH;2)G_q-LV)Ig5Egk9;|rBtV;bYYFysv@2jj6bx)LnCo~w$0ky@x}M`P;Pl;087Qsy%VPyJ{mNY#`OYN*?tT zBcfk;-(mP^&qJe!2o#P7+zAW2St&d(|4m1N*kIXT?!m&f8tXq>Rr^yFd>?XZDsyR8 z*G8{-VQn}0*SqVNyukf}qPzoisB;tRM%n6F4Cemwp`nQ@+rfPLJzIHtyCeez6JQ5VH1Dxm+OaH>@Fx%Z4np_OHr<;mnc zQq@=YxTeNCa|*X?#vz&idym=Q!;akD>1iE9rrYMCfmE$Ufd|U)8ze~>-f!SPS6?1c z4cuK0$Www>-<$-JQA=#>{YYL}L1w_pMO!~t`tvdNX+IU0maai*!0c#x&cj4;I-`z+ zPti>U=wvr2j31)rZ(=WxJ=?0ft<{q(OWfPvzj-u5&j{`Aa`N=tTvZ%RK(HcMxbLDP*B?NaK(K!E@YLs%J;C5C25RrP_>3y#qX8@xv~8*D=n9N zy(OQ3U@2$=&QkGOpR{9~A!eAVJqLceRbx=D;zHGRPOVC(EB6sbO){fK@oA^P=K zkUWYO1K1DvfvBtyuc!V1mkdu1HHT4ojSV%4mZ%~~*aD!=O8y|jc9?j09%18az4&eq(_)(okBXW*M62|K7EqLt zaa1HSGE#y6&+IIdsHoQEOEM)1B)tVmm0F=;$n(0#?+MPM*FnFG=+3n6=n|VU+H^3S zPPWpk?n-i3!Zl2T^h4zv2FdY4uPIO0PQ{F`gd2bFJMv%rfN{TVyk&y6@l)NJz`#1YAH%? zCs-+$L=8~^!%T~$X%A%#FmE1=Dd*~H|Lk|Zk_hu(uql~7;~g_~Ps9u{dS5siuCYu~ zMh4!}KyK7L(Es{{J&)i9moiyr5Hj{C*?IK`_D?C5sn6W#BxMEIx!bhf2Yy^?!6tsJ zRIY&`F~nC^RW5Hr16wx#$p12CF_k_HT>&}(x<4wh1R_{eO}&U~!s& z;pAu2?3^5d<@PAhP^;G(&7lp(Vf)doEB#Y^p zztnzyK&OTcNVh$UA3G6FICZ7B7vxvx$4YMU#_Np`^$}6CN09$~dOlNBhGlZzau2iu z5|pafHFKKK;5r5&3+LoQmvya>%)+8Z_7H1xL=cCPNYPMzKA=Ya8^mrW64hHGz?cS# zUt+%Bl*y38*vm4U_jDy(P0{JEZogIug}!MO&PC+7^8I~qFjV`MYXl1ncGv^N%1Kz> z(LE-n``!qd~hYz`K|Q(InkR*MN46J!D2UCmOkO& zJX)%9-t%2woxl05ng7))--TiqWyjHY^;(}z>m zWXd%)m>P4H&$M>uAsmspKDQy+H*%G_{R2AvObUdZ$=1=~p$d|`=^ex@VJujju)7{` z0wSEj_t@U&8 zIC5puveM-K7!fzb${45#OlZA9g0`hX1&z?0q=u#3C5XhCX7$DhR)V~FYGI8Df*Nzq z5aWG9+X-jIh!^9ZX~$0ad}P!nWy94&$hRSZGSG(sp-AT3xOf zT}+n1M5vk1ih!efMsgAD>vgg4)XN1)-B|XK8`fGV&N^SB6jVzClLe~l$YP9)+5$L1~2y4&Z zhPP9`0DsPo!Kt(cSFA6v&s3!CXlr`_^g-v(cOS75%IB`smD?}j&i;M!m)4dB_|X%j zLbmfHU_#RktaZzZe5*jadk(w*-$8QrSGQ|6=n%BQ_&Ua};JVwd;fS^EPW6<^qlssF? zg(a1U@9%AZpm0Q~{m==MX3@QJ_P4p~ot>&r(I*B%8b_rzMG4UDCWj(~E123(nFe2Y_3SH&BO zA-8dG7VejVI?1DA(Cilwe1779j5{w*KF2$zO5B|Yt$45DY&c4F&q8=wHW&4)(X798 z^_XD%7I$q>*O${dqTB!GIj%<|!*F@PWSXyjF~t7>=N(axM60qJq&01WoFzriR4A4+ zB$c;UUi}ElZnWeG8@!3j;O2fJGwf95dksJ9)w-Fa~<`!iegT zWmg>nCaAkf2HsO0xLO6Rh{Rn->m6r3GjaF(%$L|Od98220fCu3;{Q(VWLMGuRp8UJ z7#R%(C^S-&L$VP)y)V*c_Vxl`SRLE>hD5++SUU6WlIwX*Af!Hqe3IPL-<%bTme^OO z`I6~3p;mCEUcKPT;3s{PKEjWJ=7sasBhP%6*Usx-8yHrObI`GiCv`Sf(FMFmnd8P+ zIi40Gf2+JNq#mF;0I06I?*e=)LQg7??cKb}5|qnOM#JRs!)9(8$XC8*3fnDuoAjhY zc|XKzh=7nK9eWe4mrka-Muz$Z$-%Pb{+7tEvVk6gtZIi)O6Xsq;g-oG_G!j*UWyE5 zs(cp4)Z;_f0q=NYQUxO?Ud^*Q{M!Dq(TJRfz2n=iw87{DxF$& zAeOx%x&9uMGq)-wEwEA6H|qXww|w9>zNtnUy4L!$iVOR45OM`%P zhal3mbcdt@5+a?V0+LIEQqqEiBBi28DAIxm(kd+tA{~Ns{LdwPzrUIP%+9=wGw|-c z=iYOk=Tpz*_Va2*u}AENrom{E1JV=NtWqwguI>_GOcQv>NJ)_l442Q|0!LgNSiGR1 zU@0-R^L?D4I7qz(67-AHTFjX&jWsGxci(!V*P;^?2XrUXmI*X1N2~6a1~thSFZnN40y^?lZ{x{Sc}6w?-!12v-@i3<2Gy>6G!R8nWjqWjc69C$Yc=YOU&;+eH}Z?$ zYj66EZTGAiOOWw@4kM9B{Ha@)oFAjJ$#jQXKj}pV$h^CnjeuvN8>`$if;fx!akwTw zRHEj%wQEJOGF6f%OP38(^vo3=ez0$9PhL(mWONRj(&IR>7L zz({|%_xHzrXKrEwxT6LUu*yR9%Qm%oQ@>^?VE_iG*zsJjNo6p9cgTyKmi4T|>gd~<(tN|8RI zffX9(LW`IJ1!3<$uRS%l^yN@isZVV;?&D^sMl0U?qRb!pDvi*S2^Pqfv+4Ep2KwN> zvV$mi>-k_q_pL;OEh$1cgb8}eD>7QfpcZQN&#b*G2p`v72I4eWdfhr{tP5XDN-m+; z#@E1GE7&TZL-|Ns3#9bgYgmS+nDu@ z^12`Q!$jZca=!h7xlSAEje^@Uy6`c^Fb7Psj8&Pe5l8nxPJt<^oq~b)La$tr1pJJhd z(vmHQeIv8S>kNAMdj&em>{U^osdQ48%f4XVA>@d9soKy0sF))#1LYkERKqaU{&Dga zvBARbTm2>8r3mIw_;(InY;;v-2hTrCH<3#<)L&7Or|`m)#ko{X3|QX8d4(k4V}ICg z3NB_~+$O=oAJnrRf|*{F>Q6uqM@%%PnIzNMzq5GeY4>mUD8Fau|HACI;bBbvJ%MaY zeO0YzVApL3JvnU6mHy9R`;-I(m)tkeeh7>0;`}3p>wS75JZ1R?FN5)?E@@L<<^ae5 zvfrV~a~Y5675b28;>y4i;2M zald=~3c*`E!uqYfj;QFTeh4t?h=FPWw0;to;5YP6 zeMG^S62Af(r}GzQEfeS|Gtu9P6!(Jw4w{7nP*Fix*iDa>)EoNT;Ay-QV;8#sK~?AI z)l!dCd+u$-$uUN-*s)Qa2j)#aY(J-&`v{QMJ zv@GwS$AQ_N{5f^)!B#RhaMKdd-%aj< z3``I{t2Hl_3GNV%A{_6ARi>t(j$Sqliq6wmge5A&e?uwh9XcdDSg-+ z3VS8?luF0%ye#%4o^jT!EGA~}(w5JzV-a}nrNI^SWn*9Ws0-`$gIpZY+?0Q8nAHs# ztrravij}#=TZlS ze|w>f^9Cu|LJ5wB`g2V4a}1cUrl_KFPT7&d?CQ0O2OKQ4+g?6_5N$pLd1-<8%>&nO zJsvMg;=3@{DHWxdVTnjxle^$izy^JzCF$k)6A;9Vh0#=oaV|h}tOk1ndz>q7zU@-Y z_q)hhhAKvxx23m(c9`EsL-~2i*-XO&nhis@qQ$|0#OLo{XGa;;%YAnWOA}pI8(A#$ zK{rJsR-`%0(=AiBp@OfU!*&Q()={#0pqrs=vHhA8uR>BX)*Cy5M>)54!AEc0QbC!H zRDCHxY_Y^UKP6MV{@8!}$*)#cOCz|jurLrXrB zvQI`rB%+EkhL;ZgF!`(a>CMF%#ql>j&dq-u&5*{O@$_m97C6!Ll13u0IFAGN+X=pF zgJ;DUr`2uG@f;>>E!1me|M`-Z-EG(wm%q|<_Ts|>Xe7tMgDrUL78=;<)E{tk>@Odd zST16>`oCZcK{v&PVBB08Sh*|q+#=I}Te6|5IY2ljT*wpTZBdK4=jd01(ByEC7bdp+ z`xACS&(aXUyDs8u693AkpL}84LvEy{tbI}qGjfd{zZYHY{^SFfP(sdz~-7-GC{tK>FpIE?ce*lJ-EdnnucOWe9+a?aA1RZ(g{Ijrw7K_ zv#{n1$8)OsL#qnTiyMu@tFlpi=)Kfb`Mk1$bN%LHo?i@I}1aA9FVv0&$ed1weBxIk<~ukAySfDI9RboNtR z;Ga5iIdALeA4@7dtGP(^SvZ@Un-yk3=3ZVD@&wOcT9Pgs{3Tg99`UiwmCiP7o&S(g zgVeWu-L#JVS>Q=5w^NUi05m~Pt8)+tx}51@qnGredS-#`^~aazKn`aO6$G&6XD?nT z{wcBpiKBi*sHryXW)kmB5i zd&mU=rv%9N!@JG930)l~Oof=j`SL2*DIA@go|Fz!;>D>o&?1sVlv;TM+Mlr0A2nj$ z5jRSD3^Dl?juc;_X;|(w{ah3x#URJT%aBn#yxE?^T$KdwOV996cP2XY?+?(Vh`r-|*;jEoN_2p+tNSU)J7L&qX` zczB%5d&-XMKc6%)Mhd89`OUDx#^(H0rb z+8m3!R>6i-iE@*al>*pH2^*mKY=1h-tn|I20)faJXD!-$XyI-(Jm(X>l!yiu>g#8! zl=U{wdU%nu!IcEO4^h#L6~&qHyVHubt*cvlcL2X;$>zQ?A`T4)8LnB+I)TS`Xk9Lyzyi{+iYL##kF6kNe2pDC`#m3x z>pdceKmW+cucU;Fn}%+vy1xiX*`qib!$UyciRw%^lC!vF@Z#^y>aO=fao|OTCxgP& z{U7TK%WGgwrb&k2y79R-PuJs@BEY;!>3R3>n#7dp|AlFGY- z#gSoVtd5;>PAzU+WpS4qQ7LP+QbSps^BZg-(C~0DxqxW$NLDVQY+yT(=eaCIz%Wy6lYA z{C9asr#skC^(b`HujZ5n|cXB0p#P;Ic?T}z-tY#GM#;FgMDvT0GI_%c-T2` zyo2rLDN^^L=7;)@5BOif0Z&nHLB6-|z20#noQ4M^{=ZL{00=$G(2%14t;J>7ClvH6 zpx%E&rn|APKete(dF?5*R|>*D-t;|u4va^|c}rxdN0A0v5^>F0Wj$Uy#N{vo>8-UO6jX8Qq5H=cPGxJYwn+^F*N z!^M(Gi_*Dx(dES#&XDIK0t$mlBLL}sdK0Q!NYm-{MM~o zP^x`|Ky)4cw`c28YH(YYoRWF@x{mY_`Hz@5RG&MyigILGdo)oy)ez8l<6`4*&f4}! z{N=D!;m5^zi{j5^P9RGVx4(OTGBPlbHnFR-$@`i}kmY8YZcutcfqR5=kqNUfEku*M zC~Ga~6>a2 z9gHW52u;A?W1ehfd}QbNaT^qcyOWGgSdR)G^O53Qh0EvXfPOX#*eL-K2qE$EKo+ng z$OsE$%~@Ns$oKrD9A>ZSc_$I49OJK0b2DQT;4uM z9Y)U9y(Vq*58vF@W{N2_Y~0`>63ltdRO*QmQIci9M1lZtW3|v*l(2Zi6BQf^$3`6d zk*jciKvsPL%@gsCF#wrw5XiSRhJIvsL|Mp$1zwkU^amUe?4cG8@^eplCQ3Ls?12O| zLV;2~pOa}i(@LL0vOJ4Jt)T{Sdq97fK+$1TMHRO`Z|KgDGu#X*x*8Fjg((#TCJ;hE zEh&-w)+Yc#Ug1>t+11NI9dL2bFBaOCNaLAu*!80{Mc8Sy$e%=mN<~xTnXM-W z5{`NMM}2EPrs#&|;;N~Z79p4`#)iSh%E~AsV-Df0OSn5O3%+3&y(Y;-rdt!dM?(Jx zWQiHTx+$eENvzCuXsM7C0rqZlX7~B;Y(pKegN)TY+zA2J2^B<=9@)?85|3Z%Aa?QR z>8c6BhL#-O|E;D7zz@QolF1S$(g8|0UAzlG5~FzQ*7E=i-`6*uJ$+bg?#f%1_X&C& zg%Rnox8$4&@|O;b>b(8~myV?J@*&Xehj#HUopnw_=4^%m!-E$Fuf$|!#~q^Z8J=`l z5h|mWwLPb8Bpx5Xd%$vY)G*8$j%6;j(cVFXwJ-I9j#NPCT&Zhz#*8C-5Z65za|*J% zGQ!YiTURq;s7h(DD;Zr=h6)&18JQSwA4kQ;rnH8|Amn#V6zZQ!caTMYXivLkA81bp zR#y(^X-jytj4EeQVnupIO^mI^@At@W4}R)|3%!1xM~-~$2pOBxa|whREn9fFMSq;~h0X8Z4``SKsEmCUa+Py*C=6ATCs z4-c>Kzr>fnKA5Mv{YFrT^qRx&fUK+b zQA(q?M0j;AoXA%ndXWY2{-f1|InU1pBcE8(A5i}Me8Hd;k%VcDpaNd&Kiu=SQ?M93CK`}+Ht0oCeWbGx2wk$@0UoX0exl*-42;^4abfAUD z{FZHEXenUaK4c+ssS{$SU44@lCO6sPIDt$e;-KNo!7zB)8-W6^BB}_>@L!dD6TMIG z+2uzWVrQU{d;O<$2;tvX>-P$;)+Xa)?A$a4=pGQGD*;!=_^v`ot%l|LRCAixo##K~ zVgOe6#Thhn^mmT^sn$Q#A6owSX8OqPoNKnM6QEx1$>_52G$VhAjRN_f6<@u;ZWuypKbRg>^2rSqDebv~ z*CR|4q3NA~(3Eeu_C@Zd$o+pJib0{^bI5hFy);ixu`Oh!Wv0_tAaE^{S&;UmyT1zE zrBAP-Sk`X5@Y~*B8B}{h*bS-+>IgLJ&-((GsAX`;TI$?$JJ4od zDF?DbXj*Otz7GYKJSkt8t*<2l??T)C!r*T%pQT<0?6X}P<=&6`^oz_eO-LB~u8gkg zuc?@bz%6B(7dMTJ`prbg#-5%s{G;X_BDf=rmf--xfBNZE5Ao`n?If?n~L z@>CA%2g^@ro6IHGc*C-Q{IiOebnrnVJJ_>VaiK~fI`UKxb>bmGO(1nmJ#xS^A?7}T znm|7*V4YY+@CQcsYOi0vGyAfX!`1e(zEP_XSYTyl$Y=_@F6x@!Wy&T*Q!0|h+J_&$ z-^M-uqiZqZJM(t%H>WUOeGBV`2BiM@hGTZd&(8T^4OuG6>{A{cXkS@Lb`Guhxw&BE z>5{vecDD*)iD+swQH-| z-3*z4J)pJ7ei%J4=Efy#8OB-T6fdvif*YL7V+>6o^1OEmFcxXiXtNxYnc34NewM_K z+FoFzq(kcZzRl5icnjNeog7?I1R|+T+@$aQX;b+9{jVV@mhztT-W-0_Nr6PdQLl4>sL zd)+P_1shZ`br8N8Wa@!pEHv=U0a9iVhx*hgzNqCkV3z34d_gOsW0J2&iw35iu%Rq} zmN@a@2G7pQ6!Ab@?_e(|!SVk%+eKmD+k z*_8?=Kxh5kOmF|#fJ=^?)mq#%jMb}7TfF;xCMN!tGnfXJ@S~`xsPx;HgjjL}9c$F-ZH6gIo>meVV+GT(D1`30uv7&cQz3I`{mv{IBOHEgK-qX@RlxOyF`F*!;||kbZm8H}}LCs`#*O zi>q395f)#;KWAx2zIJte0W36|hZpAa2#bg6y>FV$k6l{_3(b`K1EQncceYVA8;K7V2K^ct%DuT>rimm5<9Du|L&>WWy|S*V?VMzOUWn#WdR6`dIRto$BR-l_=KFp6`d6S@4+c&vAeID^(><0_^Nq zAPwkVq0XQcW5>s}E-J864dl2A}KCMaBCcU36L*HYT4)QK^H@ z@!%;scCjfy`wP!3P^IytiAPd`flZZ8j*j1NafZxShFY6yymRlO<9#cg+w~rzST_A; zwCSP>iClVbT2GntN?J~R?fWELL7W=)?67zSO;jmm#P#wA)+T@-S4i1+c$^Iiz1KL8{^Dl93MHkyw)3nt#J0Hmy2%}X&HAXU<`MB z;B?7;nQr(du3h|oCe2$$m=H1^pB&^EG}CFUu|eXkARzLsMcuzIj730=yzV~72i8~y zF>^)nicO`=>muO^2;BW@OWM1&S|!H!o^>{udcEMgY=KNFTd$`VEG>gX6ox~tb zX|EN!eC5AHNmH-8lWcNFE7Pqikhvi@mB4R!r)cD!bd-Cg4acPg8t zgqXA>ZB>1qQg)-ibr!5+Ynj4NMMN*}TAE}o8YiNgRyOl`%(eCDYrD;|Z-z*Fzr|N< zo$320fyp~G;BXJ{oCtnc1##kyZu6$@$eaGM(jhUu2ZSV2*2u{44Glxq32V56g99l( zz|RDHHtkPg+`FXizmBwVC<$^dL-g`gt z*p53zrF7KEAz6i51AcG*i<=)k*b)*GF|tzGFF3ROsYN=!@Ql4-B%e5x2aXml*kqPF zXthacK{)b1otULITwVCla&6z9Rd8(pggbX=K|AJ7_c3>KqxkCd3$6J-8LNRfwZTMI z1GpODEcpbY+Y#uo-Oz@2)-631PR3sc8;48Y!_fcUMRuH4i5iYisNe6Ha*0XR7hbpB z9)FU2;+N$LJr{2Mp-m=cyuVeBJ7wey73@!p9DZ6Kzp!bF@!_ zx&vHGLu0ni>DkAgMHj7%C(n-Vc{ND@bPvL;5)lW{=VghJP;NYgUs;*}zKeK?e)cf) z6pS}x{!hec%8y@#|8ANp$8xGOZFh|U$nsL=f;_t|6%HMx=-V*YD0Knj-HkIdGm}$N zqWfGChq|l1J+XC^s?a-rRZSrNWi{z!~}ZwciIk zdBK7SlLLd>c@=Z;Z)nif0y!KBy;s+e(u}Xeq~@{$Mhc_5o%RDls8!ILe|CB%)3)fE z08c~?Eu?cT_!LxA|ASDLA`#VqMCP%4K;RHi%hLZSwNP~a{T`WlSCs8RL;QB5*y3(9$zWZ}{jE}epwk2+w?CqAH z^!}FzIRbwD{w>L8%~Ti6Cqzaq>G|3j6;;~}tQb0$dzK5nJc9dQ-OIOL^3iF= z0$6jVP}lZ4xGQUYz2_41brZff{PTrF$uWDWmsA{^)fD=Nt3f7hX=y2I1F*FOv*uu) zff|`tEY(d4BF$DA)r)^0TCxaN6JQox|NiA~(Ju;CO9W&ADl9HePAmNo_i?5wi{N{UuuS5PLVvIRI(Ha4n5mXOpM!MH+usrxXq|?N7KRw3weVGTCOzJwhLfCw?wPu;4a{bnQnr#DxZ`EACvEm zO9Avc$1=p#w7jBC=Rn3|-KGI(D?3-S-p#HB+=9zjDDaw9K0 z;zyRe%h0E|<%L?J`}&m9H}4TO&PI|R!U{I* zdqpl(Ju}5!kLnfN2fTX&cG>sFF5X_AsPM|e#PY?sr)FU<3xs1RcshxyhDPGt8QEaT z4KL8iRtBh?%@5>HiI)Se3O@xQjur)f-e9lem`cV5o{q={pwa@P6!;QwV;~D32-r<% ztw7GvaJujAkP`R_kY!&r>;B4TR}v>;-1-D)w!rd=aWHo2#^h8ZQ@3Hh|D!tdG2oFyj(ueDT7j?SZrmgUC1&<=^wxaSU7Z5wBcM%YhyT?oLVruoVd0 zUoeOw`Cr|c05;HFQ52h(&Y+VKC;Al@Mr}6N&z%%|yV{+sIhSF1OYv2<_@#WAg)>>E zw2c4cb8N)}GXx?Y9T9fms3%ZD(OsY*uq74f-E6%GtQa?V$uEM5g`GKl=J^rt6V{e$ z*AFl@47sbkltyiGol78zb=2}Z)t!l#n0&wxybafd8LxSMIgBg!@z$rX5 zeY2>5_|fx%1uWYovJx#e*M?4tOZzIAtP?D7`7Lmg3JiS%OozM??54RR$UyxkyBbEU zH(mYJ?)UMd{1A5*5E?SV#NlTOFCsI^p)=J=gosBbmkzWDmwPNWH%(lNG`qjCOJyAV3&CH@FCM=7?pIwq= z&G968!iK$>Y4*)awdXLc=0RNE>#v!6L<@@5!#5+rv4UXLNkqeNaE^5}G&I;!{?{wp zP>);KN!bU64bEELvVHVtbvOSn>jV|T86$Vp+k1{m=DRBC8Y~EO;ViPU7FRsZJuXh{ z_7V><-wCFbZkS?o-yA_OnFQX?>A#S{LwBR)F1UNFmKvz?StnolPQleDy}IA?h%qql zeNxZ!fpi1rw~KvTSn85~G+uS?R2-ZT?dh5MnKWti%4Iy+lVoe54}bbgOxbeV^aYlj z_pZgrAYk*`3lLNMD?zB|q_g}xgqUXY&{9s=b^nUdgOH~9}O4%G&b zYESu<5Ns`%8Nrp5uuO0Y=pL8KPyLDV%&)lmPiJd-yHsV|>UW2optrIxe|y#kl8d+R zhQ%mB`T#~4Cj7jr?AZ0ncpCn%%38wk@ZJRGdz4O9Z?QO7RQuRotl?l`~_3ogzlkp1&t39B3vUq>PBxjIw1 zRj!->QUP&To^KF{+esg}TcrXIQn5Uj*tOh{Y2{*uG97y2(aWb^o{Bn$ZJX^Jt>v>^z=81siR=1j@cb-}o=sc7N!&*g!vj z3HI~5hU@hfUv|+PhkIZY#8tSO@EAZQV6|cv@}t%BkqotP4q85Iv6uAL`(bVkh5evI zmi?rwrsF?jV*jt=rwOqocbB(w+#EfdM(Bv%bcUz~x|PSD!J#S9~J>l*{cet(54Y z&5MJ{2cJFbQf}s~AIK!tm%*III7^q(4!=DKPuaj4Hv4vqJPw>J;L`!kMX^oqys%~i z%p{P7@_9ica}Q|z$h$W};jo_($9OjlqWtN8`GBAjoa(S8i&G6)DODLgBO*;t2E}f| zBH9;Ve*L*T%Y=afTmqRU>BX;|CB^lsuVi+(^IXS)v~&d^dIll zPEz;?0gpxNVKKdZgYoldd~1pz^t>mgYMIf6UKOZG2VXZxcsyWr85|Iq`EhQJj(>b* zYgZb6#XTP8y2R6E_SfFtq9EehOSPw`J>`QL+RIxOa7VAmyCg$_zpp#BKM9g|dfux7 zAA^Km>@@NJZaT0T>AJ@cJwNbCcaQ>R*8eFwdTbfh7iHwtcleIx@rxmbQrBeQ#%T*z zYy0chw1&XiCr!Waj4!PJHa_~KY>w_=I=#cxoDJP*3y?*^2=a{v3O4_>J6o)+joVy* zXFQR3SIxecPI4sh*k$MEc%4yQ$vXy2)%u>VYjK$<9d>Zb#pd|Ectb0TSJ@h4UCa;X;kC7zR2{_gFU-s+rEoan-r z=78CqA1?DLC-O7z(|-P#<;)u-Dg5-4pEk)m$cIRTxcJ_d*?EoDzGSIsPHc*v?hSR! zmYf8EVlpd|5s{=$Xk%w*J=qvGR^a{%|Fx>NqT{aBc8lv@+L|4UU#c&=QO7jZNqSkVe$O&)2xF z+~BZtf6{Q;ok;0+0AKj&?F*FaR z@(qkQJ3*AFsGVO#hI5=gSy!PoA+*vZ#Tk zF>dO3hlcm1VbK!07gAF_i5PRkztr|H6Ir&Qz6YYRTp+bNp3l zhL2<_%r6yaBm1x0ytOHL%u5M5Tfl{0g_2)8AL~6E194(>1*TIgmWQ?z+55J+`;e6D z-B(?%&7=hoxD{Ps-dw1=NISiTJpSHmz@?Pm%k6fXS3Z=rHHV>d5 zK{a83TLeDSNbvI9nGH@UgqAKj=j!Sf8Br?h)CZajzVsN1KJEil{Lgh-Fp>yFf;egi z{}d?`JvWPTbD=dt|1AS>4ChYGSqVP0QyB!CLzZ&K7^VPmB>?ImEO4K6hl>>yYR zlq+RQk0_ahgnn#F&El4G>K?r&nrHi$=+v4|K{p`Bkl~FhckAQmS9$7FPYBJ;FkzY?Nqo?0zgas_j zcI7%Z1F7yq&WiNUnTYNw$LZvNfjGFjX#j< z5gNrQK?J$l7o{PtvH;6dgD<>uO^*cEJM$bSyk~|LECK2EPtG#d3DI-?$~&)9Rn3XR z?EIT%gT7_Nu7$@RI-+IZZar4*ZEd=yrabTXrKRbCgAE8gfi47;oC=)6lS|DgX1h@D zF&bD!w(yGMJ>`VokkO#t9b&DNDV~=4ON3^xgw4Gg252IarNAz@uf%xR5hnn0kjAp z7LPNTuI-dOIy}k5JN+_8^58}sUC}S!zye=t1Vyrjt!{(u^LHT(Og+Jr%0^wt*?Lyo z5G>gVpCp^4XXwK!05jR4mSH{i8eJGd^}^Y99^d+38bdt#4lYhLuJ85=;pFJIM?pEU zb8A3Eb#g51P{FKX3ZPj)fydiRLqGi7CM+)Pwhg}$t9IiXD=z+2{kAr2lu&Dc0JjPs zcq*-D;wOP}O2sL)%RoBok3=DA4P)-wdyCeZ->Yi-EmS*s- z;HKgfA!P4qG!=A%Cg|Z{Kxk8{ML-mlxi<{NL#7jP&w*bmROGOw}>4>guKI z*qw;H#W49fQ*g-JtC^Po&uy_J+g&fS>p#BTW!ivE^7JN9M}p^&;3~~kC3;vMxk3M{ zk0C$Q8_iZB+=|1H_Ky4br6rkr2#f4}xhcztiH!Bd9wKk%JJfs9$&o<(rMjnUc?ncs z_cyTf9?XuYW(j13L1yvz2H}(3W$-q$iKPI)$Kz|@8o-6A6XP!N*`g~?pvRI7^^45y| zX9cgZ;PKFr(AZDkGX^hT3}~F!FuvwBY3IC0S$Mq8o#j7EGt-e)JQPP+AK^%f-7E9L zCeu4nvr=xJ%A`)u(*!@XId{0{`}|PZX`1^juojywP;BZ8 z&jjd{>s5qtnB)UFAL*+%AXh;nTrW9ons-%q(He!jG>#Hb#v9J^($UG82q=qhCS@ z$N8tb2$|#Va_($aSXUYd#x=R$G^6zs2(?B<{GEy4PWAd)_dGDTQAT(!#kkx~JSaaU z2<>mwDbNNXDSTBk3SE|-tf`i#*tPTLiTDj~-}#@{FH!^t9FxBO_(^!&Onx}fqM`7& z@^oCgy6}Kb*dCc~Mr;azWU}#Nd*mm;-Z%$Rq#mIr3j>dj2J8H*8*e-W&!7|&6p{%X z4a*j%u-b={5)G8ij;rh7mRORh+&x_%cq~dc-bp7AIKTuwW>h0=aL)vyR$OM)z(Wk~ zCoSA~Ya%%-MBumw^O5ed>k$K=yB)6nII@pZwMm-TqGMKmPNt%xB- zZl}VxoM&G>zTTX^w*M<*&i7Ra{%h-XAnAdQu9G;D4Xzy68&ch}jLIUlN}kE~Y@8Y? zeu{k=KjL!AAN9B;eD9tbZhgqyQM)bBK3R(yEoVu7q^psFO%HEk=Xs=A1QMIexcQ670#Tfsxo zuY%x4yz>1mAmf%ODk{q8+D3J*rJU_AvS$enkzRjT{)t`l-xklEgXW_6%NUsdj70t{ zJuf-%Jo^|q_B>57A%hfn4+TiT37lqs1*4e){MRkNSPs4_(|hT|2Zy}s5c!gZ(Ax2- z2;VBJ-*w0R{!&Cags_Tv`x_o8+Ogg#35c))j21mDpy5+-a2(V=1eB8qXUKj&3c4_Z(p>KhmRK*oY-Mjj2(eR^>&RF_JiHm*H$cx2FIYzIJOzzyye~HSnHk9;`N@) zPjka>2&+Ozco(zQypDFlvfuq4v2m8z5L!4Mdiilvck9)ImkTf2xbxz{$#)mtC~XdT z>P_H*bhV+rQdY)Rz_gj`Dg~5AXB8R?OxUT^F@oyoecVN1JZMm>$4unR6aSy zHrOu;`qV!M!L{2+3k}->PJb&v0qkD^LWWm+)lr>VTo{k5&Iuple%|=^Cdzu zOGjF|aCesU3&So=%h7KQ8G#+TQSD=ePi=`5`#Jt#@bnKkn*Z@w z*riHmd168j${7R~$R(@pr+{X|@oUP=O^-mY1#-Id;*9vN;CC$EDd6A6rPaBqxH1`N zX+FsuP1B;Jy5Z+D7bQ!gOLo1bRPX3&Zh7+QmbZkcq2TO!XRT86r;UeN#!sw0Tzp4Y zIKVXMGr!u-+P-p5Cn~AmKQw90l+Ntg6gdW_0qPdj$tFStZ6Z!G7++{X=XY#?KZL*% z?q!-EsCz54r_`k5?b976w)M-wmO@}qMFS*1_P-Gt{QQmXwMhk@^_S>7>cMO{PnmB; zTwv)jOPPCyEy!C=ayOu0wTohM^;o~U?nAG#KT)8W(M8J_^ZbCF0jYn4XCYy3F~8={ z-X9da6ZEwN-${)DW)B7}ebuz|&~?!uF6`j!@vg(0zH}+-`!wO78#szV@N-UcM!BB7 z+JhQ@3foHk??4IxB6iQBdC1@C-GYvWz+q?oM5+7zo}1JH0aw18Q-_e$DkQf|&_I{W z2)hZ@nZ`8+Z=j2xQ|g!W0W=BMdfaqkz@E$n_hwqASE!}-GC@I(f#miGL-2n{~*x=)p*rx{S8$Vk#I@v_M{siyfMKUs6sdB)9I|{9q z%zz-|N6M#ld^|`=a}1mwt3)-0fT9~3MGsDdi;jQq8vYBeb-2Sy9XDf~>p7ciUp&|E zH7LGugCOgg|FWF_2PwY*cgm^{?`kKcL~d`&$*bKgE18b;a<9B~b-0~BJyX8``O1&8?E{F=<8k?APnF|tOxmz} zZ>+9*QHR4}h6TSN8tOOdW!2WyA1(SRn-Sw1%|EF=| z8D3oVl=w%MupM@-oKwi@>17ya;kaeKgMk!w?`Nl;?Jz%eJ{709cWp$LBFG@`itMjn z6M1aHm9NEOM@yjm6!5L=p#Q}&y$aBMXzz0$U)Y@6O7-8YwG*6_>HWV|vfpYEqst?uI|ZLIbbM zUL3@=?_Z!0y3|5{DjN@5WFVmvk2_6H7CyFCiZB~ z=)P*f`ggYlBFk8P1j;YN(xp`%aNBHeEZ>@V1a2Ef3|ctS zeKjXgDK$RBe>ULDQQ4f76yaMw-mNEubZLkVO_WPMJyXRR7=+sy4dUNU7A+oaLF#UV z*Ke$SD7&?S`R3u3V}>1-YCduk>gxLEfxVaUivIpdY*?0`AZ%nPoytqdb8j~Jsc91? z@-}(ckU;q7h|zV5?*@!)joG}6yvYxq)z5bKJRjY08>DogA`>D!mI8zNEW0}Ht_a!5 z{Qmpre%F%+wzCh$x?#jZx#yv;Hsx01i_c`ZLtj&B&6C0hC6 z`V}@&EsYP1h1T{}VxL@XgYVuTA=hBKM}J}bWK0;k8k*7VdTphf2W~=j6a1X);J(0^ z_$5YpQ+6@lQ!8Hor>{rueFlc66{l%k-I}4qh{-xb#BCkMvKcfUv+=w(I@@Vf3s>@m zP>>*QlfX_HR(2}u-sWcqMb|@eK0dzuIvzDU24c^aI7InTco~slQu3|v%dfDq&*sSQ zk@Ps_pbb@%*u0nCwLsFnL>t_FSaG#Q2bR8!#fX^rvaU0ooJ@Q}9e2>x#h5kK^H2B}?i0)Z~7T_q|r8E{#k3khbfX zMGcJY*7L7KWk~Ym{4H}CT?O+HuB!|`{A8zqBcLD7lpmG38PouOvExGm_uD7K*({x5PLu*H(efi^S8UqIBQ=wz zDjY5q90!RW=|y)gW*--Ipjsv_*ZbqmM#m}m?TJ-WZ7$P9;UDh4b(5PsNL}lCCuEoL zw+2@KWW)Es$Kc`@mFjfCEwP1c>LAHW<3T)MsHPgi@0sQ65`8j1u6%kj63z@7?zb2| z5<-lC3{SVel2)tO7TZ-{m9b!Y9#?;HBhYV*IFBe&5;tPSxoMKcf~$D;3Ne0b7RtQD z5(WsY-;-nR+vM)`XlF(O4P@nbmT&Ld1_t)gM;2FWRyLgBM$g=IKCt-gUt1C%y!|He zq0t2y{RRwp>681Qhr`$zVLP4d2Ig^9d#x&KYDQ;l&DM?z;C%D#@1mQZ!M=QJr}M|! z?1%b>7tiTib@1=nijIVzesu^|$M+uT^@6vjUTy(q=4DS*Z$39a|NH7J9zTv8Ff8Jk zhr@im`)o%XNok$+mC?t|^m!l-BTVe^|D)=w!=n1WubrX0L#Y9zBoztCK~khe>5!K0 zL2>{U1SC{S8YB#m?iiF(QU&Q2K^jTP_guiw@B9AegUWO7x##S&_u6Z(^?rx^!B4<( zP*1q2q5fzo>N3JF>rt*q)b)4E?|9+hh{=Ouyf4WUrwD%`A@p0kNnQEM*lzSx2J=L14n!^SnA-(Iu*gHQyxHy1{RMl*1Y&u~ z=-Kr|v!%CY%fvWQ+=zn{W?`j^Zr;PTr%q}@FVK#(D$>b9Mj7cgIUZ&jf#o>DoJXX* zC&w2BA_Exjrp$Z3d}I9*Y0J?F+_Jc*AUxQoJz3eU41%YL7hA_6=g1-48!vzKmU+=j z1v|!1m3_srT6_QV7D-O;^@9%_nN_7&l}Ak5Jz_qNP6S|v+NLh)132M$*a|;l8v_om zS-!-waa29Eouux2hhDB>XxM5hFQ*0-p6_qc12>H+yzyp3)L%u64L|v$-1w&$j&h+P=fGd3pugJA)*S7jOc3Rn7;m?-x*?1Z*bg#g|>*qNR zHSgmV*|5O^WlkSe7d)!W+lgae$Ml(!N55OA^JyxuW<*@#FW}+ zci|SFEX|X_iLp6W*lOiFFmvHaGwoM~Z+9eX<7GaTayO(Gcr=CJ$=)Jp<(S{?A75XT zkqsW8n+X55C^5eMj42yUfo*k?k#fU>6LjvL30>ZaJ2QPWw}rJm zWG`(rZ;q5oq%J?iTJ1D?Hb*uYG9m3bV&)wQ{tx;o1dnEa-pS93kmR$pu+RDUQtyQqeWHwrJ3&w*w2JXB25hiIMt$H9+4zMY7+XX`EKo222 z6LEB8k~!j5^ct)@_~=PmkPQMcW#2qr*->YjPvDo!KOOy9s`o^@e7!_yIm2e90=yF& zvZ3!J0TmcPI=~VGilrY?5zC~44O^6kMn)SO?JTo&VaL&Q**87K!fV!;;P36XuDDLgdqfW7_TwYS?Zw|zI>Q)8n z%bjsnW4=z_xS!kTaq1{p1|51n;T79iFbeL*=?RQDK=%e|WR#No9Z=vtnw{+HUFS3j z=7n$Fee~94ZoXi=j0_}tZ7@+gP3zmF;dNc&EVyY_-bSiOQ4!(WGG1X^E*BgeO&Go7xhH?*!tROWpfovG*rDl6M$q`c$&p(P0|c$K4G zuga^@5MfWzE6o_bD`UTNmrl*Yc(|?$62%6RyUA^ju|z%~LDiMOXOK+*R``{Iqqz_a zoL_&xS|+{t3ZWcyv&^04A{QKRLsT70otx8QUch}xr~!rT0tnj|FHe2`jB=lxTrmg_ z3}un0V%z9o81UKYIdOH@9E&>L_){m`URud2m^qe~B zymD!?v73#II@g^)22DNR^t&Rmv0r+y1DQnI=6%3omWX1Y#^I>gwC789wA^`{4qP zF&%kO+0J5aMoPeYcFG`zNcZ?Ig$FcWziuXLY6(qyTRF#8v%Z^0@J(okEhpXfyney^ zF?0|gLLV``W~VOhniewSj_*th0Wgfs&dO?b#*@E!v$*P45V>FdEjnPI-HM((oD^l; zAH1)G7)=uy)ax)MrcnkO@`DXEc$NemxL-k=vgB8M!(F=S(%#&Hi;M+zIttl?AF9f; zwq{ba3{CQ7@QZRU;PAmAF63lmfQZQ6NfIXo1Gt}{_~*%mIM{tFCfS34x!6kvzTXiP zMQrb1e;F`P)UfevbA6%_XCeOsVa3Ri4H5L-fox#;Ws_BeNdbj$KgH`TaQw14_?iv; zw}771$CKl=lbOL!Gf;}uTSge}2$yF`5BoiQV_$wdhxzk_b=z~S!0W#mBjn+S7u~`m zpB*91)eZ}Ns@s*Jlx$UL(|A@7|5gyTuuboP$>b0#;exg~5T<}IWc$Guf&iN~ZH^h$ z^Ft|svjs>wYNfS@1;uyH0y+|GXnNOgLvv*y{p%a#%F4~f0V+Cn4i06bB>=7ruM3C6 z;e1Y<&lsNZPJC!)k4qdGZ~M;sBfy`SA+VaV!jH^nUn_5-X_+M+;DC!UPk-a}zGuig z!A~Ncdb)gZuQ3~wKZ&)RMOJi7Y#As^^@3CVCeVX#urbzv5qjeDnKd$kDZasS{j)3O z`e+I4EQLVB_bGi=Dh444bKEn+I?7X+W*PtV@AiVh6?|w;*)8#?uTP6n#(!va{X7QM zE!BJsfFJCxLDnYru}Cm3Qzd()<#62p&gFZ@e{QxM9XhU~4}5bQ^2uTXViuYL+OF@8 z=n7QCHtc;acN4hkVJS;tuJtTK?Q$3^hZfLr;96yf91P0+;G3U)`K!w=v+|5dQ%My2 zl`DjRf+!Ej6#dmh;cy#DP)iGUi?*NL<~B63B!pW}$U?E}v$`x#w+}iQ+R1stJ7pyi zRFxbY9Nx;UTXWtI#%gcA-v83ut2b^7!V`F2>QUVW?P$NXi(!KuJw}}ffi8$h*AoY`e_NrhVHsa zSozqmMie%lU=zj3aFAAho{2EKm*HnIYN0o1wa+j)_P_~6EpC?6%>aToy(UsH7%ptv zQ*I?Jl8*3$Dg&_h`#yjL-;xwtPJROZa2;J=Tw73p4_fTrG1T<*;bF|aD#F%lrDLr= zHphu!*2hhH4s*V3;{!z)4YE*~#EFri*QmG<^9Quu!~@&|}~mB9Wp*Vf2eY z#2N(iX}p|YHucZNfR{7cVH2U@KF#Z0f7y*0*#QmQ?G~?3KNjIePmgM$C!a2|DGj`(SPIHmdDhm?>Re9|% zme%CK#Oa!ie(Dq;i7Cj-hmLE2sn)4xJeEh?6s((jpRkhkn~rH>Y-!2oaIbkYx8|PA zOAud_ruF#MHT^U&nZP~!S@qCxm5bQ`C96f%%<44?ElgckD)JX+ATJ{2%fpUX;BNY6 z!w8SG>jeANt5=9?9p#mkhPZ=2e&AC044tOf;*;}Ow8PlmM|{LsF3;|+MB>wfk^bv5 zGc&rQZX7s=cabm20e$1gVS4*X{^2$i#!VXvN?=!&_cZ&Cgoq{j7$zO$pI&RiMBp~< zDsx#caR|6Im&Ti(Y&d4fmfPGIHhi)xef1`VXSH=FEjCWgiBuNXerEu1g@<-&6eM<= z(@lTir-9^Mp}C`zmg7E3q-}IvOpI=3Ro0J8Z)rC|vQv0XwY7q)2~&m@I_{3j65lv! zIheFKaWn6W#SVY5|0ArL-F2e=AwDT-rNB%X12!aE2deCD>8n?aUxbsW2_TFPlIbke zj^)1oU9YpDSQr@xl%K5S0lne}>$xEIY7Y*W`pGU9veTy=iRanQPXTg_ew{b}B?gAv zXqJl?pZ@%5)$M*;N79m=aKRXmnLK6WmYL5&8>So1W>nC@`Evyi1PK0~*|Sf15$s3v zHiO+4R~^P5d?t!rOTn)Tg210rya>p-yFX#anZos9ds8jiu1tuT?)2RGabwC5R zCeN2;Af-_qFgeEhFTl$JNO+2isPS47h${7B_W={|n;L5dFsj@suls=x4rzMdlI+qX zcbF?{?wWDo5pHT2gRe>7@0%a1@<%tC#q#^yXYiJ#+~E~wX<^KXTOVH^8@_4A|R+GFuuV2v@eqC*Wh^AA7Tofr=llzq_lD{XvD>yoq&LZI#n@e$s5 z)a#O28qFO+lUm)4A`BQg0EVWc6GAn{I>^eOXna0{3j6q&w#?}!m$J>hu zj;L>5QI$)3ui#2lu}A6c7?GTAzJuFmU$|Hhq57z;rDe!vDp=&O?gv)Ee_#eQ7`BXY zEjfmQ?C=dru6-gRqOW~@=DSEh(D7Ms#v_S0tpeG`%+K6!?EL&Bz{p1RUA5Xxq$o{c zI5FLJe`rX^<()5_AjXjcH>E(*C4$u7Z=n-~HdwT{^M63)wgAS4lUOKs{PpHgA z)#iMj-HlzIT0sT_F{9Ucd9`nL$4LHdQ$r{_I5y6=}*%SBOo7QR?@MWefSIlKn*$ev*2 zb&!K_Lzw%Io=pvnci0&X98;>auw0o4qUXY^8IQ;uc-Q?;5XLN?Svtar3DSdmG{?Ss z#FdAIZNIk`EXF|Q@HRQoQIZFKSw}~wK2SNqi3NNekN^bs&N88U2t6dv?>!kD$WRRd z3kbv=;ywM@x(i76_I3~Vw->A$3({|Rw0$6vx++d#9G8;v&NnQzppI8nQvVK)Up7nn zSiX3P;1}ZZiV82uD0(AbU*D=Z<4ghQj_n-JS}eSfzz8i0VF3{^>*#U6uhT$-ArdjX zX}l&7d>*zoPIN~t3#=)Mooo+$w>yK{T!O@Kdbie|Z8-?I-S!Sf#iX}bKAW8Rlj7VK z@o}3xIJZb98c-7>A_&9axT}o^&cK(hcR=35kI+XE`D;z!D@9=M<`>U(#Cg8*fv^Mj zeci~LWMNyw9_89v7H7?Wp~n(@G}o@kLa)mRkB zcj>B$KT1Va)%^-a%l}utMm^nHeHm6$!=nFdQXrU(fjL*)v0R{w2s+GBx$Reu$hso}&T`Orj98u*G_Vy5p_#?5@;Z z=6{tx{XGl=RQU@nw}&#FUGcI|9AQz*HV6aP;cicT^}%&8e&dxtBaoGpL_Z*^WXSV{ zm^)sNoNLv#E9w60tiAFNr6_R#Piy8s#oJ|v4HGj<4bLdQC@Rb9z^085kbI$tnb1`y5G|YPX4O+W%<6qgX88UZX+4@jj2CO|gmB#XS_@05%S1oBfaU2ZwD-*cKJkSu zQv|WWv|iZc^vQ2+wK?%bCMw2|)bgaAe_k~5V2%WT^*i3t0T~7jbUvhfij7o>Qzg+q zi6|Z!+`GrgB!pNUVZPyh84LGvz;M?l{92I9Q`2?7^!fP^We6ODa5+`vj*F)4zL-@9 zx#Mu@<9X+_=2_A6cNGJ-9Y#HN?3oL;@76+=ulWzVdvn2E1VL_MQ=w!s3srCS%mV*f zU%!6cGn8^RUD!7I&V%Q5D_xp&9eGIGD~7U17hLsaXr_8IH2isYo6CDii(JaBI1QySC)EZpI9rX3-0iXDAUtt?~)3Sam;7O|oA~NNXFJT7`C$w$Qc#hv3tA*!(f{gr}v(Qw$*bmAFjGRP88@X#qRxP%sW# zwVEgip|H?D!nlDTChpwE?+J0q=Q(rEO4bOv_Ci1E-!6)2Lc*!bbg2DdNw<@&@0!9m z#oO}tH~IX6-NDTd*re&*3I4zWzWNSQ)I6*rXZ($GdcbEJ3i9B`sBy2v$$0KREozY$ zT0syRc&=I5PIECD#9826ZV7?6Q`)Cu(k_#YuiDMk@2j(ko_~6JQ{>XvyBsX5Sagns z<9Qfbe-Ca!sin>aKYip<=an_9RT1^|F9*q~~l_ zR@N_E54SCKb#+LG`@mK3WJgp?4Ss?XXR+~%S%ilo(^wF4uc?T17ZyA|=#wW)-VfOWvMw~5{gLHO zz|iN~_x9FReQQl-5Clzb7G(neD5##fh|fvdMTyNky&IV(;T|SghF=}@MbXv7z#!Se zn>{z@DWx{MakgFwzReu?A#xbRU0Io&Or?pL`(S52v5R!akb=P;ujP6X{P zFpgrdc*`Lk`~F+I7>&^TC{sPXux>xyje>t#04ZWQg@x)~#9eHeUuF~H+04yBruNF? zzH|~d9V91$MqgNB10ALZu)?Oo_mVbW%t4ZI{9(l@v zR~x`rjfU4!LRqGyoR^4`%nETC{@%;U)#ldSs{hNr*0|5&lGyexbzP=0E62Y=y{vHf zr12Z}f0a56M*;Kj_H80CYZSaAXyo?mV2v+l$YX&;Ur%T8TewY6>QkTrk+{?R?EHPG z*CDHKDceqIKgj!JC$5#we?!rDWfd7$dCypA#>7tcM#PIvFmQg)lgbqMy~~P-e&tPI z6#8oAQSgKoJb1D1w*BpZhw@|zX{cLh18n1&%`a^;lAnRW2Mrc|yaeK!LhkF=)%?=$ zzg?~Fk_9MZ@WO(foptWTYgYI`#pRGHI0u4;L1ao&Ls}8(pCrcnRJ_I(-b!Ek@<=WF#|+A70+{_wQfP+kw8xFJ6SMudip&CQZdVeukw> zi8hA?%l9{2xJ|wg7~wx5XG5|6ms)%bcGZ@Kh6dO`CXU?0CLRme9bXvykfG}*nvK4< zUU4^^0}lj_e^-TG=GK6hkADwe>fe2$*N2BvAhii!3Kh_0>;vWC`huSsZ_#%{?j8!K z;ZhtBo($D%(*MO57D8~(h3!d|FTo8_H&U)Pl#VDmzpKX241fBxq;ak+1kEj+sawi= z?HZ-9O_$ct#~`}TfN1gA^0W`!MhTLA%kD%mzfL~*M8Ee*2>?uh6>fA}o6?)K$TdEo zLY_zD`tsxb91r-1hMiZs>p~7h8SJyCh$|=-SaDj^5ltyV%*w|P@q?o9KNj>MyWPgB zW<6d8Ecz(Ha0%?bm7ZgaNHMVLecign>^-0Xky&O4O0mUpo9rh}0Gz)mFE8oRff~nd z`Md{Q5m810v`IXDH+dbxXu`Mc3&COmnWUDreU94+cHU1o@UK5T%no*>g*g-+0KlY) zzN(4zroZxVwOft_GUr+x^|Pm+PkAEZsP@roQlgc*O01NdJ3&-Dn|CphrJpED5^;{vaO7Za+kO zhKm>716t9!%p#%K%amS5_XXE)8mmxT!~KYha1kIydU;aR@D8?JlJ@|61 zqm^E8X;vCJciuz#|H#hCaiZH0IlD;Ea9ncoAUYXQ58UI*H~2x%8oQjI!u;gju4_#_ zkKVNQVwx+(CAnnfk_chIr_~p_2Y{oQWd!$8j6iwz0wM$(m_+EVU7?bL8yFY_2HhVm z(TwqSxGxE;)1ZpPAvGz9J3g;s^a-v3!Y@vnIE(|2_}ZK0yx-VXp#fd`dF{k3xXPvY zQOwepKw)WPzR`Kr8DNTFz&c`5uabe;hjNR4E>KhwKyA*zsGclY}MlRs^L%>wV z*G>fOjb9OFTe0tw)SKrsSR^Y(pk;c;(Ybjzb_m&sGYs#)v zq2>TYG4s-c6^ED$lVSZyJ=sWt&u==(0DJ6XVPsnPR2vATVOY5OHaF?M&u?pl6X69M z?6gjQ3cDNA{T%D9lrczWPIIRUx0W}~z&nAX zjIk`bX9uzA!BN}w97`fa+&c_T2=5}zM$fcIDqNTb#vZz5#80Oq>%Jr_Vq|6U*maHv~fKR+ynl$z3g>z z$X_9OvNqJ@UN<*42iQG?g@qq}8zjlNS`wL;$H`|}ukYpppghHNh`t54%lE9;| zFeAE+VNH+;Ymx`u2GqIRd(jT{OkzCPh^wFr}SI+9jOa{s*q5xZlJEE#Uun& zcc9@xASW6}qhIUE1F*(A5jxZizhQa7sC$k;toofsaGhCMSQu@8X7JK=|jbs>>s(4^=R~)(zXqJ{%7PEBS z2s_Kje^D(2nb0%WZ!x?!q5Ao4${LOX0gu`zG7;?t!&@ZY$83bT@dFV{HL?G0fB+Co zkt~}SQ*jF+OwCh^Z4_Pys}YSp!M+eE8soOT~dmVs9VUcWXhmh?j0`ndmpOsV9tX zx1lnTvsz>88Q>s`$p2}#riDG^tz70qG}3)AYhnhqT$zHTjr zWr6nEYXfVrF;8D64QJoXkB0egD~g`x($K@lRBH-KXywTRL$}VMPQjLaBYD@)f2C;# zjpz(Z35~tkRUIY)(k=WiLQ6mb-#wg<#@&oDlUPvi6eLyyRJAijAAovp`$qL+c{rQ- z>}ReF-Fk-J1rMQzf2@FKrGqb^)sRLn2o}^OqQCjC zWolz)5Q2Qs@s0#kSI}@41V6nynr)8aLcnnqral9E=hqT~a9mEcBs73o-t0KOW`PYO zuHrsgwCx$9I)9iPxWVqkGlq!|3ZSVH?fiBqaMe$_IQxwU?c0+*1I^`I)i;G-pjSv+ z18=sew;twaRG4%Po)>3?3((N3L^8hjr-!g05^AkFgwp`!8@Y$2Ki*C^%mV=gQ0SG6 z!W938f%#XIQPDh3_D>{$jNJCt?+-ZtAr*#TBXrjPApH1*?!8YrgoqhSnpq=qztKBYfNe8zNRl>$9^E00n3s|;`0tZJJ(Yo znE1qPrEY$@*$DKe$xx?e%EW0_<@;w0PH}nN*Ac&ntag3uS~8tiMh0fHJurHt0nh4H z*?&N)DFinpHa`^KC9=!?%E!;IXm9`afNZ&0nwEy`aTmkFs}RJgJhwVl0vNcE?xii*2# z4PIG$rWii>JrWs7UWAOx+}vD5bFAtC^&CG?9wbRtzx6=sxK@obk{Ijek=M=*fzvN1 z6p6+sW<>d2pQxYJsUjH`)xr!TU=Fqr>jg~MTLyHVQO7_Eb_zW(spHnoq++VwX0>#5 zEKrv00|vA_=I1asN@}KPJ1UA8mtctIPY}X>+rf85MiZ@22=olNcm!J z!U%oTk-ui*7NR;_shF5nDgrl<8J*ry37dp{6s) z+yA7a9ku8I#N(pjbNPJhabcVKt%$r2w1fYc_~TKv(&(Z{bbMq|XC3r7^uS!#F7x{V zb44A=ZPm&@YpVG;HRr7u%&F>GD*x^>2P_DP3)A61>6hjgK)r)-ZR1Bf-?k2xg)ABs zl7lNNd$vau!hl2IRZxs%Y^(pmM89Mt_xh6TYpfLS#BI(88}{r6qC(>jL10_+-L%^Q zC8ZW9r)`vomY6LpEujfDaQx-`5Ba{RK1B!?{?P$et8(h<&tm*KKb@yj+*1)4&`{nh zTEC@6ur~L_S;C38^g?*pJCND1ut_$Te-wi_M;i%bXRrSMeG{}SHetB#4j?i_(4`e# znfd7eGOum^3@?NhsHOoQnrFufR9Lk=`v@TD{q}7{PzO(s5SnS|d{r81`)|G97JFV{#D=)e#`bYPq4(bzrq@Ea zXplAvo%{-cagZ~fq?_G|(pb8jG;y2kUk=7) z22{qMf$)E)EB`Al1m))bqIuv0pxms^>F}4H*DjLjyCW07w`*u)R^WywF@|Iy3g08x zl@*Re9{?8RE@3z_;Vf<3I~r)%MATIQ!##80?Eb2I`xy27*M*_8ekw?yBGGkNI1_-_ zo15Vusd#iytq5$nobUhCI8Uyap_wpc{hB*2gnY);K#SGMk4)NZnVQbz&*tWe*LbP% zmMkZyQU!_Lz6R2G%m84q7I;9&H3A5m@)T_j+#J4Mu4np?1JoRPWi7Khm^yfA6CLoV zI@jxV_@DidAT z4pe1Ok?uj~i@pn?s{sjPF94(Z8)poYBrTAMky%VT`g9swtZ3dCf#FE6w8n|!g|3QU zfV!^)96qY#B45mcHvZSUizdU8VFvD*qn_?CoI`-~tlj~mdXw!)(Ma@O)6dnqwI3!^ z$p@FKx!4KA(Xfpp!`}xn-+VrP{OCGpf?}1DGJYVESfh>Hsbel{sisJ)I|l{!^dbIZ z`UU3m`qC74)a6LPZpqHw(n2E94tR4vsMllB_&z*>g36Yb zIsFt05Ar|@8kbn*!HtQ6b4+e5R?ZA?&CW6iYN40^bHZ{v11CW=SL5}^0?hgE)+a+e z=P@N2YD@nb-hENRz@vpUW`rQ(_YYjsNCBrHK0fzdU0-3qY*|1sfTAIsH-aQRaQ^`| z9v*Me<`3x8gW;6Ii_>o0nX<6s^!3A3@#l)je!BVb!SoC&=4NS#tkVv-v3(;WS1LXM z$`L3qzs*m^yq0i?Rf1T^;BsRPOnfJ;ucV|NwsgJ;+NjB!Lps-vT;4a#&DSzw3dWuE zbxHx9>}%~Zi=^UNeIo=>b-Sbv00dB_5qr`d+?9llmJ6ve9R!{P85 zKLrrhW_DR~H`X_cUeAn`0wZZi2F=4VaPWT!DAT9@FK%v4A5*GuewVH#CL9yl_N0Eb zp8Ri-hoME@(L}RE1pyTufRt!6J^j!W6;bydn78SMVsE|7Hl=yNyFwWL(6q$j@yvc| zDr3g@#VdZ(F%Rr$@}r4(0OxZU!O&sYeIAnnxn1Y18?-$-TdUco{7s$}t!`5J91rzs z*X^Q<_FH@YWyG%cz{Wnj3~=wYVE9ZCQ(>K-g1S$kIo>DDm$z_>fm&y)M8gccFIzFP zz6g<`n)<-5K}JLhf#b=GIPeS2RqfB$GUnqyW8C*c3M22_O;X;(zt{jV~X^&v`ml3${({Xs>;?xb&Ap#lDLC3!1JW_t^ z>gMz3azJ(fcy>h)z7|+%9 z18+)Wb5AE;W_fK-Gym|}2nBdE7byN3eUplAKi%wr$ozKIEH!EC-p{wyDiB}#7vLTR zP@dvBrgi3yRqV#o6tInpNz#6m7A6zm+`rDx_xo90DH;E_hI|D~Bi(xjapjF*d1aLY zeSzTdN0c1pV61)p{f|IG^fpkhGM*BKwL6Yt2?-5#qDwih*kj^v)LRAR#6SoKp6=!|mx3YO$jP>v4<_63uYl?nzj#pK6kIH^1cfgA!Gh;t-Zh%QgGl}D2Kg8&W-Tk zng&}i?&V7>^1I&O@4~cgkUV9Ns)e0_>1fgF#Oe+x921}Lf^WnHRR!-&8%A_=+MmkP zds8K!9JEg9J9iY{>5BG~%PcA)gGOZ9$!UmrK!>z0G9x3DW~ySaWj!!U+PTII>9!q_B0bO!Ub3 z?2o?`6>@-4|K8=VBODfIh}xKGjbPvIpO>rri2<*LPcxqTwM$jG=qU$xEceR_r(p>LjlpwH7oYO zU+1G9nP@JY1epr_+|G=){pQ4Jz;pKfqcy(6uSz=h_eWyV-~Lkqd~M-8{9i7)1<9cW zbmRHWzJ_KZ0V4s(5V%y$&nVcOA;T+`(}X89#k+sWGl=5c5C{aOFf{Ro8y~3;FgH{H zJg4oUF~DpGGfsSR>)*#BrZ7Ov#=*49iZ(c-YcB#`3WuzP06?~gau^MG{OA#agQHcq zxwf$b*HTPUqx6(1Vy=J9>0Ni&2T_d|JWnw=J7?^=h%AAJ=G?IE;{3E^JI#F zU9sS#G{lmRT`myqJEM@?HU6kxWvbtEe?V#GQB?u!C}_nx8Lu{W>h82`0Ld#Mr;&2! zqv_MPwuiGVIDLN3VXw}&B()J#^h>Ux4|ynKc1CFviTSlUx%LXr3X5H~v%G9<^Iy!G8RO=x&(>L~tD?jo!0|JBV+L)Wm`C0of zU?}4Nuh}r*j#k(1y8BrBl&P4nA2cySaGGGu?MSv29?X}yVOKhj+rMde*FT$|5qPP& zxmoocqaml!JD{V%Xkl#$dMHZBmk3};4^2@xCU^(^j`>|=?NA_$EIybIz6}ZrH218o zsi^=>CVMp18AGp$guah3y3(+!ygVtJILfR3Uk5}B(}?ciTd(@2w=pOuS1q$}^MJ%e ze{;HIZDVxY%%Ww_GtGulJXk({sd#1Ts{hU}v-SSGUCEfr_Bnb{Yfc@u56w(X7sG;^ z=TWA;sPk8rh3a1x&7$Ndzq*kyw;6sY9U*mlS5qDPJU!hRsxQq~$7<}HoK`qlHSBtm zuI7xoE7NwE)>9o=eTFGp4TJXL!W9I=%b$X%vTJ~EsR|v<4zjC06Qv5|wpWuPdV4{}C?;7tGB6myyFW-7xm!ewA<@)TH^%ErG86_w)-#D#0_sH=R_tsEctw?FpkJoYYaM--sp_L*bF5x z>jYN2y@WozTFBJbeB9hxB$Vd0_XDrQW4AE50egmfhL1t&lDO(ml}qxgH{(OK%jjb% zH^+C-en*#q!oVZSGz9eFQjZ?FN)q~Lm=6}20MeZYw>c7900$e}r|6F9&?dmXLFgvg zc)w%*i(7=L=$spnWQ2Sv^lJM6Z+*H^de745g3@DL2@(qua{Uocc-5G<K^sWD~P z{1q|Qd0JeeYSqsxr=b{(Ohyy#ImA+y;fwFz0x;a31`?b2oVER8@zGwR%>9qp zpK!K8XS;u^l;R)kND9i*V7-$+l&Y!`B`34AuZ@`=y%Riy>EW36Jg!Gqwf~tOlzi5lD+b0~S0o>X6Uj{Gj=O@Re@cZm1=}xkF!_EH zBjvhoCR!Z!cV70yU_-#w0q@htkFjxa%?KDTe#Tv#7J`-T6fu+oyM}2mfM~M}Yi(|w zjFUtXfjiO!zvGzn+Rb4qs0oIuz+VO+^!HZAVk&Tk5rKicSFY%W`Z}d4aOS^!NiB~} z+>yWt4dm>sjyp)IHJ*n(@I2q(Gm^@nl|2!365xNdJ!p4QI#BA=+j+AOc;t@x%syKf ztr7&3rq{c_x1Z$8D#$5axi-k;x|xL~G_}j1Rb@f#yX(4HRP&F_M+!B_LT{PV8PlyR zfS3DS4P>PN>3MV|6JXF^zFc#>bf*q$mN14OeCh>&26^+o0GgR1z&-j)tk-^diKINy zwS^K5pM$09@E4;gqE0#_>lG`n_L4O; zG?c)+#)^eWL%P(%Qk&nxU+-(P@dI^Yy#QO=^OP2B_+9Mi%$=uxuJf~ZcRERo{TcEE z)jl~@#kB7&WT+C92i4#$5msgHJ(hBOAW<|n_upvM+_v%g0HjbcsMd(39xm@|!dFSb ziM!UWx^QgPrZAcN1?Eod%mg<4fN z+owM)0pmV_lG|{lz9!uOyZ%kLr8W>HJY$@oKqQHgLqb~MGXB+^V<9+1dGnAh9 zguzx?za7l7ZeUG{%LVwICil_OF31tf+v{7)R>p<*q>&=*au=MF%^vS_Ik!F}44Yy* z-+ysX`1?6*IoV5KH?ZpY6aKh5ye{y_cK`viUHqJ}#Al|JDR2J!TW$nbv0?&YoEvh5 z)ekkiIPi|&`fC49s~2=l7}N9-{wP~D)8xQkjt?%`8QAYSP2BHbe87$hIBmrC=JgC* z3dV;6Hri`}(kJ1WS`?2szFl}Ht?(JLR1L_7-D9{gcp8&WnF)G08Y~!(TkPxYZ#C=I zem@tN(yJl3QFZuIF4K=ah2zRW?LxGazm_DHUfCg6wYLAwMX7X5U;w+2DM3_L# zyW(~_0cnw|B3loLlLOw#$;nR&9=w&nFWRa>SC2YDiQJLaLDc676ELFb%!l-hvA@P$Q02XQaL|Yv9i8>}-Icp_(Z7 zOTbAeYx@s$?c!O-GBiyKU*e?p61>s3sU-DBNhG~tQ3BB8QVWfFujYp{y`+eJGgw3em4a3^?Cld9EsCL_Bl+mblM#oH zbJVUXmp=RRp#@W0AMu>>KId8boAPBld0je=MRpU z1ZiAV79Cy3_D|x>L*W{i|$Q_elbY)mKc8 zNvi{|*iU|zPrcLJ+^&7nfB^8jF?Nku9=FbRYzER;V*}EwVIXxz%fm}K@ay9R zQVg&eU&(}utac#Q+o+Pnr*Hhd5G=qhwN_iV{;ig#T2`sl=0W1h-yhZR85cB=H3Upe z6_T3(qDU2|n4X^2SxIv$2U=_4Ye9xFz_b=faH_q#IQS3G22~Jkv4`Abyd#{paLvtJ z@k-#|g~6o8b%DeAhsfT}pvQh(Etz8PNcT48pUOiCjkzhvK$1|aNA^DfFIfg9H!y)| zVR}5X%CIC~-wV~GDWfAwV_#pK=Z-!K|FXx*HQ?e6h|X95nV%ZB9;dH?qFOdooC0M^ z{ndr-{_$e)MO$y}wq{9`;-Aez*REEmFs<}*xcL;hQ@K0eFGlBh9``lAc|!pDCvTHm z9ou;tQ#iP|{4Sd#IoA!g9?NA_3V-}prw}IHo7xhPlCZ;0A3ssI4KYRCzx|z8h2Z|1 zk{%)dbH;%dlmV9#C0^3XR3_Ul))=j$Yh41B(K}oMK-C!G^14N$(koC0Hmr^y)TtK) zo%&W#sl)>MVfBPJ>zY8>z!a!MF?#Su0XZ-afByXQqSn zIcdI6;$ma#s}eBE1{zn8)z_)Thw*~YAxIQXWS31^{o|8pxaPaUAHIN{az1qOeS~9h zNquT7jJrcc@mDRD^QyrG+`~Yu#d}c}653*&mz+>DC>?+_dScKWGNRtg`iWT4R#@*< zZ{Lo4^~(PGy`99f5)i_N4W`=vTp-i)oN$TZ6?$Wb_vib^{|gIXBn2X836SW~t0OtJ zkgpYrK76Q>EXQDwR<^bd30I5jumCag1w8xSD)4Qgu99(`x-j9p#NX;=k;VO{`8k&N z!|D*Qm4eaS1k`5Ne9ID|IYe1hIase;np ze&?!#S<58Z8GjIU#T;2_6^X9`1_&#e|F7Sog{g9-~7Gog-d|q-jdXuqgUXgCdvKJ$(sKqo;+DgSp+@hdTResDlAL zv-@C2c`R7VgF?Tb( z*OvZ;iN8Fd$XzAT+qyemUzr~6WqJeg2kAQuLXFf08CO8IT=snU${D_)_%HqClm{pxci!dg2LG*|f#5 zOcvG~3^ti8XE90nkOLaj&OMJC|0*_Wp)?H2k2ZU9s=cq@nToq_mQd}=Nbq5~(D#R~ znDeNq2hlGgn5zcj(JP-t<8Dngog6gXJ>U{h0l~4L67V{FjF84!DPX2yZWEwTPu3a_ zNCAQqF-|@#)E3Ru`TSyp#Odd`h1of=s|XzCl>_a2d;8-Cjg8Bh^nbSRXdh@W&xEA$ zAblCPHKQ<0F>4D^?a1IBX8-&v#SVi4FXQ8R5~6fQwESHdKw3QKm*#k3;0)uso;&V# zsxoo9-a>_#k}nVN8#Zwm%brppV@_JwY3d|OpKV_)B*HuABf3J~ZaoEV3?_u04ft`O zNw;P4nbZ9K0W;pc*E5MRfzN&ut$hqi$J?qu)T>6_ zB=#*^L|>HAcV@CQF<}yQnINW^;@Jf5B;3paTiX7I8|`nq!+>_~t5=r>ME}wLz|Gag z4e(0g9*Op|TsIx^s>rvhG5bJtK48D*Io98=3&xsLjX=NgtdEdNHBBT<5Kh%TTIC`; z$U(v9+R0y4kic3jN}I6>s#}}S#UapGZeJSY4hp2~i-rZkt3ZYE?j%6{R(`++r4S4Y zdjA`pWOizaj9!__sfG&=V@s?>tZ`3~!YKN6&ywCZ=wNW8PlIt6$mh!KaL=t(05FQq zV(s~6;ZV#zkPr*hVIMhz4?~!4F2HgL?yN`7*cu(n!)|OpiL(sb7_R@=XrR zpYUh=U0-+29WYh`Z9_~x9PnC|0J?9_(z4b7V*V6}&Tm&*zJUDrwo*J`*LPPsnoGgv zsL&XiSt2)cc?p5p@sqX`UD>q~e0pvk9=U*khJF=A+7OVK^#1Af#!6r;KFwMnlmcWK zeFjro%7QaH9bqM#FP&C`FN|GldYjWSRH$JxRJB`$=bRe0yTgx^ZR19zW1or-Az@8W z4}MaO1J7dZrUMA>eILwZ!{+pD zlGDq7#*J>I7583Fn%hm|R?OV0{+*YyZg#S5Ms9t#pBao2p2!GKRRHW|EoVG*T{-+SUiU>PM!~RdqNr|CK^PNb5ZC9By6b> ziU&v-FK&hxM_naEmpg3x!Bu9VshMMISg}I*ra!+B!3xtdUNAS z>hef*kRPN`bo%D&@(^G+{rZ)c3Iw|Lb61KTpKIw%*bC&NHL=U##_!W8A5_nGr`QG% zxlz`b|)Uj|h%-gkuFZ!Mu2tvZ{Rf{6OA!j)q`VK!GLn z&9UUe+kfh0_8^QqI_SL-odaRr7@7AIz3>R%9Zcce269)~q|Q!Gg>$RIai#Z`SOrSK z?1bHMs@+Q15(Gk4UxD~HAtCU?Wn0;$%6CvK2q6+!C{H!OmT*g&dDh^WJ>B#Hb0El> zwaL{&eVOsK9FF7dq9LMa%3*AogJogB(_UaK8~vB9`8_RWqopTQ5MfH>HOJT*L0fFjG5++Dn#1XP|An*PA3iNpfU$6W;ew%}#+AxZ*H0v3`XSj0 zt}nTHD?kPaB8B42d7lkLicC(|{R`>0F9plq3wS%USijU}F;VWe=d~}Mf(~XMDhXtt z@ZG%Ib@Y9*F7ZYI6z>ZW31!Cb{DsikW&=auInq`(y@QDVbL3)m3iOKnVLAPckZ zOt=roNIF=+ppyGXkqPfzJiTS1|4}tL1_TFihY14|^m|X5WNukTxTQbnw(bR;tewam zP6mSD4SI=}E?&IY!+Ko?+4&?F^cjuG_h1=!f0?`LFuxi#8B(uO2P_;0f-_J6Z3anMe8?X(4!t8u zVHn9jZ^YBqll*vD;y6;0gVLxHP1?_ z5x^&4E@Pj>01$+7Bux9=rUDq8?b^HEv9GP*9lB{Es?1ZeNWN(TPA|pFw zk8?=K-jtQqkP+GINZC>L9wFJXx8HrVzTdBY&mTSKc|FhTxj*-PU-vcN*ZWE&am1s^ zNqYQQ)DX=#^avt;NFJ`W@V>}plMfdnd*1>}-2wi)HzHuWnzc|C;NxGFO#XtYVgg1~ z9wgPIwLa*4wXt2_S(O?R#FRMCgb#s$eeuC-9F?VK&wjnYb&Q(xYA5uVeuNaJ>dVpQ z>ESBYD9f>Ji>0vlLoUdhJYd(B%m)+**M2rz2zqd9c(O4w!#OzQi@1&{h}6hol)E3N3%a@Sv6=cux$)QQPN`AG5Vjjd%8;fiw1Ev&y6ZgTyWsvj*ATJF-LsiHY zO3NXV)@vwh=^K0i+zL>>!~sLNEGwt4Vzk!?RQgIZ@Goaq*^F_&x`e&#;X3s32JTDc zV$nPVD7K4VYnRU|lB7{a#aD;+y%?DqMt6An`r?;PqQr$v4(naUJHzrru}GQtIQWu^ zWs}LLZ#hHn70H^KnSt8=0^EV`kAoUx1 zU_(ItscITw`!ua)tREv0EH{aGpD!@}{_xWCQX6oH@zpdAknkh$}7<4%uU& zsJeX<1aWBYacGAg!+6(r0+y>kz+0H2KSfJo$U|8t{b>a=S;+7+sNcK?7*rSy%^Qx$$Q`aS@vrHpOE};+vfL@kX=h zp%x{F99^UlE6+qUVJ<`LW&&L@_XvNQ%t8>HR}r<>yJ5gYDI3$H$&?XX052BZz<6W3 zA(YnV-DM%gGZz8*iM+f#9v)rTY$=i>L7u7R zvHu27p}r6-+3Wpfr(YcR?vJpm_Zsy(-OA@GnO=H*r5oOHy}6~8S-mtT?6ysyam^Js z@*;ya$k|+V!UL@}J=UT>Uz1gd$uAP2;lN(x=haV}F`PCoRQo`Nzbx7w4m)MQvUDZ{$1ne_fLBBr`nBnsZ z|G~&ts#WXpcq~|1PurNt4=1zg7Z^p16X>gsU5DjAplxNWfnb9gvHJ_(anK;C+uwVx zPud6^54r%7t{;R41FeNF>WFfED zs$EAcmGF)_i-G5%eA{8yScj>w?qiT!P8=^qZG`knMCr0$CkIPZV;g=4}2#^SQ=0YYM8iP8`Zg{TR^ zWkJD-WS7wg!U#kR3n>-^H0;|_mnjk0JtzPzKwzBSdz;4Tuyy9o8y}uk?t+5OQq+`p zJAuW)8RLgvmpT9T6odCjkC(Iq#UZzq{r318gVon8*u)l;Qy`E%^0&Yals2jdxN z228Sp7xUml#5@)}gaDawDM0rl-PXQ6910@@e9OSxrh&$Km`lPdaZfODA=l72xy%Tt zHkr#pN87mJ4W|KbJBD~;-v3(YbqAo=$62v{tQi1E2Q8eLn6o7!P^84qrM{`D|C$q2H3uTl2&hd$unIXRX z*PXC~KIGpde~~I_jnms3ot805^Z5=FN3OC)XiQ0hxK)^^f{BE_yB*ylP9$jks|kD` zGb+dX%5yXNuv@QNMsBL}B;V^m~jJTT!;Ov;gd@Wqxz{G~-5D zS&=_hG^Z(6CMA0 z3XnlE!iAO@;gD+?85x4u*MSd3Nb@_y0{1Nm+qUhvwobi&{^?-Hsp_ToaoO4tWSzO+ zke}pg8&PLpZBN&qjWZ>pD0n{cGcUE02(HRgrUTpK8e-lT0=lf9w|hCC<+W@^uaVsB+rxVVRVyXxW_OHU*E5kRaJ}jeiD}O8#AI7od1xYr`m(L4lWvb ziX#>M+;eQmb}F2_^M--g)SVBg1PE3#e#W2@82^XlhnvL~#(6x*9?md<9JeS~hQ?jg zgA!rp+4b>IQl0|@1^~IVbe>rcEMiLuldSz}ygPQdDfEhnp_h1jcSffKT8G5KzEt}x zWYeQ(uFaeZMJ}wB7(RR>xZw4nK;ju>iV)mp$#Y7nmKxjdX2)RjVt6k5O2#pIgU_VF z!ku?A=r4aHxCckPq)~ra+)#AUUhV98J04~p59)l?%4Qmq8TR9t1W}5CX)JaZm|$9x z(Nu!AlPXvPQP{s6I8l{*-#eRn`}i1yxb;D^kh zl6qqX+J#`Vg#hcG8|=CHX5*-`*rW<>9=ozNL;tzr1$2IFH2f*$>QxO-y5-B`YkAbc zm`uxfgyF;}KHv9GqE1(LRzl|?g{bF5Mh+uSJ${u*z+*mSu!k(?mQnMO)OugJuB*nl z>Jo3PA)V$!6rS<(zpBR+8wxZV!#MFRMh2KvdKX73weM?kXCPZ4x~Y=$So{J4ZvjQb zyxw(5AwfY*BHF^%HtvF~u`weEg+VtRyyv*Za|TBmp?c#64)EYJ6&~w^Gee+YV8yFS zG+i9LpdFU##c6*T^!>HS74*WF?eK_sN88mXt4l(jXoyfBHXzFbMM&IrmVBWIi z$8Q(LL4ID)iY--ysV3rw6xJ32hJI3mT2CxTJg;h1;XJ^<4JLfJDm#~qd-^mZ14A7d zZ(6Zo9_aI@p)5F!*WK5rXlH#;P6PO5mLSkKeh~-at)i~E#-7flh`o5xANw)#O#|CC zb9s9o+CS|2;+!XKuk_P;PE=9&3&X!4E|7E#@cnHPc~OWy5WzDTN}erVo?-~g`l7^p zM04zB9$?_CM~)xpu3}J_xCQ;)xe&%&Z{Y2IlL$3%K-eI|vV}l#MGC-i%v{n1G%Bna;6zabxRaFCrQAoKP_$ z(9U2ISt0np<>UP=S{%BTof^K(P1wbDw5gwnil)E$_2W$HhXu)l1M5#;!s6o3e7as= zLRGfunZF)|`@{DS!7*IK%?sqSE3z&?QJDA3Qln8tM$&oiO z5xR~4lUtY;0HP7H(RF!c+~Dl{&XnUeLhotb-Lq=O66A_J^aNS0PD&mOD**SY)3~@f zi*fTHoTSH*ZYFua_ZqZ&4Su))2edOx)uE#oR=4s-%|49ntzk>#wAGDgR?(}j;J{Z? z;#Va7smdz!PN9Dn+vcnb5FJuFRT;zseDT|&Q5+3yYc7wGyoyFIIarhZesM{I=i82# zbO^(tdex-wGTxYrZ9lpF;zvtM0B?j8q&?7q`lo-M25f*>VCx+ZLqsO_Voy%|9O{PG zHX!6V&OYD98Xb7e#j@l)3P=vtKemAkDKiTT)e^V44Sq=)(gO-Y@Lw*v3p>9HW+o-0 zA5u~*q4^`d*%!Ep@ESe=>u^}x8OhUA7Cq#PFA5r{>7J$l|VA^^? z4@k=yZ+*)KwAxKr-rr5Ik_+#AIoFo3F8XGF&)i&gbkc#jedy7d@Wu=mlIXkkWj~!I zJdCqs{Ip+Q7Emv#9rdzs7f8#>iWVxCWFga$3;q{-TO?X%nm#I9jX06=qH%H9PX$KN z6&Dw~&gYiW5W`mbI_ZeIzp6eKf)3X9BfICj)HZL5etrc#YfldEldQ981;JC>`-!)+B5T zJ>~8Xy|2}XjAOe#Jbn#l!S@Y(ahcM>x<|T0alM%NY)52oW@CDKwl8}`{8<|2_{3r} zvyQeJ&zaQbh3d@;&y=@H=8{Dzi#c{t!}D|pZ}bzJn!57i0<^i}=ersok8)`f-QC?o<|h++L}IT?lwIcY z({b}+wDD-pxq=qidtKSe4M(Bd!&MRRalIUO129pz?Wy4W_v`hC%AZ#5-WERp2wfv| zMRDVspTozE_*M?oD)ioX7{su0j?c~J(?bU`Q3B|5oBV9n)E%vBQ3~v89bz!VDJ6z zQEFsR&#bF}>*&&tA3s#|9QxO|xVhs%3CTthKm?FNah-{RsRQK%@0_7>9o?fxzkELwR6woQY!qhu|5D0+UAK zolkb&OrkDJH1@+|!LKx5C;-w7HoGUg0uyCLINd(j)IzqGRi3KnY`FU(dA?RxS05Zz zzEL2Bx~uuZBZz0-koI%tU#gYqwDaCw$qypSR#5ivYg44NQQlf7yNT7+;Tb~{XsL6a z=-27P3WCsS4%iRaTDuF23%|8DJ3uOf2AE}(urdDuS{f;hMb0uPtN{Kl>)-kyT5!Po z7T1#~?C1Ws%5C}!q?{KAvlh`V(ON7-D+u%}$a7{q@I-EzwIwis#xj*VCDUuRmX=Wf z>TFXx{JD$l3;Z4myKvq=wK;^tboH9u*mHxgEE{`P_vTkQx6^s=b@dxqtR{v>VZRL} z&0snfU+~9Uvi_}^0Opx0GPk^~@#h>L2u1NfTqFw#J{Ir1Q$dgk0*C5Fs2oMX9XWjg zhOR|(4iT6^6KEmh1W`{UK)JT2hY#yKclCs zWY}XSUSauhHAC}ZW<3L)i_tF?FA!U|0g0$Sjf|8!F}DQTfskvW9?rqN+PL+U{rw z&(w4{lfkGW*YDikTVUen42r^@*psQnz1hGF=7u^=D|MUKeEat8yJ`d}>7OTNe9?X- zSuo0_L4RPSW>6X4)EMYEeK=Fv@_v2NuPWl(Pbmhx3DT&hvQ8@^9H(0LslHJ@qA@uI)5#u#4(P?w^JY`>C?l8V6^2(^g}Y;P zQHGw3yXFe8@1YiVv(S~R0l11&qHEgC?Hx5tVjPny_dVyXh{PvD-#iMlG_1%p8N0XL zHWNKg^;;}gqR?2g-i2x^scU5mgr)-~d2t(}DG^jAvi=T&eHe9s2RAAQ7atehdG`&D zlb*jus&FrcgJqjV0>0D+3X2xsi=)6oONZolSefiIk4|9CpiW9p!L&l+7B#^n`HPM;1 zI>WU#9a9#;J;V4@^If6@GUA|dHKZ81l+7Ikc^ei{)Sx}2`QpV3E4ELYMY@cw7hdKL zY3Cbod2B9x*%WbKw@BnSRq%O_Ea5u?E09C5;8_p8Yc8{XeBoBLb`LRx8SfE2^rkO@ zMbF8}3A&&ZOP2pbtzQh(&MyRO1i|h;-5+{xJM7l zR|h5^i2e%AFAa^>1={P)My3!oUIe?5AH3`yLAKDdZifNyD-}{C&YWAI%c}+8B}1Yl z(ksbi9HWsJqrY@^k~#Sz70ePrimB32XWNWdd6m)a^ED?`r-umVV#!N=9S0N77q3ZP zt@_Gia7og=@Ycl_Y|V12=gV9ovq6^by|J$DQx66mD$Jug)}p`va0OC|)$wRhl#oRo zE&4WWU4fI6GpnjfUzBL5!!n4m7v67ZtD3+o1&V(vpSA*Te0WBiQ+OVz%@{B}hZ_6k zRBSL$O#&3lh7Kn$FbKLzq-+b?FR>_48Ee=atDQ zb1PoGk_ULHcbuq2o)!EO&5 z2^4~#aCjj$3L8a{;i7UNRPDT|J4sz^8SJvrS_jY}-<6{x_1RYDa`K;4lv7yyo~qQ7 zp242eDU&7GFV?Tpho5cYn8Fjj@>*84s;o>06sW=^E-#G5(43F+_xs;Vg^wEnDWjng z%a*7@HYOjwMoSkeQqqmVc@i; zx%Y!)mdj?3vNdH`-T5B0%lWmO`H*}`v$eP7sVG}3{)6`0I073yh-R##D@jtnosU0m z$QEG)`}pB`4_kj`0$1*-IPU(bgN)N{9Ubu%6;~A-vjwbhx&ev5M^?Uhv%Q>`jM$WjC(CI2y z6+|Nmecijsl#WWJV!7rQ7AO>mY5W^gPaX&XI0SuCnR5@`A)7-DoGFBLLu$Psb)I+M zjGE5{o6v1i{`Sod9{MYrwep~qV}{3Ms_nGWYw*(<=c1chui z#}-spfc3+5N)q3XnI3%7H2t<}6JYMtpj5g53ubx28H)4|sXf(mJfO?wH z))Ok>6xJT_z>I1Wc$o;HU%x_@aTP<=2_x?G``-^81Y8Ro?Ckc>k(Ke?BO_dxlnmzG zaXA5F!4Fu=Y_~7mFMpw45X53R{o2!C0ftFKFH=(2o&{53ShKRS2B|cB5sFhmr*X$_ zGr5_!#hzKt8G-tGh6rmf`FQ7CQj(NtBD_z>h1E6n91udi%F7GueRm%Bx8eAsfnkBK zfq77$HY>2e_={Zkw?a&GGvcDK+rxLlXho%2JQgr=&iCJj!JOdb)<;5~Dx<2kN4ioEG(_tY|p`3Lb|PIL6lp%6AUHmjj8=v)YH=3etP5wG^iLY^3wuyWnrW!GzFA*Nd> zwQWW(WnNJ^P&)1WlIHVMCnJocTkA+;Ri3WX85~S1NUOb;-jY^bbAH z$;mnLS$^1civ_PY-%wTAfi_03W2>4ayuDodN$&#DeSgC;r&s%Kt)3z*E2p&?Ha^{$ zAWVp(v9Pd+sH&=3!Og@y@u>)OHt=ftFY_vKiUFy7zqU5m6GOA2`$5tW5Z!z1&40EV ztr}pzix`2Y@R`UQo%!0~F~P$|@KQQb^6*Ev6dNAQhfT&~JV{v*1~_))Zr>h22fO(L zRdkM5`J6E>hipJ%^B>>p_ftSV2M6?6?XqJW8nB{bA6~8cDU!*>>}J%YSPI0_k9r;? zpVo`_e*Xxc-)q+ic+w8Ty44bQW8noDRncb5?x-K&4+(A@a+_&`D zA__2Vz-V<$LR8LePyGGd|4R5u6j<8eNl8HORUomi^7FSUzXBjT2M5Owj}-Dz7+&_& zDM8EXA1U9Lmf}Ed;lfdiC&m{#++yY9`$CN%zLPa5a&2VgU}V{w(#iJ}J_GTY3bVoN z+KVd6%CWt@w-jI&Vj`hPa;TCE`K#RASbzj!yA&kyGhF*Gm3p_#qTMr1+V1y6_6Xpr&L?iZ>rct)$^LqOPQ5`ae^ zh?}BZ!k|qQ17ATuL$OroqStF578gZa6h|M&82_z`S9~!?A6SUnCzp=I^`rX`<>BtW zYo(GigCNU;%iOQZ^fl1%wGPQt zD3<1B%3^_oEhHI~o8YSAk0f+UEN50hfm)@lLiGJ0i~l<2W1-{5`0S=wrXm42=eYIR z72_`bXnku`2f+}zjIAXBre^NQ52fx3#K1AU1*sc-l)`)0S8Yt<>N4%kix^^}ZemC_ ziJ>ogsyUj2>h6L!h_QLF+U1Bh++B27?-MxuL&s}sqZ_z+Ay80LRL zd2yQ;a48?s(tn+ttbcY&yHW-TaC8JZVw>b~@FOcLvq&#R~&PvX!vnk!pqW3779jm2jPTOu#;WhWg5RHuINsTKItr z3k!#|(8rriq)-V;-s)FPVFIboeo#W3&wgKA1O&ho>hcH_9T*{42sktl%EAFBPrd9G z*m&RL-XTJ;@u45|wj0?($d^*$Npy`)5Ra{;*uMt9JAWv@{M!XvgLxy8WRl+AK8$ey zJQE<%w?q6R1bGYjh@Xq}49xx!0x-CDcXwONye9kH+1dGS@gJgwSEdeD7Lth-ln0=l zsi~>$ST()YoU4;!RSZTIS?_jl-UddTnVH!Y`|(X(?v+uGsVUP)=a2eqz8v<`V#K zr>CjXb7XOGF+Jz{NMGNK15VrwIq!i#d+o)wpdU1ONCj!#O6Oebv6_*WoVK<;?G)C9 z{>utPUSwhcoS2CLiGo&fCDcHbfwzR{#_{D4K{!}+*ViL|Yw$nkZ6pBHiS;>Hwn-?s z?_S}G=ahG-2*xh#+)PyUHRr?uG{+1e$yf;B%Ne$S5QYOFPvf??T{AXqEh!Xy(SX*$ zv?YckMbJ@mf3f6TmrW}*H8qQ<=#Xufxdfi;Eg;`_qOObYox3}6Vr}~vu%ztWl@}f$ zJ7BMRss6USwst%yBwAFaO(Ej~Pv0eYNlgY!Y@E2qriF)wQ0>k4=uG+OcQzxR5iFY4 z!)0Gww{VKh$e_b8H;??fb?xRDc!3K|Vsa@|N#cl;)EWCn@1AeZ3EhqK0h`_*vTvw^ zuo_ZmxTEW^t|YmG{sX|jW1+W}gOQd?u>MMye87-@13;XuV|aN1+@k#3Q>&>rMeT(~ zPJ0hY6?~EWiF_uHOWuVX+-eO;VmH#ANqQEh62k51{0ynEq|Rb-_3Bj!cj#0KQY=aO z#7&0-`!W6&>2bdt(7$5Ew|<&HiiJtaH)Ayhd@d%$a}GYhCueN}15;TzIGWG0&iQx< zS@#?1he>fgg<@$x8jTF*9cj>yJ&*C}rYL?|W1V>1u5v)M zNpOa&6hVY$MSA2}4I(;F;W7pE+))t`D?1;Vc-|?ZEzKBJ$d(%EbU-Vd*7{;x!qLV*#6AT>G4?Hm2JRuxL)fjnv)|sJ< zNgc_MPVl=Cu0&;epLL-kW|8#T-!bQkuHma8#{HUd_Dmc_$z~mrAsT()78b) zLvpK`DO`-v@Int;w5k~V=<@D)dDZ>*5mp{s^f8O{-WT(|3REwQ1xMR?PhRHO_YRus zNThb)S1y=7=NbJJOmTU66gE;A>}z*NOjHy74k`se*WEomB94)57q9{d)Ev;yQ6_^Y zKb{ctsz^_>lH!9OjL$8PB$O42wQk-75TT$0$`3^1Olc)khQC$mH!!~-_*j>A7TDbM zMSmT#i)EXiV>#k{pkc4;n-pokP+)UFJG{G9t+CbTBR9F1F%_q08=L-2F_iqYmYYB5 z8B)iH={veHn)f4ub$uiqcsg7Nd`QlXSrRNOkQv=lr~7ch&i!O`<${sIJlDdf8Bow> z!82!;Sc@yDaL_Eg<;u%~3nqy}Ov!T5E z8hKg{I)zss%_`Fr3o8%{^M6Qr>Qo>!%%+q82kpNay6d&m<9dJxuE=jp3W2+yy1W=2 zrG3P3Kbj}{wW0VGBNx=tk}aTB!oMnI1XNjrDTUM8q!=Xf9B-~)yK=LftnVSI@FgP+ zOUvQ12iI1Km(QPc!;M_QJwJAtcu9y+Wl}Idook>qEG9nQDjzN-jmc}`zocII6>;Gv z_3SOz@`S2NvN$$|M^$zlYM@IdlJ*-9{Z=!=T&8(~%$vIOj^K8+2PHtssf*TTG-_4C z^*w)lWX7WjeLogP%6m}y*ruJ9T!VE^4fxx_m+h6PPxy=0EQoYu?)$oXZzzEG1hUOr zrn{V~8^Klz#QmaiZMLo`T2be~iXUa*yBN-BMIqL@=I_l8m`pwSkr9v-{sA>)=%(#` zG#kXvXZ4!Kv5imW`b#K^O2AAGm^r<8OOJ2C~&Gh2u-d@&U+J4q#{i5eiME;j~5;0Ws@wy4HR#U!?N z{$*H!Si#keTq(mU5JrIast^p(uI0R4V1xpE%l*r4D#F*1HfhPEY-ab9G06)6KISTP zTQhSr+nD4RysIkRdItJ*$Hl-^R^kvO&AmtcuRnbSMa_1-P}I=qjBunupl2c3N1JL( z4YQGEhtA|WT}wY@2k9gDZW;e=W=8SOon))NWq<#~!`IO| z9up(u)q_D=WyIcs;FK3r_p5TvIB@s;rdb_>?PwONqyE`s|=&*ZK15 zmy6F4;%f*r7i5mL*^Of?<_skbk2y09b(d+bdy#hBMo7%ik`DqnJgfle#FtFs#{>iM z{yjlkT^UZskV$7Cjb-BR-;kV;#&j;=Sibv6#bXH~R_V~Itk|;(ircI=_4GIaWVt3@ zjA@(z<@0Ro&d$4H1~V4i(@X}#`z2ap`<#gn7l-R!xP~HgaGX*FJ%i1uol>D4c#l|r zyE>c3hkBrXal*nvVZw}vbTn{!?Vll+T0KfKTx>r)d-@bWD3sA-=+7*Hmdxs&6bTA6SbRvx!s$|Q$wXsot_f?9n-)#Ix5UzI* zz1IxR-TsnWm}6UR<4nFXVOY3v$8CLp>tG|ptI<^kO$fPj=gxr;-$Sb`C`u)84UdRN zO<(_#prBxVs@R-`o*wnlY1-+!lgTP1QU%;^GHiyH(?~-!K^kkuzi++O7^n=MlD2-O z@r5qc*viJmNO{9Bp-Q+y?$Xd<4UMQx%JAE56nkhBFJ8YaqBJA{T0UgkPaqq;ZRIr8qcFJ!!6PDg%9##z#a2NlR!ZVjMI+%pp;Pw0{l zpzMXxX%?{kfFnl{DEvSRm-nDhCzeL-VcIKI>Yf!|ofxQAVOyb_+v@z9N=Rp5-5E#Y zdPDzYU0;EWVh^a2BFtX1OPtS~1@M%Y*+0jSy^G2LR!U5h@zB3S3A`(?Eo^buX-2GQ zo(0MMmFu?bqRiib2en<;&wyq6lmTChtHw~mdx6ctq%$ERFMR!m_WsO^!YLu@m!!1! zEp|(DPl2)k^##^%yjanX42v^=Cdr)p=MuP2>=1550z_tk6xWQD0aaF3h7>P4utU%d zC0zzthWL*q_*CRU5$MjLbIokjKKF;DQ9j;>%MN?Hw}8Vz;ZyPOF<2`Jqs|Pmii5Re zWFBMc4oS?FayU~H%qLe%0|jUVFjQ=qZ0po_+LX%I%_(tltaBot{dj`0XYJd$AZunF zF>gv!qB$GFOQxf=(h(Eab69wII^xz$H1MRaqu0l{6kyU!7eXeaQJ~fjcs~17lJ8{x zXYu84nhgR19auy_vxNvnXr{{FG{t*|}p{m_H%4 z$4aoDD(Z#yvABkKl3bcF0TmuaBW;&+f8w6ZX{ffaGplpp5B>b}||6j@k3VaxZn*L?yr$B+b zfn_PxyK!QFdHhN)^@aYDnfFTcP zYPPmpsvjTFgiNYjmAaDov6TwHanN!kXLKR9PxD1<2fbI@tufxaBcDj=W<{OOTG>)wAPdit*cJ+1c$B`>XzR#>P35Mu|<9h9ry z_l+IiuTz@-l<;%WBK+)-x97G?m(R-YJ6cW0;cpJ!9DgEN#d);5G8q*gPie*2Ad8NJ z0A^t@mns_%qJAVSn1m z=3-ijmMV~sD=&_Na&AhsjYM;$8tJP%M^9{S4H`P9dF8J!9HOr{8Z+YcE2ou~mL83X zY!J>NNEBeu&WWUFP{4S?;qUK1$fABx3Fw%U=MHDaJRIXm#uuQJIUXYy1gKHu6%~V> z-(%nxU(nc)B347@B8kHyeLnAZT(>;-IG0;ykM?Bwj~Yvdj3tyHSd|#KxV(x=n6vwW zYbG%5^*HPz<1455y-qA=?+^BxhsPAW!Uh~sNh%N8zK_j9ddJzr%?jk;O(TP)dpqI5 zs!_lr2;^w=sVP;hS@YSH>ofFV2GzT4$Gi5>B)E~TFQAmT1=w$s3YfnIB0fWaEs%qq zc^XIf=Y9TN+XU<|GWPt5C zhY&axDj~e;K+%G(pth!_200@_bsOt%5IxZRSrkG{rY1xkt-8Zj&a;z{5DQlWBePUR zR4+U&xhfa-iV^04)kU4fe7Qm5RvRcccS$Gy^mI<(H`iJ#$iw&clJjb&A-aOc{u? zhpblVq@Ug9FJR5F@8HQgJ`|a8P?+BYUXV=B^z4$jgJR!{&W-revPklqZl@iCf#Dh^ zlkX)BL4pp^DD1kIo##$2ga;NteI71rVE}rmUr4QnI;n!n%*;%|(Xl9xYKC;>4Wd3e zX9~BkBl-o87@uV`6gXBh8@ydR(s z2|VZcw^H@{7Y{}*{n8ZE-nX~z993n;{;6=z*J?_n_Ud;ObO(QBtY{kGh=nFaTh^Z`)vn*GDTFDR+k-3J30PK@D zAR5)UbH}DG(CHt_5pD$hcw`Wn4Gjn)kY4Z2-36pSC4ez}LDT{dQjirKjvY3VF*8Ri zNo}f6NI33}e}|%c_6|b}$4mItg)X`Qq}K){TS!Aby=XLrj)~J~cR=-E519U@2H(;D zEinPlz7EI~(YcA@d5p)PDyLkUoHd2?GHy^x;!3lXknmhqozmWzpRRmC$EI1cdqafh zht7`f{>!_Ro9@eOorg;jw?__w7Peg<4R7;|#ecmniWdRry+nyiTA+pWVd9$36nKW0 zP?HZ1F&fEt=EMVD!6Zgg2ug)071NR8Uu}1-n)mlTU*+T!HD)H_K)6Z2&(21EqE3y= zeRieE4rB^^JeIK&9Td_rjeI`-^qz%=27?=sNpZq5W(FeH`STK3L_|bl&fClH-+n9r zRq}Dmy;Ywl+8+3-&%D`OGQSpE=cI?-kEuLV$_DQEbJ+vOM^08YV9Xn!+nIsG2g2n? zJVx$o*AFajd%bGQkFOVcfeT<1FOOK4%O;84);MC%xvmE1u2Hv6Q-uyNfYd*;^vQ6qu!fUjIJ)1Tl4M`cV|0Js)FP34OU z(#E_P0G^JW8K+E-EUUGPJ2vL|RR_a7(=V)!F0c5OMYHTR;P9Kg@0;#iZi{mylRYEW z!3lj&!8=k>ot}OTH2l2B;0p?5jDDf07DGCz{+-4k1r9v?>jxs371o{Hv0runjmvk0 zBv^3uDK*Yb@uM2)ee}U_wtzR5AD^7P9_BWOK0Jytr#mbk_8Q`u@*kBb%A!}e&{Xk5 z;OD%ndn1IMY1* zI=687Wj(`z+dgx--J0%gCx&JS!_I>*FszZ8^J6%mI4i$GLkRLJAFd_y{X2(op?9NfeE`SZ*WK@gnyRM0)Thp{L!=14lhJ#PsQaSX>KrcvG(gl>FRD-r z-qR)Nuvuga@dAWMvMVnEto8rB0kB3r&Dv_^lPdl?$=##%e*xBWSnpi>C zjRO03vL|y~))>>M45_=!h>^D`YcFw42k^}Tkap4)r*!mT-&Dr`(;?U^(EKcc5< zK3SQ5@lRcb^S=9{E(=-T@K=m)NiA)Q*JPKpr#SEPmB+wM~zz|3LjBD*D~u!N3ZK( z^dZrqRY2KcdD`%d<(<95{fMa~ZN`ob;=z>hDX{V4b3Q&@B%1{f(;J}ZIezD8Ek!$aD_Nux1DpMY%?7tf_w`Jjx` zbVBYMk?%$yChEJ-tYPa#v-5uT(~bjR#l6;0-PWer@9e+QERu6gD?WwM-7DNQH%>~( z0!BjaFxH7NUIY?y&sEvyiyY&z{zTbBs}kAAXC8YCSK_bZPLR9VxTWmp(|g0SVzpa^ zWS)cw??85zlAJ^s>{E1w8K!i@2@{sSYiemR19DorKI5HR_z5xV8YVw#iWHK({~;<( z7?6yJo_k3o5S3nBlV3TF#j>A9=4S=|%h{ECKvJ*lOq9nrSLb^R!#vu0z^f$ZH?d}~ z#3x#KqKzl5nf4Q_`0n~@t1a-ji7-JtQkwka>7%8};g6~L`&V32aZ^xgH7XATR*Y?#m2y#G7FuUK|W3e24c z-U=FcPoRI&ZCCf#L)|3re`^vj>~nN*o%P$OG^(Qod${WU_|wT6)-^gG1IuqIS677d zVfKNDpMV4l^VVA{z!pPcyvk&sj&YC`r^FxpEA5Qt21n7)Le|EV4gnK};{dk3*5p?m z65wXG_0j8n_hM+aheK*%1sTX0GMwGfDA^f(#v4}7#6-BJxToD$S|Tx)P%gyjo~sSt z{b*+OV#ga8^t(CTiMl69|88Q^d53()kI#V@V1)JcIk%FJvP?6URn;sGN)7-x)s1|f z?XLipG^#C0)8uZJSkb54nOj(h0rP~cPG?a95MLm&j|_*nrbuSBJ%N*YR8kl%FuV6` zN|j`~1(1%eLa=7=6XM`TR<1K2ufz!eEatS=*;aPGEC0~%Pr+3_FMkkQkFm6pM%lHj z86nrVd)JYO%h0Mg_JX6(fT<{aq1NEkP1=v5zq?X;)f>0Tv1SO=e9`4^i{{%Nrat#t zW0mj5=;jkAH%3K1{u!RKO$looWP3@eSa1RXqm6R&%0lcs+Hqbps4Q!muUWBPOX6hiChHv z&|FYfg*)AN*aZkDuLIP=yW|zCW6VrJDCQX5h2opGTj^s)GghI@OVkdn*;mT9R(S?k zxs{+Odz`_cAr^7*J40vWqjVWng)q^{yLXq>J5Y4YL8m(>Xijd|$PR3GftKFP9P><` zU%z$hmV%pGc?mYSM@(#6Lbejm&I@L{CIWm>?l;Si_kiPlEm9(h(P-Ds1<*H9P*4b) z-fk0k0<2UZTY>@5is;v-izl@pRx;S+RfhJ;Yltm>Hld1u#{=uE zeiczs*w+t)eUT$L|1CsxcRb(*+?VWPdMmjJW*ZJynsEadeSSZb!o5o5PWf%*JxL*1 zAqOkX)FS{df5(CgT-9GWQAQyu_mjXP@FZS>&mf)7a;(Hl@Vq_R=d%8Zsj^MXd5w3Z z1O(G>gGrf~U}69JY$CY2pY84$M(;WBM1wNZl>$k8o*g4K^D1fhC9Fd(lHiVLkQ<;> z1hes+7k_%|$BFoV9K#;w(T?@=3J?PK43D_$gW^U)YAVmehY#x>s#rl`1^$@DGxIf< zT3aIYN~;7u`+O*OI9k&)y}SDZI~><@<8M2fP}xXAj_E-d+r0*z67us(_4D5e3q;k{ zKP8J9|5B6jYU=6?rlzLH)w+|RLP04wj?FNVq9Qr;RD+Nx0Zqe|>Q`xV7dW*y+CX+c ziEX@G8f)jfS`eyrs#7$Tp6pF@VO&dVZ8Fabs*xd*2xfZ=AyoHreq?TIegyf~$3*p5 zP-8};RVq2<5Pl^-{BWDoAZ=iCtxo#nS)g8G45Ogs=_gsp=J+XQ0-CVTzoT0FpIvOI z<3v2V1|l?UUmwwofwwp~ItBnEg$g%(9aGd^d)=9aM&FXvpuK92&h2X?eUnJgVG+(| z^^Z5h2O0hjTZ58kIv)H+kR#~p4V%=VjGPF3-=^J)=Psa7%kbn4*2#mx10_S$c$LMX z8VdN=RFdAF=m8fO7mvffAK7~C8V;bKiPkW_L7h z@$2PP*>0-iV=uR?<@hiyeT|a(%?04&x+GtIKv_$#Yl-gb3bbq42)Bs{m|<;|8{--P!&f_KEA=Jb2u z2~nCCgFv#k?&iEX-wAb%M8*RVUU66#FL5Trf#421wcb`BhB{AOf~b(K7^EYgwEscz zpYy+Hz^cDju-|Bcg6!blG zYV|xGFmx-sDvEq~%H08(Q;64@jPj_!<@xzz=F3l`&ST6e=y1V+s7s!Pt0$%NEr`q{ zG11b}Vg+chyZ|9lL}2tkPSOPI!`}LPf@=Y!!tk@(Lq#u;qqY{IUJwo2#OpUicLhNX zCI&d~JhQs@W5h-A;rx&p7WEJ1L=TVhOBbtU+T}BQkyICQ7gzz_sqzN%?i!yLiR;GN zgLS6&fhU(ktb^qOYebkDBgjWX*+BeDarUhI!-vJx_&Vf}lym2~ z)#1P>=2b3T4KJsOxWm)aIwx6k@e>Jm?1resg@VJ#7c+`GpW+q1_7hH3I(AM!D#LnF z@BMZ;^pnQgy1Uz({kv|@OP7ldzfS8F9o@8TtK?@ic`Sbas(lrb8`W8gz;v?>uUS{* zNdGLll>b}C5k^RGE!e`Y6J|gFPRo?Ocp(ciXwu@i?gi7<7?8)fHEMzk2Mqp&ug!G0 zY`#z8_o!dRHEhv==g{Xyv_{E@nwqwCVL|51-YT5?>_wagMPd6=O;IRENq5iGZf-iO z|7Y{>U!64!Ai+q{$(GlTL4T4F{7HL#-_zsjBB^~U_@uYNvvngK%{Fe@%^f9&;a`|# zn7y=nCH4Z_7;mGSf@REp*J*7gpFAPp;N-FHyf1!1(}(+9@f zX_b6SS)JI~LYS6|wNfr}1c0)Ge;Ff$nDL3<5K@gzY%TTbm5HU&VRspJDAJF;QgW1V zQFi5XhYzFV;m2juG4tHN?tHK!Jv4#+g&0Vbcup~a?W?{o@37no1R zG4rV^PQ9gyTimrpKkN(9fBQR41Rlsd%sQ1ys87{&b@EtFVQpb* z5Ggu^WEiM!07|Kr8FU7IJM-~AR3B7b5JbNHfBG7Wd{+M0s?b$|_ye2OdPkmU55t2M zH9tQ7>bdY$r{>cA%ink*&+!Yo)GTaODq1c#7!2NRmH%Oq_m5}8jLBO*y682i0n);x zT|7a67zqxBC6$=cBvL@|bl6t9QV+G9m&82l2m9tRt+QZyEkO0?aTP4PXgFEsiS2Hp zu2d&c?aunOcjGaET z!_}J3Et1Xm*+SPPyOFebK!1{aOti;Ql4JwP0+sIGbx##MZ87)0XRIhS2e>=mUFMVb zfCjys1pN#IZ44J7VD4`8?IN=sTE!9gSc%QEu{9ORySpq8^z@zq{FR7Tc^f?zln9(0 zFXG9AXust*kyQfUS{`qafWZ1s%Tz;=v1=(o!jc$hz`=yowx;$y6;7!>UctFPI~vCn zQ%Uz~UGbaIWt*UZYUmiopLUNV!J7z=Q!cHEl2=tBk}ZT_M@`f^0%745hO%!sn2=+lActZ&RNPKBUbZ`&^zyhN? zJ2klY49Tnjx*o}i*_)z{WE61LqZOAuy~iJc+|t4`?WPPxvu2JM>hl)$Ie~OYLM!}r@^a2Ov}*RxNrp(uq0 ztq!pT1~BsaxTM&8`N|aqXXldt2m0~H0PN#^`1o;0#BsaeS;?3XU_s2D3PjdT12_>8 z=2H-Yx^Im?MwHM5Mja>ky>~cwraR(Wc4_Hp>DIU99mc*D7kaoI`TYxZ9t8TZ$y2Lj0BVn!iLC(s`oH$x zJD%$I{R8J@ri=(>oiZ!4$cz)IWJhKq6jJugJZagIJtBn6>^+hh8JXGHdt~q5{W4GS zd4GTZ{vMBCf1O9idENJY-PiTJ_VtROz_~N=<~XDtHEJP zU-244a$=5@)bWt%t5G!gaKa=5Ud$Zh{2lD`kE=RwbaSxs|xGUMRcaCd>dF^#2}>xB_gjIixzP>!y34CZWO_y z!#J3(_xiI(Fs7!araMQ(4LJTYG-U6_08VU|YR6!7g4T#tanM6#HO5k>Nrb~Rv)u0~ z4qoWvbroa>$xYmjHIVTv*PgB7*GhV7wIwr&l;ypfW*1o_zA8M=2#h$&Q!;s^kF>Qx z4)PX)z~CnKfOBOPD9{zt)BBuh=*@FCL$fOgtUp-r99zc3aSI=e&S0FG zNUQq}-gG}`V?7Xf14&ELvTi(Ii$+`PyB5}S5wFp+b>0q<8^-L@I4r=rCSQG}dcfD? zIIq6X&}AuF!t3bXr)b|&XtsiyOb|AVC0e%iGPxijm(ex&bS&`RVCUts?LoB+vykjQA2eF<*gVa z9&$KJ48=P5)l4j-p!3X+{U`2W*69@ktRry7VwyE~Cs?`;+v(~I?3uF=c(^()XfO7- zVuXVcLlfkM5b5hn-!D}sznc#Z6U^sFl*oV9GxV~qihL7VjAGH%{hr$!yK(B5l!g*J z{>^)iYHktVsf^fxrB3I9r9E&&L1C{lv=@r!hOGCOUd1&ULYlsv<+kY-19lv$|)%f+yw$28tjH3DEy5$J0Ig?_y` zR%sd_iNFj9qCulyM-5E;b&1Ck7uE!D5!}TGPAWFDT=@9wIh-59#q(EiiXMBuEPR_3 z;%C_OpXop}BrO01%8$JSY&h8$WWzu2Oe=V3+RlpGjtLc-GDA#q`A)ybVnND-p}M^5 z)GHeu=*eeL5|R)aA(%0N$N{rLlpoVTLErhslP}=)dKZ|Pn;wXGzXsh>hmAJJ7@(YH z%$5tXtg`5i$9f+FZL;l?k1z1{7A)~xx(O!-xA3ANHFYxY&-z$TrE93_05Z>kmY=}B z#j>-?+pZrLXf5VvYbli*LZ~wsF#ifvQ9yObVYkdcF=~NS<+=dKw1{lZ< zUexi5n_Tfw-!m`{vL65@YJC%cNzKwbe*+|CDXAnkZ=!)9D2Qk&dW{ny5Re}%!_S?O znR!Q87-n?+cz``tDbH=Cpd+3~4ej~c*QLInp&z!D+D;>N0l8)iFh#CINhAzXNg@P1 z9B3_={Uo(l!?ajqo#RA%uRjFPhohcOz}h6oC=s{BWEQU;L?9>siJ!tRy z1)gu*U+x?vq`KOE{wb)!>wx0%YlIxvF(~Q7?-0gngAmdO$GOKg>y84)q~huvL2KmC zh6aw_N8_O9^iyG>U~hi2*bc{_!EWJB zw55nl6>V)sdQfhfHbjVmQ9YakJlN0^L-%@$TP2nk^88|puJ1PFP|Tf?IkNZz4GHCq zPQZSDTEh(S11jScKRtFQD6x74^-@JRHb+1;711oljrMxLqvf40|%(4I)yLq=iqp^&5hn3U!=SLrw> z>&RlGwivC&i_-=QV$g%JUhAFfJq7B&uK38DC7;=8vp8dARDbDFOLsTxSRnRcB?ox^ zBP19uXIptxigBG`xxA~eU2K5bNWk~kA)kkY%pC|026`iv_f9D;-%FxS^D6Mdov8G5 z`MlU28?Zhz@~9{q8f+5~e1`FxF=)^+D5-c3TAkRMz^hfG5)x`Mj5QwuBjJn}3Y5R)*-OEg_X&to_~GBl_PI=XC6L6t6hBewvt+#C%Q8P(NK zo;(Svl!t9bS9LNPg~22ti;7n}NiK^5Y`h~Vo+J3c?*DU+sMfh zdBPblNP>=E6&EeWf_?RY>lzv^)hw>=<~g6eJ)N35-m%>0@=cBZcAA+y)_L zmX^1Ei27&u2IFw)fqraSN5^~H?dmIY!vv!SLp6b56a@b+MqPs1P{4#@FtQGLTBJRd z0GLxdP4QP%laONPj@UGhBDkdCa*x0_OyQZSlG4%yR_5yJ>cw*kp`b4WA*m8yItm7^ zVBvwB!AU0qARUkgjs_zIX~)6kHfo2^5sJwV7bKV;`Irrm zfcBZ_$dpLLE+GE`asqkoI4{ou6O7#Iz6%=cA#Q*@sfj9>R6%uGz6uFK%3`-~<5UN_ z9-*W#2+5&@#cz3@p(p74bjJf~u45v>b=qG_<-vFgp6SR^QcOoU6Xlru@WZa7*D7St z$s-#U+LT5?4ID8M66Irufu`3MNNEU-hyvjYP{eV+IR3mcUdz0=G1qkv>OVou2uu_h zB*%y-JYYX67wcEOj~-{ac+q=oY|Kk7=ZFwHq?Z8wpj6;pxF7HzJ0|X$6#E)pBYow# zJ0dTD5cYw1=DQAUqc%vizqmsRQb@y_avEO`D|=Ag2x4*7iq1z4II4AD z0LiN*yb~aQYCa|pO#)aWt2++%N($;P>zlPh^EW> znZWFbU_ON@QG{WsO4W__uor~-+7KGqH;_sXg#c~YJNPf>@cUE_?-dQH0iYV^rfX&r(O#vfnrE{*0?ccM`T=@oZ5hEZ{ZC0r zwM*VQ;P>!8aJU;G0q8)($soF9ivE1R1g0B_+Pr#yhj6b(RR6p(G>`Cz<#h+?V}K9uy>A#}sL?cLgR<(j(S zq&M0P0dHJzj}Vhxmr>{I9e9xQUK_`&VMXn24-!gc7*%;;iS`v0oS_ zhx>r)oW#eab6x$~&wiMd9T7JmUSVPj?YIY5afV8}#`zts&pA3u_bzrA`hyuFD-RZh zpy6!bjrT&m1}7z(siYNSTQ%g#P$|-7t?!tI8X-HfW`umYX0;! z#n>Ct!vA}>c5CAUH}CZ-bTF_IOkzyHq_*lshX$rN>UlA-J35g81#t|$k!9_mu}70# zd`l##Hq86j=1E?sBUU4X{LAy>;?q}8*Kd?myRbb2E%wm+Zb40so@;?Hsgm0n35824aaDD*qR`}eq#lW)K zNkn!yzs>Tpj52P3d1Kh(!_n?dr9%m!-pRga(9ALM-LbQ6xffZKY)9K+CiYb zi_WKYDCA@kP!j{;5r;U;=Y1_;P)$IV^~t+0!Vl6tJDg-1&Jc1$Y#gxnPKw08cJd-& z6)+VOAMcLjNV-vj2g1gX3vdhe<1&%a7tjDPKMCqn=1t+?6|?KFwZ{&ljH5U>qmdX~ z`KA#p_a_<{fc}1JYO0s-T2uSH>r;S)lGT{t`p!YOfgzp+V(*_FM^7$;rnOD=c~!pb zhdR4aay?bKo$%FRLlMaY8FZN1ufK#LKxt7_Obm92(zaT2k`6N9iuWdD;$>IzYz6CF z0_$kf*JW>;6r;llO1^M*mTRZuS1`3CRgaQZL-6Qccb>HG*J1BKAb#T$1?Vltbn*ue zkc0ANO)nJ^l!zLwIiOAqcBSH4e(uxTMmTmdc<>omv~$qA{Og9PBECPFw^iU1o#M;l zM_ouAbRmH9SuJH_qn7ZA;SK@k*JvaFLs9V8!4Z=_wJf}UZh2c$5V8g0!^6Pn`8K{^011=Yy zXmJKq2Dqb0S(&jL)jsVsU%(SmvdNDyA<#gzAE1Yr+G=J7f(e)>>iBCN?!_%|36Me( zfKtkg-)bX>j6xwRn|~@>q;O->kVe@73|$6wwUpLWBLrZL1Yq`Y5*#(a000ft#;3sa z^9?QL3rQf^COkVGmQ;ErQNZ4@0U~9^Hk3e)&V79O(sg#t8mo8N?@VT#issqY0@%VX zsBivh)gE|Dw2{2^=lk?(Sl#*-+dI6UjNd!sle~axc<3x);d}SfNq{}fG}uRR)>8&> zj32NPvk1oBREj33&W%+M@0!o+nMXw!_Er~N8w_{7x1Yz3}^l! zo_8X374&Kh6?TJ9S4tEM%}3gFiwf-iED2Rr7#3Z`-b%f9(|%qtXZAJ4qfN;kV)K6W zYZ8{e5d)6un%lP1zcAGXO?L>!d^vhU8kC+*H*}Pbc^|h;19{_ZJ~VQHd`p|DoLgWL zWoYV&AqZ9~?-nT8VUHz0cQ0hE4&?b_H{qoPU*;4S&j+_UAQ&z&H8zcA^SVQ!E zFuo_<{itht6sIB*AXCLxpuhZnF{doz8BOqq4@seH>iAIF42%pX0Tq5WzWZ8HAEFZ$_gSxO$@_0TpJTaq1kd3XK? zU}?|-zRm%e=ZFa zk6syknKq2U%%;&f8vc4~zeukNuIM5ZCoXbrn?-<9|J*4)G$i&MQZuLp42LACcdzmP2K))&XNE$^U}_dX z0mE4))&-NjN7IJx2v3>t4n-xe=|3KwrLBV5VPj1&@5D-4eE7eZm6*Lgv;YLWXn;Pj zUZd}JP@v%_NC=;-ORt;`vw5)jbK&mR`CD$GM%wZ1$F8x_2ribToR91q?8SDN#p8gy zDqQTHuubm`@Q@I4t|2$aEKCq1p&N7vy+qqccEgqt`Ss^U6>Pl7l1a9AV^eO&bG({; zCiy5KBm4rjCT9{55vL0y>^|O~ZCrcQ9M^2pySycRlip6%g4A8qAf8& z@ZQlsvcp)s%iR>`X>Z=?>Fzq#-n#8%e|6N#hx9`T9W>1M`=7z;Hxkx=g$tJ?`c1w5dmF+l%y06gsZvrqUi}1ZQJJY}SvVBdzAr;_3-O4B2 zyGeiV?#g?cfkru#ftO7=mrdqBhQeOhQnsau3yvt!uknSEj*j!o2WKkoVru0FT@Og} zh-*}MP?>;NQ4$1_?78n(36*B6)vJo`Z3;`RFS2R!1s4hB$!yZIN&2yGe^xq*zP>r zZllNP{L{}r`1K97Uq9=8@^|eSPpW@OKd@IaSZg2{T%_5Ir}C?zgnSJzfEGB;?ykUo z#e!LGJ^d`d`P?-`V04k(P0*~;GP9fI(Uw49-3t*t9uU0#MxkG$7wNb7V#w1sRNj@iN?`|UHAUr~&(4&O3cb{}S$sAj>0}p{O2jvO z>o1t8uO5nC=PyGqN%y#*LgGp<>h3k*uR+U8PAyz3CRJ-a9A?_nYNsweJ-+^~l{d_^ zOOGDTa^ZKIddi)jO&3VVydt3DTlefm$em=R6C4#cX?qa>S z!S8RKGAa0lBQO&gR@!4fm)G!cS?KDW+u>Nze9SWz4Y2`>3muv>;qgyg3QoZh;mSDc zL)=BqpTzy`pC*9!zq`sw@eXPY9tVf0CxKaZE;ua@PUluQ7YO!{bkU^NEvc*0#1zan zw$0Q8iFEhF_K&&$0cl18&RcPhBlCoHQQ$ z978>-(2DyvjiJS(+#SOa?aZ%`oHu$y3KdKB8P&R?iY|9-2+QWLm6XJ`6%?G4ll9-b zm~ecg9Ai>^WcsM<9mOtcp}ehelWWdiw(Yu!0>(VS%$WE{ekD}|1{N=$0V&hRFR6|W9Bn&)0_R#VJKNW zdxm76TKgZ6xfmFjV|Qg+o??L4aP+6w&ASmN1)ZsINVRhKoFaxNtvOvB3bT=E@}YZVJD?4s9~b91=ZB0B zci}U)bUqMIZ&mrEgK-j%pHG9~wRvF|-@5c{s7t<6+!q8a9Pk4)>3e(sv43`Sm^t{W z2^kS}F3J>SZCk_RKh8PK z9gsr&m&}1Dr^+B?;CN)$efaxs(end2lDeIDpm8`C#+ChODS@(GtC&aMUo&HOWyJ}GQ?zTkYfKtEgKSjsUj zqh=a%X(|g9+qp$meV2S7U^@#?Iu(PF+Eb{)Y!XSIzVAM+`CuY*}UGrCytT2-?~giBkBp!*RCH|wVwKwuj(FEZk>wVEJ_}K z>0Qa2H2Kcp+5GsYt;Inm`=|Ez%^F1AyR-0*kQvH&$ME8m@1I*irPyY5DnWNAiOz&A zT}$GF_9!Vn#@*d(zbJlm>`jHA^a=NtFsWp={^}mPOWkUGf)+&^!4eu*u@7A+lKB)N zNo7P?ZPe!^^&-KfwQ9rC!s<;ymY91~B+K(2Z~BWqDEayIUt!w)DZ=#Ba;>FKMGu?p zRX(-V=Jc}`3;oeaLYC?z-9n6s<&OaT9uj#EIyjRN-MQAryWpx>{LA+F{Q{BI#zJf4 zt!W|W_enN9iPS=0=DyZd5SLX8SitGylVlHXSRTlC)Lq4n3&a4t&LqCxBbMF;nT=)S zj|zgfnL77n*(`^lvLB~R@Appq zKE>$`COaHyET=Ctfm)ZW)CZraiqYu=swu zyjfA*F1zDC?H(~=V~URnt%?meJbMyiyFu$-nAWs;3qK)%QvJ-*N`QIm{Bw`a$s@St zC%7HBT}#RBF>tl6sjf+9i6)am)$4*j7Zj`>8{j^k?9!EIc?*mJzuOVmvUfIRCiaJY zEGb4SY?0rF3tM9!B17Ygh&QA?nKoDYnB}=29cC#BNCu*sYf5s=#*Gaori?|>ul2{J z2X0JfF}rS3IZ#!TZiu(LRU!6RD3C#hm{fNu#`0!$rrKS;f}l{0T!&Qi^*=&>cuvvs zs{Myg<9-g#R8RhW^>$#fh^VV(ANRDYg;Qa0s^D1148Ny`E9Q~|H+Zd~a!=gUo-`B2 zzA0vrn|g~)oh{UkpG~Xcup05aNnllVX==5nZEhf8*>=k`at29*gG+fWlz)!^2W$US z0t9p?tEl&o>h-J@V6@+=;Epa!fLbw2iUo_1Ig<3&`d(v*t2!73uP4OAqhwl?bE>jH{nD_Ql{ioMoxo zFiGdvw{qHKOTc$1T$}KWRLqhtrzKD*SR8v5xVPTH4iGm1{%SXwJf7B0swrUju+sI} z_@@pd=a~{n0_a1#-}Z&E8KiQI1nmCS>j!MSHQ6&+DwL>BaL$aXPJz5u|Bgcx138({TTxslhRMLg6E!w9V zQuchdNnb5xBB=zf@mDHS0+H(I{Vd!y-l=baH5mSmpbt;$SD7bk8yd)%UGc-#9yomvw@Ei^K8I8jFFIw}wc?&Vsjq&m{mIlmJRI>Xgd^fT zWV4(p@Q|b57U$lNZz8^h{g523g>muYx=~$WI6fWrK5kTZ-%LE<22}nKI}NnN#hCJ+ zqP<9(4LXBBTN}!)Uq#NuHg%h9Hr!zUaNa$Ic37_A^VWdwuS{{Liutd>!F`r>8>Y5#CbtAs=B14rM9H|a zB~PM|Y7ZDuP9Dwy80oB>s=7O?jEc(HrJ4<6@VdDx-P3LS?F)shDh|1u>YyH{eU+(e zKDps}YczOA#}nIyOAcqiIBEU)l@Tx0Q}2t_{)dx*4!W`7t3QOp`PgUbOB~$u9@`6b zx34JpggqAnT&I)1DId1cU&ovDjIwq1itMIAb4E{5M$yCu6wdHtx zGXH$E--JZSjcL7<-z#StV(w|qri9#^crIX&t8GSNz#^H@JvYQ;{lwz_uM4=LbeR)w zhsXmj1N?dct3_b%+0b9yW&Co9bXbkLmGr69M8w8I%JL`d{vg(q8>hAk+GN=IRtH&} zQ`@f%n0XMHx@gI6S~sLxq9J)JMqPZfOLVYKd8=D z(ECsiiVf;>7)opf$fUER2k=nR5m8_}lwOJ_g=WeS%-L1}Z-qc_9W} zIrMu^b7W}il)NIws_{sq&ng{V*c^1Qo=`GYy)R&9@l+gSBqT$GJ~Jtho$$n8Fu3wi zR|)f+f5AG0=2HbpIzv{>Js9WcO z%jBLHlA-|h{0h`euB3MTQyKAECPl>hr>Qr;XtwqnH_}H&8tLoxxfjHgy&l%NK@*Hi zm&Hyg)f9Ijva6Tr{+6!Wbo14tH%tmP+)}MG zgSz3^Q?a%zL++zA#4efiCi5RiDL#Y|*{RTz(%3F;Ug_}=BQ-82 zG->efPkP#($dDL3INNU%B9oWiVtgP-@v-+p8hLNlT<{QG85XTpDLcZEy$m@JS6UjF zQ6#L=1j$E3<7N6-Lb;F?a6*njsNHzqVqp%)+^a6JlPdBaso-VRb!Ts^KQF(V_@yS+=*a8%|!E>ye3mte*; z$=)JDz5X4>V*qgu?UIW@e|h`(Df|dev1+Zx&X2~O*M(2#QF{ino}qfBDo&haM#Uv` zmDJtUgBQ2#2>iHw8ywt;v{4@Q;F`$qt(x5dn;ai?RbeH##~xR-c&ql!zhv87Dh=W{ z)heFPz+Jlee?Xrb5`uJ%7j|sXPH_TcdOk~v^%U-WIPoiuYCx5FZE3&T4vQ+%EFE+c%Ntcoz z;##(z#EbsTIl&02VRDWb9@4=uG8Td+v%PT8!S1|10@ZjX_e-?s0W%;LaZz~Tc@*M6 z=MwoQm*sP8XbCrD-U#8NmiDr8I%1!@jv*%*%05$7HF|Qh3k(fvY#%P6zX`db_5F`I zhiPS%?6Ai%mgf~dkIL?~O7qH89KlpK@c>iJ{|TZ*;WuS^nPCs~Nv*bP(&gUy_~ug# z_2y0MZAzT1d_&s4K2?XD@KhDRN%8(Y11PGll2CsA1R5FzyV#BE^32FF$LBj8BURZaYP)p#>1LP?EXfDeQf{ai&*ez+q|L(suD&j#;% zKfnY~rrXy*E$uwvwWC=@f}}3Y$0nhv8@fa61&k9L@H;oYc%x#<600^A`j zI0%<`7bWBKw*)+@^X_@ANgh=jQcvrnE?SV5ResRwPqDmssza>L1Jn5={~_aHkET0Z z(g^N`bK@-e*M}H)6bCBwM5f$-eplfh3^^m`mmL2*daNqr__)pT<7SHlr}#`QJJytl z;S7f7C^m@^5Ar~DyvZA%X~i%nnC`!bB5&J=fMY_&KcZRt55{7F0QYKGD4FzhVkn6& z&9^D*%YFl4rZrT)&q67jSZh2P?+{xOkMKRQf69E8jLOs_NAL7LwQ#iE5nWr!t%j?c z*vVo3c?VzYo~#N=DZP%kdY4fq^D=`O#o0LvAIA9xt)G9|I4yN4{bio}Fdjfr>G2P( zK_kBpoR#8-D3EK>wuWz+^7~xxw~VQ?|DMPZ(FKlVp2s9fhQf+3i9*>mES|FD&`TvR zv29grRk(EY^?gj!QZ0E5d)BuuCYq9~VPzI;8H_7pieeuXf5ceakgmyls4Zco+rP=r z_kiP?!bS&ogkg-ro4Ai5s22R+1ig%DQP!7V-QWJ=hqJrIo+XkroVAK#iA*l!^WOOo zM5Vauc)6i$pTW_P+PFBlbmDjU|K&6!7Q`9>GRHojUWQ*Kb#pVF^JBDV+_yx$l_5G`JaiI)ZWp=oXp|A zb4h=pA;Pe;z`|T&%2G42ac5MAd5ix-^x1R&YIRO|;KO`;F&6V6{OKc!k*l0TlT69Yiox0Qs_}7! z>qQf>)MQ>WzqJz;ay^1*C|t`$D?(IWp;~dbJ&Ft6IrMc6B-9pe-m^L2pqVX86Wa_F z4%(qS&rM)PXuXt6B*MI??xuV!`R!+Veqz3Bt86PhZe`X^fK7%^jlE%!CccJZ;N6ZZ zwn_P@VK!f_dg@X0lN?rsYdvD;D!P%N|yrBU&Qy)`KoI{6yh8I=llj!D;~Et;coiz=+scME48OJx;h> zUe8f1ZHs1W%>?U(!jYa;_BB6-r{*g0x$0M%ON?&s{~Z1c=A8GOYQsRIiu%ps5%&*_ zw^(seiLEoX~!V0q7)w>eBN$d*>6P1lyg9Y6n?e14X2gE{NH7Ww9De2UA>8 zZwpnj_wi)DoiKnxYh>i9+g4^u2c{!_8c~~U#yG>j${r0F1!cqB?}~gTxouTAdW$N< z^Wl=f8Sy`LE3pH`U-z!oI~(u1Yd6OuW#fC7moO&KE-H&@B2O_mY zKBP9tc+knoseUHrDV;*Q#=zn!Y5Jzz7<=J<%J^*dsCR>Gb_VBf7oA83Bc1~ic-=<_ z&uf*_KlD5ju-zyeg)}kJ+snnXhz`aR0aU8u2F{JdnH*ij`cpNKTSc9nvRauOck5z_I;HV%}^C47?CI*~1ZF$*Sa z-;Q4E$P89I{+0{Xx7o)0+unc^z2RK%pCJFV70#582tK~<(>xV-V2%61oN1)&&GhSn zBL_bJGJX`J;{3vsmG8h-MudNao#uDd)UHdJZR5X(>^kuNDcTcO|0NJQ>;k04(Bl~7 zXuZC;>q(LwFz1^1Sf?aB#?&o9j?(o{ovR<$!a*_>)JfbxI=v_`pIG-Tt>r|q%y%WX z9s%G13l`UH1;Z_pi5tyaWb?hK?rN>ITJbXC8JD7v?ZUZ75vU$S6=_=*i2wa?{~X^P z`y^-E^#{goHHP{7$%Jc%?!g<}gEz+Kz?+0^`^;?@p7#IFXcs$uds#i-B7pmIfi8+5 z>}p=PGC-Xw?^Z2}30J+^Pc@ky%mY<%5G@hRgfer#II zVrxa8Ked~FZhG8YsxZp$ylK5=gq{yOrwlc(ML6i|bq$O>d9}+O`|4|d^P>OT*e7up zNjso4jy#?%AqC#e@*L~xCDVP^+yvH5jq*p^u!&3u5u4lNgB;nOxIT$)xLvQy^Qi`D z^5$i)??#s^9>Zwf@t}y$KDr-#KtMGu?k`)1&dIE3)||Bxm0A&y{-DeXZ0SKm$06*Xh7^mpm%=_4ne~fa&m+c+SfZAhWb4rDt~a-JXw8b-;B$PwYP{%g zPc9VZAQCb~g!6{|E^5&8A5j2lAUP-v2*^^~Q6Ws%7t9_b}9k$&<>{l3@>>p9go68Zq_Gn|=ubLLbA%_H&FqK9C6i%pXH0oy~O31c&L$yjn`jmVe zx<6Xt9z^A7`J~;rlDo+^REIfj;VS<{XmDYtAjEZybv{ALW5tc!k9=q1RQBHEK7<&k zq&&j{_`jiA-{jLR3XrPj2_u8W%r@X8<$gmb>qxmCr2Vxl;HdmF_ ze>xdUR&2RG0woCRBU?X`_qt*cbod(KPa z7~IjxXT_N7TpS4@#yw1}6dv7mM3K_MzFDpfrEl;hfZ#5?b#hQ_h?? zhxTe5Q+(39My_%;C5l&Cc~r5}e`S3RlSK~>ZXe54180t+&1Om-_0%b=%F8 z<}Wcejj9XHjcs~516E3=HZb=szfqoRS}4ZjVM&GSgE)Vx#i-=^7nm+?gw8=Y^ zrn_a2Km0wP(aXdKg1E30IXi!*U+1Zq;ilX&)Ns||Eo1EGg!C>N$;gE7i)NmzYnb@1R0>rO4cp*

iP2$jgviXg*#8+8@ zV)uXV9hEWwRC0l!{uJX1kcAzDTyA|(xOZD)CjC}~?&>|74)fkh->&ON?DBvakmVpq zuS=;@z^Q|@+ajJbAU{J)-EK9aoW9(@JojshM5r=P{&sKuox>4QRDAZr0%X6YjqImwl84dx@1G+L* zzu<@80o9M(L+X8?e5!lUqET~xNB!k5s03bZ{8{FIa|qmP5L}A#CKmhup;ZYhSpjh- zR1DMhYmWc9j@^Zj%uvqY54Yp!e;Q)IKCpnKp!In$%TfDB*|v5LptfHTx8`mw{q}!%Ni%tqn#W{0kF&JWoNg0g>T|&jo7$atC7STqVYKkO#h#Lke zHOe$7^%FvbM1@4#uSLmCmWh66Ty@?0<99yup7(v8_kEw``#k6K`HrEp9n}P5ibENe zXO@?i7nPqw!2ksCeFIP=5=dk7I3j-li)a`il^ZBx3qWe1kBDu@X7X8VYipEHBw+gl zp+ZtFdN@WcS4``8jrO%VaWJQB!?s%`SLRvU++H%jLxXfyakF}@#Mh0(!UvuYUk%61 zI;U?KBcbs{Bcp=@af1a}yI-0YbdC-W)`G2N`l9MiepuLY(&*?Tjm~Y!gJClIzTs0< zo%=(VlfFJ!64LaQ!1>tvG5_0X&hYE=Qu*$GJNx&E6=);xnHvVGjA zq{kdZwNBC$ll$ehGoAcF%HHF1?%z3K5{+p!$oQC7+JdGRtbcae!oZDdd-kgb){~Wy z7r&HNJ5WmXo40iIOAfGw`z_dQ3%2dk7j*u}(5F=d;EhJ6e6!>6h0!4r)6)H6tf%hT zDF=##4_;$rQ!zREPHU|&rs_qqhcpGg&h2(H!AlwN6zWZSv!F_JBh4+U{=n3iM?QJf zZ)v-6CB~Eek(a%7{-I{VAx!L^w#EO+&@=eU@21()MVe;x$f{$NTWJ?<(`Wf@>S;*~ zsl0q7y<=hFMkpeJ>b6NbseCPX{7XCBkr-C|mcBEX>dZRo) zBbXJs7<1%Bi(+)qEw$>Z2(_D2a&$iWTz%ydLdXru0R!Jjit;)>NMX;bh>PM^)03#z z+H$IBFG6!0G<`ofMB;{5TC2a%&cz6<^V@@luCI#ff4mHwq~Wlkovx;{Veg}!qLWjJ zE%I9J!cfum(}PJ)i{|Yxuq?A+&y1N)xe<14pUmP)Pa~f}OX+d{)-LG&t-eJ$|1pOH zw=6KB^}pwF%~|sYuK42=d!oMxo8qN7(8&THr)9OU_FZ1nNP1julV#MktO3owaO=4C z6VKsH^X-Q>II-Wljq#^DD8Yk z5arGGL^dxLR=Os>#cm4?BWczoXuNiwW1rCGw~_z*LjUZ#^L}FWB$p?y#kyrxkqlOhZ^fW;=34}yXe)f;aoMX)%@FS9wt7kl~#!^*6go|LlL8VRW* z2N!lWaB0ntwdKo*-WrGRzj)N7JhaXr9_-y@Sn2q|#)*jP(_`*w+s>dGp2nwH#Owh^5ECU&xH^%~AJ^zx~>)+Wgpj%qZ`mC9D@#qS6P+GogJ zfJ7Jd!(0^Z%A!7;`a!F@J?VVuEx#dW?PguAmS7YvE9m#S=eB*%GrTR`EF$w(gu<-? z#S|lLP@|>3p)yNC&9(yLFGbeh*y8fU$hAX*NS{sn(kG22Z`9v7>UtGf{rMH0wL-W) zH?Ax=AR|UXf7w`bZsm%wfQ#1CF;#lx8cpKLo1(DN8XP)6434FZU*LSK?S1s1nRoC#L#IbNaGxoPwnSVt^Y?sp2N`Lt- zrRbs!TDR@&_LP0ao7<%!i{-&RZA#tlr_$k4_oeO1=9cX>z57t|N%c;HS&5dL+;mSj zkG_PD^g7D7b=?o4NO~{$cgbHLWGH9cdOC6(%|5ynNy^NAkKE=)$kfBKi!MnHmS0QM z8QVWwjff~(Wkohxg+(?wttIMkmj9q!8GdJ5jpav?R@TlM`AVt*e$}(t^PYxk88*!_ z1#Ql>^qTO~V6vJCwJA`@%fz{2dQQG|^F#@1piN z{(wrTS#>0(0BG|u0z%0~f3>d%W=qo2PFml7QB1asefMZ2`l)%VV*itMz7-A^m;-it zJ*QKbC3O@Vh!@=2JLj`t_UHa@TEl}|5ApT}G-UW^+Davr*&2&k0S&j)H9GDFQf2Ch zkP=CAv+=W>spF8Qk!qBhe!&+lTs^(Gltr>N_R1CC%CY?K$`7B-^9l@6^bdKUuq909Ba2RjZE|<2G?e6!_JNkyMaYnB z+;l>^EcD&(V57KeyNu!#yRs!Er3u`M8~LeGtXZ8&a7hmhiJN3x7}0)70=u6$jSHzfK|a~8G7B7bXG~WRvBDRD0}|w<;M%>IAsg^ zhB(g(!s>=^R~NK~sU1}KVNq-zYnOZcG$ zseGOYP@Q6+KxYA;>B1Iyf(Y#bUD+Wb?}?W%|6Fnn-N6Q(eK@L@fQ=ZysSJP&wvaCt zFxf&t<$wV?u~}RnGC#x{H7`0#`~R2G<_AjJi=V+V7Y zY{ptLxDF5r#0Y}}DM)yT10kT|eT)eym?82w52WzLNC+5&0t$p8fH>xw=JS0yF$LA$ zzb-+bD_0cACQJamCm>+{lLv~A$S07` z8LNx%?~0&6`VKbFmMP-$d4I=s?TQG74 zQT;uu;HIeDAz|Age13(U1q@*b5D}>a7RE#PRS;|pLB{9_XoLvZekcgQU{Hva@dqH_ z@mM_I2aM}r91iJp)dTRp>mUe&0QqknjK%$`Gs7Y9{Cv&~0qAF)Int^>>F~r~?;;KM z%Q@n&@xkUeq?Nwg6A65{foy^5-;WD-HycS8WbpYSK-CM#2fU8w#|OrWrV{J`Pk>}b zgvpRS)|?FEDL4Yzo@$Pzni1_Gdz_gHZ#!$$|39MYKRZ5?;?HJo7m9-b&>l;KVH_TU s@wT=&vKg5`!4Z+-P|UD62!q99RXrpW`3OW~Rl#B~c$A@`J)MgBA9M1-Qvd(} literal 0 HcmV?d00001 diff --git a/docs/img/fig_vertical.png b/docs/img/fig_vertical.png new file mode 100644 index 0000000000000000000000000000000000000000..49e8422e956cce093f8153356718742d1f608b6f GIT binary patch literal 112725 zcmeFZcT|+ivOWw5k_03xk~0#9BsnPp!jSV22FW>t43e{eL@5T z2T@SaZ(-d8e&OXtkfEU9)L2VPtIA7D)2g~ST3Fkeqo6zqPl97=YCtG@cD**El5a;O zeh^iE4X(%mW8R~~W=y=TuKJceh~cTU40;kO6^WXYX2eSj!mjpg-ByW*j~`QE;tFUU zp@LJU#ov1!`3(=9uMK$oIDWs|l|NI7#GD zq{<#H-vl$ZV@vpRD+QCha2h}_RC1m|FI0SM$IHUpPrh`Fve?Iw%!-Gy->LV~;2FiT zv<0ml!y*y`OQm$HZT+nfmZQukUUqh`KothNw0-idnikK3UU5h%zp%V}lVTxZ$?F&IIhy^++9J{?p&+$m zA2)7f=oZ1NbxmIsjU5K#ZpCU=QH7lm(sMZ#J}`fEGC#wlelr)G?Rapdt5e05OOnew zt?TYbIzHj%{dZ3&m@0&|NN5*KlTSeB%Y196L{P<8@0ilmnRmwaVvMl(!_+Z!=0Fj zWVn-T6h)}-hE#w!F(22%XbNn9TzR`9R*(CknJ29%^s4Cm*{E)@$9*KasG^Hlv;O2r z%r;uIPXEW1Se2**W^_p)RQ6UqRg{QU?MmXrAcaaAN;J7P=MT4(gANvn-raQY7hOb+ z4S0wodg)K_^j07(<>P=iu{U4i-hD_GOSO8-Q|d~Ul>~$7QJ*RWqyOswRM;pmg1)c{fvV$^%RD-nZH^?IMO_eH@KhfIJ3sAkM)PFe_Y2R9B>{WT=lNu#{ zVL)|j!wS6PH1OiEGqN+R(}R&nK5O++n%r{2 zWNiPFo+ohm%hw}jq*cPxaD+bxlOrp zDQOB0Wyk~$DUK43ASslch$M=niIkjeoh|u(rYfTRL?JxetaP=cQ(Z4#GsjJ#NX1$+ zwb-tNGAAS7pwwIUG-BVCG$Z0|L<^1_laYB#KSmPj{RkVW!~CkEts=}f%ezpys)ypkVz?U+MTcs$zj#=69_#CbxXc%o!Z z^ZbnjcnhfgHlaF!@AfIQ`$K{$fh|>CVfpHQoO`z zau0G``;^nQjJ0e{@4U9LMz+?y=0{C!lAQo#^vI%eJdy0*G4JAM0wnbgm3zun(<_^h#gV_WBQ z{il{xQ=TwU%Y}^h_?ltdVR-o8@E0gwQOZOgMt3NOD)%XSWt?SL562C64Z{t&xg@ws z43*z|KXZ3WdPnVXIX2YK+3~IN^Faf%YlADnAeGaUhh>9K!+_Ww>ON{o&t9=ZF-Yb_ zrV8Q~LJ0Z*Dm`y8zd0|5FgpKq^1_?KN8+sUto*zEf%GZH8R?n(dC5u78T?!l{{z7j z+&8#)u-&i>s6UYfW8+}Ykd60@TU4di4#qt#Tuf@CqFasq61yMElCz$Z`(ct>h(pNE z>r;nQp3_5^EBcG4vUV{r5^JJV;?pg$ZA7mp=|QHeGkvY zJ@rbzol3!7g;U-qb3L<8+&*QakA1Ca`Fwxi)NwRulzmLn;;|X!;GL0|BX+&*mQ0qf zh~8lFqEk(aOoOYvg2~#^ayWV(SXvIhV12B~5ZZ~`#Ysdldg3OplrQ<@+*RC#-9?f! zT;Ii{XkWc;wA_4sK!EFBKW^{2r|@%OFVAzFCQ@6S?-k#nkXjtQq^e?{+P1G36G66U zQsY_3i>&3X@GnG%d{!j70sEu38BKB*#fvNnYTb3B0p^b3u5?@S|rZ>gb+_ z;{~*r4WVbIp{e)1HpS4}J>gqHmC{POhM zTx4RBdQt?Dk6)fJiAMu*TJ2VA$>TiKKGeed9-3>qnDYHoq*1)zrZ}|=;xhOk{47D1 z<(Oan9&UPefw>aH5W)fiHUWl22b8Oy*Gu*9e2e>!a*1YjUP7WJa?+j*T!@VDl!_sde@$&t<)B9 zg_U%xn<1bbLN9<~OiAng4n4^VCl8!scewx2Dha*(%TA zuE(cMgIYx4DiLb-Jc_@NU$Idp3ID{33eky**o1~kKOssdPG=n|_s8UK zSzjK;=DhHZ`Hn{0joW&3w7;hkuJP?#0piKf6I*o=i7Wg~VrL5!@2z_@G`7?5XEz*uajClFT@6f&mk|ETin zj7Ss|)GTXFT{m4NMG;d+doE)$M-y`{n7tG5HVTRuOa%C9Z|-JH3$wR#a20`x)Bk)! z1o(daGl-t{=PPcu;`F*os}P|&Jop(!_?z#p6b;nBaEih-`D@P{e=BCnsn z0s@o35d-}udf+B??|M;OM z40XE+ug1{abN76E@hxYesQTBUsvEOkRf&iO>(U5<0xi>NogUKJbrQFk^cqXC^V2mj z)lx^bfgfZtFM&gru!S|_*v(lMW6>U1-)H5ac|LpVVXMI*Gvkv;ZFz#)y6LT9kC3}Z zJqETz4>m-zPJJ&X?})mV32&TBj2xW*sGjWi}Q9!AD~iL zs12y>{o4KCj?X`c{2!)z+h1xN-6}?kRVl#hi|9;ZsJY`%sZHAk>>LucYUzmy5#kNqRwyr7rrD18Y{ z;>;41C~{4Z?KxcW*cV|$V}LVHc&p^lBVMDV-u{z~|1)=scLBMCV0Z~MG;Wu+=e?&E z+H5n!(n)n68VW;=+GW&%8y3JfUpq3teb=u@AQblx@j$CPAfkx-{QC`NtM)Q-9+Kr7 zGpkSwau3O9!X0AO_v(90B;r|$i0{zjhFH4am*&q5U8{stI0{~MZnoSy3qux2q_Op# zAj<&nyxLPr=?oTbUP1Y{{pX8OEKYjM`TE_vs}KIED_T8(DI9q}ZbAL^0CnW0~KAXuuIIqWO+Wk&J>KqDV!=$ia!>;>>6+%JH zG1l%xfABz zB9)DU7A2b(Va>Pk+Bi8pzhgL!!lIqjf+~0q>0}Qz@l+w0Q-}16i;L%;wrN1<6t~e) zFIIzZJ!A}k2gZI3r$@#U^P(r9`{Ky<&)~L!Yr(e_(O_(M*la=Qpb4&rs@RK#*{h|s zckDqQx!LJGO!ZwBip*tXlkZ7)lu}BP%S}yio65_Q&#CGw&(Mg~k}5nUMLUtla3y(& zR43srAqj_Ta?25;OUf{~uZ1k<_Y=wyqkw)VNKqtZm^|{kY`nq&kqX~ikf*SKlir0m z3<65J;x#bTW>t?hKH;5jyq-*8Gc0=^RJiVfZN!6mp~LWjZ=!r zq9;bQyX>ilI5DJCfMl3qA#2O|I7_lZ4}2q3zegxc9uWbG_$c~2ITgfdwnT>6o>M5< zc!vn4ZMMp>_I*ABwu!%%66~7le}ow`z|mVK1`Lx)$4ANR4|fXWM1%E_$*=REBZ(y{ zWl%uV^nGG5Y=Tep{&2fEGiqrR$)y|m=rv7`J4)ZUFz7Hb(6C)AyK4e-&ml%PPp2mu z!v+i6l`hW^I}zyet%=y924$)9pdNSXRh?)Q-}^l=A3?;eU{LiUL>LWLL@6m7!{ndU zy}zjC43I*ive8Wb@IR2sjQ&~7yv8a#Xfx1UV{OGkGx_ccW=R@$k;ZdpaY4qi(}0~G zo^~I!C{bdNk1S|Jo`Csn^jM@E;|8{sA5w@Q7#&R5s*;c7fsvph_;GHk03`UDq(r^o z$HxNU0^EdrPug^oAXn8M{HoYygVySCkhW z4cLqMvu}jn-ati+quvg41sG^hb2h<8J7L3QRPK|J`V8hDbm<#iW-KcieAieSf`a;u z`qV&u(0+n^+||n^e_6jIMjE{W3z#1aBh2hQvwv2jY_~rd;FWQ?w|CM|pkFEO-k7=F zs`AItLs6hMH^QV1f$1AAEwAFEBQ-FkD2J7P84f(Q+!+T>xUpdC{GoeiEdE?+&cu{{ z-RYIo+A;6B)N+1Oxdv?u#r2rus`PZE*o!POp8~?#!2!6Xy3|nhV5*0#7bF3_3QWsP zOdkk?+rWe`N;T=B>S$E*cP>c6Kj4`E)P8HV5(DL3cln@|NcqsF{Ax2a0xbSak@)lELvM*w;Xc{W!6n=P1;pc( z3G&meehpxYm5YRz0Q$V?6itT%jd*DOiefJwe?vESu~0X$yjdBcDZZjhznK3UkwU*% zm@-~or+sZDs`D+YKRVm4X+Wyao_#K-mYim$ z*!8~FdW#8L3s2eV;AMLZ%gdqFh$=~WUoI}8Oyi~7+|V;O_2DU;WHZF~kEZL?LKg<_V!#cpIktGHcQ1zSCFrf>QcdZd? zOH6;GI6w7cqCIWxa8*98n{Vhz{cM$9d85t5(i?FWmZ&J}zJnF~mUR`Z8io(wN6S0G zEq0j?Cc&=6pw#oZkBqiqv8L1LHIlHZ>ZgvZfOfMgOd4TAi;aR*d0W9btedh_Cw+KS zk9yHSA=$b29xO5mx?$1bB1i%wcE}$Xs)m&!Ts=FCkr0 zqflAvm=a$%_6O8Sta}u}%+~0bGAS?@wT=%UQ;x(2iQWeJL6p%E~p#nU> z4}+tT=``>oUjN#{mK9HO0>zN-^^mOYyxaBm;HcnD9v&n6q&}m#LW_!6&1TIk2!@N1 z4u7oKOUP@7rI11QRr%#Ky1~&nE7p1^eHu%YiuXXmtPzE5OH3*uuH5$5ji?KQvo44# zqGffVkFHfUS?4Zz!y4C)&Az}n76s~}N}{6C4$j2cu$npPQ!^G*#07b>rsc7%Fv*4N z@pE3R7CGS;M8PRZy2cWb@rCWeDFWAxVl>)S9A0d=FmQnAu6Nr&$;pg5sg*IKdIfTjZ z*gCjX%9>X9sOJotzCyY+C8PfzxJVQ5b;gqSY)wZ$lmO+DQX6s~go7zGVK~hlet`(B?m7WG_fFQ{% zaMuck1B8Xa#q|@-kUzZk3*n%sbNQHIP;$f?z)h)Day1y*Tro}&!0w^nZk1ljZv|)q zNwymVLX=P(U4ykzWIQjjwPUFec)&pbVr$12BZ1-=Jnh=QZh`g4`aE1#_g%p#z#yMF z$Qrj4FjOSK0{v!>45)npL**odAgO%UpTERpz8DbJq4JxDgRvrhvtWe5RckC1#)K*P z(&2T8ak{~{4*tRD><^x~oR!L2kBfp(pv6JYQ+W2&QyM3=NPUYVyaes)EkYK*IBrrLgqR;$BC+ zNTk!jX=|%LrPuabmqDJM-oaG9^mB{B6uJZ!6^ARuS0c3yF_O#7$Z-voz8dSX$JHTS zq!#^|D;;uoVXaF@_15M4@KAVS046oQBnDTdi6=~+@Y!MxQhgDP9UhhiJfOq}VQ`eY zSi|IhZK21?X&)?pFvwTsxX`@*@*9P*w6wGwg#~{*2iEO2k^B2t8w4S6 zz}7a1UHoQJ|JDeT4s-x5NFQlUU=5^;`1XVEUmP@AN1K2AxiL6M@6Q%;I)&tOyAJ_) zZ3qpWgc^jwSa2hMkp=TH79$3NLcTCM;4Wt&FX+88jSVq`jC)!7wn-^Zr1BM+Of^%$*YhcW#Xvl|p{W5Au4;VFk zQ;O}R50wy29uC5q0~Y;@z!oymO}E%cVsNElG#6z z#s9Mc$_#+{+il47*bvECiT%#ZieHA_a(ws+FY+ISoY!EKR{|a*6|k_sZVR_;k_$lR z0$%&W(FjKac#-ip=+P6=d~u!(rh$QT=3kfP0bG6ZXWt**BF!&Xb%kqR6PvmY`b!|~ zq^W0SVskL>x%?pRi~ezlW6e)tL-B!IL)u5^wj=)2>`c#+3A088}O(4fx*zo zn%YP8PK(-Op(<3it{kNH8#}Lsj}Ala6w}PkGr`BrHFI7e{Ll-Um1o+=@u3iRJ3h`|VQ{2l8277nv{q(6+wD^&(@Op}=+4^AWkPn~5U~FFki=bbD z@+&9uKRfq4!Q`sLsZfD>iajLA44@_g}vxRDkJNd3Et?14bH4kq*o0|<06IF!FV0m9

G&mbuG{A1 zF%TQ)lWkXM{oW)J#DHTKjM0_`C{ zR)FJScOlSDA+QV&E$2LZUAGS0sAt3D6*1U!T)G+8cED^-UEP!rnrfd*wT^o^lq-Eo zSqA`MNbucLO&FWY4P7o8Nytr#JvSJe$sQ*>8qRSe%3I`uqyg0>d7)e_Svdf%PGmO# zI5Bf~ws}z8p@{*;WU~C-XTcrRxm6t?(Isu;8YmO`BaYTjv zr#T|qfI0578&0T4y3Vt@XuFV3=eA`gAG3~KrGaxuGuF6d>Xb6==UUWK@R2+?p50#?)5F0XWuV+7%4|Ff+AH z-b9_ccFNBLBgYV-We_)f0EgUed3|FE;bX_{SVe664#|WIGA#3CdhP)INHYnvonDr{vj5|rfO_^ zA6Ie$U81!T#wMj^dmkGF5FaW}fG47f=GAJ+q3!=k2{e}gB@jp?bT%*QLQH5bVQU}dtRaZh~{yJ zB_@!$tAO>zYAOFbQz#sfFgP8&7hgRJOTFW}uUUBURY7U@%=!r9dg2 zxv8m1;E}9w*a(<&omBC>2}o;0&2ch47F%2TiB+0e7yOpH)^AODuik|GC^C2H|DLyY zvf?LQ@XF1tEwzK<=wR;ss?PZ_i{yDX$+N+X`aVacRQeWogN2mKw@pGYWUTk)?v*_C z^i|-6)o#UFNk@2AywCdHyvO(cT}26ndGG#@E|c&5FjcxN@No}q{E)iWF)@$_r6lXA z9!JJo1`20zL4lZ(6%QoAwj3 zE?%Eztm_g79ZEjZvvKCn2$DBW7qE{9gPPpF@2tCiJ7%V^AZBE3>EQ3%ng5RPJ{lMq z$h7pG^KGBcGn@8`j*}s8)>|J;o4uNCjZR~pu9o%~7@jfOpx$cK!suO_SdZ~TPU4HW_n3C^srdxfnR}2mcz5Y z3}M&TAFcSa4&;Qg+!=vul^ewp-tR9aYfPd^Zd8mT2S41b%- zqK%Y1nW3~i^XQ)kT_#<8Nzh4BzDTO@$wO@78YRys6eMR3)M6`EZ7QE3P!VR4j6hOX>}djuZc6) zE=_5_2cjH&AjTUA-35uCFq3(kwaGE8mU@$22i8>|K03 z_f89xn)c|s#++UVi4njo>~W z9gKsWp?&aLeo&T30*+5n7YADR)YfEKLaptT1r>EQ$o{98r|;Zt^iz|m<``Ng)@hd2 zzB~=z1Xt9n_?`VozHv{|wCUy;zt@2d^{r(EzRx$#I`$?C7luMZL%V5g&S&_C6kU}Ld|~{9GYX?u+15^qzMm@(O9Eu&F3)~= z2XVlrc3;k~%^moCs?*AR=VEN`nZ9;+!~5HAQ%m7=SouZH?4DrDo0kcxCz$#c=zcK> z1=O1&!bT=QDxshXsiCxZ<5nabjik099;prxc#&DNWCEAwgV(TnqFAfS$=Lk|J|`IM z55D22r(n1P2C|n-7j7w~v{kbsZxcT|K`%xMbt3NUk>~@kq1(eU<>M8p~RwN}{ZMSG?$*vvj%w7f# zHZ2zQuON=8np_s(g<8~MD87V?;Fm_v8!|b4T@2~`_B+5=R%_1-V14c(7ySg14MlL3 zsH#NTzj(8kbE8=I1{iirQZ7kW)FbnJoVhnK?WU__v@4*T9UXmLvSieo`!C4~z6xRJ zq_{s$^a<1s>UeDAXP?FK-bUdWee`ab4O@3<)yCCj!oGgbp*fX{?}8@{OuhMRDCoZD z&L@5?z!;ViMbi4-esKzZNrQ8m#N@jV2EnVlz+5FOIu5WtP?Nz3nPfwLY|LMHl(K{7 z5+>g_8WIqY>}niewmJqPi0-I+=0j-@Os;&VeGNS3-1pzc_zWJ^5U=MM3!ut3Q&H%^ z^HWnw27OPg{LV(x%O)qx*=`$E@Y>E}I<~D?-%WA^T}sBp{qWqn#~ve%;)m6F#MDh= zelbWQd0Lbn30Ywx*V^M_L97-!lv_k)&8l{9$U+ou|4BJ%NkAX5z@%j!_ad_N)LsI+ z=A*&jLWBF((6>q{E}x8rXs*${#QW+X3!amc6OPF~GiJqqKh1Nw z<{GPQzAKDo5A-&W-Hnrky`p@+XG!HbBWAq*N`LJW#Me-K!gZM|9m{3N{rr%ncbTAB zV5%X5L?S@7xih#Tk%n}cMbd4b4vbh0Qt(}U0l)ar2j|#JC|9Qw_=~9OngG6-?rlug zW!11&jhonxrVBbIPn75k0EyA91P^O+7EwefvneOpgfwYPsK-%DLIIdGV@;KlnC# z!Ns2KT@176Np#eQP7lw4dS`Jns}yq%-1rAY_#!rM0mrT>pSpF~eIwKkZEU*16HZai z+KB6e#EF+##iq6v(K%|#M1PL)fV!UsSIdL7qi>(nKR0w@>9NG%zy?K)1G zyAAtVRJa<>k{P?}_v3ts>FwxESwUAPF7z3~JCu}m)CGi-(L_D_x1GMZM(2|qykRzx zuxv(#fO=krzI{a|7%ubtrl6z&rR%!S%||v1v$(Ev zH3=#bkOIZj+~kDrX`(>ybuH^K+!S&$e{rOE^HxKSw4|U2THQkG7}B5RxIbEv#=f z%+Dc6buWpxR70OB`S7au2oU_4SQU{#oF&u8!xbrI=xYLCiU)eTpAnwbrO>8E&k2{U z>TPNsovO5aa?6oSdk+(EHBf$ovLC8{H1Htn^3KzkzB-eBXNMS7WsbdhyWS-i zXN8iQN>6_=0}fZ_wiwIOI#;SYC+S8tA_jd|ROto%8IUu)A%zIz(NXlTXG9tz&dg5n zD84T0WT1q0FtmT&Y!ucf4?!s5NG2sj%T5nmAV&u{OuV!Ugp<2)K;~-3Mub4d1M5L| z4Sn_+AX~e9sjW*)Vyo*A;BafIM4(^0RF`f54uY5dxcDfde+gIx9PWjEj~|Sd-c~9B zab01z<}O4U77;q+S=pg+zk;{|ic_lh#{=xoy|he|@#;34H_VY=Y2dGzf1J$cz1};~ z-_IIJZ6p40Mno+31ji1@R^4g)GvvH4n;<*=*a2oupK$y5lS8yQPdgZ*`AH`ju(6M# zc*?LeH?R4XX}Y|+j%$7coCYh#X{I_*&wLI@Rp+TJq(2By#oU3iBi`rdE#iDU6OL}F z=1qk(?{=Y{XL?O*gnpK7=lMUXjo#FMLHkI+JiwzYFu;3X+#8{%hOJa%Tzo-9%sKu7 zw?~CA(y_q}VRqN5b8IR9MOQ_(+r>DQCWU#K=TiQpows=IN6G<{Ab8cA$_gwXYZ6Rh zsz+G)?I)9s2fnT{(|Cfjv@}0XdPKAAHeX&{`8I4<4VdjMca3gMcX0x8OobT7{+!S^>V(>$X2!GGTFHDd)&>8iZKaJL$H>61OH4*OKsR>`Q z`)p)l@>Dje4A^Uvc0!|6Kq8U~H0+>+0;;v>h}!{v_8zoKbYBmKi}1OezH9_?*osJX zGSVYbz*a=4nkurRnO}CQN0#-jFE~<|-xGAuY|0(v@4q&!5(`<$p%CAgDB*+F0KX(Q zM=!g<=3?l{?BdDC4Tt0=LEoDD{Br9)Yqkrlg}y7Ia?tam#{()7f<``LzTtaC z+Ad|8fbGw8<{(6>b13K-u|NSrkr1ijRcoDCyTNzH3Z+o$__o3Ipr`7|Qa;E1ZT3ef zAkv)f6m9f~lkj{p{g2zeKLBWHe)x65=9u*!C1qj~yH5Z1boG0c$+v|3g9=zxXP~2g z76+g5&Reg-&gK_Sj$bD<9nAjVXt~-X5GB`Nez{riki2j&#mFS|ENtFsz_($YZ=#&$ zJcyIeY(`#g%828xkW6+j{7xYb?cGAGMV7)xd_s8DiH`%y8ji;yb6nIv*ULd2;PB2caTnJXaut4!^29>>0LJ~1HAmfyT z-izcxmpum*q$m6B*>Y>ZjVgyZ5y<;zWfPv&%zQ&U)!WOg62Ru&EP`!{vL?e)+RNmcnefw68fyxw#+xM7{Ku*nc|eLFisS&Kqq0 zXmS{DAO9BEnkRqQnq(VyTo-r_KGnC-d1VO?KHWO%IH&#$->#pa1s7x<$i`JGe~Xg| z67Sm`@WRK8yckJ$Xp{%epc&S`R^6uw3JP-B>mna$55XCjllF1x zK_!_q;(K~Jo5`y*MXJ*rdY*L33R4n3$bZL-3<&K~3Bb{phQNw$ehCQe1OtQytq zFe9{B)mX|0h=vQ@3t$>q~;3PIy4%Bogj5+TD zQ305L-TxG7u|ox9$Li`flEmMcbm2gny*>BeuQgX`c#3C+rp9}VdbE6a$?q}$X!@-W z-@vN5lOSG=-*n55-nfM{UK!u5ckg}#QU}EC1%r6+s(#XX0r>6UsE3?DDHz$)Pje3d z2lasWW(`m?qvvnzL3e#9BnzR_C!ru|gf|8}axJ~5M?BI6CQhv^yaRaCm-?^rG8}?0 zy<9{ybuJvhVjQ)fh=$^BtRL*<(OkZ{g@;EqZFwFJAZv@+5OU}ppF9q8@Xwt7T8A9A z8@e%49i5&%=>Ql0S;d(z$FEUQGeyH=Vhs!jaGor8t^ zJ-lxdXYQ836&{*8F$0Hf;rYq0lm*-OiB3144ai1>pSK??*>7DO@SSRw-=YzOWcxq<_a!zJHe%7aj;G9j*)9Pzsfp4!nhTevELg$0BpcWr@rvHT(c*VqK zcWTsoZr;3U24qsM{fhcQCvOT+Yc-01u>OyARcrg+hs065=6S1F8l)sB z>O(15SwT{YK+Z-RL73d3Fr;C!uK{Jw8v(bZ18y{G-u}?j&#$FauT}vFQmowPKd|H( z7nE}U?~d6QE)uVJEk-iMFWMhZv%hL@_`;NC`byLF;9VrtC4mIuPJcTPZWXMxfVH)n z_Z^1it6=-K;Q^TIsxRvzTyvUr3kc=`h$OJ`X;3`50nkJCq4bddg&vNO|I|a-04H}Z z;G`DyBPEr3J)E-Zksb!tyKN>4;~hW)^#;r(*UH+~Jqy(8{^f9otXT%#=iI0Is>}yo z*VlWr^E_K+kOBfS7F)|b3u8R<@5D>_UpSxQp+KubD8tR5+?XMSj&bI|&ny++64SBst zEr4jnkg{v{?yn7W85nlwQ3BUy)=u2T(*z^S<9M>KkLA&By$8z2;n(Hke;X|u0E=8z zo?q%CrFA|2`gIWNPEM5#7s~)FHDq7-?0voad;WJ)fACZ4{#{N22;dYfmzG+^yaP-PSNTeYy}AcX z9+*2Pyhp`zWB(fhFgOtn%eFwZL)G{>ka55@Itl#1S{(yuv`>M(<;&BdT+TcA&j9%E z9`zCxFuM7G(KUPlTuT6K@Lk{_@g~VLQsAZojoa#X<(FBxiB0jjd+JQOc^M=i88$C-nW=BU#!r$jk#H6THogYDqr_iDVlEc5c|P{X zMqB9iMUGBU0tfmy)(0#N={a82_ahC-L1H3L%jw6gV)LI2HuY(j8?Z%^gOcax=iLN9 z<6wQ@E0T27|JNMQ5>xv}rA!&xssTiQre&^-S_ z=Lystnp;`Dx$H}hfdWa}D!XZ33iITKy0qv}2s%toNDr1&qei84fwI*4Q<{~7^`V3@ z6-mSE3P#{Wvns_$Y?=9KvfgZSHQllJcr573aF*kZ1U?#nd;1vVTr|Nx5EGv=mP+(l z@8$6Vh(HYKpE5NQ!vs!#VV%6!Bn214<1?IJJ_j_GtG)JLX(Sy$Uv%CHHQYeG_~SJW zOaLX_pvCu)ks1KDZDV{_NrF=Kw&Ct`oda^m^W)tO_)}<+j(rtJtuCjLMMW4&RORz` z?{8~ui6#u`jhT}H_Zxw8SAoJEoAS1{w)OqUYRi!w8%mbXbW$CI_&u}LCnpwh?}d~# zfddjvXH2SFAPmlm=guecRGyWnd+L>=#pI#z@in9Ad}?AfLsxo7pwHh*JKB;)jLg3Q%ZS(SsX z^nysmoo{h{ea+S^RP!6QKC5MN*~Pnx=Iq=H4bp+D;nH3^25vWM#`PJ9Ovu6t zy9EanC+eC=Fy$pnN^Od|fFhVx1fSPxJMKJvl=Q*@F+N08#{ZZJ2z4dy>Rh-X7avYn znS;xY>1~+`tP{%&8h~sPlmp1GmMNRI@B)@~rq%1Y?z4xfo^O^6P8YG092MP{@`3vz z&Ds}#y-D(EAF!z~gMSBzS#WfN6WGb+zUW~$sjPo682sig8)@_6vI);h@ zupOCDdA)B1h+HOe)xxgv0{}w1SJ(O@ukCxWR*6+&1AXrsF{_lQz?{9BVL{xevllsX zD4@-^_~JxR97E%|Gh_nSZfxFCe(c0V86IeJA75M#a;+hAbf#Oj03Vh~hohgKAd1ta zw7{Hc=Z`Pp)z4SA`JU@>g@nPsYD;FEz}P4`4c|QetilDH73X&Vz-88LOv$92Yh{TU z5I}#URSSd_E+-^|_K)Sii%St_fMs3m?q#hotRBpOj)U*!e9|HgD2D502o}nnMQ?ex zup%@QP|KV`9lfd&tlg~A!Pr1-!&{F5YeV0;=YXZTRQU0j;d7<2wBY8rOL;?t-N#%Rm!oORfs4}v z+rEJ8IY0>~#jDjBiec5PNw>K%dIWhxO=^Utcfj#A&p)7#uB&Lh@kj~bZOx;x{kT}T( zBA7nl5JoUZbA$@WYafhA8Uf(7=dxF`ZwmtejXjNbi#`kDu6@VaAOz~G;&h`6BjA+e zz18253BMJA{Oe)_ta0T!YPuJ4Dsrb0cG}jbY`}hpt6k!mjI2933knt>Y79fXliZ?Sp(; znSzmG2Ts7fIwI=7yZMujuKrU96T1x&`~!+zMevLA`HF2HpsojG_-}%*`u`aF%BZNq zu5CqBNk?Jc)LT#5r0)#~XqDWx6uyPy2&!^0!38?u~Fp zefQptGkG_=whLP5n%L)mD9CM zEhnlnX#%Ce1SHRu|B!}0PP)7xteK2=XTQJrku;g6+NrV8g>1d#a!N#nqU>q((5BVHvyJq2o{i^NKDjUjN(2!}S2 z3^yp>vWz-{V7W03gry4%lK&hXF1_jI2C8F+15{GvC+K@gXj}RGad}?dkR<}sRm=g1 z^~mu`B>*;9Z__bAAA*sLR*wjfPw0)0|7eosDH8TMz^u(}(BQ;^jc=GyrVT5DWOsh( zBhyN<`0?)UU__QN?L^BicKa`rnn&%kK!etrN=R0Xu0!l}FS?)-LnKwz!q{L|90z-P zNq@fW7BBGb2$H|eVbC)VjO8BbVcT$c3ulqJH`wcDglpF!0|9j82zbAHS<*A8z<5#< zjJyxvB@;UhP4$iIxnd zQtW9=X`8CY&km%2)KTc8LnNp@4{lr)5namc4gxxUhFS|93d*c;DO>t4#-{L+<4JkiSF~6 z{HTNn*IH6RlD5$NuM!Qj%W3B-5xC5GDVQ?8gTbp-K50%tp8T&yPCihh4HYM*SZbcY zrIQS~j5i2~h=}-ou!<8a>SWpb_WtWAps{S|Pyaka2zNXG_zYWvZtNM3;EcD!Bf%Y2 zsdABEIhd0;W<_7jW!dpS-{fhzMWmklTpCtQo@=>BoW-37RBj5flK(Wu&C}=8jt{a* zk}YT>hZRP0GcTOekJHOZS6~u@pMBYiB=i}-Xp$6DDS*=iblWjY%HM(HGK z_UD;*?nYkFw;wq*O@L%gbbZ@**#ktsxr3y-WWRL z2DV+4^mG(%BstAf{|2cP>MH-tfm3-^^>-@^+t=bv1KUpypP_xao+H^qoHr^x*GVB< znW=rgI`%RViYwD`Nu^j?3&lzsNe$_AW)sl&WkQm;|7G$@M@n zM4AL4=C^$J#Q8MBy%uyvB3Hb3DzJFR{o*|1cd!_z`%9Z1OCh;S?Xu#5-l}Dx3m8KI zt4$bFGuTxg8Tz5dO)b<|`L(Nqb(lmg&3FY48S)1KCSRhgQgv{S+7!T8&JUoa+n%!R^rjYg%&hm8ww+ zMms4!LXy}|TEJ0ey-Z(woho0<8@m_5Z7RjJ*H9_E=VnD|EG*!P6FOE0>0yBZ_8WeC zxW@x~eI)m4YB;M7O}j|4pwfa20+mJ!!KOvo&R8xz z%Qdi7NWLtW{yE{=7SJlwSNni!CY@tc_@tf66RCT~3?B6Yb~MmUtW zQzs1!IUJ5ye$0I0j)6fM@OBK)HOiEZ{sR-^Iy%q30Vvx=r!kfe;#&^pd}| z+SE-coiOl(L6Ed^XQ=3E6s>5$%hUPYX>tJ)k*UT^cr=*eF@+*OhTT*qcGenkAyt zOay)Tp6vjGdG+c;q^VXMnIh=&@()Dx$!FH&zSGU+&&JEy=~^e|Tw!+1I)RytrqgHj zM^wVwGbg>)Nn3d{a-w2Gpx(`#m&ado3!p=YimQN&T#FkhMvODhnNH0ieJq?)5QmVR z(CY7OLuSQSxXuZeb0;FMK3tYD&844UN7~a-U*ZP%5LWbv`}NKQvY?Rquza%ljmH4p z%>C=QvIp1MV5R-`?g#vP2OJ)of$2m~S?g_q2SVs| zi3(}__$q1~Mun3rfL2g~VV~$JBeC?G9K@S<(hoOPd?w=cQ}R6DqS8|26qH-|g3Xe= ziW8jsEk0E>8L1>{2LpGl9-Tlfv|ctDKG59q(qWNYzH2euArWn%Pil_2UUqgkkRTI! zuhNA3mf1O@)4jt1qYRqEcc*0D?xF}|EANu>xLt{&lrJ9z#czF0W{pVhT{exk36252 z!L58zx)(rr9KWFD#us$+0#7JSTNe(ltie`y?q0pU&*@RVI)W!4l@R%{*##S}bmQ|V z)@ltpi+lMc0&o~rrm`fsT(^kfy1RW~`RT&GSY#SC>m&K}G?Zb|`>Qv?5?(XCb^6>f zNpBIA>yrAo^YN1thac^41wOX_2pu!ma$$DOv^o9hMcxv4vN2cIY|=+ku&IyL7+l>) zAnjXs2a3M2aA_j8VFEh^HtfQ_aG)42u^odMr>toL?&RnCaU-4BSir@}-HCA9<#;|= zPz;CaZJ)x5bu0dwqmAXq_1QRq=i|S#U%ly{!&j(4cLiLqjEt?PKpEq#@lPhFD3s9r zjx%6#q9<_`w5U!@i<#kwdfLZb?;2qL`sT1tPKEy2y|P;;q{@<|@w7(MNFF$*JY~na zv~!^X1@AhcaZYt4=qc0Kv5{u1_WO%%^u!1{6x)t~>4hnt_;c~1@WmT7AwuD6ip9Su z8^Xy<5}~g}x0)8%lR~PStsLn-TW%U-epH!zhNgdZFu?D)yrCwtzWoHt7R4T_8yAAn z|MKN@!)rLdhz;BIuq$mBsrG`I>hosP`y4#hc?wJB8<^q>aJ7FJ5A_Aew^-opqAc7h z@15syV@7nL`j1QmrVeR?Cw(Oi6b%I7g6QN4ZjoK>$h2WBI_XfO_MT_GT2Hz zp@o$^@_CY_JpcF`Nq%&QxP;uHhF1BeVBhAurs|nRJm*hVcesVsj@H2Zo=hHeizhHW zlr(l;9_+IYlK|k#DEB$vxf}FYnkk}_4qKX_Wt4!;)LfPaufb(y-0({8U>T|eXh!7W z#+5luP4>nt5-}lCxr1~e&f&i98{L2my3^OINrJ-U0pUsSHC4jxhyLRk57X3mhPvA!NgDs@$jXy8?mutHDX1W~pL;GQw^eYFq(X z42j?w7zq{Dil}{iyUCw?@t(K5%&MXE@tJ8K{LxjUZB0|9h=l#RhaAC;z0rz0ZDIu( zB2a8Ep1?oH{Gt2lKG07E#=+R2KzK3N8XxZ%BjH$e5-fEWT&s4~adimF|fjn1*OMZ)POOwkEpWrv)Tp>OfCA$OG zWri>YKT;57ygN@uebz0r$6Ul|Ol{_8qzgE;0--E58!wxJ8E0BZ%X}K8g6Xyf%6vgD zom-(O(L{W=Lz);>wKGgRS=DG62ziVrtn|^dgrC@;Wp1V+i>@JD?ov%#rj;91+M@G=K~7UK9}Z?bSJ9z>`7A6c zDkbXBRCqm%hR?&P^ZJ0cwC?TlyS@gP(sw~MB4eyRMGjmJm6LId2h;utOm|y-%UYC9 z)@~=~Kv~5V2OjDY+=*e1J4$pF_8==QpUTUz?qumZDtiu_h+jVO#1b!6;}Q95FUKgW zO}c4<>l;b$WTni#L(^*=@NQy_g7~YGT1V>HDC=;JNM2)ImiuUl7X5hs7JB~GVfHT;)?oRcV)wgAcxdJgFc7_tfy(2HVgA=1=)=_Oc1z#KO_;E zJG|(?+_#1HFsc2RYR9_TNshyh(p6?$7H<%G**SG6s~XE|{4{17`ptjpWWl=+#DL53 z;~juQSol!+8DZIe-Zv~BFE&NcX1K~dH1UpH-)Oc-ir<}c$8wRqI#IK0r$#Nc@d@fZ zKW+5Q6(ysFX~E|KgZSB66RbkfYnfWb2L)G=nCXT)kD*n!m4ttcz*dC9;#@-{iY~s9 zSFgam#d6&{v*wFp-bE$hTVYZMyH}mMMn(PnSg{V|0hYrs$YT;?G8rBm#(jOcFtyeW z^2CnFF7acNTohSNdc0}7LGWfmDs7Z`8&*DT_a+ z?bHO^NZ|s@xq?d-vS4l~(UEu{4xh?)2$V~)n{%H(4{lVLid5oBeb35MZ_Q&w3The=bdhSR871MbYh?2wjb)B+P20l=j|BHt#Gb^GF;_dMC6gkVA*RE=)=IfQQC( zt4y9r&)#mSzsM;)g2TC=1d{hKPRLf@AwhOx8aun@b9i7{`5OlxfGooPlbWlOK%a_a ziXr_k+N3WPCLqpgu0JNC*FU1={0PCq&iqeJGm&3PUd3LS>E(z#)5tI*j_9gn>TtcA zYWM#3eBRy4%}as80jn>7oY&;Jb{y)V%T|++r>-c)%&(^&^irSS6hcSc~dfcYQ^tzkZK9BdQ*v zgr3l|-<3Du1JZulfefdMq!ltN5H(Y`KfAzliP@Dc9!l=vveeI9GUOa@-TNUqXpXSm zg2tGTg7c+N)tq82e0zV9SGLM$7T_zxn-3 zfbkS0%S~hh)NCwhN+TxM&KhOf=h3)4q1KB_GjgwTb1b!i!fJ>z_n=4~graRw1>ys` znM8DLkzE1ISIRbiv;*^%!j~&KL7t8MlL(~Ur|!vAc!DgJjFkn#yX*nJ*ixSDFℜ zwWP0u^o2Ap3smoYqO88mJs97@Ux%&Hi7XX%O@`cPdvi-&UAN&E?ng*y>Ntf~^R zFMNHD_+3}g4+TkVQC=%@E9p|il#tosOyfoUtH>Q%xl1RXBf{Q%ST_M3Z~K|oF3lYs zFJJ8iDH(xs+s+lSt*@%E1I(UGh9TWCyQTnOs5QOl>EPC26-Di%YxGTswKWvVIuXvn zdsg}f>uQ&YTLUo@43l$*4RxryxK|UI6b$*9WQT82J@}S=LF#u;vFiJ3B>e&v<5&$(ezcX5|nXDJS%o-YDx|{FeHa zviw>P=UV<{i^_u??uN&fEx5al?k)2Jd6%U6%xD%uT-ns8mU==Fi#I~uWHArkf3Frq z6TNca5nrmxKM2+`bNMK=wh-wZ1KxD#|gc~yPouvH#5 zc-gpuGxNFdCDiuA8=WpLKB~if3zv9gBqaCX;zE0|uPdj6kcc-QP>dZ^N}eWgcy9y- zgLG>+l-z9x-*)lYPb)Z-MtXUTr%hTS6J2Tw!_VQaUl?Ur`2SRTK((5K0kB1ct40)n zjQ`w_5F=%7i++%Dn9{+%F_O&Kc14pN6xqYY^XB!Pzi^hg_Q4P+Fx=PlqiG$lLF5-J zD>0CyDn%$y{Xg3eSXQG4=N>1;!%$hie0#4^KQatjOdZ$G;zDz8Z*8GoY3Q?2>N#FOa}|@m`0?}HC%!=QRVkwW%X3W_O4~vC!?;iG zJSsH4pe*4AWYWrjrh?4aods+un%EN4316law0@~$Tcy_Gu+#{qN=-oPmCxtEbE%`w z{}&DK8(H9>qQ^GUG>^%p{!copMVy@~tzrik>1lnwPSWOsM^ULOrHL&Qu51^Bv1Re< z^;db=&7K12yT`Wf21r8g(#u-69R_LQ$Qxjm$E>@9U?4+5i?EuEXf+w?mauPq#>!nElNbuP7@n0l4JEa3mYL@=RbbZ9TK0B1OqG5=-|vn* zwgS57$)^5a3FyXO)mgVi=G{Fj&Z+^DMiL`X7!=#mJBiCBP5aV5ySwU+mY9swz8tI(i?8ntWEAqu`q2$DH3`wrS~xHP+ywTm z0zG&;Ea+`ck7I6b*H{3`5nAuWgV!MW1ig6vqiJ|DdGAarA6)3Yn^6yqATYSq=bDN0 zE<))Af|0!XEpkoO?n5#^`sp%l1fhm0PyIEy;L4Yr^}PcFv8oD3CxIfr#c+}}aHp?N zj}aX!!R41AQBJq^j{$0s63R`d4`@)D2^t`;l0)r{Fl1?qugF3X?6ovgsjh7I`Zzdf zq{yXd?-eYL-iko2h%>Dl2|n}qw%Ii57KJ1Z8=x&mwERE`+6-PR8S$KyCvOxn-v<2Xe=&j7qa0DWAuo z$S4Mr2>s(@4=F(hk@ACZ(N!?6bm2?D#oOTLKEb_!U`yR@zi7P#QnbB(geXsaOxXEu z$n=2;cK5qtxg65s@cx;WvmJKV`C+Z!BtZ=wV%LIM_LrADfm?YreUN*>Zwq^+4T)oY*f>jI5q;*QRIixy;Y=XsBFG z@^G&m-O>=GeQ;QKshr(OAY z*V46BmC}Aoji-ph7tbycOF4ZL0%tujqveCW*-{A+tLB%VZfOQ*-RqF!0q`0E_6$<0 z7meI~kbwFqeYf}a@h51W+yjm~8`o+<-V38LSWXl66|9l`Vcp3+Bh~%>rtsRDim>)K zJ$(HvohKj0Co;C(pMC=qOo`a!!^g#W2oiPBdM;`HV4m+rJXF8`=cb>w7kEb>KCtQq z@`{4f4|$HU2+0Qp;MibqUZ-!&LF_WLh4p!WJb4aZzMV*W`8B780`s8eOtOhD;d4@-RVox3`N;JC~m?==1Hc4wTv%Zv^ob z;HFIb??+j5vQQkKm;$0At)lc;*@8h3ISI)pz{z8R&(!6*RihJu_QI(Y0$`tqdYsuH z+FlA;Iaw_$345M|>?X+9f^P5vFA)G;8V6fM$ai>DL*aSsdHXg)LDy6ma%@Frkj^x-><;q}A zrrw@P!YkT+3}{LpRmj&O(et|qTJLq}4UHQOkX-f&1ASMNRI6?ql~h%Z(z{kZ)v*^S zm5}{JkFsxH+P~=yUG4EK-370z>*RsrZ^rJy02)Y(d#`NwzkkPudZg!`J#V|)pYJU6 zU=fCT>M{saQ>3}Kfw#L|8lf190JP}i_G_y3zE|Vud1RQJ>(j>5f=e3Sm+SaNNgjs*x-Xdg{CAZc zU6wI2&}Bc}6cO+d5`p*r0gE?ax4uPH`J47SBA^z^ctz+}ZQwn^XmBk}p3h9ZERCwG zhP_`uD@9olxHL)wPu?9?h>Y3s=No)Cj+7?#M;ZWQ|E8RztVp_T1E*g2Wp>4&he%d7 zrs|x_ed9u5rQ_(wlUgM;fd;UMnCQj7(;o zbboVYP36s}Zx1NEv+zBA4cAnB-+p^Qst^8nV`#jsI{QCOu3uO2gC&oJP_w2pZ-8RK^1N|wlsUZK;kpy%4}WuQ;aa5sTh{~?1?+jI zIj+CronqPYJbl^pXJ>?w!qvXj$SF_iN4CB*7x z_QMIFEX@H9VB6&nGxB3F90;g}A?JA9SYkfUjWa*1U%OtUi6Mg&SpP7SGudkmRbWj# z3V0tt5TeNmL9}=^+mw!-+1qWDN>tK771z`lV3%n@-QkV0I=&&xF&O=bo$HAvX9JjC zMD3G^5_n(kmvVDs%;9zGm(?(nPwpnBK1YaOX!hq{i#*Hd7Hvf29hCMza?nM9XZow& zAp~uQIW_d0)Y5j?Ahg&qWQ8u+vGsu>7l5V;TGMY0f?jnoC7cE51^i+pqyGAxYtYU* zXd>R!tvFZIsR(iT3p{R&(X19vmoqfuU=21i`u2C1M_xawOp{NVeF{}`af*G>{~o~) zH%L{6FP;5uawNpSNjGb|?1Q(F_~ZuWO5K>?6Pe@8ji=W+B`}k*56@==BmR(ACE7H3 z2W4srI*vToE#>oIlNHgP3^ZYD$g=Hz)dMlztH*LZbf!T#_6KgIVB23rzrv5N8TPga z;XB~Av;#H(1^55W;?2Kw{|A|B0q2|)YSM+C)Qxw*8#K9qJA!D^e9$r&l zB$y^*^@S5b-^ebfFo#AOKH-=}`7v3N;%Ij=U7#p^0_VK@R`yD7dPX5R+I_!(uf*c3 zhO)mzy^3H;(~f&IC8m&UCX=t^ezD=_GkbNcl4FGloG3Ux#=zxaBYs4(>wDW}~@F0IJBil08nLoGPqWu#e; zwR!bDhjkRhG{{l?uhvSyJ2K>@l|o-h?2h^)O=pyIgc5y8#%dSdGOML7mH5LK+w+_89(_@z35Hc{hc}m3 zX+g_c61eThqT|#0~^Pb)4_Wx zXbC0cQ4#i}C)0(6V7S%7#-{C@+;%~3t`eA$XAuw(h*(0&{`TA-ow2uOCcMK?AJl5p zb{oeW^(mEwj8yv`US*o;m#DNCikCSHqfa*2OkZFNt>@EV4~~ z!Qu;Fm#wc8$9t0@ewmk<2gjU^M?s<4|Acn)DN@ySmWNr1o!XqV^b~Bbg&6Jo#Of~% z1f?3<@1kP8lUPUaUO@B-A@*~`9F>Gz6nL?UDyoTJw z;KzyYDxiYFj}u;uT4(Z2T*s|1WW#@x#&&`C3KaY|^u7W**!P7ySjKBnoQ(^;+S$MK zx<9a+QEs5S$cWB4%LS|>g%zGo8Sq{UHo&~))GPlgc31ZJB|#?w=fazZ6nGkAQHqtd z?BbTr&%vt%`CE$a+=0xsc<3spWRy0g+Zq%`S0^-*^J)hLTZ`@pC_*i`;al5gFq^J- zdH2tH1f^NaMyzaIHH7*C^HnW(I9Eq4)7Mam4Z`1CaZ~EAUm@-fz zM)_bGpJUn$4wRgIw$380x*%RF9H!Ok!+NUYgPc`@La&oZn&N#eh`ra(hrD<}aufRE zVM*sTZ6x3O7r23N(psDM%2Auq(?+oz9|=ABz3K=Bt6%6nxe_uX+5BF720Sri<;oG-xJc0HtEmNcAM}x)HUVYTgl{jQx^D5 z?z{xglAI4mJ7!%5DrtOnWUnzVqGoS%^TE6Y=UH!J*^jAbrD_?Cok?*!R7t-s6~2Hk zwpfuWC#NyJ8?fpklSr%5EVep)mopH*MK<0t$4IG^xi`@bhg0|~hPnR8{CC_zAOhW}64>Quy(>0kfmT}5GH{{Nv4?Pb1KV~#z{2X-z zsu%KYvV%-*@)sN2XC!O_$@CgeIRt%>TF8ye(&}n37!$+ICF%H4OKiaxe8QcN!$8y{ z81C2>rd6HHT(F*2Q6jqV!YKCcOoXz<;OJ10a>79Jl?-y@1RsZ3^evW)NY$p8!;_^M z&DFd0JXiqK7_!dsJ6H3)nE>39%uS(rBds>(5}AqInkvbi?%5>pICJgZ;_Fn_>~Zxf zoF1{ltgVcQL^axZ%xEKf2OItwi8Q7!1fT9e2rg&T;cDNLe@h;inVo1K7za2(<-MvL zLf8WtG6>&Bw>gwo?efph_Np_T3l28E>M&l1kHxaw%4F#I`T5DQXAJP>=H^}u%=j;z zeE}Xz(S+1Ib)hLQt@hDps;-PKi}q5JH65dVz1CtI+J+kp`wvZ6(G4C;)jf-POCxkr z)`L4c-m|()kEnj&7&y#x~$eU%vs`j$aZzaOmML>FK2VeY=Wz@nyBSb>+WaU z5q^4x5mlv|C1<7BpwHIEQdlQc-zlCjmydrywSuS3CmE=H`b|{gHMrg@*}$%#wYTjY z^)9*WkwqL67WGtW`PP8$YCY7})1M+lL$%rVPcXayT}MYN`;#lXQi0x zJ#qe5F}($1(ooC)=`gEAuTPi6cFAxj1Odx

+22^vqs=Pqi#z1FzeJ#&5NINd|Di z6_nI!J)!*}!s{S_Yces4tKv|#3$`)xKtWKZ!m4Io&AVIOZTo7+C7_uk52LwggU zo&mnR@E~$$iY*wPnfpD9abrAMuKE)K zX*zGyqFE%B?8HP|Y;rd5Zxxz6vb>1TDKDYSSs^&}T2402&fkSY z)=sp8SmZqwTJoiAXrxh6oDPcv$Sj}v*n7WUHGltzHH!e0-GM#9=>eRvzM%E z{sfQNUe5H%<>cZVCCSdKu}ylw?YyyAh?W4tgkHA!f6yu1`v^V;Us8{ZUL?5Cn zmGL%2KBAJB&9Av5zylS5)JpB)PX2d_|q2c+ycLt;jHa7&*8TbR_KtIPE6YA z_nGDge!7P}*^<8BQ;)d-2>PX`j0jBGEsW9OH&uweF&M)^YoOtan7K&_?M^mY(|td9 zOIwo7Fl#c<@QZ36OedZfIz#DPRxjD-?e2Dl+E@OjTKf{rbL!3{taLu+=<|QNw$}M2{gG=ds#i@lD)p3zr{G zW{yIpmE@VruV7-PUt3B{5eR_fZo*50Cphic5#2a;L}#mBqHkTPN7CAq-okECTUc2U zS|;xObvOb3{p*A$q?`(B{asHyI{-116Wa z`l4G+jBi0u>m{w_>_DG4)B%xt3ev{*+s03{CbKX1WMifsk(fvi_1Gf<=7j=OUa8sH z5K&Rlb*)doyF*}D4fjYwKBIGJ`w2e>tP<4~GB!$SWciHrw}vOa1i*~!jP}?(Q{iTt zly@iwgX$wlsf%95eIT*><@Kop8R-)eGM^*#z9x?QmJH$U)1b=4G=^OoZkyztr$7&?!j|y`T#^QCR1d>e#0O0X{@{g= zRKRmLe8gL~`l2Xxftns4nfj)Tg+j!;b5ogmeqZU9-B~^HouspqPB)tUxFL=Q=T=0H zFfU9pr);4WxnP^8VM;9h{o?(#Q8^z1SE~OFI6l__z7gY8eB_us3~X~<`iE>ommOpG zbMs+nxEn3nm|ufa%o|fGzWVW$J>VD>SLZ$b2f<&qz=jvohkY5|+>Z`-SFUKBH83@b zn7F;)GexPhe2(6_i9FA3;M{tms?_;1(J~mZ=bGQ(AuNdn!&};2wsQ%I#TmV^T#C#j zJN^u+Wuq>_IJ0bYLh6c4zHsnuO(wdNg=As0*_HTpnZOK(TZX>0T}|mMt*szWQMgEL zugwdO-pwPnuWyxBQ`_w!oD707he_K&8r8DZ0X9z9TT4#GDtoSmsi1?9WZRNH@&C8+i9}W9P0Vx%dIxJSpY8rJR9ZtnT5jS;- zy^S1W-wOb*!mku&n48S}nKV7W!d!l0l4@3)&i$>XU>V6bYr2SUHY*~8*)n0pay>!$ z`T5GCVq#UHioBx5n$t>7JMM@3BRts}d5K2Mh~Hu01g_(io3%N<3hzW=rsH!+x^IW& z(Gds4H5#(qjXFepTaH>`U9VJ4OOk@Os$6Zxwpd4g*eYd5&e~jaIN0d2#<3IM<&DQ1 zro4SZg8X)Pkuk`_^Z9qHNN~(EN!)WTK`&bZo2G^C_ODd^(25=4cV8NfvHe+%GHcd-_CHVmE$S;bnTUlssS%cX+DMju|+4KJA@~)6<`6pQk z&OqjYWBi&XUhMlt#oAo8gjf5XUbFEz;d>HOLF`!Pd`Ux6D;rzJ+H@P*Xj79xI>Y$i zQw`~otq=2+T!(`yS8`!t;awS-(-~;b_2%opO}yh37S!IjAui6yfSL6bcWcW?c0aja zGm~zy{@PYu%Z~9{?za#djs}sD-LU8u_c@+Df{*X(JBRkqz?P)PzE43ZZq5w5G)30i z##$5fs3=@+2BE*p0;gdNJ80#&~_wZ(v? zxy{W@*Zw%m582rZaef*52LyA8gqVIx-M7J3Q`!a^RF zAc{YLN~P?k$IKK;sw`WdTWW}w`szRsOys)3{{GBlR8mdMzV<3K2zL5vp|}4#dWrkbksIPzlh zlmW{xUbb2&VY$IBOQ^(FG@bC{hwNRtzeXFdUCxe|KxL^S$0|CU)p3CkHlb`5MhB-nn2W(|?+P^-#H|iVC~p^sd+9b)^ynB|izlhP@fb5{JM~P; zzF+$w9KHUY^(m`@oxlUJeJAAVsUDQyUXNmF+X-BjtM{AylTh1J+M=Km={PMB_U?-c z-Gi<&R_%Lc!xKTGIRH9NAui1L!a?49u={m}eC%hD(9SzvkvIGOdqp^xj$l3K3fd+= z2f5zAV?LvVxFqSRX=!O$Ny+zTSK#s+0dOC@8I4{weLSyyprZBXcM6;*Z2F3vLcuh0 zUSzyY0*jFPLBy-~l>QH$Mr;Kgy>A{kjGC?QCm)uY&e2+paOFAZ$6(5-D>?UQuD2mR z2rW$PfMP-iDb4f96YfEgrsX@gA#-g$(O**1u2~>8sDOt<8wgnmhTXL)*})tY8dEIl zV>oPJeAKqaQ6a^}#m)2G*{m(IATyR@@N&Ez-++IS`sZx+ks@`#Pn^C{pNa3_pVlkW zLJcmPI`Z17!8?#MOsLD0U!rcIItSGXQ{DWC?1{Okqd8CMp34I9bC0#?7mTDN+9m@- zql_a9x440OY6`m~=Eqsa*RxOsD^mn<-T~G=U{k>*R0sX7xF=421|C2);lmqHSsnVE za0|C1^3ORA#1ho->!Cw35E|CT>pdNjhWK5BLzjU)Q*g!}fycImk5!UC>-@l?7eN^8 z@7LzJ5fxUp!sXCYF4gEW*agEbnuAY&m|~c#f6c36TZ~k9eEnj_O-`59sw~n>6I})E z3`k7F@>}N6DC0L)v#5`$7V~`Co&3CK$^+p<3ULkGGuhyEYa>g4IXqqdpAN6k$nhM% zpx`|-vy6=Qs58WuM42E6?yUsu&>oh0;%iTjhrZeI%qPeQ!Q?Hd$7`EL6 zHTmC%JuMGRBvMRxYLmizF3RW~#2nR8XEEI(-xi%kh42}G;+DIpn%Hu*(Y$D_hjLlq)td7jW+se5+NkgQu4R!JeNc0)=@Zr9{)*6y{b^qf)V1)GDs!q@;G| zHR`7oe%Kb0=`s{s7paZvb~AZEBjj3Q6m7vM4N`;1$kBcG_m4+zV5c(jAW>mO#l_z# zK7$)|8FtG6+x5cUvXir^!;dfg6`+-XY{yd<@^?&xH0isj{d{>&7ET5~B|Hg0f$zr{ zNjBii*yy>>T=s?Id^DO*#qH=!buFcO>6-+s|1|EL0>g2Oz~fP4F{6d1?xGiOoq0?b z3=mVn8Nhm3b^QlWxXpa`Zk6j@;NvLfzmg+-kdUNPRgZwygwS=flP3#JZjlP$pX9QL z#DfgpguC-o>2h*GX%K(Pt0aN%UAjpAG9aUJpiWvTwRd^l8gC; z*VQlSlN4C4PEAc=m+y7%Z)i4<{1ulPj_x?D2b@TG-3%X`2!0mt6qm(I++f|CPzvT7 zWA{5$Kqsb2TpM{UmL`Bt|LDZy)Q^(*x6SgDu23&5E)o^y{0umrJ;$-oYv%FEKO+9f zd8EEp7;xrBOu=)|EV~wq|I80gs^kls{t^YlSG-PF7=76jt&qZuCXIV6Y`g{3%9D>h zK|O>rX-B{2IN6gGf~u*hVYO;$Y3Ws598Ej*XC*0AXQv8)3eiX;U%Lp=pD7N!7&)xP zCWKbis#w!Fz%Sf{z?u1LA9twut#a{)CUPoR-oBEY7VuHWoPtX$<}0prGr-VVyG)Dy zQtS)crV>IiB6^dbGH11U%Vh_n9)g{=)xsGe;qEykcEu75hzaXWpM7cf+c`bNmTZD9 zA}YXn6b=-mHz5=(0?98sRBn%Q2?Qr{Dq7U{mM85Oxs^Q2*^IJ>241WZC zllVx$PiUR%KXM5^gL5k7j(2oK8jZr^rJ-ol(A-!- zJ2alFFdC(ZmxfTVYS@{Z%kw@)wt=)Jbcl(AquXdNlr3J8|82mN@AeZ#N}P;dwgWdd1s<*%QeIIp z1wP;#;~2JL#QkU73eOyh)iN@>&$(2k_0t`Z^(Zwu8f_J#L3@W;)H*H)5iu8Az@sv@ zLK8Q?tLNLfke)jgBUS6(>?Oj*V0No)>zmHh_JD0oXV3+Sz*E&nB_5AFJxwnALWzF$ zkleIiU7r%=q%WCD#N`=iFs#vnFU{ah^d~xXff|Bp!Y!K*;2L`Aw2{JPLDJHYpeH&u z=OvD=A(=Vw1D8-BoD=#OckH#W-RaLr3%uUTmo9i4?jyvky^ z%kBi{E04qU`D2qek?NS~5y9=2irl8o0bSQy5->_JF+09ca|}TBYV#p;6^-+jkaKuCL{V||`WD?ztD zIhh&QiO{n|PUweEs~wTId*Si7A#hgV_SJODqL>*$zr=Ost z_Wo)AR>Z~tjl0?PyaNi~Io#vO+65>nu_64x>Z$%-r!EiD!%7_3e^&tTwt2nHGl*a5 zj%0ggZtmbtG9al1-m1mnivM43p#k)t(}iLGzVN=+qwlo2rXlzeQ*v1wP4D<^R&DC9 zb=`Ri+&we%-V(XvH}ImVWo05ANYs(M2ir|nga}PSP=6iQf23am|HW~LQQ@D*DJojJ zYNk34KW~Dql>Kh!2#vS2agraI?*eVtAN0`2{isXysCi z20Vs;cA5&oaEYS>+2jo^G1;BM{-Yp5fqTv24+^V2iqQ1`)>u;(O{eU7PqyR?l>Dz2 z{z__Xq7hsCg`7826|s$hfQvu#{rj!bajCBZ0~}z~<01<-M7tz|B&2uad%$U;zx){0 zPe1lwsE?9gl=aWI6UqG5b)Q+(Rb#T!Xy^(0QbaTT1J;+bQ}M08zkhPAkpU4Fc7DZ| z>UeSuq;Ma?u!3Zp`lle}M{pOuTJjp)n?)skJ*u$gxvW`CJ4O&@j6bTMb*I?;0Rzfd zeL5cy&H-9Z@lSJnhtUpys)L$G5Fq;BFO{m{=k)04r6v{G|7z1f9yxiwr*hDL1stEw z^VXfN{(g3BJ?eT@MTHK%>*z^va7%#I--7IgVC``5-)Ba9`YHZYlSxHO<*w&+3p>@{ zj;s#@Zs~Tc&UM1|H4mRyk8*3UzXnEUgxmifoe?-v6oG#$%KSxZqo0cMBK3bv&hXjk zeZ2}+D1hsyHP^}8L6q$eI`@U+sd;iQH&~9cMcNR86?JxKxytuK;Cg^I%~43%2%xDs1ey?!Wct zfu>(q=aF^HvHvo}Ja2~mfujOoS{#TTLxvebCKeWRnw067s*(~7PoFC<{2+rvla4JwCeo{&j0 zOGAT|6`JS?(_aqg2Q*pHuU=L0wekEFHRc$xxn?!U;fV%~9L!PUlkDfuaRG=@Cb9|e z8Tr#^aI#OugJjz4O4r|0RFx?R{6EINJDlq`Y#R|#NU|a_voo@H*(*dw_LjZ(D3rbT z-YK%aky#=;*|PWEE1UQJ+Vwoo@&5kNa2%g|UH3K5>%4svpPq)dX0kuGh|5bpaH~!` zcKrVRFd-pfSjxd4h|*DPn2u@n5_poL*WZCg`!b)-_stTzqZHBq{~i5!BeQ#49!t|r zygc!L+)<23(;XXAwb#z5r@4a4!dh-s1$lX!sahpal?*nCzQpPhX`JHSV*h|azvO~& zAV(^%W3(+)cChE(6X~8TMJ0CwQCy>iTNz5?5M_V0VZ)vL(D(N z5s=OnDGDWO55;K7eiX_=+it|5rj#Wqt!?RQ57_XErNh!^In)q7*WrSM#$>I@AH722%~Q!n9c47>YWk)WOvzvHtZ2rB*A?@qVLQg!5z zia^rgt&IFZl0Kc|4_~|N;yXF*Z$i-JvhVi4&vG_UV9Uj!=UL&ln)yJiR4(g6T^G-4 zD)rh~Sw#i_($5p$bvS1x^aKbYmU%aA;hDO6QddupY z$Nn{{b{XNiG0~_|_OD&=URQN8**(h-!Z!__SJMz3I4k+2ZKogmyl<0*LfZ@rq1&HC zF4-f86Z>~U>P^Bb^2FPbSX!0h27o0#pp(WS+&?O;s}m{(fCzR~77vVT7h6wM33lQ~ z=)Pvw`}T>0{SQU^MGlJgZ{^tb(>GQ~99ne6%8=NU@eZ9RCZ%RZ@-~pTvobjJ>AZe( zxikED5L^uKBiw2rm#$W2*iS&4K{>EiXA}yihLeYt`g@3!Tk!VEX)?W1uBh{e`F78w zR8*-TN5^=l6)s-rduHI#u$SyHw$N5`Q1J;5Hy;V7;K}4a?-GMu0QSO<<42zv zyH_DSTlvunI_Gi9#^l}xW~)GOk!9m-RD6Dayqo6d$;~PHvH>w>_`9?o5bhnH$ou61 z6z++2cgCC)0i=$Gy(fSoxg1Q7jYr$jtyfiArelGNkt*O`;joI-Nzsv9>6Il)3&`^f zAMN_z=dcz5Z+B2Z#W>EaYB$0zW>-QV=Q1uK(gnLG7-_yu+9sr5LijX)BA57wN`lLd zcnOjQ-WRWr8DFgrT4$`spO5{tfIxo5ct&OtM@%mmg179Zxjy<5aAy;nd?kSBxCP1! zzE6W5Db;@aK)Z}|yD%l=;$Y}$2wmxP9zx5s8=R&SXo5cjqIT+yS4Ku`s#x#v!!GVH z9|yFWL;6bwi_H?1J$K7@NWmGC!}6)@8q+?+N}!!K(u-YwS{R9TH$?M*49?JD^+{DX zz_#u6=)Ux5;~{a%Ph$?pmB`mEp(}c!V?v7Z+lj#1==Z-waDR`h)OE zr8SwFVK)6JqxAUZF2!u-i8p*OuTr(U0vlKTgCdu@W#354Bx+$&1> z`WdFy<}S8(0*QG>Muvw36jwf8RnuZJF`LLtO-+pgNxXEmVBr3M1dD<}pm2g+U}L+0 zU5w6QE`3Q?oDOo-BGIF=E}sbr|AOQUoH4vg8@&4mI0-~G@xs#HWD?qT?V)vz3@wi% zlvBG?HOG<0K8bOAnbU`oodPGE%s!uF(qfFMg?fvYmra!lb$m*~c+%>F2&1fKS{p$$ z;@PRo6O7}15yn$efL%d{)p1*d{My%f0N`ZB1|=dgs;*Wsy5t^f094^V#D02>aO2G#~!_zr|X)eR|S;OP; zjGPRv!7DxJ<|+(y=BYmGmAbCx(`?E{8|4l5VmE#tjZ$P7O41l_hT}Lc+)N`|kd0O3 zlX>N%;OCPei&7)AAnKQ_W=s6H(_aXp{TDS5ZEF>VVdssX>i|}w4vK2l-vh6@6L?U} zH4ARv{h6n2vH2h0e_B?{Jx~pxr~93Gbt^`IDc{4I4*zV1PCvfY=`9 zB`W!l6cUbZkMdI}g-n@N`JsRC1HqK6;7o1xGaN*mAry-$AJnM}QVzB@HjRL+iYSO{ z{asgAmpx+R!%GXnHxI>+P<#9yQDqq}5NvHsruQeDEWZUWv|^oVub@e4$FAI;+;%_H znnGja4dGDVz$~F-A;ZVUTF=PPG1Ii}M^I@A5PL_S`N+|?&a$H1`aW(%-L2tV9NpN4 z)9RcW;rbA_0jH4N2&e=-M+HT>3U^Jew*FKkD~M7V{|2Jl5|Y1= zH4wPw;`=0(Hce}^Kdxx2I_S?RtqnRi$bZ6r7ZAU}SJI_Z1)V?!+&i`$=Pk^FKE;xI zwx*&dfx{n`&VpPKJS=P{_2*o8#RS@*6VZ6MLJ8!#(=45@)DVb9p-wYewCLL+X-?YB zqy2O_tx`p2mPnI6W`4LnLQdtqbVtOtP{(OJ706n3P~DeYyIb5!Nm{zFsbsqEb6J^V zfJ!rP*&{(SXXF+%m1y@qEsd2FoRr443_!GrlSwcE-oJ3u9g1PnuXv+_d8uAu;ON1n z#S%jPFrps05waaUCGva2YExW_MqP#w4S{K^<#DmW#2dl@3zZc-$3mVTO>j@eo1Uqd z8RVRdTc(X>o|CA>n)!{-%uQjMC16h(?=*C%>XlbFFg6ZvZEfY#<6RF4Y5<&SP(_M& zRgpqPAZ$C`7ayE~j`+%BQA3~CF~eog786X+TrnqV1_7kS5FfqD8&p>W(zM6e;66nH z-(zZZtfr&0lLTtopcE3OUk`05sj2U=#^s?$$Ge*xmt=o8{9(W5uo8WW^gP*zS*!{W zKvX7d;oA7=nsU|w2XThLIJUy`XZ;KB3+)aA9}08_jRYzNm8J7$nshU-^#^A&WgNDg z8v+_;3&#((hxdz##EE14B8jUT$j>z~j(`SmO4d`*Tttjsy>c)x6=}7$>CAlHcLuP! zvvmT8pX>B(w<*lcU1}0#kbdiO9UE}{+6`K~1uv)e>tC-q{~9#E@dY@Feezhl$C1yf zCD=40-2x?IV!|VRlgyFUup=`YNKJW799QqKQvXScg>j6Hz0I@^ihJu5wK>_K29%A< zK;i}moT|U{!)G2)M7#O(Bx!x?kJ<{9-EenfhBADH-a_vpmh*TH6$*@Ih-Gv0+<5Zqln9;H*Z3 z;|JIXB1kkRE-Zu@m*5iunZ}#5;3Vl-o%^Kq=Ka7zmGUW|jK7l%q|GWmK6>;R>msk@ zu?hTTESBxG5_W1g0$ZDRHY?`7_-~)&X#4zu7t*J;T~iPi-^U!NjcH&F;0Cd(b#5_p(a3U`J)JCM%?dOT?8yJ}dtXZ+ttPM&}&4nfG9tnt~n0p&J{s?2~|z=8rE?|>jh9PO}&>BmRtTNPuuGVXh|FA0}l>f zzhXA!y>}c=iXDmWb7-y*#CS%;cDW{u``TX&D22XI4&c#d(2Oy>eN~ot3R^N0Ims<7 zbo;h%K|nGnl16EK3G!L|<>#HSXvln?$<2xxFIqwdGkCn2sptND5^?^ld#tv@Q z4>)Bp@yza8!@|U~;@mXkwB6k4(to69*|&hJbBJFL%A!&@Ur?FE-_8hIHNZ z9^wxMhPh{>xsL06A07ixkkv&+2U=K&H7#0|7wBsIUZg$s6pnoU9}VG}i}UvF+dLqQ zI*fPjFf=x#1@TaUC!KmpnZCeZ+D#Z(Ga#K6rSSIxavziA;=CdI{<_cNvtIhDkq6s2 zN_8zF?^?UYi~tx{;6@>jFbwUK$ZF9u09AYTI)_mF+tV}6pq=(*&&jH?VL8_^k#6C% zo8hGmOVG&hW+rQ+nd?RzmmcF~Via7^JHTkfHINR9ObRm1(2x7FdTw~hU%!K6+wW{@ zeQ?Zg&G3xIFEAs6GiGPBzT>&#Z7jz^;s9lMk4T8fo4ZoUWYit+CDAkIvj5HD$J!bv zG3f__hi6!xEw6k%b_LxOR>_YgF1<>0cRr8;F2eVD=&A-9J?i_igY9D3S7@mBRA$sw zG4B_=&4w5HI%$6t0as}^tfVjrLCLtK?Sa3fZrcBbaGy!b*7JXZNynvAWTmB?Mk100 z=kUkqRfE&e_xk!c+R-T5js;tkjq4RZhS^$3k>GYbnX^r8|HUf+Zw9^%QH@7N$by8_xH+m+P;pyo8?y0B zTD;Ye(V-+Qt<<2=@^S6$L{T8K?~=IF$lMcCr(&ju+GJyk2u&6ZD3ucpM}A^*pH&Ua z%Fct7EmC?NMqEmIdbGvwz?|si3CheD{uZ?SA5T9or z14j3tPF5KLtx|%HO?Axrs4&58n!EoO!15a=xBN*)gm-v9s%qt=b}-)u6sb9Qx3FVy zaB->P*R0dXzM2Cd(02aBc;3DZ4NE)@K-AbAB&Sm(QCjUwqru`E02&={nar~YiGz07 z9N4rh#kEW$tG_yBS^1^Uns0v%o;KfB0S7;82&5({i-?k-rQx@{2V+FyS2Jla9yfq~ zkmSQyalgT;xQ3yz{T>*^_acR8$^zG9F!(@BF0rjxLtUDwvfp#^dPTFI2dsI(buy9}Oz ziCXTl9klJTVs{9i{5GJx{+@DJ*6!RVxN*|ACcgq2S56`4D=^MNP-ZaT)@_$wL3@I{ z`a0Anb5jUu=>j8#67@X=5D<{NqViA904X!2;MtSZ;9tqKxty&3fp?yb%RDVE-I^RF zJQ^cZV^>v3wbJk))?&O8nS_Gvqs+{MW(F~%se@sZsnCg<)gQP=s_|FT-v*KVZ9W}8 zY5-Cbc;^ew(3f=o3%qrA=W4#lT*d8>=3$5V(@u;^dLza%@R}^U{i+6$*>csssiBz} z+CNu#m4vUlB}hVIqHI{>2PWM>od^u=4Rs9& zH`#Ot7CxNaOitWWV?N}|5Y{N-;81D@6XuVKiYlh0)c23R_Ba4~vpGqgl*y*D+Lx4# zMMeyUmhy+mX9kFljbjAj+r)5^k{%-ELYB`>*Pe(sg{YW36F$Fc^xG!|I+;l))wd`r z#l|7vO+Ez6sYn&m5WsuM*oH@bB}LA6)|`5$<}ra;z7|p#?Ok-ft1WmwiI^Tz9=MS3 z*QC_Qw`nQi5-^xJ=FeSr_|ux){E)B3?>P!3d3mxESsxx&%g)ZeQRrEE)k%&H2g*it z?Wvng{k@#oG#%l)0RDyCG?(uY*%6{IFOy1f(art=sXdgcV=g}}F41<0Fv7;EC~;L^ z3cJ)?3^ZhanmA43GXBCjxDI`oqq*o8&w^ddB}xWB={!Leo(8jc)z7-T8kz70br^_B zLR;Or|Fnm4mv%kC*i3(dwYUmP+ZQ5Ckjf{{p;LXI(7HozT2`TH;Xs8U(Ud;2->6cB zp{yzT%+3Uhl$DjG1Cji92dRL%$|S~Ivq><`Cq@le!USH!ITT>yAOAFSF4dc*hR%yLFteBmJdQSYVUMOk_2^NXlma zwV&i&VF1qntw;y6+y`$$?zZUj+%dRy8SVis7>ouK#_ls;^FTUQ#oOMs3O4|{>4ry1 z8;x?|qwE{ro{1QqW-*2k9VLM~HxX&ckVXVLIc5#d%im_-&dc9|0YZRCD80CtC13co z=#6DhfO0jmesmWMC3o9*rwkPg^|sdbzYTSYQ1d=PsFfc9WxMhsNAUq7+Wk$`TK&zu zqxrl^KvP@zOa5xaNc0Q`4Cb|lQlb3zUd}hEB0h0A1JPy(&K#yPBcn+_M?fM(GQc7* zg=|7LGq=V-bf61oPO-!BwBjbCZhKA{QAUT&7~>DnDu}lFAe{tI`q%QbVJ}q^P=ug` zfQC@)cB|;#b?WcWep6`zl{*HnsB@oIDO}=~0KYUIX*C9RTj(WmgyjRnGqbgF%lb5t z4`5oKw$Cj23ZJtOupwy81={uh+N|$DXHds$CKH5KRU=Q*gnb4Q50MIzX-y$Z>&U1q znibv0#GN2gW+a#I=k$$)NQcOvl4oa~rSU29PTX3?w7V1e8yLkOK2TXm#~|$D^SZt? zEnfmb^xsyC52#&)@A5m7q-OcZ+ld6$#O{&SP2PP~$z(+vS#jI5q9y*(3uwxWtafD% zfsxkD@A{5NGqU~$yziB0Sr>ghC5%8N^@KKfB?8`dzE{zI8RKN=SY!&L2kEBy$kuUF zcmVZ=NB-kk)CqXB%Vaa%Q0t~O3N3Clv!tq!cr1C{vy#w`&eKf$tz0N9KyJ=#(~?)GU9 zCgRr8xLUqD1K#?8QrZJ8soTI*B3UE1BxhifDrvr(y#&>r0M2ZYC9-?MHp=Kv86)nQ zUZlcn+JQ3#y2+)9ew`J3{*BBup_=1KuKVK;;KhOi$ zw7C5j!dia8@br>hhn$qqKojNZRqo-878OE7M9j#^F%HFjdC8Fic99wEqF@cfit6(b zjj1J^I{_fo0eaz!QPapw9A1(B5^=>+AG^+@%|e{>q?Ap0 z{<%Re>B&>4A1L`B1{n$X)Jlq$4|=mJs*&f2n{NWce^WFZ)-;vuTBuI2-)v*Dj^Q$u zat9ax_0qSIH^m}hKYi$ugSHlrZwx=PJ!vie2{7r6I_VV^gC~o+em%Xg(DL6!Swk09 z&zP6+d{|>@3O&pp1j_!B3ATZ0WJ|JIkSG6JBlyRTi=gr7^mb+4O}qE>ENEo9|F!yu zA9nLt&D%pdCe$n8nnwe;%6|BBllMN9RmTpZeCh{OGkw8=+**9%j+50`^lf3ZF%mqe zkZ@;I<2CTBP*LvjXDR>Ou4#xC{{8!r=QkO=`Yt9ZDH%+5wXo{W#}C$Z*OPun>n^Ke zb)}Y<53tETN}ZkhkaT@Di7jOAm{|7110nIfsoLj3ZK2sQotOaMSk2*5sH zSMEJ&DweVdM!TwbF`)%i^x{`k%ivaxY0&Wlz01E786yNm=Z=}T$z6btFEGLrU16Q2 z&rUPK-aY3T;V$dVTAw>m(oMc$D2I3==xF;_Jv{{X%I~$7JNGz{!O`n-v;f8psD{WN zB$G*EX!_inZ47;CgnnJQgXl)&7w{|5baKtw>~p8E?Iid6)9A;n(0Rq5fA_pI!sVZ^ zWCuD_Q0`2t#ju70o=%%G&`%fL&!71>q0Zs;w~^uF#r-(Sg&dVlZ~VliPy5T}@BPw{ zE(rhmaJ}-=gx4v0+e7u<38uO5+rpI&RuKLOKut-w# zhX*S1@)2LMT3$O}Ke@z=4H(lGiBfRCh@eC)#7|;Ige(R%ru1W!ih&3g>oS+xx5-*A znn3&$ja!Tu8B#KIz6==LrR|IEp49SH{wLgm9+h6!Y2yvnLm2=K>u7Jk{g3a2P6m7o znT{6iy!U4L>yH{S#cw1-sf_p(4Jct}vErUeU*L&PQGRiJ1~LnVrvNR~^b?b{0Zdl= zg~^IOH(C4}f0--+uzQr(3AaG}v(1nNH6JpJH|-E;L1FCYps@61Cq;`5BwY#A%qYaT zZT>C3)N*N@!Lu6H`ik_MLG1x9SMy4k1^C%OQH;hYRc;Y`>Q)bC<`-jjfa^a;vq*VR zZe9ima7+f22%-u&TF)IV)z-)|HiVwiIdP6$+$m@4Gnf>g+F1wCwf}RlZZMeMmv}gE zfx#y+iIrvHImq-n05Io<(ZgxuwFmv{Y63XI{+Y_!(mO zU+ZfDZIDOo7X9Nte*IPGQfW9bSdw64Ap3I&CevuRNl>C}aO8;*3P=}D_6K!IQ6QWDBs2J4LY0P~ZA(>64as`Z#_>W^!=TFi)aKWUP1~7! zfE~)AprF)^pN+g$#PzV`sOrXkMXQ76=7L_hB#!h3pV@I+#A?0KTu`$bwQegf4?5D5 z(l5O zAO~3g?lZAI$XZ1gm9)I2yZOP;EV5&pCb+~^BPE=+s?#np7X%w4jds!p3aDIv?g1(y z-Q!0I;|sBz!w*q-%@b&%-4c$uKJjVbdkEY=@E`EaLmwr5jQ7qT=D38CF+6(&i8ezG zE(8wgzQXIAA&AvW=izqw|;f76sw&wHP~p(GIp zwPFKG4T5Io_IAswXoS59c;&)1mqw2a+Aq z%|U_1nqxZzuOd+#i#aNfd^Fcj%iqjE1r3H3`M*4p^T=|NHk zp=;XWVfljLp%PLpX3njRQ3?b_BnDDYGr_=bCI@EmPVgi2P#9(88-cos@ZV=obdXCn z^|2>-9R%=aumXCjdu?E*86Q3v2^fdnVrFI*5f-l3k^6Fmhy@0A3dpmdr}GJ?PC}%E zj7!hpmR{dhNw4td1gw1>&~BQ=yT8`pY@zbFb+^tgT}MGz$7w!arZ+@gyyk;GiPaQ0 zt7O=UbCGYVlFrkmfrl+1hy4&;25{ZUopIfrB^HCkiQXrk@gHIjpZf%+-@VQdQgEgH zCq7lH-@wjAi47P}J`$+$6cOtBQ5Y}Ia`$H{UE~x_sbJB(+d)c&sW(9`7xaJFWSE;n zMzgRzlF?i%{%G6;x!hSw;nzuCKC(eapEGj2_UZWQb;RFSMSXjdy|)K1^L_ezimG!N&-*W)fBB~t*mJ?Du* zL_T>F!3Q`4e?zp2I<>z{qyEbA92ZxeWUc0{-CO6`v*7@AT* zNDeCh7GkxeUvCB_+Okv+*xPlQxz&5TeiVn|s^9mV{{^!f|EJ>xc{$0>KYRHbp9<{` z)sLS*bwa_O!4p^(1wP6lF3=AgPE5QA0}c*GQ6eE6a0z0fVP6BH2CKnfBL=toZxIN( zaf+w;fk4XAfP~RRx{o=KkGV&`cXkLaD^cH|6-wlWq4DzTlPc0TvGbp}ic9j~Zu-$L zMWEy+{i1J!(<{xzHQPOsTQ_<#;8^P-yLJ}>*dbB~e@?sT<-v?$NAXpmxh5lcI0LEx z=Z6dwgusLminTPp1&8_;!)l!@IPXkNuBgT(Oy4UniFGS$37|9JtUL26Dykn{X-S6h z@C1$~N(>B?jA=Q>zDWY~z2T=VtPD`xl_dP-@_0JSsSL^>B^aMX#iM@sU1GOZy-L!H z(ilg8Xateh+w)D_4L|~LgW?+-*-rfXp>#al1udfRG zhK6XtGXkZ0#vkb4q7sCHZXT(}0Si}EhlnRo9l^hQU6-*7_X|cmp=?rzNwIWs$C<=C z-{D`JUdE4~+;!*6Tgn0tl<^d}Khnsplp4k!tPZeJ7QKCNcw=&7RQUMe>N9M(Si?MtA*b~)0(7n zsd28Ui%)|WQhfvii>OyZM(!UAiF_Z6tju$3`%>)R%(G=>xL@yvhHc&;|Ax1$uT;jx zq@kOpDB#<3G*@H4DEo0xuNj|;%N%N#jV&ZF%Q1jr76)9YxN}cHv%}9%;B6()Y>*r- z4K3{!YL`9)bhSw(a?Sl?=14(PhtfOzn_MnRrZ3Iupo~d6^j(<69^>7|FL4*IQz37g zx^8+_WcSlZeelbpn^_@n%mVgG6|lt*^sX4gPc zM5$z273O*U$R>==)3E;csg6P!{98xK0T~w{o!)62XMV(Ig3is`#iyO+6>qk13GnbNhldo#|MyK=9AYK%M}!lfXtsMTwJ#9doa(Pc=`07raWR zuC&gfyPvdTdx@Zc`d^C7qqx`*&Nr`>T;rqf(!+iQ^&~dFUYNiKV$% z?V-6blH_#sfB9=!a__)ks`F<$5?%~u3Fq3_Ulkf?gyIEre3ik<^Nya}2Gf$K68Q!v zrQ5cg93s}*O{r+Xn_a6*6##B{A7hyU$OA|zGja`b&TIu5_R`iv!9UHG0i@9(WRI>q@v#zs-;7Z zGzR{6Caj=dxeM~cOdqK3FmW>Jg@VMPy%kyBw*C%jTEv(62EOmm6^ii5X1y-NHWILp z8w{nGIgF{;8T=K&VZM<@F)7)tXW_W=wzV;AhK_n_v+~JGIKoDroY5WaJq(=VQDJrI zwHA2bcY4yqjFBial1P)=`$S4KgtGGlo|md_Cmn~coR+G7-k?L1{*}Z_4_fCOS}HiE z+ESiCyIMY8yGrf`11ou=T%+#|Tb=lwP|~75=j=|^*yLZv&luli=BxMj5U0Am4{yv| zQ?DS}cH4Exh`;V;T@)TssgNz{KPRrS_=eD>vX)5>*iAXcjIT)sqR03#;}4V>$>j8Q z*cGJDkZdC-C9VNzEGr6- zVI)th`Ah_uebJR$5`6ij&eI;8E+-g#%w09sEdg!qG;3Si=_OZH)daw-!??10wfD}Q z4_Zw0O3y2!o%j_vLMmSrPrl>r^1N8L;t0?Rw{Ke1AXQF&YP#ch=1j7N-jS+*LbN?f zkvP=%*aTSYe#ZOi6$X?%#fAg8v>CJ$Yjts*CuxyX>9BXF3Zon-jOT9??uTb&R1eDy zo%VVKvQjSol>vMQGCkBVlc&a-Enq?W$a&I*%yP0o!)09CmU3muZ7rkHGG%MS;TIa` z+BxasL_fpy_|gT-k29~h8rc)x*rf{l!Ky!x*7%FuW(I*@NJ1t6N^fBorMJ5>sbqJ< zJGE=NJ6l~ypcG?FvnOF+;l|g`((!B$05$xo|A3kKX_@2Spf49XS0aKBQZWh3AHw_R zHQX9s@F|AS%Ud+%r0$|v9;)a04%$WBtJ;1tGuddxV|FvB;kHziAbdxmiG8pEC?BCB za+Afw__d^w`Gv-qKRhXt=vB?iTRk=1DKL8uy3`Y#Soguk!J73I3bL5=Pu|<#L0NWc z9^8&vyP6I;Veh*eot<}hXtmz>OP$6X`*@1Hs@&L_`ApXi?2I*3#qBK@uEAVStfFD1 zV`Ft?D_l9Z-h#!1_xhf=Bl`TAlzl7G7KW;nUxNtI5x#Hp4vuy%7{ST;;fL9~F@N+E zVIOnmnGIKVw*o5HZ)ZjEU%xA__nu~|x65jAE>#2!JcJY&?$?lIZYp`f=Tq2w9 z-=7(PDKK+j1%k^~2Rru40C<1Ue?EA$SW#96T3+8tBW|6d=ImRL%Uy4695I#C0qq;LS@Q7RBgL;FD6`_KF+DYQ4|CS#i*b<~;?jk_+cxZo>CEXcoY&eFqE5 zO(a_8Rn!tvbdfx_xhurM&+xO>~O`mQI?v7)%LV1neemz`~!_ta! z-3eF^K~tqD`SrE+gKgqO%qq7sICB3;{eHVA(uObitk=>qE;tk6gGNlg{vNByGu|Lk zDxE!8>eop+)ofs^r?u>k?Jy^6_M2a~X5yF*mSK6&;C>C7I!Q?xX;!3-h^&gW3rCo2*!&1@A8a3i z@b#_~U{-4lCQt*F$7cekobF2__1uzw6ldN;s;tYW30B{4)Cd2due@h}g8yGM{V^*r zd&62tn3_CjkOO_6Jf0`$p0(Il4&_@F2iq&}Z7TR4{DmDO;mKE8IZzE+R zmKEO!iWiJa4o%Ne+E513YCtgW^>>;0M|MQ4Ka>vZR=}2Yk7D6I1!r?}db}BdS)tz` zsbRZR`R-6)7iFt#KWOZokcHqnj!~@Q>7b64w;aE=SdQU%btL z5-TTr5R zsP*4)LgED}l2diFlo5tOWCjpSl%D6NzLAR}P8cU27L=}$(k+BQx==h;exK#ht9nbX z%ohfB2spKJ@hK3H%&v?5E98HZ-d6cE(({}7QdIun_Z`2 z|C+!kr|rRxMd?!UfODSJ-*@DMa^{I4=-c&w906^*yjc7YAj@aj3PB8vBo>bCzttY< zklj1Q9D`j7naw_{Z+U&Qqlx>ur#5kg803O>ssq}A{EF&lO2+d{?#SeJSsF#Ta!N-r zlX(&}d|JUk=Fj(1$(UsGCJY(rEe+e{05%LjB)9O4<1A;hVH`QLb`QVGbeUZxX*lwDC{Lk?3{&FB=sBs~D8r6*G(K79bD^$?U;b zzFczmz0|DdS-c&i?Xu}?a+SEs9TbO|3FI5Z`sIn<2}XJ#wn?`2KW*L@rGNSNXCIz? zwi()f%qC|7rpa*_ay=-L_(#m%&o;SvGIU%R2qtb*mlW-9U6cS zH+uv3oUXM^_5d$`3!FTA+!pA`C2x+T-9R3&^SAZ?M+Fd_r~o}Ef-*dt72H&Mj|Q0J zjD}~0A%OMw!J7*}s;}@hLpGpOTJ^Y3TUH$_zW*UeBS@piyGmAsBLZ!jPVaLDLa#dy zg#KcY#*+Y#*>L`z!<)#|ZoXKk2o=do&-_Y?0!W3tV3+zx@Dd0jD*kF^>(yl=MW0a5 z`1d)2&yq~z17_NbrM?8~Cb(Sp7@o)i^d=S08-hg_f){h4pZz~19`D2e>n(uQ`$t;= zFr5ziUBJZ)o}RD(=@0AkfAEWsJr4fUsiQV&+?x8~+4`_F^N0#9_EoE_W3<6ILorvb9-=+s4kW89c5_j^;ery|vKW z3>ZJ_1J>_erf^~4^iGtN8_Tc;rO><5C{R>RKcC@0)7>?6@6_3~D3p;72nYZ#W5f`; zN9k{!VnF^WrhPm9HGJqP6dP`V?-ttmlF=SrhY4v}w`9A^B z`fc4gq#IN;Z`ekw$=I#Y`ER036y6_9^l7@!b#sxQe^xWaDnygsDp=?CpYWGlHY!7$ zO64OC$IrX>tF{!n_iuTokCe>kI`aMs;)FZn5yva(*)v9XytZ988w_ECNR9 zgIMT73w*;J_V;4$H1cGPHVv}2*MCb!nuOVq7jyl*U&H2vpVs)IWFTVA-nhf77>URo z2JZ9+{DW_=Vg9cxZbfSI zc&nfjiKE~gD1!+`jHg)%xO0*5*&EXGMdr-SMuXoaz0QxLxqY)aieYL_z$>+LuE97e zrm=T!E;}bLFU~rJ_H4e;ne!v%YfOKUV5qFNl&Ckq0b3XjGnVlgMyW6}P1_g7L5zPa z7;H+RR+^kI-#}5VX<_)Ya5hZ2#K52+-B+)gMYNrqR00D7{rB2%&L_(UT2?lf1X_r#q` z_nY}|MRyw9u=WQ}i2l$N#L=X!J|+kxlP)nG3f7=jk%F`E@uhHY1e1}_ z;msh`hLhQbO=0%8XXC6xN=hgIU`|M@yu7^d>sPa`w{=&)N)G+1?|=Jh@X1$yuVuGw zSf?nc=BZU0<*?ud=vY~Gejy|{mDf|D6@?0IH}WA6S~~CM+eG5*Z1IfXfP`K$)1ebd zBk;xdfa}ozoS6n4>9P1CIrT&1Cs|K^c9QjujioFuF21bl+Ur}`(qY3l`kZn5iqJkl zfeNi-%GYYkmLKNqa5|KTZ{>vGbQ<6J)gi!5X2ZPe?1$44jq~+JfJ@Q!8PY*OFxjy- za8RC%U7}mC=3v|>)FW3%yU!II$7&Y)&xJsSe%tr~__o9ka|Ae@D__Hdn-sm~L#_hYeGhGzPp((Y@ZY6QB%7t^mdVbBcgnD9)Z3`dq$|Zg$3Ehvow|2pP zPi+Z^OU0^@(M3~#b?kGzzcc8DbgZ=1yn5-}9+$wjwyRObVK0Y4vu?hz-&dv0Z z6?%dRge&_|a5Cq@|1{?+=zib{ZhY}EbLhz6;NgkP&7~ch-mnFf>FYad+JGaGXgzg9Iz{D+%R}XsGpz)jDF#7TZbv@!cF?H;d=fqrL%cVPQU_nEPbBz5D>Wn& z#-YPARVf8ViHbDUBa<%-+>RK%5ICbAokZgpW80{obsXH>5q)$_Rad0Vzz+sR?-LdG zp*c@SiTeJBgSkq#_aoifvr;u7bgBFkhfuZTkD-LaUf zQPhK*Y;5uC=>w89;LW55CkZ`Ry~oJeHg_DT7PO1XjqU7S{Mp`aMwN8CvNNwSG^%ro zEnR3Bci)ul@pf;A#mgkV@FU2}Udj8&@@on*@!Sc4?rA3B`;~oPjrf{X2b*PTD)AAg zGWW>Na5*FzB+s4dU*9)(yP06uCzbf&5>b_qzt|^A4#@-UHLW!EDmL&Vm;7TF+bF0d zx?F8P(YM!1Q}T(l;fF%1TP}XoZgM(Xx4MJ8m{2N^%HMkxHge8aEoZMB;|-5^D>E;A zT#351-(C>_m*TS8S%>Fk{cDmk-DsElMd75Ki$JXE5L5w)7(V3fE!%kR>fQVzzfZNe zvXWL$mZP6W|!HmkfR zk&cw3T+}NsH)h6boSxPj+cLZ`tw5m~1Cf!JZ*Pp77h@@JDkOiTG8;`>wbsqyvHN`? z0ohE2`;V1Pv0Q*)5wc8L*>e%XgQp0Db3cQ#L{J94nS^9qZzCZnY=Y|Vj$oWYg$M^m zoSqNaKbH&D@+fM?+Kjk39$XHGUIwH|{(1iTkHTir0s~vh_SmEd`bgC#=-;b<=_3g2 zjn4G-S=F+FgHsXwo`9x=Tw+`Gv%(m9a4Ldv3DSLBrLo-Q%`ZinP%rk6PXa6=B6oUI zb!)bwq)Cme{J2TFn*`5+OPl4{Q~l{6&4W*6DU3mzEgR2{dyQ@W9JlQVXebedqq#=v z|D;w2w=FN?hMAfUWku{RhxPziO-rvY3n%>mS83v7Fqj|H{ma2_LI?XWUQZnMqWkW= z@~AyWY4_J`^+f8&avX?6kA8K8IZ`yi8CO<$sBwc4;HFH5AdPYhVrm1o!ZP{|RO~dv zzyLWmYmcfl!?LiWJA=%9gv#`-My*cc=#SK6Cpr zh*}CRF*?0RT|aIKWi)Xm_>RbH@=4lYTPjiDWAxVzGV!Axa!AY^^s1%Ee=gmfpUf`p zbKzhtZ;t&0GeL537@kzXI@wI=l{Z&u4h?)EDR1#_d_DiDG$nK2CyuhHxh24gFU@nc zTAD9DH;Kx@$OWG28j*N}O!1L`5>&1i(lil*BPc%lb+`Ep={`=j6ur2@9zDSQ(Nmdk z%#;7c+yAF{cDt+}cazD_)p|`$W6xuc*Phpc?dObqXi@9hgcxJIs+txsRUQOvXI!C#JJkD&OdC^xs6fPaI%ut8IW zYGq$?__>_($P#m3!EDdKM!WOzXuTCrW)v_4H_+9Tfk}VG!9fnMr^LaV)U--}XZOcS z*80jZDV{;q!8rC)*WEXFOShUnjf*ImsAo`X zwb+_mXLYS3edHiN^z%-M9b~<@p{d!lUWeHX!TD&kUWJ)Fl9Dfx(U48^Az$RpYJ)_o z)t$8Lip|g3A-^g^9o1$=TO~t!ojzI~HdAx_G`v&lp63=v*Tp2nnGhJCkboQrYg+bnxP`hhrvd~2ykOv zQGeQ+t7qX7Na>_PIMYW(EXZV$*(7sBrO_f#L&mlAgA0a2A7w=9WF0D3y`d?2oDqQe z(uYNm1()eo`*cSee$K8$WAKiO_|w3O&-4qbQTn;ts%}5Obf_&<9wq8ysqQ!SJN(MNfqNAa z@6K)WdnD$gN2|=dTSg)Ku2uooCZ$i2y}k_@M9XWWj*=;^htQgY#eTeu8M*BmBF+>1y+R^J(1WIG zEliN22UEh`8F~8Y$33`7Sr-!J=h%v!o6cKd^oesO*rlI0FbvBzC5MWc`G6jL)tf1s zO$t!Y&CMtWz?D@I5*EHzlJ(%?U|`@Jpk*fVrkNlo!NBtGoBlm-Un?hj91j7VC<>0KJ^ce;?(4oR-nvd5`7t(`7tqm;j$Co^ z@II6;uU|(&L6N1wYDU|@yAtTUL;(<@J4cX2_yNFl!R}&Qm zlQ}7hbQOqRnH}h0`FbH>5GBDev_Eu*DaC~GA(uxmy4%wFOI`UhAqDz-ir|b_a=){h z_rSaN=H~gs3hY-sre%=I@w&B(!s^KV_^b4y*b2#JpqDD`VIj@xcT_VQ3hNj4jWd*lg;vO>gYqMcdpir72kG+D}#0hGrmap=K20skch}&bA3L4q`ms@;pxMkgP85xj=eG4X>SqW{D|p||MX;U)e?`W>+#FJr(>Wt zJ9uvz+lX<8C^( zhXX+ZvnzZWJz_M%-965&YWuJSc~0B!8%olq!d`Hz(Jq@1fxe$!W79oetdN2(;PuPh zI*F@JA{USZQMmrS`8Rx_xk+qvbTA-)o#4I&;JDy1O$%O*OWQ~k3~hRrA{K4kkv>M)rC4S8}4nTo+QG)LRRaJixZu3lU ze1h+F{=A(&o@76aNMxfveOrj*JLi0zSKnURi=&*rcsV0vGMI&@n zu>|=0itWdX2NL|Upm=*Kzu(S$^2vH3k4t;IYM<+HX-7Fe$a?hXKF-d7<~C0$($$Y=jYea z)kT^i+lchJv{Xq?Uq9gQGy)eg=r|#p7sdSfF!Bcm-aSO0N&}&SmOAFPmsYW62L{Vw zrPlBb6a+I|Edq2?51Ja6LS8Fb7CR8K|9sU6=X2+0aVB9^nI_st#&n=a&7F2T#?@*eJ}8n$_q6><5tLJhu|5!c5f0wyw1n93?!`!UB>)^RznPaj>{)EkmyAvq$3M!-o!?Wh^EmRjPkhM$B$sI7_Llwh9(nI=ZHxtRpm5 zR#rm&M{=j?Y4!G!kolD~S3@NX0@D;`TSc|(FkUAmO0am-D5|tD9XtLYX0Z@YhWqkt z_+)6BhkQR40u$LRzEqln2_7zP2xxoPo*Em|oSU0tSQfri=dkeWbfB=nevR1-EWXaV z2`rxROHF5pI4UBmS8j+jWilrRS?Q#KMnth-wZsjMx19#6ae$y15R#@pr^2OVWo2z% zT+ERn#Jlv#iQK;hL2C>8uY17yi=*ifU@a*Ttc9~kqwsGTiwzJb$-cHTSGH`dPNp&i zOkKpbt(hHVEL$LiZfgrJSe~3rH|~ct{;gj4_&i_7SYjD~b{I3iIay#zWclo@#Vi}! zzhy#b*D1;D5wpmm@Na$?j|4+&%@|V!yz0!B%Zf~-W6D@sTnw*yixy{_#(m}7kt!ZS z*M9fp;A^cZZv=}|R&%n^mQHFslN-d?4JWSBmJ1UPM0oDN3DAz!R?G=?pR>V3BXxCk z{_(-|s`dj0VaTCktvoGgz^Cl&c3 zN}g0p;BpCfk^y0gi3tHc=-Ta|g&&mSng2c^bbHX>&?k;XvR-N>I9)=I-S@^Ggkj0B0Ye**uxrm?7c)qhByYy2qQ&3WXwy5gX$p*3;^i z59Ugrf0O1(LQ7QO$%did+V=K};4z86Wgixv4%{c(0^#wz(28m?=QW97N^t&iWBHB@ zlcC*X&4c5OSuI|*{RkoAV1PR)SsIaGF{SU)KV4D!4L{vJ7`wWvA$z@+;r&~-jDVi} zD2c%x+I34I)lY9#0y$Vb=B8hSA8}O9O%*+CNgiDm<{n!AmxXjXG0Kk+iX^;hvVV}; zTwH{YYikyOLWA|6ZC0e^t2X{DnkpPxRxM`Telt-NOgl`oz*6R9Ulvorw7u)fyuh@* zXyj0Ut;#lM`y5UBNfO*v;C9wY-D5Eh&d#QsaJ4?kai0C~XaXWZRZeQl>~k-t7OVMN z3>HxoBf;n-zspe~D%FvE&sh2lSr4q{j-S|gKg4C@g6(L2#j0o8)l|4dcgsfx}QH02ry~!S-GP1&LWs|*r z?+ZQM`aIw7)A2oifA!RJ9M^T-=lMS0^Ywb2KRPaE04lZLy)D$KkS4_AG=7 z-rRia&oBM~U!2f+#`NqYW#U!Rn%DApFT~`Z)_92cA%DCS@snzO$~Z?r7tBT-CC>c9 zNinuuYrem~hzq8oHSO&oX*FeKPQ;!f|35vqfJ;JL9JncNX;siM)M-mr*&V+2U}qw^ z>w^2bemJ*68uN3kq+>L}iTHP7$;41P7x`N+Jhw%RL4_qZprc74jRjv9enzcGrM1C> z`}cj}t_9ltoAK$(%hn9U_@e8Wt@=IK^T?yQJu*d*N29z-6N%u_1d6(3;wZ2)d6buJ zI-7o|GDmJeO{zl1IegvLr?q)5mH(LAz&bU*m2{Rt=O5jA8fi5Enc77w-2G_bOfnWl zUVjtIw5B#y$FvzObGBS_BC4AH;g#TA7k{x(0(UurD0N-AoJ1vksSDd>m>mzvJ)tPeSm@W+DlzQl#I;dH=Cs*4Jk39oSn*EKj<|(v4kw*Q; zv(+);PfNdvNV8;cnTM)b!Lxz3mpdF`;{^g{ybNJn25f(v5_lgF-NyQ=_ae`~r8ax8OJM_Xvpr&TzECDAke1R&f-y@zWhDxOQ+RaWJ<`hp zyhOEMxP?(MC$Zm?36<9WnAUkbb1PYbot?dvtQ}cz$HK@ z{M*?$A_-@MC%dVztV`V`!t1#tWIB>=cPz}%evH|3{=QO;ZVCp@Xg`Wff1+C~W^9R} z?E#~i_f1T0?Ib2i#s5|`VBti;=Xws^>+1Zuug(hlB3jf@xmMyGQSZ6j03MU%vow*W zB$HRU#ZYvhweuY|?XbxMN{+&RN}tE{^z=G_*80=`{EDTYf9^Z~<&1f)3pip=miPL8 z$}$>_4QJ4@hQhCwL~CL`y1$BmjriOrVPST*EjCpu*y7QnS{P``cGLS~WKb`VVxaaG zH$Z+=DLyNn6+$VJrXFMtR<1QRW>ThD*4AiZ%hNVxh<=i$tw>zYbRfW`jw6@L#V$|( z@na>B*|_jUqXuJ$*AB5woY(#=w?eI$*2OwFT6Ne#eS>~t#|fXih|xwe9aItx28YJM z62#=`6iW9b-8l4eio}Lx5GMx*`Bdt5H^qBbk_(FoS+1v{9*1xfB~$d}`{82XU4k{F zv_GR|PABG`2(Ya+7}4tG$tC8ypS--w6cMwR$3(J{F-Yb}kJJTU5l}o?a_~Z=a;-EAlQKRo zJAt!qjo(9+Fsr@XsF3Cglx;OlG9toSM2_~d_MYR zw4r{cr6}sYXFex2E_-%=XiWE)JgymYMF2f1VQc~m8MsSB()ephl!@2(e&)3V$2KV( z(e4VTLeGqsWjUZOl|KExDZ|m$u7fvAnYiD`_v&SCtr+1+{&SsK2C}{TiB4lwttG!Q zL79nt=bLXF?}CJ_rY1EuoxzlW41LfR%OVxb zxmC94$1YT9VtitC22x#(+<`0XRQG-6r4d5$9{K2{|3ta=It`WLzGd`v8urvfrVM*8 zr&=9=Q%9&;8LT?`jd)=+eLJQ6wDer@_l#u^_A;PbgNzf0r6T={9mW%Fr))mejSEKl zUl~yI%kQzwSo=AK@_%ptgU#CP;bo7Cx?`#$S#Fyf++wriKR>Khm2sn2d#!HTPkTEA ziaGStzcpT`9IR^Hc)F%0Vf4a+`-83x(Sa00(;te<4jYRZY?l`8mRpF1*`m zPaL|O--H^*4(wT~;XTmlLn9<0QW8BTf{ktwp(uLi2nH8xX0!$tF%(w+I5 zb15|z)cI2OYnc_&>jooe!CMOimiFE6q~(j4i;WkitG~89D*rYsTOj)V^b?qlpf1)B z;2gxQroY1S@(u}2a6*eQ*dlJmp6eV3=%np`6&KNIOOtGXu)y%;uaS)D?1Rnwual-W zU-GY1R8-ThP^T=0x+Po8-p`=7U%WDFG;mmiYc`VS;hI|XY{H?i(>{TjSV11i)ZDAg zw;xVujF-NT(W(o~d}$0zHE6Vk4tJio2Ccf&*e&Z$dveX6I0*_PjoBxn!9v*QQzSGZ zQG8a@uk|~#E;QM4{GR$iNrGI#&Z@9l0qo9uMg5jyE+={%E{=RUta@IeP37dHXZ7=* z?-jC4wc6~~d}4A2Rn8w)TwiQF9O=9NgW@%RQ{&#uuapYLL-C*1(!!6o`?lX)_qkWl$GqA2V~Qh8&`z=^b=m!+ zSJ1u%pl^>0a`4@9lz_@SI>mN?-XZ^9l?lNMDHG2jOMX*^;xr$vVYpsZTHEU;c3oSn zyZeFBk&$;_-u_?(9`w!f0~lVG4WXV0lb?-?2vo~iufe0m8C!92ZmS;hzJv2HJWalL zQ}@Qn&B5&2x#9lJkkY`rb~;t7gDaaeZm>P`i$QmXwU#&XABSyx>=wy=c>!Ad-jUTnN(YYZ#gE>Af&byWj3!mZLWCf zR3nDrzDNf9#@lG?;h>iiSXX{})X8GL_|y19wFiW)JX%S9Yv(FwlX{lg=H#K`USCqo zO&hqq{K(I`K~}dWM@irZygtatdGOvj#C(k}AJkDwTL8yc#dPpR|TvM87KTbTHLn@m^+ninQ; z+)rV1IIM&hXv`6vdMr9-y5K%9GhO6d8x&b=sQkX@3%7u+cx!i4f`C!`0qXV)8a_c) z{LV|}#dA^}TEvh2%?~0g`y^hETnJ`yj#vF*GA09)F*!Ix^0=<$zBr_0WI7W{b2e0) zd?4|*yje`!_xbqmZYUNm@(nLCi;MNG-F>{dfzB|*tyj0s>G06Op<}iu`A4Ce>gE%^ zU^U$hZrqG|w{v6!Xo*qfr-}ytXn%^#IU--NMmL`3LozI40o>WnQrpdd<_&KBb{bh( z+1=o}J=mj&C+v%m_bmtE{9-`?TD4pQS-fsN6{rK}?k>4;-%PVoOs)K2s_Muc^XhQg zk9{@ta)xpAw87zoM*U&1Z9FC@dcEz=Ky;yxkB^v*P5vJ(P9Y@V25 zJ)NYy@&i5JV%|MiN_yk`Fs_yCx8)=64PV$o?emz;6bhlUMZEaB?nJ|ePCNaVZcl_=e^fRZQT@E;tD?WON4ZhGA6n{bvJ)B^Ydv{a z%`@wMd5@9pQb=O{o3E*N9ygGYMFwZKMc!5^;p>%u5$(ef-&mvFLHb={RKKb!>#MI+ zmIzb+7+r8WTwWzZJ)`D!W6YOMsXguw|At=dnR&2W=%XCNMNOom z2KJIcO&cbEU7&+FNk1h}x%QE5deKFET}7W4Dr0kzsJTcs_T|vX$Y8j|$shGWl5^ktQ8sC(E+7}XdwGVy}3^M7X?m1V{uXE~7m@0RJkIq`C9TAd% zv~=oMhXxA-%`}*u?x|uwyRJ3h?{OoHr3laHra!I;K18_(LpDsN481cxc{VV~#2qbW zYT9mX)G_Z}KS@m;yzr=S)0m|xO7H3KI%f~I1=3HWOXyJ_cKcMl*T**dv2#dcm&Mq5 zG?ByfN{Hy^6S3w$pM@8#7P?EVq_-)b=U#6JCW@qwKR~!vla02cUKf=qS0i6cn-PWK z|Ab51+7m}+a8T&W;gAi4zg#})sa$Kh7Cjl^q&eq7$%_7CdR}qrt%RAP|+05&A(zY&X zZgzx>j3`XElX>$kRhOlK<^X$YA$9?Sfzfz+j_E9e=dCm|3AEoUle*djh7cXPV~9sl#YyvCc4TsGyWQ3};Vce=mO#(+Bsqx$0)ODCRF}s$l9Z`9H zqq5ty+f6WN@~l?Kr5YdIg3*<_!MVtX>3P!>ds(mCJ_=`qk%fwoPDsE7aM>_+(1(yCo?E7#U!I(#9~6@czbinzvWPf`PyFT=q=H;X>#tqw323|)p84p8!2KKf zf`@a__Fn#7r*JdQSj@35RC-emF3k*xhP~Pu`2KMswBbTVMXxl46{|`gev;AA*AG<} z*{rS555UF@yw2A(N^exLZ+tfDVf8#|rcQQ^D0(x4DXgF^Q9I~@LcX>s#C|MauIMTkdu?Y07=O>!~DQQ+Z~I0gQqqy0QY;a z>R`x6N6b>(G~*I~Z?L)evHZs5`C&#_B);BEDK#rmUlPr{s20BS{iF~$Jy=1;{SLBXfnrA5%sLy z`e$ZrYa&H2z!&FZmbptPivI z*^1R4Rh_WBZfUtFQ##h3rj>3zryH%fRuml_{Xa{M01>Dt|7xi@%Yqm$NwoI#*f&1| z@!#O})kQV)#2!y*12Is?sZwK>QI0~7wiey>uNIxNOC~9H|Vcy_Cd)7T|)xUeL z_k~b?3HkdWpokb67WUpe;Rov`^h+7o?J?>cp%O$Ctu_taK&@l!7DpRQMVLI>T1}^8 zTop$fNp(22f1#RAM1Ox=l=_gm^m%=T`p?>f`{KqQ{Kd zB?8DpSqJ{=p=w<+UtmdfR#sQx-rsk~nhp#Mv;ZdJ|NW_|(gRj66a8*px#ApfGv;Po z!bv`xg+yX}mxN*7Cz$EZBY-K%KBqL=IOojAe+ek&kDDyHhWC^JSn3TgFu$dggD0JU%t)>qp z(|?(I(5u(Feem18w=txQ$Zua-$^j-86qNBFe}mAmaK^XLNrHc|o}>FY&rA6SiY?(5 z#KxY5m^T=gP=q4&WD^u*b`gH!q$o!xi7{(bFGgH8`Rsp3Dlz(ZincEkl{L4^irT|% zw%yy47BPbyuCH(VF@1oL96zyQMghBA$%iCM-qTGDQrn#b) z)eH55_rioZm75n6~^^R236=#|2&~`MbkGtt(fqEcd-k2@4C;dGu&Om@-*{ z?A+2`jQtu$U`lQQf3aW6(gu%V1vIp^uRgZ1se=`;oBn>e1IGF;T*_f6>FNC#ZnP0n zXjwvr|6T&7p4Z=&I27rg{C1&fE-ttXq@HArW(IjerPLU`93E&2i1WS$d4d1%BpFst z59Yv=U~dnwNLz%$9Si zZMQ~NMZgMFK9&+-_^EJ2=bTCs#!vs1XIM+Cz{=V#+8;HZ_?9{7_ONI{(6xD!r*f7$ zSz_>X0Y=LCE0pXL`>*(qOOWs$j>44v|5)t%Z^i~nB4rpxfD^vuv2OnNw<=D&&RgS9 zjCp0$m2>MkwV>+g?XOxeegk>*oFDvcv2tgqFn~WL$W`REZ_xk!wMm2zRAFU%t>bU| z!C(|wS)=yU%dxSsj9Go(Zu`18W10b?1iIZ zpfgKOzdSSkGyx_F(O4LXE#xpH{l6xg2?b>Jvn#L;w-5krVxNaf4wsyE9}UJ) zkc3{GEFtZG9fiO)C&iw~v*)D-x4Y!$z##(Z9D$3<8^Cum9-_y{DN_2k0#_egkaIQG5cLmt5TRx!bzkEQhG|RyK z^l5kN9k5CBo^32oU!EO=0lFwGjE!$a@*cty|F!WtW~E+nHBt21aJTKjLx+wZ;t&u( zO-xK!tQs36h+V}@eG<3w^Nas=Olz~8j!XGh1V1er1gf<6zMtFPgho&~%;{Y7K8%CU zx0RNA|Cjeqk5LEK#|(Y%n=5YUg-#=Giky@bseSwR@15`aM*PfO6mx5~#_sz6wKZ}< z1iJhZLHcFe`2FzaGuvK-{N3TUSRrxDfnCW>@FF){C)V!!&Jpz--0WWuTijT$^5{tZ zxncimaZ1#evxiJ3CD*WmcjM}7@$YLU%)zmR&i1H=pjT&tYk~WJu7pz1Rh=V+K}Z1T z9dDliw7^m1vmCY}6IgLa0!xLxx~{G$sBC*xcBg7Mi%Ur0PL^jfu#g-VMx4Ar;T>bNf>ufjrYBC7w+>x&RZi2LDip(Oc?~z z8k;1o2k5Vq8mlv){r6t3uO_XI@xm!<(b3y-8I(6#T3Svcm~qdUNYq1peU2aHnF!S# z(^_0HHDKU zUKb*5YZZi;R0+3mD5*c+ZW-(oniPdHxZp{9Zm8k>^DmobJJngSh9W_-gbqys=31VD zwe)q$JvUpHUvMJ~jIkExyHv(?WWQ>xS1(hgtN>KSP;f*0!^tZ-ANCdPB$T z+?&Tha+`9C@;Z__wOSa}u^&4tCNvP6*{WBO-r-HEx#F(?4<%}p4l2vHlTJzCjBCg+zzM3pCZ;r{pax8@1 z6Rv6mQR&TgpEHqoN~&FTg-#uB7t7XM4XttP?)pAKu-z?|KapagzQv4cOIDIdh#2^GJJ6K~b8nGO zzCPY3!Ew9ZFID9WTvORYhEsEbhU`<woPZ&Ra7xaa=Iqp zR-ZS1gh$X}v_;tZW&LxjuSz;%VxIL4q206+l9IKt5@8Px-JOH!<(>Zxv`peljSd~> zO$(UCKk3<_mJ8Z8Gn*@4TV~-j3jP?B$(fRR_Ti{Uitm#0SUsg+vIs}_`U|Pim3$69 zT=w9o(4@k8YC9Q$`FN?CdxEQq)^&G#PG@&N3WG}T-b3HS*yNI}mQscHmr06MjsTmp2X(Tg_W&-t-Q2-X z)s%v}Kz*f5(N9d&6)P&jrj%{S64liBHeaWeub*1R$5 z{kzCd{m35U6gvG>F+@)%U+O{b&wD+eJ=%0SUBAX3n6C7put1$X9cg-W=p$E+td4v0 zko|X;c%wOv&?|~fy}is(`HAxV071pl(#z2TffRGaW`VbPTru83x%Cd#q^Va?KnYdq znDmz~_43iKf24XYo9O~%kU~oH3&Gkbzi4|Tc z;W~if2imv%z(F-V!cKPW;$TnZS%vB%^Sa~`ZV^>0x7pd`1=G=Iv*F^wI#>X-GBqE+ zOS!DAR-gW%UkR5T|D>WvfXc(ZJncIL50YBp#>e>h8*5W6Fv{GXmV}zee+#lr*o|39 z1lSlU*Hh;4>BkXxoqSh|!wOGVMI2cl@w0wHwidQ{k2u)+eXv+@G!M0`j*26U(^mgTqmIfKgo}@v26)0ThxDI(GY+p7iz7F_O#J)&(hR?)`562G&~$5{cOtS zbqziDth)+Ekdj93ETId0UG*X2VE<_P>!WGe-2|JT@2=9;q#*Ix(z_>MjlW(!5d4|K zKO_;hLcqI*68dC%4&-SYjG&aPLgMPSr!&J*CQ(H+kx}?9)W1{QC*uf7k|K<(XV80k z{$2E!5r@pht4+QX!@Ps8j+I27wud7qbB}GJzyR$(^ZQ&E801}PUX(0l$~#4yNcuhR z!?%xFv{lbf_&0>DOD(djDk>-(*<{Gzaj1^t;XF=&@g{v_Y)IiV7bnuVna6$e7|aT1 z$vkyZyz_3Zbkd{-h)qfA#n|`FOBJLItn$2kN?M!BfQJxVM+l{E2=G!VyCmIWkris} zBUTO`Km2&RCv@E4?RKi|y@GRJTmbi0)FF%#18J>#(i*xGHxy`gP^6Tk4dwC3SCePq zg*$vXUoz=oor#vIf8BVfic%18=U)%`O2yCK739Tb2bxsPA!C(VZ3S%R2iv(XcD9;f z$j8q*8|~=N=`6c~52D$w%<7$~_bA4aJ}X#LQ8_KmARN-Bs1CP6xvH5jr=@~ANH;iI z>nH{Pfy8n?5-k%u9&DX zUF`P5vhb+XRq}cHspvU@@AJdabEj!8HqD2rqLxK6I`C|)H$8KSHSqF=gux6=kLqSh z_(?tZA}I+xEr7rlF(iw{yV3l_22eEDHZ9`KaMD-bf3x?Xt7{P9Y=lP~xlNnOLKl6l z`|aH4>G%qIZPg&QpLKjUAxOWT`XK~FG3%5@d^kx{3nBY5~Wa~7xG_b@?Kb2q@>$Kh6M?=;uc zROHNtwUlpCZu9Em*(oFYBk$^jr%%i-^0(3P?Tw6yt3!}Z!;P;@hw6{>KK}WR7~j^m=#^qc0>-a<2W~1c zOKSXHu7=Mmj_kdDV!4Ox#wqE8HR&7X9nrEt=CGcF`)Y{TgmAHv#Ym*t?9Yk89KIqQ zx#lL%Vhjy8A`sWHMes2l#yDSyuw>MT%ShcJ8$6D@+klD3plQnik2--P9FGP(FH*75xp}mxJH@gWH4>9{<;- z@GoXLYn>~EmC`UbJT!+o_?-E7IJ|Z(DjrEyfKzps6-WMucZyRs{Cs+5Yzd0 z5G^tBt7H|p!g$U@XW=d3d5j-ULkiDHW7$Xjlyc#_{D;H~La46`efq{wWC7y~@# zytm#h#AGcf8=sPW&m-=U%ov6|pv!iyAzlE80(EeW;HhiRsoduWPH>(G#qf$ik3oY*cg^fe{~%@EnJ=kV44xnT^ePo&01*@~sJ&4wj;&4#?FE3V-D_5^+TjvC1Z=6?_*4X>po@;mZ9FW~6| zTdml6^H9MlL?O95&jRG5u{?BUTmpPCTY>UOD{v%7eWbD%ZpuFm?(JlvB;{@@^{(39g1E2H*M{=beLc={$l zG=!WduG~ze*6{m!nH-D*XRzTOT4Az0$yXQYD7g&-?i%LO{Uom>h{cqk!2{qH^zg+0 z!t=z8?^9hJO&`?kh`40)7PKJqSrQNab_`fJGD!QnzVUCqQTKahBr0rMm*O}zb(}&_ zLx8#*M*MlZ#RkXuo6pkUXTQ2Ik?yc*A6>EBtH`xAWGa0Zsm9cqrsYP(YkC!ENZu?e1fUDgxVLYvy2N*dF+= zGlyT9{E3J$nFZ|L9U2}10ip(S;y%4pL_ndAR;<{J+MX7=2s;$R!&Z8ch2?x(m557b|4x)R8J?8@{6A zF{Y7@)IYHM;n$&74o&C(n}f^uByvZh1lM~D<7(4w4>-;&!*0g0wv^z~($c&MOqDgL zo)Td-vShCG!6K8qFk{2b4e%E_ukWucBh=VTjFx@w^r}Q20LhNEFUl+!_--`q^ri@A z!R%hWO6nX2+(){}fN1Vkkofv_SIZdhHf*5$4Z2dTu2lMGVHs~~V#0CxODFuq4)hNh z>&-H_UG_1l1DeN2?RNwa;!)CXSFZU<1z#YmTNv*k8oBY|_Nfpm%%i~um``zDj=wQC zeH?-KxVj($lgolxtdtaq*qF~r-x3O7{F~?e+$+e&cDk^zu#zA&A|j}@wKZXVO|#?q z2|jeSFD+-M+zCg_{taj);6wa5&%&{Rc2&8;OKtW-Z24J6C8~z*(BNAAWr0EZ)*Isi z(0fhrs+y=e!GRXn`?p73MMx^p~3?(Xt9Y4;xfG2v{q8NWLy4Pi!yiyGRb7$$7G z<-O|vYCW*27a^^-;aq6T;Urdrf;v}biW9>D_10jt5xu5g%HQ&E~7g=9R+c_3$7L!q~1sG#K=pLVj zA(y8kS?qc4ag8q{vNs^ezb#8+fgn%LxpEtXh!Sdy`Jcg8(A@mMN&-QikyL6GxSv~y zG=+lHXOKD7APB{vyM}KR9$j`3#E5W0GzVb!ZoF$H_orN-*b+$%_R7&yz5;J~QV#UM z*_se9!t*f6Mu5QWI60^a(c@OHw-M$9UG}*e%b#`XD$+06EL^v-Sv6(1$Dnuso6a3R zUhHY+%ImJwJK23+p)ej85*hme9}oFX2&6wlx<7XmDAq_K_q4?f``Db6P2s&yHeerMt3q|ncgQkuwNKpho|~RJ(f@i`(dM;NP zD!IB+tDc3L{BTh0 z62JuwDq`4yTR(+=4!T~#p-^Fjk0w#%As2R3MyiekfE~je5eI3Ef^JtOcqXqJVM~P# za*@wiJJdygz6n|} zhFpmrB+y?g=rUc$l#w~|4C&D-`(ah}xnBB>Cp{t!YlJ6~u_LDTn=+Dv66q)S(y`rY zx)~$$dZEtSFmLp1?s$5g_I5IsV;jFe$TeGm8 z1+|DE^QY9Qc(N8(3rq*bi1!xLgUk-O{Ik`x2$byo@nOtEy9bLCHtfL{!I#heqM!=U z|DL}ixaJ4vw0ghb(&az&5wLLlAUXyG=m(nQYNTM7YiI%46%M`}PIb(0!1rfQSfcAL zXTS^4y;+H7y{D`DHYusW%Bj;ZUqWCm^b`4Rp({L^7jMD%&RD2X<=BQXEO&78{)7^Pa$C+1s~$oopoRK0gad@;DtBQnjRALJ1LdJr@5_$d$FzjBpAlfAKjY$W z;32d(I8i_FA?}C~=Uv`v%QqaKcli2Vm2Pv%XY(BJ!{1TwD*X05c?*>)-5Gx1H1Yo& zHL6itn~36H{%Iz6FL%g-**~fHvCEoLTOa}smg~&5>R;_;s^O-uM28k%WE_^1y3^es z6I?9Z+VeqCb=kA{G%#HfzBN`5efEhAlzMJ|mvDV>^FY?6lsz_W4x2ZYrKOw~&wkV? zU1?%m#4hj+0e%IEqpe3EyQMS8FQ`<@an)Ne93R>NR~qs!O;4x({Q2{L7y1J%bcoBk zaK}{FY5>qcsW0Jbv#lW*_D$<4xkj?awYVC|^$o-2>=`$4tDtX1s?)}m8k`;*reAH0zVh?fsYy?)#c;uMOTC!)) zreN>3ikhDBw!+EOrr?D?d6+b>k$n&y+`qt7-RKk6%_0ACgUFH$A?F54DmskwRr%D! z_x7MZMXnmYElf19b1UdOO#On|A<2e%u%Aq&w`tOjKjsdH=gNR!>C*826pJD}%C8Jl z>(;caQ^$`?ZPdhBU7F(p5MF_Z^1Wi^;C{=K+kliRtMA*)=|9DIgoF?zx_B+0psnah zt20VR^#)}o^X5rr-36`IYp=9!s$6`0RkC-zr+uN%;kH&?qGJTs)oAVf6YpNp!Xq=0pC-8Z+Hld`QC zF`#lV8j}-<=!Jsg#A2V+?3ndaW_($p!gQC*G+vK%7%s07AhWj-$#7v-{GI75HkVfCK%?C9{ZyRMrjtglhu zzo~D#FY68*unBJ!(mRp@I;@Nfi6F0aDJ}he46M0l>(1o1a_^&U(TP9x9F8rTd;ry(Yi}2H1 zEu%`?1T&JrniT8qIy5j<4E6O@kGI8GMRD(B z>&_EBg?ArKeAC%2SxV#Rm<8wdMZXfHYs{Zo(@AkdGdWEwS*$rwET*Mp(Pz0>!XCm2G9^&& zbNHg0F7*uqJLKqy%&3GbO}2?rBg9T?bUijz?*|tJOy#P-#(qxP;z`|2anu>J3{A@2 zc>8Segszex*N|V3gz9jg_s0TJflxbtshXT2o!j`j*6trB1#(d$i#^+b;9jfO&ifD7 z0)+8T0k0$1k;8X={-M!Jf8dUww2kHrxm6r;*SghIjCxorPMT4#NmxIcm6E$NomEAa zxS}9BK~J)--X0>Ba09S}R-aWrkJSc;Zr+_6AP;8kS(;{5v~<2Ci7>(&`HG7QY%_ba z2K^|+CF%RUKZfrUcw^u%b)a-eTLE8}_rQl)E_mVIoh)zQa%@~&us~&V4(+bm09bj* z3mqfv*AlzhO&G6!(o;D}X%vt}*(yqTHGc6h8QGvGH^;e#g%*x1JD-ldHl*6aRSiW1 z{W0CHo3Nu+d}KDWqEVi=B#flcvX2)~_2Q-VF6VTO4l4(jRcUX-MZNcr{Hx9u7ETLt z;DiN}PP<9qW;i(5iWZr6#vaLQ&QtzsQ(UmI*Al)W%~C1X95}l@!6|A9IkK^}FylU> z`+LT%#EP52BvSq=(^v(ctZqvJcW4t!IZc6hQ;TYSx4h6EY%b`q5gbkW;6Vjg@cr-$ z>}ZQ!Byx%MuYK@1T|()8>*vT_l#l>`YQ75TOf~&d7rZTQw{Kzn`z(sp29+P$gv& zu)beG=?*5|C{ROx;@+KmZ`6k#Xn*b|YolY|6q(9@76!B!Etxa zwn$<{TLI+Qagp*42`q)pj-w-A8gU8xWM3+x5tKi7mHcw?*bbF7j%!9GyGU4IW`D|^ zFdz4HX4u6JA76>{XCn1IStPrV!FshzBqO+RNNKrPaQ+!_^27SdeOgKtGR@<(QWbMEUi^xrvbVcVx^f@uex9;0 zSxs+Xl06iyJ|xedkxrQ`_MWzHd!&?NH}P<=PHkiM$3Bba=5Phq$cWx6`S4wRGn{d# z3Q1)P)}lJL>7mt@v_2zcRV?yXc;;&M561WT$n%|8%GqS~GV01#v#)RVaSR0+6Z_$w zI5gPvJ}w5M5i*rULe&ev0w0XC@-7j96xQnKe&*AV!XCvk>v`p**hob4X2MJ*kNgo_ zDF&B+(W|Jau(7e3&eZHlS@Y2h4-K8!aH!XGZ2fbkkfw}TA|h+jyV5Q@i~juSxX(Bl zkHAW&2oP(NOw__IGDQf`sdW~LH8$GzSapET9LStl-Y}^HN!@Slr4HZp^?I|tBUHDD z&P9hwmhnU>_eQk5gwle}>RKFKB-NqBmMoZDULMw`NETzpEdVzw8Nts$y;QOv-92T$ zC`Ny9G1=L!>>_#lm9mGMHnqR%NWU=Q4wr`FrEBp&_bxm5TsrSCXX!aR z-T`idcZzRLwf=l@)8mS7VuQs+nKYXQpGo*{$v>|e%Y)Tc2!6J2A2!+_i&Wwc1W>p# znxTfUD}&h7733b?l9*mk-50^q zs}C-_s)T-92KwjlmBg&erZ>DVk`ehz<>HxXhJnT$xk2*&#xVVO^Ra5Jtdmf>g7-yTZwR>V)=b^weo#1-}gNJKD0`|@HAzSHpmh^HH+&T&r*f%RJ2QcRx zM^Yugd`v&inwKYwhvkn}$8@>yrycSa1Av{W`Yee1`51hL0Zg>^YZ7ehToeMXD6(7C ze(>NX1tn!LA_Oxy6lMgxz+95kr5is5JxsPVe1%g9KkO2r!eIjNSS(dZ0gMnbhD6+_ez#8-h12TqweL_G#m}_Y?RaU>bJ_g{D)7>=R38y=hyTzc zctiFZAG-mx%=p{flP_#*EtY7FKp_G&G~UsCpS7rQaMJ$`>=+S|au!+cN#EgMFzT7& z?OG<|JIFHWs}yP6iLGCUU6_=m;o4)$z;$!qENITR(0D+cI`{Zz(%J7nENYm83_8+9 zb~>jy?-WP}ZR(I{3zwdfdEaRW!o=}Gy(tEXXh8Jn4=_jC&(_%JWB0cy?Pms-u+c=; zM?|1DBfhjEPh>pv$MGr98?!%^gkbl$H0;-=F5KN9{J^q4x=Y>xubmN@iJ78$QqL2y zb$i0|n^3e9BqPL+ z=ZO!&@NLDjp+~Otq%_hOx|G*#%~3E5K`05pxUUC{Iu@>S9%_4fM+=;f_c+@OUA?a} zH2ZhpLziE+U{W@4ju^`zORu1k2TQM&#|FLHgHMCs;8w>pnVTnqc$}P2pR_t0XwWOt zqxoKIaetcurfq9mpqZw>T!7W!61#=u)?^~aCwo4F$=ic&C^zB@j#>_akMSO51bsX} z;S%43CU%8r93wRPNyrKJBM}#9AFkLa<={2V_MOolOeOMemN?ATZ}z083IZ&Kr0Njg z$@Sg&(>BJY!3BE4Y_ac83!&tmAXok6YKx1y?OY}C6=*K<5D-q`=okNS!0sGiLZMJ3 zbvI*L63i|WZo_t^Uc`Rbou7nnF-BE8joJfqLy}$Lc8|Tt4R~n3qh#J=crTY#@T5;U1BxV?bmz>tVa02@Ze2~LjQZFHOQX&AE!x#GL^%7SW zZ=c}%^4fg&>1Epb4g8ew8{9)suR+q`h=;BPY;)dK4i<<_nD>a#sCOcVY+1@1&`pwr zR6Np|uQ|8dI&YqORA_$nu{y{}^J@1_?1#Y5u)jGAcH(3#`}WNwO0qB$sVTp41!ihT zyoSgi7Uw^6CSyI&Zmu$0?56)aP$e?5u(F;yapHvBr#3#;ZOYR8%U!o`;2fzAL;{v^ zC*n-Y&0S#wPLVgfhvCt`Le{dBgE1f@F}i8>Mr~id5G%b)Nl6)6db2;47BddesOKUN z-}4QiKlNqir{H`Y(KC-f2NtE>r+yu1FlVT`gWxy%;@!Y&H&#(X1x<8h#0~kY)lIC? zH)p5!t8d zZwH=-oC8$57U#u7NX8CQY+XZvwA*1JTS}7vFowwz${@Ol-8LC@9<<4o)fGIvkW>cQ}?USWTE{o z%kH2BGXb@F&PU-DV=_Q#3+K0%S(xm;yBN=0Jkiu7t(UMZpK+i&@;PR1<0zzx*yFW} zp5Qid#L4`@;EB$Zlrrk5Us_8EL&eTK8Ea&?gTsiOt%Pt;JQ0Z)Ys{B4#E&9tFT=p-g zJ+;j(ljMf%G9Pu;uZA~2snHgQ@G+Yl4qsgx%8Q-jALd}N z__((#X3%18u|+#g9~AF3)Ya8hu`1H7-NSz^!Pe)N;Sks3N59G)Fx)5=nDe4aN&p}B z0A0&%lzY1<!I_oHSxe=x24+1*jQMwReBEBl)G7YlfH)GbCm5>%q3RZT4D!oWQnhqiyV6) z=KJ~~i zON~1XvR!(5daQA0&jlr^6$`BOQE&7WXn}agmU--&0j>G88?|Mp(- zU2A>U+H8o>NK?Nd88F-Z31BN?bLtpKHiq2XeoRe8cC)Z&nt{I@T$vEOm17E{N1gfS zq@hpXIYyAzRVFAZRLeL^FRyemlpK&E36-xhvxc*sNostcB&x#vFfr!iOLf+2rLB9E zdrqwEVOrJ=SFxWuc6D$=aO@iD0aUIyje^i-LIjz4PBlY=61U=pL)*lT za2*t7azkZ6$d)a#i>6#U=~Qadme~vT-n+!!F*~jl`Cf6axITHhE<2iC;rdLHYl-gU zp3~Iumsd}w+7q#WiLL0Xx3{;4#qBIuOkXgXlIm#ukCu=#DZ~I>wkqISkJ(_HRo!ujUJ$7OXy0{GWJm!AnkjuNQEDPJXTzQOZ$3xo6w_QUSq`uc?63feWf;Eij(HNQ^|EMxeG zjwb>fkEvYTu2?fr^<9Icx>Tb2#8k=jvXxMoaerA#17=j}`TXT(^)KndId_ei9d-|E z5swN-bbmpPvNYH_K|afYt&6XKt+#0ABPTY)Z=c_nNSMp1-3Z2hfx9px78|?yv{BSm zFS%&NXf=yD_>-49sNEg#<60A?X;=ap5*>=1`^&@`sLw_-wnV0ofEL@!h}mt5W}meT zlZDt_Z4hsg06#?%D6;;j15MUfY-3QgJ)I^rp~^86p>Xnm6`)>Lxf1Uyu6H0@xW8?xb5(XS)jq>raht*XXO%~0ONiSgiVf3}dV3`2IZEuYp zqIn!T16gneJXr#*t@HRA1Dm@uHZdKu(r{x6L35LsS@S&onzw=$yKS|+aq2J^A#Xh% zYojlm1$)i6ZjQrxi-RI^z4L9s>j;7?X-*~tc_jwoH)br>Hrng!?-p&2_*O5EH$}6B z=}m$MzXit>IfN{S#lZl%YbPYK4sL?!dNO#)C`MS;m0xk#-e7c0zo6M97yYd#$?A(Y zg<#b9{c+*qko#BI^8-}jS5%Fym}#}ZIB*tuV{wAK4^`?>HJ_H9jB+0pm~>|*5vzgx z6SiFjei}n>6zj?wX3O?bqJO?zoIjc>uPjI2V7A-t~B1$w;s%KuwZU})+Q$35?f_C8H@C)@ufyvg#{k84dU^J;L7AY1G5ZQ7enQoA`|wqczyJD zCO7+~8wejzXs`6N+wh(#=8k?a5Xk3rF=t+`YbfT0vvF&KIv4PeEwdwr26!#MxK^9b zvkrRTWfo#hKmuMV(-&>_(IXRH6#p?dXEyPC@}P5kA?^+&FLpSK-hpRkC=p?-$a~_VwnU}pxK3QtN+XEYtIY}rk{+q%I^Uo_Aa2v_@s9y(HPE0hi6Vz` zboE~BU{TZT;_9nGmdinAGsPcrqJ&ZIe6#_GDA}aey2;X@M+USe7T~V)&68@lz?RXE z>_btX5tZSC8xK4rVQ|x8RY&nY(tm;PCNBj*ww39U3Ud&ky=KIo4RG-}L`9bNN()F~5MQmJ{NLRRC zA6uqoAxwC^M^~0IbJuL_Y1*s0j_H*ns&A zZ*TcKt8h#vZ?BP(qk9!fi&txS&L7cxeJ_54hbm%9;;Fv?NBQkzTb&mlw?wl?d>1}O ztbtn{l2Ol@eJcYaN{+SD=qa=qit$WqsOlXoce05b0LjqmwQlL2c9C$*w8wvb`xxHZ zS%vE+e1mn=`F*)9c4FVJSv$XLDToSH8;;0QM!Yj=55$Bl?2NYc5>v4UvH z*u`5;*}cKGa*DFKkuG++)ogkpvB4ln%km2WmTZ5KY!cB4RQgf5qb&>^_6fni^f^L{N=B2c`8P9M99}&7hT#6@`uY# zCIlLNK1<@adxx-m|Ir9hltv={m~vv3N@7C*ql;>2l1b*(vk@E-dIZ_N@CtqooeJl{ zr?^kXd3H&l7SEqt7GjF59v7}#Wf&qr1P6^q`u5X*=g8y>JKmP z3-xadvkOh%9Ak7R8xT?c1yo-`NKp zGI`k}=tUf#zyE1q1QwPX3m*4_0#HwhtqDvSj6EVl4%n*I`{-93R`Y%!uybE0fo;z) zw@B8LHlt`39AVcR&7^GZ&v?}6{G;~+1%HVYYV7keYeRO}6Gh;v|CHdF45Dn#&Dtus z)9gf2tCFVb35N2tZ25gM_KD&-DA(;2;G$VF!Ebyt;(8?xf*PMJF8qX6Zwmfty1m_Q z7;2gC`=|#KO!XZL23$`8X7&1XT{t>dv0mvYduunR;U`_Sn%51XKNHz8KnGHCW5>(Pwy+nQ*l=EFCp>uQ74Pf-; zLlgAT7I!jQ2wx7>emw2@wOzgoUk1*Ey3hE&@qq#i3MNoLp26rvo%zdVv=)ff+c*NV zfKx#ZIv7CFAj1I286P~;5)L3Kc7If0dcu}u%?iznv8*T~?kS_<$P<+0e{Mf0LmBsL zt$Py3M?8Jt-azvDMjf|m3End6Ku@i9B*KR?Zr)xUmZ6c)2Kdi>g_7$V8{Vb!CG5{6 zxpWSROpq5iO%^LZJA-e1w2}d*W1AoL<&x0pv_)XmE;TO~(iX1@ovFQXClntMlBx8m zQA};?A@012o|knB+Z(nGVQ(+5Do<+dh?y&V7K3z|mAGxhPUnS#Uqbj?2LPdj^L{+@ zH#OUTysMv-*F)md8~b7Ba|0w))@w%ty{_ZBUK(t@2Y05nc)ld-{R`iA6+aD!Ne)ya zZFFkO0~jQnQ42jik3b#5si(+F*Zf=_fv$Oo;H87;mlZ@df~T8+S$a1b1>Apm`J`-% zd2lbwD3Z^5D(t1|_iJUaO@+~&n3vn#W}yV&ldM-8`H)*s+-@snos#-y23M9>+L&3k z+<0ABIHvEJ>6ZxF#DIg=M?nK9nXrWN!*{x0{QdFjG?ydZiKbps=;TF#Gr90B)S=|s z0Ok6#lcDkrUcB2?V@l7Lube1bs-?9lPvf|qL!f&$<;j_Ymk^-_;sNdV%<}c6hDo2> zt~kF;9@pS*n&;SgRf}|csSp+t4i3cO-Zaa(Ao*n`__5-U@Mj|17cN{NN>j@sl@8#x zbE}_fqVf~CHSKj!iYU)0TyNr1IM>I8(@v<9i!-uw95o&_^S1!ZiVIPVYdpBN+I2rZ z+XxwdgD_4*)1_Dw+S(EH+>l))!3cyq^%eC)nlO^DV zr7+pmzK&6Ws^Y3?zk|Pd6~ymNBK*1+=`4iV*=8-A3zhhri1qx%R?WUdINeF@dlDw& zUs_#tvH0Ntlj_q6*nPE24?-h9IHx}NcwbDh_~ACWt;sgUwj?T7qH{|dL3N87Bs!yKMyO$k^YdjZt=aZVBZ!^v zhqdpaJd{OfM(n;m6}M<<&59kC(vCv-oQm7DTxMPRLmCl>@cC0~`5lC}yVBLqfhplO zu~VrkX;mON?pFy*OKm+Q{aQf!>vL?DSZ>-^RW~GhyrOjgFG$_AAE|Cg{(79w%E6lT zlHEP;M&f56WH=3;1}kp;rlnVGeOO8w0FIL+P4pFe^W_|a9PBDxf4xkw4EO^hXwYXE z{<_}G5ScSUCfpv*sfyVo+NtHan9u*r0zj~xFZwV*2C5;aX?Z;^O9zYzR;P>GfzA$Z zN_N$;-mF94E6zm!^MjxN`v((wLD4aoOH2kDP&uCQf&mUTYU&qr1rH_`zylUwK9la$ zi2Sof5AR?ZqLX~f?PE&M7QcE2%fCL!Ke9ZgbY~VuX1h52C0oBGs=rVtrYvn1Fe;Ge z85tLr*-pzJf)jewP{!#ZwHrve3CAGr^B27dXATE{5RGvL$+$D&EFd$7H2MezA7lfb z?R9?sh@DeOqG=Cc-+Gi;&Ped^Fvu0Se)nmQ^9R@dnsQuqq|! z5RIGHgo(&v2@a|SVKYb*@}87C!?^XTobbEjI&1j!g^f$9oZ&tS85XGJl!_d*)3`OO zMVs|1a&r-z4KP1sa(6tSyb_Q-D)7#&9AHMqe#V|YyznKkFv$Z&@4YYDA*OrVp)zZ} z4vOqyGLj8lc{*DU(M8lb#>Edkj5oG9taoZ=GjoL&%D6bR%j|0j8WS(5(_J~>GyZTl z0+-Su?&At_G4DNRIw}#V^69?XDF`-=BJI7VWXGD)`tOa2muyGZw8qwbF>Jh$pDUh- z3Z7?+E|DnaR;e~Qc$DN3AnrHijeHrf^QxSwhsKUQ-1z+J%Po0Xk~Ao}oER#${mn@5d1bKd}?| zD-P?~y)O(Ggt6E$rK&IfW!%LS7lfCvW6C4P0*Zu-Db5Iazw_yLy}=&zckb{Lk8*J* zRkYMW$M-Q7%F74syb}30V{-B~W%=F+6qfOHBhCS-=#5K67)Q$;lHkt!(4LC^eNUzS z?{ep;LA=MnYu?`@HM14H3UCn^0^w0nQL!Z-H+iaJLPkY{{`3?JVMHLJA$$DnlOBO3jcqz``gT@7eW`~Q#4HoOBZm(26qG<0;PxsoQy z5Sn|Yd0k&JPq=bSDK2XJpdl15`Dt5#5Z4bAk+FoIB{d&QY)GsheoY1*XCkxwI^K}@ zXV1zhu(KjCj5FJIrpuw#bYWD|m&z0LKaJ(?jXdBAlHp3>KaeU0*0lq&uD%qzgN(BT z$Gbl|6+cXPOkMV{_~CWSmXdIJ9uj-^rcfDzk%Dh);6>PK+ioqjo^@aFEHfc6|7K65 zGEi*cAeZ*?psTY5;gv6)ty{MAzj(~JCy=#a&nmkKitgw|qe*X{URSH6$cDtohjf<@ zY48-J!IH$T3uNmr-Ay{n5q}wd$k6K#G$lt7lzkp=OvJOA{NecgIk#zl(G38KkpX$V z`yYSIQ5!@>BBJiq+&C!VG6cMb3kUuwvAqx8n*E6>TwlrR5GKM;0?u8Qnsu{WtcT=8rs9TmiqsdF zeTsF;xb8(NaF9l(&+URglETNBw&YkV%1SQN__cixOVacIdzXvyq%c`?I9-i2Ce*H(ARy3^X)RJq$S?=cvOuHK{;HV-iTfM=yL`J1off zmxuu5co|1mTZ6hsBql@|Boj+)EXK+d&U|&Z`L?*|_`8pR55!^P!6(6y{hjPQQ0#z#0cn#XjbRx+cW4Espm<2E<4ZQr{Z|+G{H7gnrRGq1 zz@~-gU2^i|*cbYX2MyOTyw^X=vf2N3Iq1iM0Rdk5Cf#0OI)nk~#HKL-!q}u}b7GK~ z6If4WKfJO39|Fnh@gAjI0Mh8$z%X3Dex3YAuGc|V0#tDPr-i2e@5oYZ646Mk2kM|C zfV@P68PJn-p8HFv+t+<1ho^%N=`kCCm#UUMQn>9MY#z#uZFlRqyeH%CqW$KNcW{qF z4hNM-&j;nbbp#d+sx-snv=>jMw(XHs2++7%&OX|lrxtUo@<1W6Y1Q&5E2+&XY7Sv+ z5#PnmEs6J=lk804%WLUQ7RxE~7lMYiXCh>75bFW$0MidnfqflL90Ia|0Z8Y><W{sX)rIrrT+aJ# z@Ydo_24eeHv8#M!9@Lpd@55^Hj&HaLId9v@d%{WgnVi?L3#eynVci=ZB(7TmSlg_5 zA{iJC6D*2+@fm?dAMXNxlMg zN7atGm`6J9C0Zew79U2_5u!C#nbp^9>l4B5K)>CnSVl^7jWFm`5tiPH|d7=bn)l=UyuM}Bv4YAp%oa{Cx;(iW)+EQ`bgTP7o z;|piDkV4hi1pAzO}g&@WVQwb0pl@i?4V2ELSD z7KSzMWE%Hv=ONZL3S&dvKj+gZf29WDqjD{8@9B8Q5Z+-!MFI>J{d@U-bzV#E?x&cs zklgnevi;@pU<>xWyg#2}uH3yZnJ3F9cE;5WsY_BDQ%TWceL^<9lmdRAh3fr%PH9m< za7c_Zz5g>!2_k)3P+->T;c8@lWwcWRT-c#+eIafFelNoz*3xd>FGX1gM_p`MZ7)%$ zJXn^$-?@n0D_0J9xS+S*oQu|4%|$G5L}xDMmXdjQXk+;HEE5MAi}w4Ts27~py`_WN zO~|W$>+4n!KWM9|=No1>ujz)qf2bm{YXv&bSHHy!Ui5#>6&u0vdjo9KuVD)ulqUAfV?i1eQ4 ze6Ejn#mNAI|4cXueL{V!w6xp#R{POvdXYR+&|oagF1)rSdCzP`U?D2@TZlL@bz-!} z$Kdyb^~u+o^Ur^1EPf86$e$z=U?{6}zyD?b&u(w0?vZ&Dd*3`{kqF5nq(PX*5RVa! zyJlIo{SG7Ay0(c%;m}q4_KRvZb6lzhLtE_%_CM%-6HaZ6%pg7UU~Xz5Im--VZwnbu zGwwA%8XSo094Z*nI-jX-Q+uzD{$rtdDExl6ko7pC+?mY)k;7lN!9&Nv@iSz!aI3 zC4VOT5q0{s?cBTd9x`pKPF@e<|eY$(vby)Jr)YW8ZTD15$^~XlZsb!g{C!lCOc1s&i51k0! ztgzgfEA$3ao+k3g1a=g!MF-b;AAvi%KuLKC69&(43sf4Q&em40`!pa!Hw7H`U9fFK z{-u&5>S{w2+Pgw(Dpdy7Z%JC+vU?1|Wl2{@QS7dB4%k4u%4fZb7Bd_Q>%Q%_0kV{! zJOk(x)~&^U#S~6`H4XLYfNX?tn#(_j43}kCIqaGU$?HKhn|>OtRT}7-X3k)ipl2Xk zi~87A-Osr9%8gNKW1t=LUenaYcqqDx0O?XxqdFi|PFvHRhjzBxoI%5;2jBZ61l@0H zhrsCH67g^{p`wzkM}+b88X-PTwgEcM5-qafVM_c0-t%9$6N@xKELS%BXiWYat^;oJ_*Ia3Ur)}Za zo$%@4m50}(Ll(KtU8|NfdUID=Tr}}qlX=z$&Ek-OTVW#MreKJ+;sSG1oMM6VmbE6_ zPH4&RaQ-SEzwyH6d{kFWkyjHc@z!4#v0mjkH z16;JgCv%GC{wG}s(+bBQyO#3;XruXO23O(aJMW(_$MfjKJEwvMfTG>=;45U;-vl~e z>9?w*e`0UXUxz^teTP6Yr5fU$033oS?T6_0!|;Mq9y&%Qv^Ru<1=h-^(Qn`ImqEAgjKR<@&KkH8vIp z;iaAZ&Hh%wHbGYRGT;F{b#HJ23Q|}DcZ4KU7Gip0;2Ea9D@%tFD$PI)tET((%tx$e z<4@~J;kxzSyQ(^HKf9$XgRc~~zq^&awZHAzMQc}FW3WHYUM1!{lacbo7-Utus1|}`5^OP>|GUprD&L;#Tpgb=a$3xgxVCIW6 z&E*h;;`5?t)A7bofbo+Vj^*7y>;e_6Ah|eRv~YWfSgmZI)~vuh2gB+gjbR9MN>=sd zidkEY=({O88ADR->E>wIRUESRst0{zXZqbrb@8Ndays3_cLH}g*nQ-a5-*Noa5WAu z*tpB>tihE0YyD0O;$Yet*!)<4%xs4Xhp+08V!i8x ze!MGA`p0&DC=Qmf>jFU6b7KHPxXZEK zWalSIqL#OB2Vj9sqCP!{oNE*^>&~}KC=~*2fB(j;)E=W3PJm`flQ!45dI-aQ0C7Oq zEkr`U-a4AfBe`OExzH=TmZ!WkNC7LFsUFZWJeg2e(lkqQ(y`hzOE~{k zq;b@H@jU88Wke~@I4G+w^JSO>u8c!usGd9n`J%}t-O`pUbKkdIHbm+h1I#_AG>nxU8RJTW(l%$~xtS{4pq@zmq<>i?APe z+p@qW>ttsK;dGxz4d~f@*M|v!`hln5sh@52)6wR?Bzo(6IngqaT1tq?wh@ZMU)5I=6`1@+@{s705US z+^8&EaOtt+ke()&=;pfn;+}M6#13(5pUd@KXSX%D2EEJb4mR<)GzlhZDSuLoWf$X( z&RC80DP{>EMNvh}y&lcQ#BT?@t|rzb2kI6jwlLYVlUhHElZit^stuE2vQZEA9folh-g;^- zF5B3ELH01)uWG)v$^Z*J{X(Vw4^%fOe!lvn?X?+oA)xGq<{Kb`%SY)K2#+5>u61E3 z{)lB07Y0g0mjQR!XBum&apEC6Ni5s(K zw?@C&$XQ75h2&VpA*0I2*F>VeWpLJK9VSy8);9J zDZLpyOPUM#vffaladkauW8dI8!S^G(iN^ziBXDx+OO~G}GGotiMw1&xh~>;mleZcb z(xD*^wAc@Xyt4@_9!5*g0O4T9KNGt^Dh4L?D)q6&#)x#xrn>Ds`sv#TKRLxw&Tp1` zJ{cG4;4CXRnehDq9x)xryLM#)BAS|9scByg;EH&ZIGUi2ojqRY#@@e`l4Dt>xEryQrg+5(H?_}!z$Hz3`Bx%U zIT!G@zwk*{^O2EyEx4|CFJV%$w9ewTDsgLzE7Wkg5(Qon`HIcNuHbAKnZ(FkrqAW zPRM%gx?O8N^#^9UM=*yql=SeCR~4tlHSNO@OL4N;fB`-aK28D7vS9*r0Jo#9R|B!3 z7{A@@6cMV|$vlJjnLOOti&}kYHV?Jm7KV}*6`~T|-WMh>*uz~tXArkmg*ufrKSM-9 zjT<|~Nq^~j;S0RFeo;{~S-@|bP?^lj8|ld?9Hc2(a=wWS{w}bR9(x!FHv_f>Z=TE( zX|1~)eg$^)O^LmpW!VHxiB*Osm>lO+y&g@CRnSb*ldC6l9;D+_v=~^jAXI6G(F3Gn zc;Ni%t;4X6@XF6*R|M>e)NhmALXWfyiS>M`IEf)jE$ClZGtDI(NoGB4814e!%3^RP*|Rpy%P!C1!Fa)w^it{uK-0m9?<%0}5H<|C zaa!WYO?`kvY029Ec2mh5aQ2V^$C(Vs&ngE{ZQSg$vs#<}{$y#Srsni}gTvllJiY*I z@y+c8iKoBK$`|F7<>cgK113c_IJ6+j79X5OrE9qvwXal{*x}1(0_{?lSlmfo|EBVn zNIwiF33>gUj**~_5&RJk*#xa&DhLfb;)w=K~6S-xD9GtOz!N~GroSy+mCXe-Qx!fR)frZRJ^L>ky8tigGBQ# z;A#%L4B5oQPR6 zaeZP6jW&`EFrXPfybnaVmk&v(XgMDdr-0qrh#>K0x|uk{3t8nhY)*((*t}OKvdfM1tKYYSJTnzp^IV-ktKye6@^1J zW^k`hK^neBA?~J^ubA@nve~j}DSG`t>^`fo%{8_wjCPc=p!{}gn84}%3pyJs^YELciz^;0Y$)hGFetnB?=CZzsJG#Re)Bmpm4(LyvIp?Ni!?Sqep zS@fVDLI8G1mA%aWy#^&rvG#_j{;ESz_zOg%jFMO+K*LA|@RX@1sAqaYX$H}@mS{e2 zlgwgfP+S>nFGMtYDzY5W2Jv8^qOua>wo$%ffK??8Yj7_EO(fcHw;KS>#$n|#0M-b{ zsl?(T6;MIQ4H7xYSz5}0h8Kqs`Co-&%)+x1L&<<19tOnnr zkSwUi$UnfO6r(6Iw`WH_pcJ{&l@@{SB4hYJXZdc@N&l!;9iR@eV<&u?y zwR{*kf5eg~gaCZ0n47mcGlJ&0PFl#p6t2PC@t6vTABkVJ8DM1>^bi|`m<8KI^sZiH z!QO~0-v9-8chzPNnbbPg{(vpuuopjuCkbK-%Bw;$m4L8>H);vBC+Xchtq+tf?=ydY z7!Vv^&Tc8t<@5w-8I*x2bj_O#9snc!pVatJ7Ox5DNLw3;UPK+Hb{)fW0Bq9n6wDnR z7>?V0EF=lsjPe6HV+nB>5A;W%Djg3!V~X7Nah3zN-Ja=&VclwWYU*}{8mipBTUW!M z8q@k7@jpO`V~+o}?9>_iN3yjV=Ap>1JPsgvP&dJQ&*Zg*5i~csMITiVB=!8VKqrqc z60^j=~6<$j4U%xLNiIK^#fr@{+A=YTGbd&4!*S-S4?&^C^ zBX=%HrbE1J?k-akiw^$sS-|o^r?KxHq#yh=NE4tO6CKR5Po@>oI`k=1q%pBXN_5}0 zDREbJra!@%O>`S-fdil(dVZpqG)yF67QE|D0vO4~>uHBuvjC4yoDzuaCFuswufL%R z_?!k$g8i&OR|Dd2=5dI0VA@x}32D<&Y813L6+kicrDOdqJYZw~46gqiGGq>LP`i%- z&e}h<;$K*5P*0vQtRRN_yA$~~39 z@!$KU%0D397lidUK&kS=-*g$?%7*FJM?{9{XV_kU?D@qTzu~5|9!GQnE>QE|{TNh0 zQG`$*84f3ez=}{sM3NK`kbmRC^BIT#MgkD0vT&$@j&neT_hi7~juw!k>xB@Q+_B#t zu`&EJ)c*?lKpXZ1^f0FVDqY$SJ{^tUrmsS0GWc7B)2ZMyvM)kjC*MmpZZ0#I z`CZJB>N(-x-H1g&=YLc2v^mBAY61&x`#aUc!;fuFYY3ZwD&-UC%0O9wv*5Yl5rYIq zghRJII{(|-wmE4JAYA|7n9By$Gm)(BJ`2X2kg}_$8|IyC`Q@P3eikf|zdfTUfzJoj zg9{Z@-S#*4-xG|vrgQ8#%v*y*QTylS=6<_5mJ|H!|1dtTf`U8cQa)gZLW$mQ-Q8+; zHnxwte!|Y56rzaVqWCH4V*!fSnxuMjbfq7a+L?{0>m8zGK)hG?5kSHr=3yXtvU_)Q zC(U1xA@cmxKRtc>_MIydj~3fOB5$;OF4)y?q|Og-$aJg?(40y9$56{y-=bkdK6AAtnm2V21p3i1gTdoRKHh-r6d>cm`@4e}Va z{H3s_Mx#5Fd2!fRG{g_a+FNU@D%-0@hQ8G&gj%N%CJM$OCxu+@RCBTaTa8qSGI#I>RI%`CnvXQYgfeCXi5?V(q$$0+6N} ziZkbnC*y+3Vaq09tTup5=7b_5xMO$Tv~C~GSWa_-0n^gEHk@I5h7$XmWN6|Yu~m3A z$mY_XJ6W-wp9K|Lh4uUF7@>|>(0Uwg=+-7lExRkczq?YMk{n0V%0GGvt!SiacfWcm zke4{CTV5cjuBn(L+rckB(3qq%@w4LGH5G&Pl*x|2lVwnE4fd8gNJte;B}Gesh7PRm zg(J8ZWTcS5_sMa^zkn*275n=*W&5VYK(A_P+{W0u_WkC#4C{4_f%ojB4Qi)P80UqpWb&tdBn0d`Va`kLruDt>ZL7fClI*_m$;A0Ww<^pz>A$1A%RfQAIrfVh zvjBS;d#An8eKt}L!|SJGk*Fv$iCe2Ds}h*pvz0MxbhNhf?gaw{$Xf+x(qBvTf`c}3 zN=bedI^O=awp6&_iE~MZ5nQ{|J_+N}5#S&bpR+UblHiH6GaZ3D0$-^q`R}Mnvbafz zJw#6vPI~Ip*jbX;UtBIBH;4+m%Dy)kFzi1b?U`d?L~qq+YJn84)}ENnd_LU(C!)ym zy?vP3QpAA2YTu>ar42s3<(!dMxx=+mwqu+%fw~^|i9V&cM*#0#?j?PrGSO z$uLMu!FeTmh;DGv3nXkRmi*6ApI3g=-C^>lEfO>Kf~0_N{^ktId6MOr=FLWT1VO6m zby|=w_a>$H0b%%+bwF%mDbSe({rSRUEJt0!3vp^-c6w{XT#XX+tkAG%(TvMv&TzzT z?Y`)qCFhX*pnDi)8?0=xYT81_c2~uppK5D1cgkp)*1xpbJj(Uk4h_k}jL(Xr3Mx;h zR2q^Gmfz0z!Q@VWI9>~s0jKsx0~-jokH&iBA3p+{r0L#NpzG|S$=PQXV%D_Dg?1?# zn7CIs16t95?;*@`T}w+VbQ#@D@E4*Sq7e`U{k+ZdzQRe^Mh=BgJn6{OEpr##Z%!(M z8jkz%lT}c#1N!CTBAB?rP$>+x`IY6pNVKVO zCq-kDvHcP56(RYxEbw_kIa5k+M!{m-yA6+!oT&oFzzvovN<3+J7UyUMU8V z(b&~8({Tk~xW4vWJR-?2#C?D$&)vzFkt78jYA@FkcoWN4(q)P%5UQuP9m$7GQO)I0KHY3fD zD&Ik4m%)O4oe&f%Bg4KWj+kU$)kTh&J;cMeJ`=(5)@65;k<4jhz9kWDCpfje@AZkx z$m|zeh_4T!{x^DYuSRu1%`!7$@&lfz=s21twj%yTz{-?0h*E_?eg=#6W+astUbuU(at-L3fD~8wMo`d|6`%`==DXFSSvHh(~ zkzmv5)RJ{F_9VHg!~FT5hzFXn(wit!k<_-_F;`%oFhENq8K7nMiQOW2HsF&qgToc) z?G6;F&USk^)S#Hc94!(&;lrK@$sosiBsb^|Oy8L~(~YC`N0hLR*h2(2FdX5hp6tyU zu*_d#uil42?C-S9lE#&WD6)54M>3z_l)wd;@R^P>nwd7)yOd^VS`-e~l2er>uGvu< zzZrPF{yodpc0^(dDENI_YK?-uuKaFo&1O6Q9F%UsW2ejE+w371lJ36{Z^R4M@67X$R_5}-lgU=Sp!scc zgu3!ks?h1&IU2PrZF(xWcFg7FN4lD|-IF~-2(8$``owCR*kLH*qhM309hwU!X`ScL zE;QGARPx*?2T3JQ`r{W_e<5xJ4qK3@W^(|tqf;Nenfq{d3H3`tvT@P#Mi?zZoy{uh zsyZ>;(&B4*=S$BPO4oQC6xaMMoOMN!u{9-4=_inEcI+&EGA};Q;$f)DFg6?ur3)2G z10uo#y2#+)Z@lV|)rb#bN=H$yKv#9Z3M z;S`1`j8=Dyp4>IQ9KK$NQH4H_nWts3)Y&6v*6c0x(%FVQc!cB-B`E`;2~~03uinspEG+QL~pNgp0u@~tHo6fjws4b z@8JFooQ8qq-?+P|;tzX+gya}s{Q|TJyPzIxoE{Pm<(aTNSq`#5`Ap0?fvi!qWLnJk zC!vigny8%bV)KpQEN7Xn2Y()m)m68eh3_xJt49OHw0leIl3K;s{Wx_hgv+*CELyj= zf|aG0jwT*JFf$EonS-R|d&7c~HcbZ2X+44fF+=B#NuJW)^STjXeI7r0p z)T$+^wJ2J}_IE9@p447(aq0yax_#>e!!VINSRh{dQ+2c8p(>(J7lTA-mwR$_vHzIB zs&n$fijh@4tK)KmOG1EGx_5cu7nQ;%i~)5KyR-LLPUcQ3@DOWZb?KkK6)|9&sqS*1 zXKL+P1%qI)qm29Zpp3NOhpH2eFgtArkfuCEn>G+bFD|sOO=GMfMio`r9nr2P7|}&b zQ@qMZGiSisZ2QfoR5lN9LRntKR$`@rWQB!pcPYBTca6)1w>w8?w?^j_nETR&NPa*8 zlLxtS0dyn|3%*>)>7C7X!&NsiheCzHju`g1Sieva5|e_gNl+8T>cO2bri^Gu;nE$O z%&d?R5pM3)Ri!+kuTYR6TZVeR*V|n|RZ<)~*AYCkSo7?SX~WyA4ZDTC7hg{t6~tqB zvLL&~L-f7f=p2%e-jLfU(GpA|;j}L-ZPa*azA!dR>J#WZb{Pxa4Zk){Fk-rsJuBg= ziofZvBg9$e+@|B9W8b!K`Xt9+0`oG|e?jfjH~o3#WNlM)YqO6wm{+?<&v;9A~h)dJGO8gA5wr5gbxeJ?}PegAy7t0xw&rFGdZ10gr91-fpN+yCTE; ziRx(7PyQzxpzdBnjt1l{!Fehwd(Mx)xgaH&r`XA#wd7(o%7qMR`Qzc{yJ~MTZ}8wv z#wr>sFzQEuo!g)2Tx73Yj~<#L~uE1wOh=mxP9RG;C|EFb^Xo&A;7H}z{Ie1^7#1}^jzm}#;Wnxvol@EZK< z_}lL3;_K0sThUm^z=I(lbJ=_?zY*nT5WVph2FjaoSb~l{OE8El+_kip#=E|qi2Uwd zrcz&yzNbqq7Opl$i&Z30@kt^rkKzc70bUU@PN5ZZ*hzLEQ?VEs{botQCB;g5)t|kjf3?iNjo9u+UE% z?xJfkE!t_q#6JFs68sH3b(R$t{ zc}`4cC92zltCj!u6H|%m{Udc$@SS%RL4m5~qlWSn0kf|mKw4k#9Gf%I+0n=#HVUR5 zO+LZa*Em-Uw%&Q0rL1-SnE(J~^8CDOEx-;@GPrJHQ@rIIV;#hBo1-lBs5x=^1@h*C zwkDs;4z9qypreauS<#|>b2IO}8Hk7W=8*E~Scp`mW&AZ}CBLfZ~ zyt`Ectf6I>xl#(qbbk-S%Z+=5VQuSvof>%k}Ceszxe>HT$u5* zjH88v16}9wtU#`kB4Q4X)jO~cXp2Fqz%-@4$%p--K_GM_rampmax[km] GM on +- **K_GM_rampmin=30.0**, Resol 0, + +.. math:: + \overline{T}^-_{kv}=T_{(k-1)v}-(2G^c+G^u)h_{(k-1)v}/6, \quad w_{kv}<0, + +where :math:`G^c=(T_{(k-1)v}-T_{kv})/(Z_{(k-1)v}-Z_{kv})` is the central gradient estimate and :math:`G^u=(T_{kv}-T_{(k+1)v})/(Z_{kv}-Z_{(k+1)v})` for positive and :math:`G^u=(T_{(k-2)v}-T_{(k-1)v})/(Z_{(k-2)v}-Z_{(k-1)v})` for negative :math:`w_{kv}`. Note that our estimates of gradients are based on values that are mean over control volume. So the estimates themselves are not very accurate. It is the combination (of central and upwind) values that is accurate. + +Using + +.. math:: + 2w_{kv}\overline{T}_{kv}=w_{kv}(\overline{T}^+_{kv}+\overline{T}^-_{kv})+(1-\gamma)|w_{kv}|(\overline{T}^+_{kv}-\overline{T}^-_{kv}) + +will give a fourth-order scheme on a uniform mesh if :math:`\gamma=1`. A blended third-fourth order scheme follows for :math:`0\le\gamma<1`. + +Compact scheme (also the Parabolic Spline Method +================================================ + +We need scalar values at interfaces. An elegant way to find them is to use splines, requiring continuity of reconstruction and first derivatives at level locations. The result is + +.. math:: + \overline{T}_{k+1}\frac{1}{h_k}+2\overline{T}_{k}\left(\frac{1}{h_k}+\frac{1}{h_{k-1}}\right)+\overline{T}_{k-1}\frac{1}{h_{k-1}}=3\left(T_k\frac{1}{h_k}+T_{k-1}\frac{1}{h_{k-1}}\right). + +The boundary conditions are those of natural spline, i. e., + +.. math:: + 2\overline{T}_{1}+\overline{T}_{2}=3T_1,\quad 2\overline{T}_{N+1}+\overline{T}_{N}=3T_N. + +This method requires three-diagonal solve, which takes the same time as two vertical loops. The name `compact` reflects the fact that the equation above involves stencil of minimum size. It becomes the PSM method if used with semi-Lagrangian time stepping, as in PPM. + +The result is more accurate than PPM (see further). It is of the fourth order as PPM on uniform grid, but has a smaller residual term. Those who learned piecewise linear finite elements may see some analogies in the reconstruction procedure. ROMS uses this method for vertical advection of both tracers and momentum. + +Piecewise Parabolic Method +-------------------------- + +To be written + + +FCT +--- + +The FCT limiter in FESOM2 uses the first-order upwind method as the low-order monotonic method and a combination of methods above as the high-order one. The low-order solution and the antidiffusive fluxes (the difference between the high-order and low-order fluxes) are assembled in one pass (in a cycle over edges for the horizontal part and over vertices for the vertical part). We experimented with separate pre-limiting of horizontal and vertical antidiffusive fluxes and found that commonly this leads to an increased dissipation, for the horizontal admissible bounds are in many cases too tight. For this reason, the computation of admissible bounds and limiting is three-dimensional. As a result, it will not necessarily fully eliminate non-monotonic behavior in the horizontal direction. The basic difference from the FCT algorithm used in FESOM1.4 is the construction of low-order solution. In FESOM1.4 the low-order solution is obtained by adding an artificial diffusion to the high-order right hand side. Using the FCT roughly doubles the cost of transport algorithm, but makes the code more stabe in practice. + +Vertical velocity splitting +--------------------------- + +As demonstrated in :cite:`Lemarie2015`, the strongest practical Courant number limitation is imposed by vertical advection in isolated patches adjacent to the coast. The code numerical efficiency can be improved if measures are taken to stabilize it with respect to sporadic events with large vertical velocities. Unstructured meshes may even be more vulnerable to such events because mesh irregularity can easily provoke a noisy pattern in :math:`w` just on its own. FESOM offers the approach proposed by :cite:`Shchepetkin2015` according to which the vertical transport velocity is split into two contributions :math:`w=w_{ex}+w_{im}` where the first one is determined by the maximum admissible Courant number, and the second one takes the rest. The advection with :math:`w_{ex}` is done explicitly using schemes mentioned above. The advection with :math:`w_{im}` is implicit. It uses the first-order upwind (backward Euler in time). This method leads to an operator that is diagonally dominant. The implicit advective terms are added to the implicit vertical mixing terms and the resulting three-diagonal system of equations is solved with the standard sweep algorithm. Because of this, only very small additional costs incur if this algorithm is used. Although the first order upwind scheme is dissipative, it is applied only in critical cases to excessively large velocities. + +Operator splitting +------------------ + +FESOM2 does not use operator splitting at present and takes the horizontal and vertical fluxes in a single step. However, from the viewpoint of increasing admissible time steps it is worthwhile to provide the implementation of advection in which tracers are updated separately for horizontal and vertical contributions. As is well known, the sequence horizontal-vertical should alternate with vertical-horizontal in this case. This work is planned, and this section will be updated in due course. + +GM and isoneutral operators +=========================== + +The eddy-induced transport +-------------------------- + +FESOM2 follows the algorithm proposed by :cite:`Ferrari2010` to implement the Gent-McWilliams (GM) parameterization :cite:`GentMcWilliams1990`,:cite:`Gent1995`. FESOM1.4 operates with skewsion (see :cite:`Griffiesbook` for mathematical detail). While working with skewsion is convenient in FESOM1.4 due to its variational formulation, it is less straightforward in FESOM2. Besides, the algorithm by :cite:`Ferrari2010` provides an explicit expression for the eddy bolus velocity streamfunction. + +The bolus velocity :math:`{\bf v}^*=({\bf u}^*,w^*)` is expressed in terms of eddy-induced streamfunction :math:`\boldsymbol{\Psi}`, + +.. math:: + {\bf v}^*=\nabla_3\times\boldsymbol{\Psi}, \quad \boldsymbol{\Psi}=\boldsymbol{\gamma}\times{\bf k}, + +where :math:`\boldsymbol{\gamma}` is a two-dimensional vector. In agreement with :cite:`Ferrari2010`, it is computed by solving + +.. math:: + (c^2\partial_{zz}-N^2)\boldsymbol{\gamma}=(g/\rho_0)\kappa\nabla_z\sigma + :label: eq_gm + +with boundary conditions :math:`\boldsymbol{\gamma}=0` at the surface and ocean bottom. In this expression, :math:`c` is the speed of the first baroclinic mode, :math:`\sigma` the isoneutral density, :math:`\kappa` the thickness diffusivity, :math:`N` the Brunt–Väisälä frequency, and the index :math:`z` means that the gradient is computed for fixed :math:`z` (it differs from the gradient along layers, :math:`\nabla_z\sigma=\nabla\sigma-\partial_z\sigma\nabla Z`). In terms of the vector :math:`\boldsymbol{\gamma}` the components of eddy-induced velocity are computed as + +.. math:: + {\bf u}^*=\partial_z\boldsymbol{\gamma}, \quad w^*=-\nabla\cdot\boldsymbol{\gamma}. + +It is easy to see that solving :eq:`eq_gm` plays a role of tapering, for the solution is a smooth function satisfying boundary conditions. +The residual velocity :math:`{\bf u}_r={\bf u}+{\bf u}^*`, :math:`w_r=w+w^*` which is the sum of the eddy-induced velocity and the mean velocity :math:`({\bf u},w)` is consistent with :math:`\overline h` because the vertically integrated divergence of :math:`{\bf u}^*` is zero. The inclusion of eddy-induced velocity implies that the thickness and tracer equations are now written for the residual velocity :math:`{\bf u}_r`. + +Although the natural placement for :math:`\boldsymbol{\gamma}` is at the cell centroids, it is moved to the mesh vertices in order to reduce the amount of computations. The vertical location is at full levels (layer interfaces). The horizontal bolus velocities are then computed at cell centroids as + +.. math:: + {\bf u}^*_{c}=(1/3) \partial_z \sum_{v(c)}\boldsymbol{\gamma}_{v}. + +The vertical bolus velocity :math:`w^*` is then found together with :math:`w` at the end of the ALE step and the full residual velocity is used to advect tracers. + +We compute the speed :math:`c` in the WKB approximation as + +.. math:: + c=\frac{1}{\pi}\int_{-H}^0Ndz. + +Among other factors, the magnitude of the thickness diffusivity :math:`\kappa` depends on the resolution :math:`r` and the local Rossby radius :math:`L_R=c/f`: + +.. math:: + \kappa=\kappa_0 f_{\kappa}(r/L_R), + +where :math:`f_{\kappa}` is a cut-off function that tends to 0 if :math:`r/L_R<1` and to 1 otherwise. The resolution is defined as a square root of the area of the scalar control volume. On general meshes it may exhibit substantial local variations, so smoothing over the neighbor vertices is done. + +Isoneutral diffusion +-------------------- + +Assuming that the slope of isopycnals is small, the diffusivity tensor can be written as + +.. math:: + {\bf K}= + \begin{pmatrix} K_i & 0 &s_xK_i \\ + 0 & K_i & s_yK_i\\ + s_xK_i & s_yK_i & s^2K_i+K_d + \end{pmatrix} + :label: eq_kiso + +Here :math:`K_i` and :math:`K_d` are the isoneutral and diapycnal diffusivities, and :math:`{\bf s}` is the isoneutral slope vector. Its derivatives are computed along layers, + +.. math:: + {\bf s}=(s_x,s_y)=-\nabla\sigma/\partial_z\sigma. + +If layer interfaces deviate substantially from geopotential surfaces, for example, if layers follow the bottom topography, the slope vector can be substantially larger than typically found on :math:`z`-coordinate meshes. Mixed derivatives in :math:`\nabla_3 h {\bf K}\nabla_3` operator in this case can limit the time step :cite:`Lemarie2012a`. To maintain stability, the term :math:`h\partial_z(s^2K_i+K_d )\partial_z` is treated implicitly, as suggested by :cite:`Lemarie2012a`. Appendix :math:`app:isoneutral` shows the details of the numerical discretization of isoneutral diffusion. + +Equation of state +----------------- + +FESOM still works with potential temperature. The conservative temperature and TEOS10 will be made available soon. The estimates of density by the equation of state are made columnwise. To facilitate these estimates, for each column the arrays are computed of quantities appearing with different powers in :math:`z`. Then they are combined to estimate the in-situ density and pressure as well as to compute the Brunt–Väisälä frequency in the same routine. \ No newline at end of file diff --git a/docs/subcycling_instead_solver.rst b/docs/subcycling_instead_solver.rst new file mode 100644 index 000000000..a46d53243 --- /dev/null +++ b/docs/subcycling_instead_solver.rst @@ -0,0 +1,58 @@ +.. _subcycling_instead_solver: + +Subcycling instead of solver +**************************** + +Semi-implicit treatment of external mode has a drawback of suboptimal parallel scalability in the limit of very small partitions. An alternative approach is to use split explicit time stepping when the external mode (elevation and vertically integrated velocity) are time stepped with a small step (subcycling), and then filtered to remove fast contributions. This option will be added in the future, and at present optimal algorithms are explored. The description of this section gives one possibility. Flux form of momentum advection is used. We take + +.. math:: + \eta^n=(\overline h^{n-1/2}+\overline h^{n+1/2})/2, + +since it provides the second-order accurate estimate. + +An easiest approach is to run subcycles between time levels :math:`n` and :math:`n+2`, with subsequent averaging to level :math:`n+1`. + +The contribution from the elevation :math:`\eta^n` is kept in the predicting :math:`\Delta \tilde{\bf U}` because it also incorporates the implicit solve for vertical viscosity. Then the compensation term with :math:`\eta^n` appears in :eq:`eq_barus` below. This can be avoided if implicit vertical viscosity substep is moved to the end of velocity step. + +Instead of :eq:`eq_baru` and :eq:`eq_etaU` we introduce subcycles indexed with :math:`j`, :math:`j=0:2J`, with :math:`\eta^{n+j/J}` shortcut to :math:`\eta^j` and same for :math:`\overline{\bf U}` in several formulas below. The simplest form of subcycling looks like + +.. math:: + \eta^{j+1}-\eta^j=-(\nabla\cdot\overline{\bf U}^{j}+W^j)\tau/J. + :label: eq_etas + +.. math:: + \overline{\bf U}^{j+1}-\overline{\bf U}^j=\overline{\Delta{\bf U}}/J-g(\tau/J)(H+\overline h^{n+1/2})\nabla(\eta^{j+1}-\eta^n). + :label: eq_barus + +This is a forward--backward scheme. + +Other forms of subcycling can be used to increase stability and reduce the number of subcycles :math:`2J+1`. Many of them are discussed by :cite:`Shchepetkin2005`. In particular, an AB3-AM4 scheme (see also :cite:`Lemarie2015` is demonstrated to provide good accuracy and stability. + +On completing sybcycles one is at time level :math:`n+2`. In order to eliminate possible high frequencies, averaging is done to time level :math:`n+1`: + +.. math:: + \overline{\bf U}^{n+1}=(2J+1)^{-1}\sum_j\overline{\bf U}^j,\quad \eta^{n+1}=(2J+1)^{-1}\sum_j\eta^j. + +The common further action is to use :math:`\overline{\bf U}^{n+1}` for the barotropic transport combined with the baroclinic transport diagnosed from :math:`{\bf U}^{n+1}`. We introduce first the new baroclinic transport by writing + +.. math:: + {\bf U}^*_k={\bf U}^n_k+\Delta{\bf U}_k, + +.. math:: + \tilde{\bf U}^{n+1}_k={\bf U}^*_k + -\overline{{\bf U}}^*\frac{h^{n+1}_k}{H+\eta^{n+1}}. + +It is then updated to the full transport velocity by + +.. math:: + {\bf U}^{n+1}_k=\tilde{\bf U}^{n+1}_k+\overline{{\bf U}}^{n+1}\frac{h^{n+1}_k}{H+\eta^{n+1}}. + +Here :math:`h_k^{n+1}` is an estimate of layer thickness at time step :math:`n+1`. + +A recent suggestion is to replace the time stepping in :eq:`eq_etas`-:eq:`eq_barus` by a dissipative one modifying :eq:`eq_barus` as + +.. math:: + \overline{\bf U}^{j+1}-\overline{\bf U}^j=\overline{\Delta{\bf U}}/J-g(\tau/J)(H+\overline h^{n+1/2})\nabla((1+\lambda)\eta^{j+1}-\lambda \eta^{j}-\eta^n). + :label: eq_barusm + +The parameter :math:`0\le \lambda<1` controls the dissipation which alone can be sufficient to remove the high-frequency component in :math:`\overline{\bf U}` and :math:`\eta`. It remains to be seen whether this is sufficient to fully eliminate averaging and shorten integration just to :math:`n+1` instead of :math:`n+2`. \ No newline at end of file diff --git a/docs/temporal_discretization.rst b/docs/temporal_discretization.rst new file mode 100644 index 000000000..7283ab0c7 --- /dev/null +++ b/docs/temporal_discretization.rst @@ -0,0 +1,193 @@ +.. _temporal_discretization: + +Temporal discretization: Asynchronous time stepping +*************************************************** + +FESOM2 uses asynchronous time stepping which means that velocity and scalar fields are shifted by a half time step. It is assumed that velocity is known at integer time steps and scalars at half-integer time steps. The time index is denoted by :math:`n`. The advantage of this simple arrangement is that the discrete velocity is centered in time for the time step of scalar quantities, and pressure gradient is centered for the velocity time step. All other terms in equations require some interpolations to warrant second-order accuracy in time, and routinely the second-order Adams-Bashforth estimate is used in FESOM2. Other variants of time stepping are explored, and will be added in future. + +Thickness and tracers +===================== + +We advance thickness and scalar quantities as + +.. math:: + h^{n+1/2}-h^{n-1/2}=-\tau[\nabla\cdot({\bf u}^nh^{*})+w^t-w^b+ W^{n-1/2}\delta_{k1}] + :label: eq_ht + +and + +.. math:: + h^{n+1/2}T^{n+1/2}-h^{n-1/2}T^{n-1/2}=-\tau[\nabla\cdot({\bf u}^nh^{*}T^n)+w^tT^t-w^bT^b+W^{n-1/2}T_W\delta_{k1}]+ D_T. + :label: eq_tracert + +Here :math:`\tau` is the time step and :math:`D_T` stands for the terms related to diffusion. The time index on :math:`w` is not written, for :math:`w` is diagnosed by :math:`{\bf u}` and :math:`h`. + +.. note:: + Note that :math:`\mathbf{u}` and :math:`h` enter similarly in the equations for thickness and tracer. This warrants consistency: if :math:`T=\rm{const}`, the tracer equation reduces to the thickness equation. + +Since the horizontal velocity is centered in time, these equations will be of the second order for the advective terms if :math:`h^*` is centered (:math:`h^*=h^n`) or if any other high-order estimate is used. The treatment of :math:`h^*` depends on options. At present FESOM allows only the simplest choices when :math:`h` vary only slightly with respect to unperturbed thickness and :math:`\partial_th` is prescribed by the evolution of :math:`\eta`. We take :math:`h^*=h^{n-1/2}` in this case because this choice can easily be made consistent with elevation, see further. + +Although this formally reduces the time order to the first, the elevation is usually computed with the accuracy shifted to the first-order in large-scale ocean models, including this one. + +.. note:: + Other options, including those allowing :math:`h` to follow isopycnal dynamic will be gradually added to FESOM. They will differ in the way how :math:`h^*` is computed. + + +Elevation +========= + +We introduce + +.. math:: + \overline{h}=\sum_k h_k-H, + +where :math:`H` is the unperturbed ocean thickness. :math:`\overline h` would be identical to the elevation :math:`\eta` in the continuous world, but not in the discrete formulation here. +The difference between these two quantities is that the elevation is defined at integer time levels. More importantly, it has to be computed so that the fast external mode is filtered. FESOM2 uses implicit method for that. The point is to make :math:`\overline h` and :math:`\eta` fully consistent. + +In order to filter the external mode :math:`\eta` is advanced implicitly in time. For :math:`h^*=h^{n-1/2}` we write for the elevation + +.. math:: + \eta^{n+1}-\eta^n=-\tau(\alpha(\nabla\cdot\sum_k{h}_k^{n+1/2}{\bf u}_k^{n+1}+W^{n+1/2})+(1-\alpha)(\nabla\cdot\sum_k{h}_k^{n-1/2}{\bf u}_k^{n}+W^{n-1/2})). + :label: eq_etat + +Here :math:`\alpha` is the implicitness parameter (:math:`0.5\le\alpha\le1`) in the continuity equation. Note that the velocities at different time steps are taken with their respective thicknesses in the same way as they appear in the thickness equation :eq:`eq_ht`. The approach below is inspired by :cite:`Campin2004`. The equation for thicknesses can be vertically integrated giving, under the condition that the surface value of :math:`w^t` vanishes, + +.. math:: + \overline{h}^{n+1/2}-\overline{h}^{n-1/2}=-\tau\nabla\cdot\sum_k{h}_k^{n-1/2}{\bf u}_k^n-\tau W^{n-1/2}. + :label: eq_hbar + +Expressing the rhs in the formula for :math:`\eta` :eq:`eq_etat` through the difference in surface displacements :math:`\overline{h}` from the last formula we see that :math:`\eta` and :math:`\overline{h}` can be made consistent if we require + +.. math:: + \eta^n=\alpha \overline{h}^{n+1/2}+(1-\alpha)\overline{h}^{n-1/2}. + :label: eq_etan + + +To eliminate the possibility for :math:`\eta` and :math:`\overline{h}` to diverge, we compute :math:`\eta^n` from the last formula, then estimate :math:`\eta^{n+1}` by solving dynamical equations (equation :eq:`eq_etat` requires :math:`\mathbf{u}^{n+1}`, so it is solved simultaneously with the momentum equation), and use it only to compute :math:`{\bf u}^{n+1}`. On the new time step a 'copy' of :math:`\eta^{n+1}` will be created from the respective fields :math:`\overline{h}`. +We commonly select :math:`\alpha=1/2`, in this case :math:`\eta^n` is just the interpolation between the two adjacent values of :math:`\overline{h}`. Note that :eq:`eq_etan` will be valid for any estimate :math:`h^*` as far as it is used consistently in the product with the horizontal velocity. + +.. note:; + The implicit way of solving for :math:`\eta` means that FESOM uses iterative solver at this step. Such solvers are thought to be a major bottleneck in massively parallel applications. This is the reason why many groups abandon solvers and go for subcycling+filtering algorithms for the treatment of external (approximately barotropic) dynamics. Such an option will be added to FESOM. Its elementary variant is described in Appendix. We work on more advanced variants. + +Momentum equation +================= + +The momentum equation has to be solved together with the elevation equation :eq:`eq_etat`, which is done with a predictor-corrector method. The method is largely an adaptation of pressure correction algorithm from computational fluid dynamics. + +Assuming the forms :eq:`eq_mom_vei` or :eq:`eq_mom_f2` we write (using :math:`\partial_z` for brevity instead of the top-bottom differences) + +.. math:: + {\bf u}^{n+1}-{\bf u}^{n}=\tau({\bf R}^{n+1/2}_u+\partial_z\nu_v\partial_z{\bf u}^{n+1}-g\nabla(\theta\eta^{n+1}+(1-\theta)\eta^n)). + +Here :math:`\theta` is the implicitness parameter for the elevation, :math:`{\bf R}^{n+1/2}_u` includes all the terms except for vertical viscosity and the contribution from the elevation which are treated implicitly. To compute :math:`{\bf R}^{n+1/2}_u`, we use the second-order Adams-Bashforth (AB) method for the terms related to the momentum advection and Coriolis acceleration. The AB estimate of quantity :math:`q` is + +.. math:: + q^{AB}=(3/2+\epsilon)q^n-(1/2+\epsilon)q^{n-1}. + +Here :math:`\epsilon` is a small parameter (:math:`\le0.1`) needed to ensure stability in the case of advection operators. The contribution of pressure :math:`P` does not need the AB interpolation (because it is centered). The horizontal viscosity is estimated on the level :math:`n` because this term is commonly selected from numerical, not physical reasons. + +- We write the predictor equation + + .. math:: + {\bf u}^{*}-{\bf u}^{n}-\tau\partial_z\nu_v\partial_z({\bf u}^{*}-{\bf u}^n)=\tau({\bf R}^{n+1/2}_u+\partial_z\nu_v\partial_z{\bf u}^{n}-g\nabla\eta^n). + :label: eq_predict + + The operator on the lhs connects three vertical levels, leading to three-diagonal linear problem for :math:`\Delta {\bf u}=\mathbf{u}_k^*-\mathbf{u}_k^n` for each vertical column. Solving it we find the predicted velocity update :math:`\Delta {\bf u}`. (The vertical viscosity contribution on the rhs is added during the assembly of the operator on the lhs.) + + +- The corrector step is written as + + .. math:: + {\bf u}^{n+1}-{\bf u}^{*}=-g\tau\theta\nabla(\eta^{n+1}-\eta^n). + :label: eq_cor + +- Expressing the new velocity from the last equation and substituting it into the equation for the elevation :eq:`eq_etat`, we find + + .. math:: + \frac{1}{\tau}(\eta^{n+1}-\eta^n)-\alpha\theta g\tau\nabla\cdot(\overline{h}^{n+1/2}+H)\nabla(\eta^{n+1}-\eta^n)dz= \nonumber \\ + -\alpha(\nabla\cdot\sum_k{h}_k^{n+1/2}({\bf u}^n+\Delta{\bf u})_k+W^{n+1/2})-(1-\alpha)(\nabla\cdot\sum{h}_k^{n-1/2}{\bf u}_k^n+W^{n-1/2}). + :label: eq_etaa + + Here, the operator part depends on :math:`h^{n+1/2}`, which is the current value of thickness. The last term on the rhs is taken from the thickness computations on the previous time step. + +The overall solution strategy is as follows. + +- Compute :math:`\eta^n` from :eq:`eq_etan`. Once it is known, compute :math:`\Delta {\bf u}` from :eq:`eq_predict`. + +- Update the matrix of the operator on the lhs of :eq:`eq_etaa`. Solve :eq:`eq_etaa` for :math:`\eta^{n+1}-\eta^n` using an iterative solver and estimate the new horizontal velocity from :eq:`eq_cor`. + +- Compute :math:`\overline{h}^{n+3/2}` from :eq:`eq_hbar`. + +- Determine layer thicknesses and :math:`w` according to the options described below. + +- Advance the tracers. The implementation of implicit vertical diffusion will be detailed below. + +Momentum equation in form :eq:`eq_mom_fl` +========================================= + +Here an additional difficulty is the presence of :math:`h` in the time derivative and on the rhs. The rule is that :math:`\mathbf{u}` should appear with the same :math:`h^*` as in the thickness or tracer equation. We used thus far the choice :math:`h^*\mathbf{u}^n=h^{n-1/2}\mathbf{u}^n` in these equations, which implies that the time derivative will be + +.. math:: + \partial_t(\mathbf{u}h)=( h^{n+1/2}\mathbf{u}^{n+1}-h^{n-1/2}\mathbf{u}^n)/\tau, + +:math:`h^{n-1/2}` will be used on the rhs with pressure gradient term, and the predictor equation will be written for :math:`h^{n-1/2}\Delta\mathbf{u}`. In this case :math:`h^{n-1/2}` can be factored out of the lhs, which will make predictor solution similar. The corrector step will be modified to + +.. math:: + h^{n+1/2}{\bf u}^{n+1}-h^{n-1/2}{\bf u}^{*}=-gh^{n-1/2}\tau\theta\nabla(\eta^{n+1}-\eta^n). + :label: eq_corf1 + + +It will lead to the replacement of :math:`h^{n+1/2}` in the lhs of :eq:`eq_etaa` by :math:`h^{n-1/2}`. We stress once again that the expressions in this section are for the particular choice of :math:`h^*`. + +Current options for the vertical coordinate +=========================================== + +The following options for the vertical coordinate are available at present: + +- Linear free surface: If we keep the layer thicknesses fixed, the time derivative in :eq:`eq_ht` drops out, and it becomes the standard equation to compute :math:`w`, starting from the bottom and continuing to the top, + + + .. math:: + w^t-w^b+\nabla\cdot({h\bf u})=0. + + + If this option is applied also to the first layer, the freshwater flux cannot be taken into account in the thickness equation. Its contribution to the salinity equation is added through a virtual salinity flux. In this option, :math:`w` at the (fixed) ocean surface differs from zero, and so do the tracer fluxes. They do not necessarily integrate to zero over the ocean surface which is why tracer conservation is violated. + +- Full (nonlinear) free surface: We adjust the thickness of the upper layer, while the thicknesses of all other layers are kept fixed, :math:`\partial_th_k=0` for :math:`k>1`. The thickness equations are used to compute :math:`w` on levels :math:`k=2:K_v` starting from the bottom. The change in the thickness of the first layer :math:`h^{n+3/2}_1-h^{n+1/2}_1` is given by :eq:`eq_hbar` written for the respective time interval. In this case there is no transport through the upper moving surface (the transport velocity :math:`w_1` is identically zero). This option requires minimum adjustment with respect to the linear free surface. However, the matrix of the operator in :eq:`eq_etaa` needs to be re-assembled on each time step. + +- We can distribute the total change in height :math:`\partial_t\overline h` between several or all eligible layers. Due to our implementation, at *each* scalar horizontal location they can only be the layers that do not touch the bottom topography. If all eligible layers are involved we estimate + + + .. math:: + \partial_t h_k=(h_k^0/\tilde H)\partial_t\overline h, + + + where :math:`h_k^0` are the unperturbed layer thicknesses and :math:`\tilde H` is their sum for all eligible layers. The thickness of the layers adjacent to the topography is kept fixed. The equation on thickness, written for each layer, is used to compute transport velocities :math:`w` starting from zero bottom value. This variant gives the so-called :math:`z^*`-coordinate. + +- Additional options will be gradually added. Layer thicknesses can vary in many ways provided that their tendencies sum to :math:`\partial_t\overline h` over the layers. In particular, requiring that transport velocities :math:`w` are zero, isopycnal layers can be introduced. The levels can move with high-pass vertical velocities, leading to the so called :math:`\tilde z` coordinate, see :cite:`Leclair2011`, :cite:`Petersen2015` or follow density gradients as in :cite:`Hofmeister2010`. The unperturbed layer thicknesses need not follow the geopotential surfaces and can be terrain following for example. + +- The ALE vertical coordinate is only a framework where many options are in principle possible. Additional measures may be required in each particular case, such as computations of pressure gradients with reduced errors. Updated transport algorithms may be needed (in the spirit of :cite:`Lemarie2012b` to minimize spurious numerical mixing in terrain-following layers. These generalizations are among the topics of ongoing work. + + +Implicit vertical diffusion +=========================== + +We return to the tracer equation :eq:`eq_tracert`. The vertical diffusion in this equation may present a CFL limitation and is treated implicitly. + +Because of varying layer thicknesses, the implementation of implicit vertical diffusion needs slight adjustment compared to the common case of fixed layers. We write, considering time levels :math:`n-1/2` and :math:`n+1/2`, + +.. math:: + h^{n+1/2}T^{n+1/2}-h^{n-1/2}T^{n-1/2}=\tau(R_T^{n}+(K_{33}\partial_zT^{n+1/2})^t-(K_{33}\partial_zT^{n+1/2})^b) + + +and split it into + +.. math:: + h^{n+1/2}T^{*}-h^{n-1/2}T^{n-1/2}=\tau R_T^{n} + +and + +.. math:: + h^{n+1/2}(T^{n+1/2}-T^{*})=\tau(K_{33}\partial_z(T^{n+1/2}-T^*)+K_{33}\partial_zT^*)|^t_b. + +Here :math:`R_T` contains all advection terms and the terms due to the diffusion tensor except for the diagonal term with :math:`K_{33}`. The preliminary computation of :math:`T^*` is necessary to guarantee that a uniform tracer distribution stays uniform (some significant digits will be lost otherwise). diff --git a/docs/time_stepping_transport.rst b/docs/time_stepping_transport.rst new file mode 100644 index 000000000..fd228702c --- /dev/null +++ b/docs/time_stepping_transport.rst @@ -0,0 +1,72 @@ +.. _time_stepping_transport: + +Time stepping for the transport :math:`\mathbf{U}=h\mathbf{u}` instead of velocity +********************************************************************************** + +For the momentum equation in the form :eq:`eq_mom_fl` an alternative variant of time stepping is possible when the quantity :math:`\mathbf{U}=h\mathbf{u}` is advanced instead of velocity :math:`\mathbf{u}`. This will simultaneously imply that :math:`h^*=h^{n}`, making the thickness and transport equations centered with respect to time step. The thickness appearing with the pressure gradient should be them :math:`h^{n+1/2}`, which provides a centered estimate. The advection and Coriolis terms are computed through AB2 (or AB3) time stepping, and if needed, the Coriolis term can be time stepped semiimplicitly. + +The time stepping algorithm can be formulated as follows + +.. math:: + {\bf U}^{n+1}-{\bf U}^{n}=\tau({\bf R}_{U}^{n+1/2}-gh^{n+1/2}\nabla(\theta\eta^{n+1}+(1-\theta)\eta^n)+(\nu_v\partial_z{\bf u}^{n+1})^t-(\nu_v\partial_z{\bf u}^{n+1})^b) + +with + +.. math:: + {\bf R}_{U}^{n+1/2}=({\bf R}_{U}^*)^{AB}-h^{n+1/2}(\nabla p_h+g\rho\nabla Z)/\rho_0, + +and + +.. math:: + {\bf R}_{U}^*=-\nabla\cdot({\bf U}^n{\bf u}^n)-(w^t{\bf u}^t-w^b{\bf u}^b)^n-f{\bf k}\times{\bf U}^n. + +The last expression combines the terms that need the AB method for stability and the second order. We use :math:`h^{n+1/2}` to compute :math:`Z` and follow the same rule as :eq:`eq_etan` to compute :math:`\eta^n`. The steps are: + +- Do the predictor step and compute :math:`\Delta \tilde{\bf U}=\tau{\bf R}_U^{n+1/2}-\tau gh^{n+1/2}\nabla\eta^n`. + +- Update for implicit viscosity. + + .. math:: + \partial_t\Delta{\bf U}-(\nu_v\partial_z(\Delta{\bf U}/h^{n+1/2}))|^t_b=\Delta\tilde{\bf U}+(\nu_v\partial_z({\bf U}^n/h^{n+1/2}))|^t_b. + +- Solve for new elevation. We write first + + .. math:: + \overline{\bf U}=\sum_k{\bf U}, + + and similarly for other quantities, getting + + .. math:: + \overline{\bf U}^{n+1}-\overline{\bf U}^n=\overline{\Delta{\bf U}}-g\tau(H+\overline h^{n+1/2})\theta\nabla(\eta^{n+1}-\eta^n) + :label: eq_baru + +and + + .. math:: + \eta^{n+1}-\eta^n=-\tau\nabla\cdot(\alpha\overline{\bf U}^{n+1}+(1-\alpha)\overline{\bf U}^{n})-\tau(\alpha W^{n+1/2}+(1-\alpha)W^{n-1/2}). + :label: eq_etaU + +Eliminating :math:`\overline{\bf U}^{n+1}` between these two equations, one gets the equation on elevation increment :math:`\Delta\eta=\eta^{n+1}-\eta^n` + + .. math:: + \Delta\eta-g\tau^2\theta\alpha\nabla\cdot((H+\overline h^{n+1/2})\nabla\Delta\eta)=-\tau\nabla\cdot(\alpha\overline{\Delta{\bf U}}+\overline{\bf U}^n)-\tau(\alpha W^{n+1/2}+(1-\alpha)W^{n-1/2}) + + In reality, everything remains similar to the vector-invariant case, and the matrix to be inverted is the same. + +- Correct the transport velocities as + + .. math:: + {\bf U}^{n+1}-{\bf U}^n={\Delta{\bf U}}-g\tau h^{n+1/2}\theta\nabla\Delta\eta. + :label: eq_corrU + +- Proceed with ALE and determine :math:`w^{n+1}`, :math:`h^{n+3/2}`, :math:`T^{n+3/2}`. + +- The new velocities are estimated as + + .. math:: + {\bf u}^{n+1}={\bf U}^{n+1}/h^{n+1}. + + Here :math:`h^{n+1}` can be computed either in the agreement with the ALE procedure (:math:`\eta^{n+1}` is already known) or interpolating between :math:`n+1/2` and :math:`n+3/2` time levels. + + +This alternative form of time stepping is more elegant. The horizontal velocity appears in most places in the layer equations as the product with respective thickness, and the alternative form takes this naturally into account. It will be added in due time together with the development of ALE options. \ No newline at end of file diff --git a/docs/vertical_discretization.rst b/docs/vertical_discretization.rst new file mode 100644 index 000000000..4f79d1aa2 --- /dev/null +++ b/docs/vertical_discretization.rst @@ -0,0 +1,88 @@ +.. _vertical_discretization + +Vertical discretization: Layer thicknesses and layer equations +************************************************************** + +FESOM2 uses Arbitrary Lagrangian Eulerian (ALE) vertical coordinate. This implies that level surfaces are allowed to move. ALE vertical coordinate on its own is only the framework enabling moving level surfaces. The way how they are moving depends on one's particular goal and may require additional algorithmic steps. Two limiting cases obviously include the case of fixed :math:`z`-levels and the case when levels are isopycnal surfaces. At present only vertical coordinates that slightly deviate from :math:`z`-surfaces are supported in FESOM, but many other options will follow. + +The implementation of ALE vertical coordinate in FESOM2 basically follows :cite:`Ringler2013`. An alternative approach, used by MOM6, see, e.g., :cite:`Adcroft_Hallberg_2006` :cite:`Adcroft2019`, is in the exploratory phase. + +The essential step toward the ALE vertical coordinate lies in confining equations of section :ref:`sec_cequations` to model layers. + +- Introduce layer thicknesses :math:`h_k=h_k(x,y,t)`, where :math:`k=1:K` is the layer index and :math:`K` the total number of layers. They are functions of the horizontal coordinates and time in a general case. Each layer consists of prisms defined by the surface mesh but partly masked by bottom topography. + +- Layers communicate via the transport velocities :math:`w_{kv}` through the top and bottom boundaries of the prisms. The transport velocities are the differences between the physical velocities in the direction normal to the layer interfaces and the velocities due to the motion of the interfaces. These velocities are defined at the interfaces (the yellow points in :numref:`vertical`). For layer :math:`k` the top interface has index :math:`k` and the bottom one is :math:`k+1`. Note that :math:`w_{kv}` coincides with the vertical velocity only if the level surfaces are flat. + +- All other quantities - horizontal velocities :math:`{\bf u}`, temperature :math:`T`, salinity :math:`S` and pressure :math:`p` are defined at mid-layers. Their depths will be denoted as :math:`Z_k`, and the notation :math:`z_k` is kept for the depths of mesh levels (the layer interfaces). They are functions of horizontal coordinates and time in a general case. + +The equations of motion, continuity and tracer balance are integrated vertically over the layers. We will use :math:`T` as a representative of an arbitrary tracer. + + +- The continuity equation becomes the equation on layer thicknesses + +.. math:: + \partial_t h_k+\nabla\cdot({\bf u}h)_k+(w^{t}-w^b)_k+W\delta_{k1}=0, + :label: eq_thickness + + +- and the tracer equation becomes + +.. math:: + \partial_t(hT)_k+\nabla\cdot({\bf u}hT)_k+(w^{t}T^t-w^bT^b)_k+WT_W\delta_{k1}=\nabla\cdot h_k{\bf K}\nabla T_k. + :label: eq_tracer + + +Here, :math:`W` is the water flux leaving the ocean at the surface, it contributes to the first layer only (hence the delta-function); :math:`T_W` is the property transported with the surface water flux and the indices :math:`t` and :math:`b` imply the top and the bottom of the layer. + +The right hand side of :eq:`eq_tracer` contains the 3 by 3 diffusivity tensor :math:`{\bf K}`. We still use :math:`\nabla` in :eq:`eq_tracer` for the 3D divergence (the outer :math:`\nabla`) for brevity, but assume the discrete form :math:`\nabla_h(...)+((...)^t-(...)^b)/h_k`, where :math:`(...)` are the placeholders for the horizontal and vertical components of 3D vector it acts on. A correct discretization of the diffusivity term is cumbersome and will be explained below. + +- Vertical sum of :eq:`eq_thickness` over layers with account that :math:`w^t=0` at the free surface and :math:`w_b=0` at the bottom gives the 'layered' version of the elevation equation + + .. math:: + \partial_t\eta+\nabla_h\cdot\sum_kh_k{\bf u}_k+W=0. + :label: eq_eta + +- The layer-integrated momentum equation in the flux form is + + .. math:: + \partial_t(h{\bf u})+\nabla_h\cdot(h{\bf u u})+w^t{\bf u}^t-w^b{\bf u}^b+ + f{\bf k}\times{\bf u}h +h(\nabla_h p+g\rho\nabla Z)/\rho_0= \nonumber \\ D_{uh}{\bf u}+(\nu_v\partial_z{\bf u})^t-(\nu_v\partial_z{\bf u})^b, + :label: eq_mom_fl + + with :math:`D_{uh}{\bf u}` the horizontal viscosity operator for the flux form (to be specified later), :math:`\nu_v` the vertical viscosity coefficient, :math:`f` the Coriolis parameter and :math:`{\bf k}` a unit vertical vector. We ignore the momentum source due to the added water :math:`W` at the surface. Note that it could be more natural to formulate the solution procedure in terms of the horizontal layer transport velocities :math:`{\bf U}=h{\bf u}` in this case, but the present implementation in FESOM deals with separate :math:`h` and :math:`\mathbf{u}`. + +- The pressure field is expressed as + + .. math:: + p=g\rho_0\eta+P, \quad P_{1}=p_a+g\rho_1h_1/2, \quad P_k=P_{k-1}+g(\rho_{k-1}h_{k-1}+ \rho_kh_k)/2. + :label: eq_pressure + + with :math:`p_a` the atmospheric pressure, :math:`\rho` the deviation of density from its reference value :math:`\rho_0`, and :math:`P` is the variable hydrostatic pressure due to :math:`\rho`. The pressure gradient in continuous equations :eq:`eq_cmom` has to be computed at constant :math:`z`. The model levels deviate from surfaces :math:`z=\rm{const}`. The term :math:`g\rho\nabla Z`, appearing together with the horizontal pressure gradient in :eq:`eq_mom_fl` compensates for the deviation. The quantity :math:`Z` appearing in this term is the :math:`z`-coordinate of the midplane of the layer with the thickness :math:`h`. + +.. note:: + Although :math:`\nabla p+g\rho\nabla Z` gives a formally correct estimate of pressure gradient at constant :math:`z`, the errors of discretization of the two terms in this expression become an issue if level surfaces deviate from :math:`z`-surfaces. They are known as pressure gradient errors and require special care. FESOM will propose a selection of known algorithms, including the finite-volume algorithms of pressure gradient force that follows :cite:`Engwirda2017` but is adapted to the triangular prisms of FESOM mesh. + +- Instead of using the flux form of momentum equation :eq:`eq_mom_fl` representing momentum balance in the layer one can work without layer integration. Of particular interest is the vector-invariant form written as + + .. math:: + \partial_t{\bf u}+\frac{\omega+f}{h}{\bf k}\times{\bf u}h+((w\partial_z{\bf u})^t+(w\partial_z{\bf u})^b)/2 +\nabla (p/\rho_0+{\bf u}^2/2)+g\rho\nabla Z/\rho_0= \nonumber \\ D_u{\bf u}+((\nu_v\partial_z{\bf u})^t-(\nu_v\partial_z{\bf u})^b)/h. + :label: eq_mom_vei + + Here, the identity + + .. math:: + {\bf u}\cdot\nabla{\bf u}=\omega{\bf k}\times{\bf u}+\nabla({\bf u}^2/2),\quad \omega={\bf k}\cdot(\nabla\times{\bf u}) + + was used. + +- The second term on the lhs of :eq:`eq_mom_vei` includes division and multiplication with the layer thickness, and in doing so, it introduces the layer potential vorticity (PV), :math:`q=(\omega+f)/h` and its transport :math:`{\bf u}h`. The layer thickness formally drops out from the equation :eq:`eq_mom_vei` which is still continuous in the horizontal direction. However, in the discrete case, the location of vorticity points (vertices) and velocity points is different. By keeping separate :math:`h` the equation will then operate on the same horizontal transports as the thickness equations. This is the prerequisite for developing discretizations that conserve potential vorticity. + +- One more form is possible where the vector-invariant representation is not used + + .. math:: + \partial_t({\bf u})+\nabla\cdot({\bf u u})+(w^t{\bf u}^t-w^b{\bf u}^b)/h+ + f{\bf k}\times{\bf u} +(\nabla p+g\rho\nabla Z)/\rho_0= \nonumber \\ D_{u}{\bf u}+(A_v\partial_z{\bf u})^t-(A_v\partial_z{\bf u})^b/h. + :label: eq_mom_f2 + +The default version in FESOM2 is :eq:`eq_mom_fl`. Although the versions are derived from the same continuous equations, they are not equivalent in the discrete case. + diff --git a/docs/zreferences.rst b/docs/zreferences.rst new file mode 100644 index 000000000..c22d33306 --- /dev/null +++ b/docs/zreferences.rst @@ -0,0 +1,11 @@ +.. _references: + +.. only:: html + + References + ********** + + .. rubric:: References + +.. bibliography:: mybib_fesom2.bib + From 0091b7ab3ceb042b02805d1f5f79a8fe08b2229e Mon Sep 17 00:00:00 2001 From: dsidoren Date: Mon, 23 May 2022 10:19:04 +0200 Subject: [PATCH 893/909] omp_min_max_sum2 fix omp_min_max_sum2 did not initialise optionally passed NAN. Caused errors when debugging! --- src/gen_support.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index aba704771..d0961e8ad 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -552,7 +552,9 @@ FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) type(t_partit),intent(in), & target :: partit - + + IF (PRESENT(nan)) vmasked=nan + SELECT CASE (trim(what)) CASE ('min') if (.not. present(nan)) vmasked=huge(vmasked) !just some crazy number From ab689b0e52456da015642de1a7867cc9702fa211 Mon Sep 17 00:00:00 2001 From: Natalja Rakowsky Date: Thu, 9 Jun 2022 15:01:04 +0200 Subject: [PATCH 894/909] Feature added: Flag __openmp_reproducible to turn orn ordering for critical openmp loops --- CMakeLists.txt | 1 + src/CMakeLists.txt | 3 ++ src/Makefile | 64 +++++++++++++++++++++--------------- src/gen_support.F90 | 64 +++++++++++++++++++++++++----------- src/ice_EVP.F90 | 21 +++++++++--- src/ice_fct.F90 | 60 ++++++++++++++++++++++++++-------- src/oce_adv_tra_driver.F90 | 20 ++++++++---- src/oce_adv_tra_fct.F90 | 10 ++++-- src/oce_ale.F90 | 67 ++++++++++++++++++++++++++++++-------- src/oce_ale_tracer.F90 | 30 ++++++++++++----- src/oce_ale_vel_rhs.F90 | 25 +++++++++----- src/oce_dyn.F90 | 56 +++++++++++++++++++++---------- src/oce_muscl_adv.F90 | 23 +++++++++---- src/solver.F90 | 54 +++++++++++++++++------------- src/write_step_info.F90 | 4 +++ 15 files changed, 351 insertions(+), 151 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bde6aaecd..5155b05d2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,6 +13,7 @@ set(FESOM_COUPLED OFF CACHE BOOL "compile fesom standalone or with oasis support set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. (Also needs FESOM_COUPLED to work)") set(CRAY OFF CACHE BOOL "compile with cray ftn") set(USE_ICEPACK OFF CACHE BOOL "compile fesom with the Iceapck modules for sea ice column physics.") +set(OPENMP_REPRODUCIBLE OFF CACHE BOOL "serialize OpenMP loops that are critical for reproducible results") #set(VERBOSE OFF CACHE BOOL "toggle debug output") #add_subdirectory(oasis3-mct/lib/psmile) add_subdirectory(src) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ac9fe8519..3d8a5b716 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -119,6 +119,9 @@ endif() if(${VERBOSE}) target_compile_definitions(${PROJECT_NAME} PRIVATE VERBOSE) endif() +if(${OPENMP_REPRODUCIBLE}) + target_compile_definitions(${PROJECT_NAME} PRIVATE __openmp_reproducible) +endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) if(${BUILD_FESOM_AS_LIBRARY}) diff --git a/src/Makefile b/src/Makefile index d4e453811..598dc8d39 100755 --- a/src/Makefile +++ b/src/Makefile @@ -19,22 +19,26 @@ LIB_PARMS = -L$(PARMS_DIR)/lib -lparms CPP_SOL = -DPARMS ###### Objects for Mesh Partitioning ################################################ -# modules -MOD_INI = fort_part.o oce_modules.o MOD_MESH.o gen_modules_config.o gen_modules_partitioning.o gen_modules_rotate_grid.o - -OBJ_INI = fvom_init.o \ - oce_local.o \ - gen_comm.o +OBJ_INI = fort_part.o oce_modules.o MOD_READ_BINARY_ARRAYS.o MOD_WRITE_BINARY_ARRAYS.o MOD_MESH.o MOD_PARTIT.o \ + gen_modules_partitioning.o gen_modules_config.o \ + gen_modules_rotate_grid.o oce_local.o gen_comm.o fvom_init.o # objects -MODULES = oce_modules.o \ +OBJECTS = fortran_utils.o \ + oce_modules.o \ info_module.o \ command_line_options.o \ + MOD_READ_BINARY_ARRAYS.o \ + MOD_WRITE_BINARY_ARRAYS.o \ MOD_MESH.o \ + MOD_PARTIT.o \ + MOD_DYN.o \ + gen_modules_partitioning.o \ + MOD_ICE.o \ + MOD_TRACER.o \ ice_modules.o \ gen_modules_config.o \ - gen_modules_partitioning.o \ gen_modules_clock.o \ gen_modules_rotate_grid.o \ gen_modules_read_NetCDF.o \ @@ -73,45 +77,51 @@ MODULES = oce_modules.o \ io_netcdf_workaround_module.o \ io_data_strategy.o \ fesom_version_info.o \ + io_netcdf_attribute_module.o \ + io_netcdf_file_module.o \ + io_scatter.o \ io_meandata.o \ + io_restart_derivedtype.o \ + io_fesom_file.o \ + io_restart_file_group.o \ io_restart.o \ io_blowup.o \ io_mesh_info.o \ + oce_ale_pressure_bv.o \ gen_ic3d.o \ gen_surface_forcing.o \ gen_modules_gpot.o \ - toy_channel_soufflet.o - -OBJECTS= fvom_main.o \ - gen_comm.o \ + toy_channel_soufflet.o \ + gen_modules_backscatter.o \ + solver.o \ + oce_ale_vel_rhs.o \ + write_step_info.o \ + oce_fer_gm.o \ + oce_ale_tracer.o \ + oce_ale.o \ oce_setup_step.o \ + gen_comm.o \ oce_mesh.o \ oce_dyn.o \ - oce_ale_vel_rhs.o \ - oce_vel_rhs_vinv.o \ - oce_ale_pressure_bv.o \ - oce_fer_gm.o \ oce_muscl_adv.o \ - oce_ice_init_state.o \ oce_shortwave_pene.o \ - oce_ale.o \ - oce_ale_tracer.o \ cavity_param.o \ ice_EVP.o \ ice_maEVP.o \ - ice_setup_step.o \ ice_fct.o \ - ice_oce_coupling.o \ ice_thermo_oce.o \ + ice_setup_step.o \ + ice_oce_coupling.o \ gen_model_setup.o \ gen_forcing_init.o \ gen_bulk_formulae.o \ gen_forcing_couple.o \ gen_interpolation.o \ gen_events.o \ - write_step_info.o \ oce_mo_conv.o \ - oce_spp.o + oce_spp.o \ + fesom_module.o \ + fesom_main.o # oce_pressure_bv.o \ @@ -127,14 +137,14 @@ default: run run: $(MODULES) $(OBJECTS) @echo "======= Building FESOM ==========" - $(LD) $(OPT) -o $(EXE) $(MODULES) $(OBJECTS) \ - $(MPI_LIB) $(LIB_LAP) $(LIB_PARMS) $(NC_LIB) + $(LD) $(OPT) -o $(EXE) $(OBJECTS) \ + $(MPI_LIB) $(LIB_LAP) $(NC_LIB) $(LIB_PARMS) # cp -pf $(EXE) ../bin/. run_ini: CPP_DEFS+=-DFVOM_INIT -run_ini: cleanomod $(MOD_INI) $(OBJ_INI) +run_ini: cleanomod $(OBJ_INI) @echo "======= Building FESOM partioning program ==========" - $(LD) $(OPT) -o $(EXE_INI) $(MOD_INI) $(OBJ_INI) \ + $(LD) $(OPT) -o $(EXE_INI) $(OBJ_INI) \ $(MPI_LIB) $(LIB_METIS) $(NC_LIB) cp -pf $(EXE_INI) ../bin/. diff --git a/src/gen_support.F90 b/src/gen_support.F90 index d0961e8ad..f03560806 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -212,7 +212,6 @@ subroutine smooth_elem2D(arr, N, partit, mesh) work_array(node)=0._WP DO j=1, nod_in_elem2D_num(node) elem=nod_in_elem2D(j, node) - elnodes=elem2D_nodes(:,elem) work_array(node)=work_array(node)+arr(elem)*elem_area(elem) vol=vol+elem_area(elem) END DO @@ -323,13 +322,17 @@ subroutine integrate_nod_2D(data, int2D, partit, mesh) #include "associate_mesh_ass.h" lval=0.0_WP +#if !defined(__openmp_reproducible) !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) !$OMP DO REDUCTION (+: lval) +#endif do row=1, myDim_nod2D lval=lval+data(row)*areasvol(ulevels_nod2D(row),row) end do +#if !defined(__openmp_reproducible) !$OMP END DO !$OMP END PARALLEL +#endif int2D=0.0_WP call MPI_AllREDUCE(lval, int2D, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & MPI_COMM_FESOM, MPIerr) @@ -350,21 +353,33 @@ subroutine integrate_nod_3D(data, int3D, partit, mesh) integer :: k, row real(kind=WP) :: lval +#if defined(__openmp_reproducible) + real(kind=WP) :: lval_row + integer :: k_ul, k_nl +#endif + #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" lval=0.0_WP -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row, k) -!$OMP DO REDUCTION(+: lval) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row, k, lval_row) REDUCTION(+: lval) do row=1, myDim_nod2D + lval_row = 0. do k=ulevels_nod2D(row), nlevels_nod2D(row)-1 - lval=lval+data(k, row)*areasvol(k,row)*hnode_new(k,row) ! --> TEST_cavity + lval_row=lval_row+data(k, row)*areasvol(k,row)*hnode_new(k,row) ! --> TEST_cavity end do +#if defined(__openmp_reproducible) +!$OMP ORDERED +#endif + lval = lval + lval_row +#if defined(__openmp_reproducible) +!$OMP END ORDERED +#endif end do -!$OMP END DO -!$OMP END PARALLEL +!$OMP END PARALLEL DO + int3D=0.0_WP call MPI_AllREDUCE(lval, int3D, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & MPI_COMM_FESOM, MPIerr) @@ -500,13 +515,17 @@ FUNCTION omp_min_max_sum1(arr, pos1, pos2, what, partit, nan) SELECT CASE (trim(what)) CASE ('sum') val=0.0_WP +#if !defined(__openmp_reproducible) !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) !$OMP DO REDUCTION(+: val) +#endif do n=pos1, pos2 val=val+arr(n) end do +#if !defined(__openmp_reproducible) !$OMP END DO !$OMP END PARALLEL +#endif CASE ('min') val=arr(1) @@ -546,9 +565,9 @@ FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) character(3), intent(in) :: what real(kind=WP), optional :: nan !to be implemented upon the need (for masked arrays) real(kind=WP) :: omp_min_max_sum2 - real(kind=WP) :: val, vmasked + real(kind=WP) :: val, vmasked, val_part(pos11:pos12) integer :: i, j - + type(t_partit),intent(in), & target :: partit @@ -561,8 +580,8 @@ FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) val=arr(1,1) !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j) !$OMP DO REDUCTION(min: val) - do i=pos11, pos12 - do j=pos21, pos22 + do j=pos21, pos22 + do i=pos11, pos12 if (arr(i,j)/=vmasked) val=min(val, arr(i,j)) end do end do @@ -574,8 +593,8 @@ FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) val=arr(1,1) !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j) !$OMP DO REDUCTION(max: val) - do i=pos11, pos12 - do j=pos21, pos22 + do j=pos21, pos22 + do i=pos11, pos12 if (arr(i,j)/=vmasked) val=max(val, arr(i,j)) end do end do @@ -584,16 +603,23 @@ FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) CASE ('sum') if (.not. present(nan)) vmasked=huge(vmasked) !just some crazy number - val=0 -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j) -!$OMP DO REDUCTION(+: val) - do i=pos11, pos12 - do j=pos21, pos22 + val=0. +#if !defined(__openmp_reproducible) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i, j) REDUCTION(+: val) + do j=pos21, pos22 + do i=pos11, pos12 if (arr(i,j)/=vmasked) val=val+arr(i,j) end do end do -!$OMP END DO -!$OMP END PARALLEL +!$OMP END PARALLEL DO +#else +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(j) + do j=pos21, pos22 + val_part(j) = sum(arr(pos11:pos12,j), mask=(arr(pos11:pos12,j)/=vmasked)) + end do +!$OMP END PARALLEL DO + val = sum(val_part(pos21:pos22)) +#endif CASE DEFAULT if (partit%mype==0) write(*,*) trim(what), ' is not implemented in omp_min_max_sum case!' diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index 0dbcb3cac..36d24650c 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -219,8 +219,10 @@ subroutine stress2rhs(ice, partit, mesh) !_______________________________________________________________________ if (ice_strength(el) > 0._WP) then DO k=1,3 -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(elem2D_nodes(k,el))) +#else +!$OMP ORDERED #endif U_rhs_ice(elem2D_nodes(k,el)) = U_rhs_ice(elem2D_nodes(k,el)) & - elem_area(el) * & @@ -231,8 +233,11 @@ subroutine stress2rhs(ice, partit, mesh) - elem_area(el) * & (sigma12(el)*gradient_sca(k,el) + sigma22(el)*gradient_sca(k+3,el) & -sigma11(el)*val3*metric_factor(el)) -#if defined(_OPENMP) + +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(elem2D_nodes(k,el))) +#else +!$OMP END ORDERED #endif END DO endif @@ -564,19 +569,25 @@ subroutine EVPdynamics(ice, partit, mesh) if (use_cavity) then if ( (ulevels(edge_tri(1,ed))>1) .or. & ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(edges(1,ed))) +#else +!$OMP ORDERED #endif U_ice(edges(1,ed))=0.0_WP V_ice(edges(1,ed))=0.0_WP -#if defined(_OPENMP) + +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(edges(1,ed))) call omp_set_lock (partit%plock(edges(2,ed))) #endif U_ice(edges(2,ed))=0.0_WP V_ice(edges(2,ed))=0.0_WP -#if defined(_OPENMP) + +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(edges(2,ed))) +#else +!$OMP END ORDERED #endif end if end if diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index 99c1430ea..f6c7230f8 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -621,16 +621,20 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) do q=1,3 n=elnodes(q) flux=icefluxes(elem,q) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(n)) +#else +!$OMP ORDERED #endif if (flux>0) then icepplus(n)=icepplus(n)+flux else icepminus(n)=icepminus(n)+flux end if -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(n)) +#else +!$OMP END ORDERED #endif end do end do @@ -698,12 +702,16 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(n)) +#else +!$OMP ORDERED #endif m_ice(n)=m_ice(n)+icefluxes(elem,q) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(n)) +#else +!$OMP END ORDERED #endif end do end do @@ -725,12 +733,16 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(n)) +#else +!$OMP ORDERED #endif a_ice(n)=a_ice(n)+icefluxes(elem,q) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(n)) +#else +!$OMP END ORDERED #endif end do end do @@ -752,12 +764,16 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(n)) +#else +!$OMP ORDERED #endif m_snow(n)=m_snow(n)+icefluxes(elem,q) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(n)) +#else +!$OMP END ORDERED #endif end do end do @@ -780,12 +796,16 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(n)) +#else +!$OMP ORDERED #endif ice_temp(n)=ice_temp(n)+icefluxes(elem,q) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(n)) +#else +!$OMP END ORDERED #endif end do end do @@ -852,15 +872,19 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) end if if (k==nn_num(row)) write(*,*) 'FATAL ERROR' end do -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(row)) ! it shall be sufficient to block writing into the same row of SSH_stiff +#else +!$OMP ORDERED #endif mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP if(q==n) then mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP end if -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(row)) +#else +!$OMP END ORDERED #endif END DO end do @@ -889,6 +913,7 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) if(flag>0) then offset=ssh_stiff%rowptr(iflag)-ssh_stiff%rowptr(1)+1 n=ssh_stiff%rowptr(iflag+1)-ssh_stiff%rowptr(1) +#if !defined(__openmp_reproducible) aa=0 !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:aa) !$OMP DO @@ -897,6 +922,9 @@ SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh) end do !$OMP END DO !$OMP END PARALLEL +#else + aa = sum(mass_matrix(offset:n)) +#endif write(*,*) '#### MASS MATRIX PROBLEM', mype, iflag, aa, area(1,iflag), ulevels_nod2D(iflag) endif END SUBROUTINE ice_mass_matrix_fill @@ -1008,8 +1036,10 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) #endif /* (__oifs) */ !___________________________________________________________________ -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(row)) +#else +!$OMP ORDERED #endif rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes))+cx1 rhs_a(row)=rhs_a(row)+sum(entries*a_ice(elnodes))+cx2 @@ -1025,8 +1055,10 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) rhs_tempdiv(row)=rhs_tempdiv(row)-cx4 #endif /* (__oifs) */ -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(row)) +#else +!$OMP END ORDERED #endif end do end do diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 3a3a49b0d..c51be2e17 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -135,12 +135,14 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, if (nu2>0) nu12 = min(nu1,nu2) !!PS do nz=1, max(nl1, nl2) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED #endif do nz=nu12, nl12 fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) end do call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock (partit%plock(enodes(2))) @@ -148,8 +150,10 @@ subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, #endif fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e) end do -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED #endif end do !$OMP END PARALLEL DO @@ -301,12 +305,14 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, nu12 = nu1 if (nu2>0) nu12 = min(nu1,nu2) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED #endif do nz=nu12, nl12 dttf_h(nz,enodes(1))=dttf_h(nz,enodes(1))+flux_h(nz,edge)*dt/areasvol(nz,enodes(1)) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) end do call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock (partit%plock(enodes(2))) @@ -314,8 +320,10 @@ subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, #endif dttf_h(nz,enodes(2))=dttf_h(nz,enodes(2))-flux_h(nz,edge)*dt/areasvol(nz,enodes(2)) end do -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED #endif end do !$OMP END DO diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 1d599d60f..acd772930 100644 --- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -225,13 +225,15 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, nl12 = max(nl1,nl2) nu12 = nu1 if (nu2>0) nu12 = min(nu1,nu2) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED #endif do nz=nu12, nl12 fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) end do call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock (partit%plock(enodes(2))) @@ -240,8 +242,10 @@ subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) end do -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED #endif end do !$OMP END DO diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index d2360b775..6d5aad3ca 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1596,12 +1596,16 @@ subroutine update_stiff_mat_ale(partit, mesh) ! In the computation above, I've used rules from ssh_rhs (where it is ! on the rhs. So the sign is changed in the expression below. ! npos... sparse matrix indices position of node points elnodes -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(row)) ! it shall be sufficient to block writing into the same row of SSH_stiff +#else +!$OMP ORDERED #endif SSH_stiff%values(npos)=SSH_stiff%values(npos) + fy*factor -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(row)) +#else +!$OMP END ORDERED #endif end do ! --> do i=1,2 end do ! --> do j=1,2 @@ -1714,17 +1718,21 @@ subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) !_______________________________________________________________________ ! calc netto "flux" -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED #endif ssh_rhs(enodes(1))=ssh_rhs(enodes(1))+(c1+c2) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock(partit%plock(enodes(2))) #endif ssh_rhs(enodes(2))=ssh_rhs(enodes(2))-(c1+c2) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED #endif end do @@ -1854,17 +1862,21 @@ subroutine compute_hbar_ale(dynamics, partit, mesh) end do end if !_______________________________________________________________________ -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED #endif ssh_rhs_old(enodes(1))=ssh_rhs_old(enodes(1))+(c1+c2) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock(partit%plock(enodes(2))) #endif ssh_rhs_old(enodes(2))=ssh_rhs_old(enodes(2))-(c1+c2) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED #endif end do !$OMP END DO @@ -2013,14 +2025,16 @@ subroutine vert_vel_ale(dynamics, partit, mesh) c2(nz)=(fer_UV(2,nz,el(1))*deltaX1- fer_UV(1,nz,el(1))*deltaY1)*helem(nz,el(1)) end if end do -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(enodes(1))) +#else +!$OMP ORDERED #endif Wvel (nzmin:nzmax, enodes(1))= Wvel (nzmin:nzmax, enodes(1))+c1(nzmin:nzmax) if (Fer_GM) then fer_Wvel(nzmin:nzmax, enodes(1))= fer_Wvel(nzmin:nzmax, enodes(1))+c2(nzmin:nzmax) end if -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock (partit%plock(enodes(2))) #endif @@ -2028,8 +2042,10 @@ subroutine vert_vel_ale(dynamics, partit, mesh) if (Fer_GM) then fer_Wvel(nzmin:nzmax, enodes(2))= fer_Wvel(nzmin:nzmax, enodes(2))-c2(nzmin:nzmax) end if -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED #endif !_______________________________________________________________________ ! if ed is not a boundary edge --> calc div(u_vec*h) for every layer @@ -2045,14 +2061,16 @@ subroutine vert_vel_ale(dynamics, partit, mesh) c2(nz)=-(fer_UV(2,nz,el(2))*deltaX2-fer_UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) end if end do -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(enodes(1))) +#else +!$OMP ORDERED #endif Wvel (nzmin:nzmax, enodes(1))= Wvel (nzmin:nzmax, enodes(1))+c1(nzmin:nzmax) if (Fer_GM) then fer_Wvel(nzmin:nzmax, enodes(1))= fer_Wvel(nzmin:nzmax, enodes(1))+c2(nzmin:nzmax) end if -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock (partit%plock(enodes(2))) #endif @@ -2060,8 +2078,10 @@ subroutine vert_vel_ale(dynamics, partit, mesh) if (Fer_GM) then fer_Wvel(nzmin:nzmax, enodes(2))= fer_Wvel(nzmin:nzmax, enodes(2))-c2(nzmin:nzmax) end if -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED #endif end if end do ! --> do ed=1, myDim_edge2D @@ -2862,6 +2882,9 @@ subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) !___________________________________________________________________________ real(kind=8) :: t0,t1, t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo integer :: node +!NR + integer, save :: n_check=0 + real(kind=8) :: temp_check, sali_check !___________________________________________________________________________ ! pointer on necessary derived types real(kind=WP), dimension(:), pointer :: eta_n @@ -3124,5 +3147,21 @@ subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) write(*,*) write(*,*) end if + + +!NR Checksum for tracers, as they are most sensitive + + n_check = n_check+1 + temp_check = 0. + sali_check = 0. + do node=1,myDim_nod2D+eDim_nod2D + temp_check = temp_check + sum(tracers%data(1)%values(nlevels_nod2D(node)-1:ulevels_nod2D(node),node)) + sali_check = sali_check + sum(tracers%data(2)%values(nlevels_nod2D(node)-1:ulevels_nod2D(node),node)) + end do + call MPI_Allreduce(MPI_IN_PLACE, temp_check, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr) + call MPI_Allreduce(MPI_IN_PLACE, sali_check, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr) + + print *,'Check',n_check,temp_check,sali_check + end subroutine oce_timestep_ale diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index bc7a02122..7a0bc6d3a 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1120,17 +1120,21 @@ subroutine diff_part_hor_redi(tracers, partit, mesh) nl12=max(nl1,nl2) ul12 = ul1 if (ul2>0) ul12=min(ul1,ul2) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED #endif del_ttf(ul12:nl12,enodes(1))=del_ttf(ul12:nl12,enodes(1))+rhs1(ul12:nl12)*dt/areasvol(ul12:nl12,enodes(1)) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(1))) call omp_set_lock (partit%plock(enodes(2))) #endif del_ttf(ul12:nl12,enodes(2))=del_ttf(ul12:nl12,enodes(2))+rhs2(ul12:nl12)*dt/areasvol(ul12:nl12,enodes(2)) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED #endif end do !$OMP END DO @@ -1200,17 +1204,21 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) )*len) tt(nz)=tt(nz)*vi END DO -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(en(1))) +#else +!$OMP ORDERED #endif temporary_ttf(nzmin:nzmax-1,en(1))=temporary_ttf(nzmin:nzmax-1,en(1))-tt(nzmin:nzmax-1) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(en(1))) call omp_set_lock (partit%plock(en(2))) #endif temporary_ttf(nzmin:nzmax-1,en(2))=temporary_ttf(nzmin:nzmax-1,en(2))+tt(nzmin:nzmax-1) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(en(2))) +#else +!$OMP END ORDERED #endif END DO !$OMP END DO @@ -1242,17 +1250,21 @@ SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) )*len) tt(nz)=-tt(nz)*vi*dt END DO -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(en(1))) +#else +!$OMP ORDERED #endif ttf(nzmin:nzmax-1,en(1))=ttf(nzmin:nzmax-1,en(1))-tt(nzmin:nzmax-1)/area(nzmin:nzmax-1,en(1)) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(en(1))) call omp_set_lock (partit%plock(en(2))) #endif ttf(nzmin:nzmax-1,en(2))=ttf(nzmin:nzmax-1,en(2))+tt(nzmin:nzmax-1)/area(nzmin:nzmax-1,en(2)) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(en(2))) +#else +!$OMP END ORDERED #endif END DO !$OMP END DO diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index a926559c2..1ec929fe5 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -322,12 +322,16 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) un2(nl2+1:max(nl1,nl2)) = 0._WP un1(1:ul1-1) = 0._WP un2(1:ul2-1) = 0._WP + +#if defined(__openmp_reproducible) +!$OMP ORDERED +#endif ! first edge node ! Do not calculate on Halo nodes, as the result will not be used. ! The "if" is cheaper than the avoided computiations. if (nod(1) <= myDim_nod2d) then -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(nod(1))) #endif do nz=min(ul1,ul2), max(nl1,nl2) @@ -335,14 +339,14 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) end do -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(nod(1))) #endif endif ! second edge node if (nod(2) <= myDim_nod2d) then -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(nod(2))) #endif do nz=min(ul1,ul2), max(nl1,nl2) @@ -350,7 +354,7 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) end do -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(nod(2))) #endif endif @@ -358,7 +362,7 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) else ! el2 is not a valid element --> ed is a boundary edge, there is only the contribution from el1 ! first edge node if (nod(1) <= myDim_nod2d) then -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(nod(1))) #endif do nz=ul1, nl1 @@ -366,14 +370,14 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) end do ! --> do nz=ul1, nl1 -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(nod(1))) #endif endif ! second edge node if (nod(2) <= myDim_nod2d) then -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(nod(2))) #endif do nz=ul1, nl1 @@ -381,11 +385,16 @@ subroutine momentum_adv_scalar(dynamics, partit, mesh) UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) end do ! --> do nz=ul1, nl1 -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(nod(2))) #endif endif endif ! --> if (el2>0) then + +#if defined(__openmp_reproducible) +!$OMP END ORDERED +#endif + end do ! --> do ed=1, myDim_edge2D !$OMP END DO diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index bdc75e204..a0f88f62f 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -279,19 +279,23 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) update_u(nz)=u1*vi update_v(nz)=v1*vi END DO -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(el(1))) +#else +!$OMP ORDERED #endif U_b(nzmin:nzmax-1, el(1))=U_b(nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1)/elem_area(el(1)) V_b(nzmin:nzmax-1, el(1))=V_b(nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1)/elem_area(el(1)) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(el(1))) call omp_set_lock (partit%plock(el(2))) #endif U_b(nzmin:nzmax-1, el(2))=U_b(nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) V_b(nzmin:nzmax-1, el(2))=V_b(nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1)/elem_area(el(2)) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(el(2))) +#else +!$OMP END ORDERED #endif END DO !$OMP END DO @@ -323,8 +327,7 @@ SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) END DO !$OMP END DO !$OMP MASTER - call exchange_nod(U_c, partit) - call exchange_nod(V_c, partit) + call exchange_nod(U_c, V_c, partit) !$OMP END MASTER !$OMP BARRIER !$OMP DO @@ -397,19 +400,23 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) update_u(nz)=(UV(1,nz,el(1))-UV(1,nz,el(2))) update_v(nz)=(UV(2,nz,el(1))-UV(2,nz,el(2))) END DO -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(el(1))) +#else +!$OMP ORDERED #endif U_c(nzmin:nzmax-1, el(1))=U_c(nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1) V_c(nzmin:nzmax-1, el(1))=V_c(nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(el(1))) call omp_set_lock (partit%plock(el(2))) #endif U_c(nzmin:nzmax-1, el(2))=U_c(nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1) V_c(nzmin:nzmax-1, el(2))=V_c(nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(el(2))) +#else +!$OMP END ORDERED #endif END DO !$OMP END DO @@ -447,19 +454,23 @@ SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) update_u(nz)=(U_c(nz,el(1))-U_c(nz,el(2))) update_v(nz)=(V_c(nz,el(1))-V_c(nz,el(2))) END DO -#if defined(_OPENMP) +#if defined(_OPENMP) && ! defined(__openmp_reproducible) call omp_set_lock(partit%plock(el(1))) +#else +!$OMP ORDERED #endif UV_rhs(1, nzmin:nzmax-1, el(1))=UV_rhs(1, nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1)/elem_area(el(1)) UV_rhs(2, nzmin:nzmax-1, el(1))=UV_rhs(2, nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1)/elem_area(el(1)) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(el(1))) call omp_set_lock (partit%plock(el(2))) #endif UV_rhs(1, nzmin:nzmax-1, el(2))=UV_rhs(1, nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) UV_rhs(2, nzmin:nzmax-1, el(2))=UV_rhs(2, nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1)/elem_area(el(2)) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(el(2))) +#else +!$OMP END ORDERED #endif END DO !$OMP END DO @@ -530,19 +541,23 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) update_u(nz)=u1*vi update_v(nz)=v1*vi END DO -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(el(1))) +#else +!$OMP ORDERED #endif U_c(nzmin:nzmax-1, el(1))=U_c(nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1) V_c(nzmin:nzmax-1, el(1))=V_c(nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(el(1))) call omp_set_lock (partit%plock(el(2))) #endif U_c(nzmin:nzmax-1, el(2))=U_c(nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1) V_c(nzmin:nzmax-1, el(2))=V_c(nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(el(2))) +#else +!$OMP END ORDERED #endif END DO @@ -571,19 +586,26 @@ SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) update_u(nz)=vi*(U_c(nz,el(1))-U_c(nz,el(2))) update_v(nz)=vi*(V_c(nz,el(1))-V_c(nz,el(2))) END DO -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(el(1))) +#else +!$OMP ORDERED #endif UV_rhs(1, nzmin:nzmax-1, el(1))=UV_rhs(1, nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1)/elem_area(el(1)) UV_rhs(2, nzmin:nzmax-1, el(1))=UV_rhs(2, nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1)/elem_area(el(1)) -#if defined(_OPENMP) + +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(el(1))) call omp_set_lock (partit%plock(el(2))) #endif + UV_rhs(1, nzmin:nzmax-1, el(2))=UV_rhs(1, nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) UV_rhs(2, nzmin:nzmax-1, el(2))=UV_rhs(2, nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1)/elem_area(el(2)) -#if defined(_OPENMP) + +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(el(2))) +#else +!$OMP END ORDERED #endif END DO !$OMP END DO diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index 03fdb74e0..2a1270f7f 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -97,26 +97,31 @@ subroutine muscl_adv_init(twork, partit, mesh) ! n1 and n2 are local indices n1=edges(1,n) n2=edges(2,n) + +#if defined(__openmp_reproducible) +!$OMP ORDERED +#endif + ! ... if(n1<=myDim_nod2D) --> because dont use extended nodes if(n1<=myDim_nod2D) then -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(n1)) #endif nn_pos(nn_num(n1)+1,n1)=n2 nn_num(n1)=nn_num(n1)+1 -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(n1)) #endif end if ! ... if(n2<=myDim_nod2D) --> because dont use extended nodes if(n2<=myDim_nod2D) then -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock(partit%plock(n2)) #endif nn_pos(nn_num(n2)+1,n2)=n1 nn_num(n2)=nn_num(n2)+1 -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(n2)) #endif end if @@ -130,19 +135,23 @@ subroutine muscl_adv_init(twork, partit, mesh) ! this edge nodes become boundary edge with increasing depth due to bottom topography ! at the depth twork%nboundary_lay the edge (edgepoints) still has two valid ocean triangles ! below that depth, edge becomes boundary edge -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_set_lock (partit%plock(edges(1,n))) #endif twork%nboundary_lay(edges(1,n))=min(twork%nboundary_lay(edges(1,n)), minval(nlevels(edge_tri(:,n)))-1) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(edges(1,n))) call omp_set_lock (partit%plock(edges(2,n))) #endif twork%nboundary_lay(edges(2,n))=min(twork%nboundary_lay(edges(2,n)), minval(nlevels(edge_tri(:,n)))-1) -#if defined(_OPENMP) +#if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(edges(2,n))) #endif end if + +#if defined(__openmp_reproducible) +!$OMP END ORDERED +#endif end do !$OMP END DO !$OMP END PARALLEL diff --git a/src/solver.F90 b/src/solver.F90 index ada086983..7b1c7d503 100644 --- a/src/solver.F90 +++ b/src/solver.F90 @@ -139,17 +139,18 @@ subroutine ssh_solve_cg(x, rhs, solverinfo, partit, mesh) ! ============== ! Define working tolerance: ! ============== +#if !defined(__openmp_reproducible) s_old=0.0_WP -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_old) -!$OMP DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_old) DO row=1, myDim_nod2D s_old=s_old+rhs(row)*rhs(row) END DO -!$OMP END DO -!$OMP END PARALLEL +!$OMP END PARALLEL DO +#else + s_old = sum(rhs(1:myDim_nod2D) * rhs(1:myDim_nod2D)) +#endif - call MPI_Iallreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, req, MPIerr) - call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) + call MPI_Allreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr) rtol=solverinfo%soltol*sqrt(s_old/real(nod2D,WP)) ! ============== ! Compute r0 @@ -175,16 +176,19 @@ subroutine ssh_solve_cg(x, rhs, solverinfo, partit, mesh) ! =============== ! Scalar product of r*z ! =============== + +#if !defined(__openmp_reproducible) s_old=0.0_WP -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_old) -!$OMP DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_old) DO row=1, myDim_nod2D s_old=s_old+rr(row)*zz(row) END DO -!$OMP END DO -!$OMP END PARALLEL - call MPI_Iallreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, req, MPIerr) - call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) +!$OMP END PARALLEL DO +#else + s_old = sum(rr(1:myDim_nod2D) * zz(1:myDim_nod2D)) +#endif + + call MPI_Allreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr) ! =============== ! Iterations @@ -203,16 +207,18 @@ subroutine ssh_solve_cg(x, rhs, solverinfo, partit, mesh) ! Scalar products for alpha ! ============ +#if !defined(__openmp_reproducible) s_aux=0.0_WP -!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_aux) -!$OMP DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_aux) DO row=1, myDim_nod2D s_aux=s_aux+pp(row)*App(row) END DO -!$OMP END DO -!$OMP END PARALLEL - call MPI_Iallreduce(MPI_IN_PLACE, s_aux, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, req, MPIerr) - call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) +!$OMP END PARALLEL DO +#else + s_aux = sum(pp(1:myDim_nod2D) * App(1:myDim_nod2D)) +#endif + + call MPI_Allreduce(MPI_IN_PLACE, s_aux, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr) al=s_old/s_aux ! =========== ! New X and residual r @@ -236,17 +242,21 @@ subroutine ssh_solve_cg(x, rhs, solverinfo, partit, mesh) ! =========== ! Scalar products for beta ! =========== -sprod(1)=0.0_WP -sprod(2)=0.0_WP +#if !defined(__openmp_reproducible) +sprod(1:2)=0.0_WP !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:sprod) DO row=1, myDim_nod2D sprod(1)=sprod(1)+rr(row)*zz(row) sprod(2)=sprod(2)+rr(row)*rr(row) END DO !$OMP END PARALLEL DO +#else + sprod(1) = sum(rr(1:myDim_nod2D) * zz(1:myDim_nod2D)) + sprod(1) = sum(rr(1:myDim_nod2D) * rr(1:myDim_nod2D)) +#endif - call MPI_Iallreduce(MPI_IN_PLACE, sprod, 2, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, req, MPIerr) - call MPI_Wait(req, MPI_STATUS_IGNORE, MPIerr) + call MPI_Allreduce(MPI_IN_PLACE, sprod, 2, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr) + !$OMP BARRIER ! =========== ! Exit if tolerance is achieved diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index 926e0ecf4..a9c5cfd78 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -101,7 +101,9 @@ subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) loc_wflux =0. loc =0. !_______________________________________________________________________ +#if !defined(__openmp_reproducible) !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n) REDUCTION(+:loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux) +#endif do n=1, myDim_nod2D loc_eta = loc_eta + areasvol(ulevels_nod2D(n), n)*eta_n(n) loc_hbar = loc_hbar + areasvol(ulevels_nod2D(n), n)*hbar(n) @@ -109,7 +111,9 @@ subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) loc_dhbar = loc_dhbar + areasvol(ulevels_nod2D(n), n)*(hbar(n)-hbar_old(n)) loc_wflux = loc_wflux + areasvol(ulevels_nod2D(n), n)*water_flux(n) end do +#if !defined(__openmp_reproducible) !$OMP END PARALLEL DO +#endif !_______________________________________________________________________ call MPI_AllREDUCE(loc_eta , int_eta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) call MPI_AllREDUCE(loc_hbar , int_hbar , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) From f7156a51adc0465b7897c3c851af4e2c44d507de Mon Sep 17 00:00:00 2001 From: Natalja Rakowsky Date: Thu, 9 Jun 2022 15:15:37 +0200 Subject: [PATCH 895/909] Bug fix: variable lval_row was undeclared --- src/gen_support.F90 | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/gen_support.F90 b/src/gen_support.F90 index f03560806..231a671f3 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -353,10 +353,8 @@ subroutine integrate_nod_3D(data, int3D, partit, mesh) integer :: k, row real(kind=WP) :: lval -#if defined(__openmp_reproducible) real(kind=WP) :: lval_row - integer :: k_ul, k_nl -#endif + #include "associate_part_def.h" #include "associate_mesh_def.h" From d557ae104ba597492e89e61619430f22491858d0 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 10 Jun 2022 21:30:40 +0200 Subject: [PATCH 896/909] add new variable for the residual ice flux, is only used when coupled --- src/gen_forcing_init.F90 | 3 +++ src/gen_modules_forcing.F90 | 1 + 2 files changed, 4 insertions(+) diff --git a/src/gen_forcing_init.F90 b/src/gen_forcing_init.F90 index 3f5652c5b..7d2df1954 100755 --- a/src/gen_forcing_init.F90 +++ b/src/gen_forcing_init.F90 @@ -95,6 +95,9 @@ subroutine forcing_array_setup(partit, mesh) flux_correction_north=0.0_WP flux_correction_south=0.0_WP flux_correction_total=0.0_WP + + allocate(residualifwflx(n2)) + residualifwflx = 0.0_WP #endif diff --git a/src/gen_modules_forcing.F90 b/src/gen_modules_forcing.F90 index d71060223..d3f633ea7 100755 --- a/src/gen_modules_forcing.F90 +++ b/src/gen_modules_forcing.F90 @@ -68,6 +68,7 @@ module g_forcing_arrays real(kind=WP), allocatable, dimension(:) :: atm_net_fluxes_north, atm_net_fluxes_south real(kind=WP), allocatable, dimension(:) :: oce_net_fluxes_north, oce_net_fluxes_south real(kind=WP), allocatable, dimension(:) :: flux_correction_north, flux_correction_south, flux_correction_total + real(kind=WP), allocatable, dimension(:) :: residualifwflx #endif real(kind=WP), allocatable, dimension(:) :: runoff_landice From 7685e3931d9def57f826803f313f4d1b28353448 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 10 Jun 2022 21:31:58 +0200 Subject: [PATCH 897/909] add residual ice flux to the freshwater balancing when coupled --- src/ice_oce_coupling.F90 | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index d77daf814..3d46063f8 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -427,12 +427,16 @@ subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) ! 1. water flux ! if (.not. use_virt_salt) can be used! ! we conserve only the fluxes from the database plus evaporation. !$OMP PARALLEL DO - do n=1, myDim_nod2D+eDim_nod2D - flux(n) = evaporation(n)-ice_sublimation(n) & ! the ice2atmos subplimation does not contribute to the freshwater flux into the ocean - +prec_rain(n) & - +prec_snow(n)*(1.0_WP-a_ice_old(n)) & - +runoff(n) - end do + do n=1, myDim_nod2D+eDim_nod2D + flux(n) = evaporation(n) & + -ice_sublimation(n) & ! the ice2atmos subplimation does not contribute to the freshwater flux into the ocean + +prec_rain(n) & + +prec_snow(n)*(1.0_WP-a_ice_old(n)) & +#if defined (__oifs) + +residualifwflx(n) & ! balance residual ice flux only in coupled case +#endif + +runoff(n) + end do !$OMP END PARALLEL DO ! --> In case of zlevel and zstar and levitating sea ice, sea ice is just sitting ! on top of the ocean without displacement of water, there the thermodynamic From c209e2395b9d838bee391fbe11888740ddc50a3b Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 10 Jun 2022 21:37:09 +0200 Subject: [PATCH 898/909] 1) scale freshwater fluxes that are used for balancing with rhofwt/rhowat since this is the way how they contribute to the total freshwater flux, otherwise there are always 2.5% missing to balance the freshwater flux out. 2) define a_ice_old, since this is the sea ice concentration that is used for balancing snow. 3) define residual freshwater flux (resid) that needs to be added to the freshwater balancing. --- src/ice_thermo_cpl.F90 | 68 +++++++++++++++++++++++++++++------------- 1 file changed, 48 insertions(+), 20 deletions(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index c3d40c775..a9206ceab 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -37,6 +37,8 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP) :: a2ohf, a2ihf !---- evaporation and sublimation (provided by ECHAM) real(kind=WP) :: evap, subli + !---- add residual freshwater flux over ice to freshwater (setted in ice_growth) + real(kind=WP) :: resid !---- precipitation and runoff (provided by ECHAM) real(kind=WP) :: rain, snow, runo !---- ocean variables (provided by FESOM) @@ -61,6 +63,7 @@ subroutine thermodynamics(ice, partit, mesh) real(kind=WP), dimension(:) , pointer :: u_ice, v_ice real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn + real(kind=WP), dimension(:) , pointer :: a_ice_old, m_ice_old, m_snow_old, thdgr_old real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux #if defined (__oifs) || defined (__ifsinterface) @@ -81,6 +84,10 @@ subroutine thermodynamics(ice, partit, mesh) m_snow => ice%data(3)%values(:) thdgr => ice%thermo%thdgr(:) thdgrsn => ice%thermo%thdgrsn(:) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) + thdgr_old => ice%thermo%thdgr_old T_oc_array => ice%srfoce_temp(:) S_oc_array => ice%srfoce_salt(:) u_w => ice%srfoce_u(:) @@ -111,15 +118,10 @@ subroutine thermodynamics(ice, partit, mesh) !_____________________________________________________________________________ rsss = ref_sss - !---- total evaporation (needed in oce_salt_balance.F90) - evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice - !---- loop over all surface node - do inod=1,myDim_nod2d+eDim_nod2d + do inod=1,myDim_nod2d+eDim_nod2D -#ifdef use_cavity if (ulevels_nod2D(inod) > 1) cycle -#endif A = a_ice(inod) h = m_ice(inod) @@ -166,18 +168,34 @@ subroutine thermodynamics(ice, partit, mesh) #endif call ice_growth - a_ice(inod) = A - m_ice(inod) = h - m_snow(inod) = hsn - net_heat_flux(inod) = ehf - fresh_wa_flux(inod) = fw + !__________________________________________________________________________ + ! save old ice variables + m_ice_old(inod) = m_ice(inod) + m_snow_old(inod) = m_snow(inod) + a_ice_old(inod) = a_ice(inod) + thdgr_old(inod) = thdgr(inod) + + !__________________________________________________________________________ + ! save new ice variables + a_ice(inod) = A + m_ice(inod) = h + m_snow(inod) = hsn + net_heat_flux(inod) = ehf + fresh_wa_flux(inod) = fw if (.not. use_virt_salt) then real_salt_flux(inod)= rsf end if - thdgr(inod) = dhgrowth - thdgrsn(inod) = dhsngrowth - flice(inod) = dhflice - + thdgr(inod) = dhgrowth + thdgrsn(inod) = dhsngrowth + flice(inod) = dhflice + + !---- total evaporation (needed in oce_salt_balance.F90) = evap+subli + evaporation(inod) = evap + subli + ice_sublimation(inod)= subli + prec_rain(inod) = rain + prec_snow(inod) = snow + runoff(inod) = runo + residualifwflx(inod) = resid enddo return @@ -273,8 +291,10 @@ subroutine ice_growth !---- NOTE: evaporation and sublimation represent potential fluxes and !---- must be area-weighted (like the heat fluxes); in contrast, !---- precipitation (snow and rain) and runoff are effective fluxes - PmEice = A*snow + A*subli - PmEocn = rain + runo + (1._WP-A)*snow + (1._WP-A)*evap + subli = A*subli + evap = (1._WP-A)*evap + PmEice = A*snow + subli + PmEocn = evap + rain + (1._WP-A)*snow + runo !---- convert freshwater fluxes [m/s] into growth per time step dt [m] PmEice = PmEice*dt @@ -301,7 +321,8 @@ subroutine ice_growth else PmEice = 0._WP endif - + resid = PmEice/dt + !---- add residual freshwater flux over ice to freshwater flux over ocean PmEocn = PmEocn + PmEice PmEice = 0._WP @@ -416,7 +437,7 @@ subroutine ice_growth !---- total freshwater mass flux into the ocean [kg/m**2/s] if (.not. use_virt_salt) then - fw = PmEocn*rhofwt - dhgrowth*rhoice - dhsngrowth*rhosno + fw = PmEocn*rhofwt - dhgrowth*rhoice - dhsngrowth*rhosno rsf = -dhgrowth*rhoice*Sice/rhowat else fw = PmEocn*rhofwt - dhgrowth*rhoice*(rsss-Sice)/rsss - dhsngrowth*rhosno @@ -453,7 +474,14 @@ subroutine ice_growth end if !---- convert freshwater mass flux [kg/m**2/s] into sea-water volume flux [m/s] - fw = fw/rhowat + fw = fw/rhowat + evap = evap *rhofwt/rhowat + rain = rain *rhofwt/rhowat + snow = snow *rhofwt/rhowat + runo = runo *rhofwt/rhowat + subli= subli*rhofwt/rhowat + resid= resid*rhofwt/rhowat + return end subroutine ice_growth From 48987dbcc8ade2a5f290e29c23768c1ed228e4e7 Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Mon, 13 Jun 2022 09:50:31 +0200 Subject: [PATCH 899/909] remove partit here, as we extract on the level above --- src/gen_modules_partitioning.F90 | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index defbb3d46..dc727700c 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -94,7 +94,7 @@ subroutine par_ex(COMM, mype, abort) ! finalizes MPI #ifndef __oasis if (present(abort)) then - if (partit%mype==0) write(*,*) 'Run finished unexpectedly!' + if (mype==0) write(*,*) 'Run finished unexpectedly!' call MPI_ABORT(COMM, 1 ) else call MPI_Barrier(COMM, error) @@ -102,17 +102,17 @@ subroutine par_ex(COMM, mype, abort) ! finalizes MPI endif #else if (.not. present(abort)) then - if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' + if (mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' call MPI_Barrier(MPI_COMM_WORLD, error) end if call prism_terminate_proto(error) - if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' + if (mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' call MPI_Barrier(MPI_COMM_WORLD, error) - if (partit%mype==0) print *, 'FESOM calls MPI_Finalize' + if (mype==0) print *, 'FESOM calls MPI_Finalize' call MPI_Finalize(error) #endif - if (partit%mype==0) print *, 'fesom should stop with exit status = 0' + if (mype==0) print *, 'fesom should stop with exit status = 0' #endif #if defined (__oifs) !OIFS coupling doesnt call prism_terminate_proto and uses COMM instead of MPI_COMM_WORLD From d959e0a228530f3fed2967a62edbfeb3027893e4 Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Mon, 13 Jun 2022 09:55:58 +0200 Subject: [PATCH 900/909] define residualifwflx --- src/ice_thermo_cpl.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index a9206ceab..63c4c83a8 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -138,6 +138,7 @@ subroutine thermodynamics(ice, partit, mesh) rain = prec_rain(inod) snow = prec_snow(inod) runo = runoff(inod) + residualifwflx = runoff(inod) ustar = sqrt(ice%cd_oce_ice)*sqrt((u_ice(inod)-u_w(inod))**2+(v_ice(inod)-v_w(inod))**2) T_oc = T_oc_array(inod) From c8073a74921ba13335fc0a976d66597e5bbda123 Mon Sep 17 00:00:00 2001 From: JanStreffing Date: Mon, 13 Jun 2022 09:58:51 +0200 Subject: [PATCH 901/909] define residualifwflx for real.. --- src/ice_thermo_cpl.F90 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 63c4c83a8..5ba3dcfdd 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -40,7 +40,7 @@ subroutine thermodynamics(ice, partit, mesh) !---- add residual freshwater flux over ice to freshwater (setted in ice_growth) real(kind=WP) :: resid !---- precipitation and runoff (provided by ECHAM) - real(kind=WP) :: rain, snow, runo + real(kind=WP) :: rain, snow, runo, residualifwflx !---- ocean variables (provided by FESOM) real(kind=WP) :: T_oc, S_oc, ustar !---- local variables (set in this subroutine) @@ -138,7 +138,6 @@ subroutine thermodynamics(ice, partit, mesh) rain = prec_rain(inod) snow = prec_snow(inod) runo = runoff(inod) - residualifwflx = runoff(inod) ustar = sqrt(ice%cd_oce_ice)*sqrt((u_ice(inod)-u_w(inod))**2+(v_ice(inod)-v_w(inod))**2) T_oc = T_oc_array(inod) From e8053df747875213b3afea037940649da9decd7d Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 13 Jun 2022 11:19:24 +0200 Subject: [PATCH 902/909] fix gfortran compilation error --- src/ice_thermo_cpl.F90 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 5ba3dcfdd..716274cb9 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -40,7 +40,7 @@ subroutine thermodynamics(ice, partit, mesh) !---- add residual freshwater flux over ice to freshwater (setted in ice_growth) real(kind=WP) :: resid !---- precipitation and runoff (provided by ECHAM) - real(kind=WP) :: rain, snow, runo, residualifwflx + real(kind=WP) :: rain, snow, runo !---- ocean variables (provided by FESOM) real(kind=WP) :: T_oc, S_oc, ustar !---- local variables (set in this subroutine) @@ -194,8 +194,10 @@ subroutine thermodynamics(ice, partit, mesh) ice_sublimation(inod)= subli prec_rain(inod) = rain prec_snow(inod) = snow - runoff(inod) = runo + runoff(inod) = runo +#if defined (__oifs) residualifwflx(inod) = resid +#endif enddo return From bc07dc18b967d6c528535fa4736bf2a0f3f0266c Mon Sep 17 00:00:00 2001 From: Jan Streffing Date: Mon, 13 Jun 2022 11:52:56 +0200 Subject: [PATCH 903/909] Restore default namelist.ice --- config/namelist.ice | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/namelist.ice b/config/namelist.ice index 6c08cdf2d..bcd86f145 100644 --- a/config/namelist.ice +++ b/config/namelist.ice @@ -23,8 +23,8 @@ emiss_ice=0.97 ! Emissivity of Snow/Ice, emiss_wat=0.97 ! Emissivity of open water albsn=0.81 ! Albedo: frozen snow albsnm=0.77 ! melting snow -albi=0.6 ! frozen ice -albim=0.43 ! melting ice +albi=0.7 ! frozen ice +albim=0.68 ! melting ice albw=0.1 ! open water con=2.1656 ! Thermal conductivities: ice; W/m/K consn=0.31 ! snow From 447d68280ada1c748838c75e7d4bc7127d770116 Mon Sep 17 00:00:00 2001 From: Jan Streffing Date: Mon, 13 Jun 2022 11:53:30 +0200 Subject: [PATCH 904/909] restoring default namelist.oce --- config/namelist.oce | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/config/namelist.oce b/config/namelist.oce index 555190b36..7af6867f7 100644 --- a/config/namelist.oce +++ b/config/namelist.oce @@ -6,17 +6,17 @@ A_ver= 1.e-4 ! Vertical viscosity, m^2/s scale_area=5.8e9 ! Visc. and diffus. are for an element with scale_area SPP=.false. ! Salt Plume Parameterization Fer_GM=.true. ! to swith on/off GM after Ferrari et al. 2010 -K_GM_max = 3000.0 ! max. GM thickness diffusivity (m2/s) +K_GM_max = 2000.0 ! max. GM thickness diffusivity (m2/s) K_GM_min = 2.0 ! max. GM thickness diffusivity (m2/s) K_GM_bvref = 2 ! def of bvref in ferreira scaling 0=srf,1=bot mld,2=mean over mld,3=weighted mean over mld K_GM_rampmax = -1.0 ! Resol >K_GM_rampmax[km] GM on K_GM_rampmin = -1.0 ! Resol Date: Mon, 13 Jun 2022 11:54:02 +0200 Subject: [PATCH 905/909] restoring default namelist.io --- config/namelist.io | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/namelist.io b/config/namelist.io index 5635d8686..4ad3a8b96 100644 --- a/config/namelist.io +++ b/config/namelist.io @@ -32,8 +32,8 @@ io_list = 'sst ',1, 'm', 4, 'MLD2 ',1, 'm', 4, 'tx_sur ',1, 'm', 4, 'ty_sur ',1, 'm', 4, - 'temp ',1, 'm', 4, - 'salt ',1, 'm', 8, + 'temp ',1, 'y', 4, + 'salt ',1, 'y', 8, 'N2 ',1, 'y', 4, 'Kv ',1, 'y', 4, 'u ',1, 'y', 4, From 178f5e6322d94c32f42c4b21fe92330832f9e02d Mon Sep 17 00:00:00 2001 From: Jan Streffing Date: Mon, 13 Jun 2022 11:54:50 +0200 Subject: [PATCH 906/909] restoring default namelist.tra --- config/namelist.tra | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/config/namelist.tra b/config/namelist.tra index 7031735c4..9108013b7 100644 --- a/config/namelist.tra +++ b/config/namelist.tra @@ -4,8 +4,8 @@ num_tracers=100 !number of tracers to allocate. shallbe large or equal to the nu &tracer_list nml_tracer_list = -1 , 'MFCT', 'QR4C', 'FCT ', 0., 1., -2 , 'MFCT', 'QR4C', 'FCT ', 0., 1., +1 , 'MFCT', 'QR4C', 'FCT ', 1., 1., +2 , 'MFCT', 'QR4C', 'FCT ', 1., 1., !101, 'UPW1', 'UPW1', 'NON ', 0., 0. / @@ -41,7 +41,7 @@ double_diffusion=.false. ! for KPP,dd switch K_ver=1.0e-5 K_hor=3000. surf_relax_T=0.0 -surf_relax_S=0.0 ! 50m/300days 6.43e-07! m/s 10./(180.*86400.) +surf_relax_S=1.929e-06 ! 50m/300days 6.43e-07! m/s 10./(180.*86400.) balance_salt_water =.true. ! balance virtual-salt or freshwater flux or not clim_relax=0.0 ! 1/s, geometrical information has to be supplied ref_sss_local=.true. From b05cbd2dd42d1fe579fd1b102f786121229c8106 Mon Sep 17 00:00:00 2001 From: Jan Streffing Date: Mon, 13 Jun 2022 13:32:14 +0200 Subject: [PATCH 907/909] remove obsolete variables --- src/ice_thermo_cpl.F90 | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 716274cb9..9cc3e4e45 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -557,8 +557,6 @@ subroutine ice_albedo(ithermp, h, hsn, t, alb) real(kind=WP) :: t real(kind=WP) :: alb real(kind=WP) :: geolat - real(kind=WP) :: melt_pool_alb_reduction - real(kind=WP) :: nh_winter_reduction real(kind=WP), pointer :: albsn, albi, albsnm, albim albsn => ice%thermo%albsn albi => ice%thermo%albi From d7ef47c655283e5d155ffb0953b23a43262a0ed7 Mon Sep 17 00:00:00 2001 From: Dmitri Sidorenko Date: Wed, 15 Jun 2022 11:10:39 +0200 Subject: [PATCH 908/909] =?UTF-8?q?fixed=20depth=20sign=20in=20the=20param?= =?UTF-8?q?eterisation=20of=20Langmuir=20circulations=20(LC)=20in=20TKE.?= =?UTF-8?q?=20It=20works!=20The=20north=20corner=20bias=20in=20the=20NA=20?= =?UTF-8?q?is=20reduced=20by=201=C2=B0.=20Aso=20the=20SO=20warm=20bias=20d?= =?UTF-8?q?eeper=20than=20~400=C2=B0=20ischanged=20to=20a=20cold=20bias.?= =?UTF-8?q?=20Might=20be=20something=20for=20AWICM3=20to=20test.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gen_modules_cvmix_tke.F90 | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index 4d64768aa..2078cd88f 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -389,10 +389,10 @@ subroutine calc_cvmix_tke(dynamics, partit, mesh) langmuir_hlc(node) = 0.0_wp do nz=nun+1,nln !!PS k_hlc = nz - aux = sum( bvfreq2(2:nz+1)*zbar_3d_n(2:nz+1,node) ) + aux = sum(-bvfreq2(2:nz+1)*zbar_3d_n(2:nz+1,node) ) if(aux > 0.5_wp*langmuir_ustoke(node)**2.0_wp) then !!PS k_hlc = nz - langmuir_hlc(node) = zbar_3d_n(nz,node) + langmuir_hlc(node) = -zbar_3d_n(nz,node) exit end if end do @@ -403,9 +403,9 @@ subroutine calc_cvmix_tke(dynamics, partit, mesh) ! Axell (2002); results in deeper MLDs and better spatial MLD pattern. langmuir_wlc(:,node) = 0.0_wp do nz=nun+1,nln - if(zbar_3d_n(nz,node) <= langmuir_hlc(node)) then + if(-zbar_3d_n(nz,node) <= langmuir_hlc(node)) then langmuir_wlc(nz,node) = tke_clangmuir * langmuir_ustoke(node) * & - sin(pi*zbar_3d_n(nz,node)/langmuir_hlc(node)) + sin(-pi*zbar_3d_n(nz,node)/langmuir_hlc(node)) !!PS else !!PS langmuir_wlc(nz,node) = 0.0_wp endif @@ -497,4 +497,4 @@ subroutine calc_cvmix_tke(dynamics, partit, mesh) end do end do end subroutine calc_cvmix_tke -end module g_cvmix_tke \ No newline at end of file +end module g_cvmix_tke From 8077fabcb52dd0091ec0f7b7ffb0864dc73cfb16 Mon Sep 17 00:00:00 2001 From: Jan Hegewald Date: Wed, 6 Jul 2022 10:20:41 +0200 Subject: [PATCH 909/909] change levante environment to explicitly increase stack size and use MPI and networking settings recommended by DKRZ --- env/levante.dkrz.de/shell | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/env/levante.dkrz.de/shell b/env/levante.dkrz.de/shell index 60b4e3633..c303251dc 100755 --- a/env/levante.dkrz.de/shell +++ b/env/levante.dkrz.de/shell @@ -11,3 +11,23 @@ export LD_LIBRARY_PATH=/sw/spack-levante/intel-oneapi-mkl-2022.0.1-ttdktf/mkl/20 module load netcdf-c/4.8.1-openmpi-4.1.2-intel-2021.5.0 module load netcdf-fortran/4.5.3-openmpi-4.1.2-intel-2021.5.0 module load git # to be able to determine the fesom git SHA when compiling + +ulimit -s unlimited # without setting the stack size we get a segfault from the levante netcdf library at runtime +ulimit -c 0 # do not create a coredump after a crash + +# environment for Open MPI 4.0.0 and later from https://docs.dkrz.de/doc/levante/running-jobs/runtime-settings.html +export OMPI_MCA_pml="ucx" +export OMPI_MCA_btl=self +export OMPI_MCA_osc="pt2pt" +export UCX_IB_ADDR_TYPE=ib_global +# for most runs one may or may not want to disable HCOLL +export OMPI_MCA_coll="^ml,hcoll" +export OMPI_MCA_coll_hcoll_enable="0" +export HCOLL_ENABLE_MCAST_ALL="0" +export HCOLL_MAIN_IB=mlx5_0:1 +export UCX_NET_DEVICES=mlx5_0:1 +export UCX_TLS=mm,knem,cma,dc_mlx5,dc_x,self +export UCX_UNIFIED_MODE=y +export HDF5_USE_FILE_LOCKING=FALSE +export OMPI_MCA_io="romio321" +export UCX_HANDLE_ERRORS=bt

=W-CU|(t;Q4JP z3x2;RO=&tgp)9@@`+tZCx7(fP4&NRLEXoQCdQ0Y0)=E=@8>@Qcfe`^^JI}l;9f>-( z{eketO0uzyoAZ@ytj$F}k~{(@14%-$7l;krArlia(a%?4(RCK|Wd}SFFOP~{j5sQw zwg1|keD+Y8q`0_zr=HU4L&c`ywxd3#GNI8B6h(@Qr(`@gG|YIu)fwWI(Wbce1X6~g zb*hJ1^Un^nhl_SF=Zq7Fk%_%|hEL1TS>~xEV-Np^#XHP71OQBlh z)v|_0J#B4ms;GxFLx)&CD*XbYj@ctBdFcOdrmP1Zh6(H(X2wkD_ZFUh2h!bG1+l?^F}qu*elz}uZhOk8YU9A{ZDC5`7dY#( z`n%+(fvye@r{6mx{~qsV#r90D0-gt8)Yih5DEsMOOZ9)3PA;|z=6XOAhgkVfGNonW ztr3Erke=H(G}QjfZVlZQZ1i9kC9nyAP9UWIQ=jn20!@=YWa!$jlrUd74okmSOL?9@ zLRwk?d3#f&efEenhD`L^jsHe31;HYtam+*3ZsTxAJY`Fw&_p1HuHcvflif4mr?SZ` z3$6%h85v+&HedPzH3QeUiq%4Epsv)r_5KT^56fjcg&}LICAcY@&CNRK7f2i{(%Ji} zPGljLbfSpVIwGC|YAjLzj#_qrGrBrZ1yPTmQ7#@1F}kHRnqbC z<5r8{%@-fAH~{Cj8)Ve)u|gTh*33QgnbvwKXt$1!VB1I9XMB;=He*hCZm7MFaI-y* zu#}UKAQK%HZjy3NZLf)9N#D~yTAm;a_2x~!riqLUdMx8|SZyGr_$|WbDku^*g_|W{ zvo&BPpnX$N(YliswD)y+tFhQD2}YY`B*}jkjaDYz*v(eK=i)^i^K$7w&Px%tl?lZ5 z8m(!%QhwC6aSI!$r4XMzy0UGp(A~4-V~;=|Jpv!C-HNtdITP>_!&cZ}+ig_SS{!?( zatqV6`UBtOP&#`aO@ik~Ec>9<(&doUsE-n>*o@r@GE5e_e@h^!txGznd8y}-9y?8q zF<|*|=ma7xKsBceAGp5av=)TW*0ox|-k>27d=0o7j1ip{!jcYD6_%9$RVF%>Fd-3K z&;o)Qe0@%NkCkjX*39i|b!nCvAUA_3Q8DPW^R8A`^B~@q@w+Dl#M3vi#TKRD>p>hA zhfpViI%LOW4Xq&n2^FndTV8DQFZe`U!BRH3w;76#0f-LhB6u%T))O?6qkXiMBS6?Jj*3Aw8es0_&rb#Hz9*dWIzebe&dk|u zdSt1rdO>Bq`(erSiDJ~oBSEYws*_FHJxM+TSSAF~3k{{`nDYh%xdrXkEmmukmVYrr ziDpDCQi@EAb-fOOU%Kwi#I80m*!GEtKiv!B9HZ>wc4pv7#9^?NnWg1m3;Kj@^Hwv$1N($OW{1Bk>OAD==uwDlvXx!K;R2lNS&T?jf5L6$fSI^`98*h~pL z$JI;{u4aH1qnr8mcBrAOwwpO;mcn8iJT=9lW6_U2NwS@KcNhFIQ#_~>TF2F&+032gR58~TUB1{#x?0QC_}UQvdF%`SpD^982!=8w>Mb` z@7_G*@ABe4(f2QdW%%CZ#m3@|=f}Qg)zO?H9c{<1!x8Kw z;GeOw#apJ_Pbby3Q|)?t-Y?#B1V_hb(=j@RTCKYPy%!g=AViPEYzsB|c%w-*cXp4j ztkKP_Ls7!eJ4rDMd^R_L1oHs&`+gSt5W+xQ?0FOPkA$^GZUkD5tP5?8aP`>zQsF3* z+j~bkuWCV(oc-yfGBKzZW%ZZO|1H$i_iz6$sejq+ubU{uwpXvAi%=!LF~mYE>v9aj zVC#TTfsXz0YnyL>-8uxm;N-Z5tyj&R-sUT-#lCf6ltu>`8q!+;pL$u}H?w6WtD!|xCaqpz1^T3m+Pl4|4HBRG6uY$r){R4K{4muURKuZVFhLFRJ)(L=3qxe!2K*)q zPa|%P7HUFa+UUvl_h$9GYrxH`+;3`)srb2IZM{Q{etJs1JLJi>;1h|-~Mk^+i_HV@V`~+9IK@N*-NU>Ck$HwT@5FP-ldLJ zY|3%e3;G}GHDv9j^E9STK;2B>)CMX2z4~+xu}6Mz%3fyvUg<@qVoERQ7H+*I`D$LX zbED$aIV;p)+^tQlG^-B3G2CvHM$7Ll3%tXWE`T_cjYY4fZv)o4)J;FC_5Nu@mu>G~ zYf>+Km{6gT;Edy%ZREmEO_*NQUS=Y!_b`QMO0+HjX2en>Hx0nf_-Hk$k-!uN9d!XX zWWcLA3_pqK>H+kl8QWs$KpnWE=d3)>wp>88@QvZnvZ~#Zhf~XX%5czc_`g+cz)|(^ z|5h~#$8dF1xXa1aW#2Gu@2g^?#{t_AoH!;hVh?281HWJdIA9bq=Rb*kl<;3Z{*5x= zXG?bA0kRN>hCA;-#-4l3oe_)vp(hsxb;MhZ+A9XRd4l9fbd1;UKbwBVUYY^$O-kLS z9t&A;SkgEa-U;0Szu$2f*~RJ|C9!We)96cTQcuqP6Ph$TF+l!8?ln(`Q}gtd1Ll=C zlZymfY%A$AY(G$i0*@+YBwZ@3wkajNheswGP`mF@k}Gr*2MgEr(LNF$UT7=R$6Hb} z|E9K8|G7Zbzak=vCuviJOM^{I`iACjxjVx2P1{U^870dfv~lRFtz869&R@}>3mz5A z?xsFG(YhZ7e~1%Et`6XkRA)6b6g}6>B^`57{hFo@@N1t%=`CB&I<9_P573(3|BvT< zw^3*$Ep1%S(x8O>?`gxic~0s=p2yv0q$?BUzeafpT4Kfs8iw*88U+pUcX4{FOwx8u zgJ&@dWBAr_=0wfMVRRE$&^qjAz87-!2qtqAD5}X4B_D(xy*#S=%`Eb3N4Cw?FA_8* zUqY9hok>=TS7xi15-iLUdAfP*qxLnWONgXO5&z}C8(v;|rZKNPe^p(nOr9q4YdhE) z_YJ2}w5h>iP^Wg3-liL32H)7l(ZANrRG;8y14e$$ZZvR7Ye)$b^D-%BU7QheOf>Tn zxHyp`MD{jcf2vkjI?j%vpz8W!Z*`H9LtY9%xUSm*=b>Y-WM|NaHXKn*xWpiG$FO!C zvnTC)C*T`7+V(w>e9U!LIy%wLY<-}gRDR9Sa^}EC?s-8uLEFkMnc6pM06f^d#CQ)M ziofs|H~`FQ+K$p?pBYN9bcF$3Ggbu1S-E#ROa9iSlGDUHPe<`b&yeN30uit;4x9_4z=2fNcg&zo z6PGlIuB0+Nu9Y{{*=6~@XVn}2x)ey_jx1;|^7IfjyX$ILp`S)k=Z=-A${5s|QCq>;X}9g?lp_u8rM z6;$YVa!IclgSR0+<~llji5GdP%;9ddDG}2X@Z4m4zBZcDXLF?du=Nt>2npD3Z@(qz+x!AX5C^jA z`GhewU63I7)z2Y9Nj4JvWq{3zmrXgc^h_rDP*|((-1VgrLr2fQd|3jnnwETQqn0&H z&uV0xN3|BIjsQpeaZmUbU$@sKn%&(*10Zk!U=CRbh9uJZvA%U9Aui*7=Irbl7Ci3b>7UzZJsG}P=k3knD%+|sVD z7P5H0(b)YezWUEUWskqc*Jn9~&Rjk4=Lucd^AjY%Ht~?}1VBOusB`R5nS@Oy9(}kM zws8gSSO|{OkowZIv5^uBJ63Q>`;#&uIqE7!mgC}AUuvYcExVdFG{?7#RpF^4R!K-3 zU&>YAc6OOCy#sWq5Lc=Ket)k`f}c=9`v*8Hsz*lv@<6~67sb#i$XJ@rsMX$)3GfSEiNn#>=u?R)29xe|Q=N#3aJ@8vV(6K5CZLBJa1)+>!HH1%t`X zab(=RY7XTfebMyfu+ma&ieduQ%&MKgnDT~buF69Bt#U&U{|b%(ItrI+tv9Z0r`|V- zh;QEObD}JEl&*2)X~E0DvqM)C2c5wui*<5Q`T9uA87@Y$LW7p-Gxql#z>hQ_IzV}_Qp#ULgyZ*k&u=uiow5lvH8Ie17`hHyNH7<>v z{1PtjJN#$Is44teu`a7!e$IJ=qZdAHqE>Js_1?H!R9x1AN=EnDiNMw98L5hL+eGBQ zKL*m-&SVL_f4Z$A8>33SD&vq%+>5=5vzIy;9ezIu4@<1M3!Vg|VWjqAkDJoKArSdl z=#-uiKZqx$OH9Jzkd=S=sQ3Z|C$9fWyY*v5<v|oYvcGAl#!~RFX@;sOdoy3^xDU?pXQw4iI4Iv)AurC~+Y) zxWJC-z3=jw^X%QuDjQXM#%>YGv0FFojY?c<^Z6s9pGG9+yI7aV=}c)mH9xD9d99Jt z_ipG19?f|JUP6zHQoOI1z4-rlL`6ujLsASa>`C^tj17Tw6g~^(o?0 zQ?~Rd8@HB^myy-X)g^vq4Iz0u7fAefk5f@7LOg~g8BX! z&#dmS9D#=es%Y$_tD>R|rfxWNOhT^wm}%Tis=T*F*NCm&bQ<}G*{t#3u|El_g?4 z1a5lzYn`@QTvqv4;p0yp>pV6MlF`#C5*iBJ_+_56ClwN@{TUT6U+*F?4&EP1t=;q1rR^?Zx>uFkq4NAKxw7N*_kXq=|&=kot`#H{7C~J`8`h50|uY(Sg;3%w8xPy5seBB(rf>LUff0Nk9C+IO5~_rRYi1E(dEL6%>WM;!j6%4YwFIBPjEGARXJ=0Gc7Vac z#Bhx@5nJaFG&L6CW(n^iblsg3b;su~Edksf=`5xTx{LDMiOYIiQ9w{3ewH_xNAA19 zQv&x#0!B}3(>>r9zeluphRHA$hl3Z@IRh|_V*#RZm)JNpRA*J!MFKFj?gu>@AMtbt zgu@Zc%%C&*1pF`l|3YKSXv$6=E4|y-FST<1xvsJtxUKB@9)Hbx8e7k}3J~_)9ssb- zpNHlQV<4|UV!NBUPu%sQ8>7mYQC^^{M4%Tz-G#UXr6<4!8$>}FHO$g;3EMQnXCNj0 z0JnH+C-|2ySQfy?w>yp)mVx4v8r#`|hs8IOR2XVKH1=od56ndg{sRvMrfS%ov^Ar~ z4yq6vXNL6K4{PK*u$)+D{>x|F^#f2NZ-@e;zrhOGO)xR|#qbrkiUgJ0n%-^q!qjg| zk7@7oc8iT$a4mn>Fb3X$fwD${9((u{LX5qBRu}p-LcIs9_|cyWpC^}IQ0NIa zuDr0AI@RR>3}hfKTdemT+u0l)tgb4lwc!89d(oLws-Z0KKkf!fBn{(jHov0^T|!Z)<1y53T>_#c2Ls@K%qBl$n<+3eWhCR!1dGF#$fe2~wC{k&+c_F~WS-9X`6tTj})LZM@! zv$K=eTcG~%5}pZrBbRbx4`Z;--g8$m7S==F9jR$bGZh^Tnp8Yi!1Og@Kip{2uKp2K zoOgNGdV#snUD+4{t3e%NXXA;N=J1DzlW5=6`WK0W+I+Who_b7Azp-Wjg$_tzzeVn7WF*4UUgBWDCI0IOo=t!ax-tL)n zc#6#vY4ShTmt{SUIoi#yGKgD^;~%U^tCE&hj3&rudz74g<#B;1TVh~@XK%*8Sx@8v zIVr>O%xX8^&eS0qNt!3gKg+1N3}3GHW~}SCai5uN^mdy@rb!6?GU@%-c+Z_M3ndVO zBf+8d+8>=36E+m4hnQ;`V{T6}TaZd^527chr5?)(d@U9%iQsOZ1M1LMCK@>s{JLPE z&ztf+WhoG8Avuf@A()a4fAs@7>Am$rQ`^c{z!QpWwa(ce*LobIIp6(wQt*5Jux8yX zb821AGg0=;CyDhwZ;jWYdzIpP%hD`hXYMY(xEjaqu(}Z^E}!_X9dUStZNR3ObjV4{ z>9ez{rIb5^zcc%s--o^p{kQE6$Vm+(x7k-eLQaQ4>L+JZZS7|6mu%nW1cQ(5nhhb@ zzy^J_uwdSW7RYWUa$#s4tl6)(Di(18j8)wNs3iHgDBRz+o#lN-6W>O@;7uOSYv0HW z=2|r?Vwm(UO0P5ZA|}tWmzKvC&Umih_D>j85#RGiZR!#OjjJ@#0dR zMOZyhw)-vm-S}#}CTd!S;Ou$Edw7i343b<5fYCt|=`38LG!loMKEKNRn%es9qdzT{ zwnR_A_p#@TzkV_J?o)wXDzEOnKb`{pSH2aUyr$6P=VH|#Y-dqhz_;d*mN#~sLDZjr zS@xd$c}tBCcmaj1hv(7jJZ6D`*a9&f!FP{sNjrhmo|aiRlbYn^R4wxwpozt+p_m}s zx+5FW4E~K?6L(|GZk9y7l8B_<{y0hYW!OmdLYy+NYlQE`PRkl{AATyF2VWWM5NxV? zN53vm#;aBtefxCua!j(_s<&vURU5+YH~xt#?$Q(PbDzQhrG&D2ObVxL=VDxMDTq@W ze4vp0aDQ7^;S@!?jI0=#&3)bpjw?1B>#J`JfTw{|0GpPc^*8%KHb?{s5f-EN*dUzW zbiOxBXTpmr_$3Y-Y9(J%`}E5#x2D@;O|moOUto4x8}U@wRc5Ou{zoaL4+9c4)=TqP zrD!*N#?IN&Nmvj|bk#0|#9r(DP2LvGtUo|`t0jg?OPH5cdneNLw#Bx(uEJ!9S~2rF)vnIKp(ye|81pz-Zq7Q*;jIAdR982E5L73 zLXHfyiz>0xJXkvmqFztcrs=*F3ZL5lTi5R4=hRm-{igvq)Ov+6y}2Gaa}sHN2J-h1 zigbAKhQ(B1#RH)+E(*Jn(wzV6o9RabvcCM+fWY9&HhKTIM3n8o6WsTw=Nk;`^c!GM z$~DhA)RDv5zejZW?fca5VLfkvC+ptuBH3I^T#p3qI7UI;Ir;RgmE5)c(!q^I za|ka3EBSk008MqtwrkqOY-3O4InA#ut1B@Ks@4lU442r5RV1~UyX;c&<_1T2#g6;D zGJ#Ej5Cd@4(YN8DTwlQM+o-ID;z%H3z->YzhAP5k@1dJ1f7!L4*mTMLGa7ZXkIGQTW6Uo6goyq*m zH@i=wGtIe+!b3_j=fQlYlUZdI__@32MalVqv)KOZS3iO)Om zFaQ2SwARg9PPUB3KUapZ$q>3-e0cp56%a8YeGF%oJU;PvZMAz?=v-F(fr8@})uSkj zn6-&BqT-H=(=jNg93AT~kcyQKiHch+mWmX`|Pwso0~Fx&ASCidSzo@zhm} z%^@g*?dD=at$s*HO0n z3AZ+^gG|k6e_heOcRRU`l%apIL1L-KPyBp78W3t7V~z&8zEiXg);b?VRQv=-o&z0g z?|Is;L$M#G{9gJh>bm>kkakt)KTx@rW{yXo%KhvbEyckN3Ej^Qr0lGTr!9E1?4lLF zkOm3dqz|!Dt_e9+A<2Q{TfW8pgJruHpJr^&2hT=9j&t*123BSnSR!r(NP^@Rd=!%a zAJVq3WfAJrh4+f*g1I@CEn475V~>MLJPZ)2-(ZZ&@lK+rQ$&8pX80U!JeutEvm7jB z-MW2EKjtcbvJP(WY|F8TXB}-z@O2u*6HTrPqI@v-I*qh!B|EceCdTc6h)jc_*4Vnk zxtRmvsF(zzcC{LtQt+_%)6k7KNkD$J49krV8xg~_RLB{%Oa{52>pfm=I+nrtwWX|@ zs##!J>(6%>EPn*>Kk$mk*e$*WLcxs^$-l|6$@Y?hRkwb%i91s?We(b3VSgv0(ED%w z44L+a3#G~@G=DjV3j8_1?3mMUKj6%|=A&oO+%-Ea&U^drJHwu$h%l1QE&<3_pvuy2 zS18Ij-^2!37=~ddmEUs?!5U`2LIIi9u?&=lMvSj|iE?;VKK&~`W8wlrApRx$l~|2q zp(}Y=)_6nA%|+^SCiNc!P0rkNOq{is_pZ#Tmp;{$$v|D4Y;~I3`TeRvM+(E_=>A!c z*Go!eZ2L|WvNh!k%{G0Ho|^rv!LP^Eo7p|J@i=9goZ5KNEx%m@_~Fg+a|9ds3t31z z{WK`XPd3fassxwFKWke+G{0>pZ#@e`ggDTUeB{gPvwdeh!m29|wP1FvfUZ|dZC`Tl z>OcFw%bEU~Q{AB}1I~ieHo3wDbM0O~Voz{S^mVd*wlWQMS}isn~7)-M>Fm zre4X`*DV9lOjq3%SP3!Is4|tyKnhFBiy zCAJREPTU>jz>Sq)_hpZ73?N{`31gGZ+lO?1z!zSnL{^JHE%`0{0i;peOmiBX|gC6lVtgNB59)-jTW>R(nzS}rt@MHsKH zadFP1xkvnXkgq|mnlwW>TpxIJB9$&h+*;qcPrT1Z+qvo?dH5eMGj=?t<=g^A=m8 z{E&xewoIV4od9yo)((xh4lQ6JuBi0#xAX<8C1G$v^%Bo-4=zV=Yg}(~8|%G8yB=_G z)$5@G7)nqVMh+q=gAXrB#kRTK9@gK|hmQo~5P#hnpe4OeNH`@Bzh=VT`&n9V6 zX174sOIw&rj|vKZU%T8WW}BeG$Jg-1CEJx`VAhD0rm4^k#8hmnQgZ~Hv1VBj z__v{JM7n?9^}jlC$6ms>UO*)cl2lX?#FCkKx>1|Y7QCWwHSq27r*9lL_kYDNGyPk2 z!?1n zay%tIhu3;`wj;I^2I2Rj11d%;P6)T0VR`!iXv;aE4dom*9oel1p=bYFTpn1N9rNDE zUt{}q-k>x!)0GIuIim;AX2YwkSqRshciren9^*=Q@b$n!R>Pvy%#ID|&Sg63^W+kF zZ;bc{_n8Nt?~D;@NwFS_wBcJ+x=pjhyi!rdp#60SaX>EC`w|%q#oeF8*F>e)`_t$) zc@NCA0G4~Mv{|t+eHNhSFDLeV$SP<`GEIr!IbQlgXZ~Rx)^td0S~0}tOAo6#c#)n? z90-JvTimY9xN?R8IiOaWf9Xg9k;a1bVQZNYKRG5gSAzH1Y)tzo9#rkZU4b=(bVc9I{RT@dj^duKJ zem3&1RXyT2V~{zSp>;iiLT^!;E&JS8y3BZP)%~IEhiSyx&?qQTXtGTjKACCmS^*QZMcY_ahNc$3iW-Kun zoP8y`P>|T9@&u*E&5uA>N`9p*C3EpKyN%-T^%E13YRc+RHMtbZBJfdbrm>GmrFP+F47eH1~$`Vygwbyx$$AF{1j7qZJYVerRp%&AMM^s91>lxK>$Og=-X(&x^2 zglkzZWYD;61DIy+_`Vd-rYaM$-+(<4vt2yyklWjor(qL+JMqj)G+|XB%ctJK0+Uq^ zmCo5u@9B=Ovs5=1lem*U^e(Pd)_#?p{e)(H=$ynNi=aa`+K`ReU`nV@Bl7l`I|0lO z`ENR6!hBkp&*IV73xm|uQtpreh{JG)isZ#6Itlv{gY5sD1hm3qzDo(J z*MT{&u=UEJq~~M5A6%X!X4JjF9sNS>bP>2vVsOW!!S*esQrEpq0@@6No!W$Nu#*umX6L&;TsfCJhWL5T39M_qtZfEFXA5DC^k>vk<=v%Bfg5S$s4FQ zefZR2q^S0Pmco1XI^+g^;P+}fxa#;P!1>sdn-ERtCOM;Tl0uYfy>+Zbs#?0UY)lya z>4;-$l{HBZ#hHEtTMAZdEbANaOJ11MMyTg#ZF~~rRzSxKFa##A;Ff~|pUH!6uQN0; z9V>P$tbKRr@D1qNC~u5eJEVSn+FvC8`-_oaCs@$k#><@#nP9NSw^JlQ; zmR2*9q#W%N#AM8Pd5FvOY*WY8(2}z6BrFGv>4%zntegIp)bx}D71Y(K)DRBTFKC;P zlQvkPsZhuAYCYXy(FE$hp5Bc27f#X3)2NyCAozV=^a>PA_nL7{tbkd9mNTG4A|>O; zoBLu)ybNSK)lNfhM(834pL)pHEFb1;WM~ZmlST7Azn84kQ3Fd_gNJzUIr( zA69o!5*PCwmB){|VCa|P#z=RyfwFP>%>iEBuYPAr?)RVxfo8Yo=>=+qf>?F4)Ky~%DkG&5!J~iu?4tlatZM$;7}u`V)Hi*dE<!?y)p}sfMRV7??TE=`KFc zX6V`RgbQ`P)?9sO{`y?#Z~SQG8UcA~C##D?M33Es4ok%!zZ!VxI5qi=QvRLgSVt_` z=}~;Ek3Tuzf9Boum+=A?2UgZs?Pz`JHl_sgNZyB?5Bd6D!OV^wp!Z}9UTIw=k0835 zgOI{pRci|+rN{zeG zV1eeZK(JXENdO@2m1_H$k0(jAxB_)5Gt4T=)Td!l2fTH+yAvY7GO(~i`D;(X0W4Yj_!w>B@P$ z(_TWtsTv69v+FkKP+wwK7WX>H_q0*Xw#~v+UN-&y(|6lsdQMIT$hsDLoFl2xM6U7k z6riic7jw@E69g&uM7Rb|o?;yUQ@ncKTDlZia99*CGs}l7?@y8UGk?QVelOBpaK|c; zz+0`A5j}SKX>7@dHG~EI;d!Ue^M;QyvNHG`G~hrCmjVBs+{X6tVOkxxw=-=2AEsTQOfA~jQLUxm@WyYxFY z7|D9DNkn+0XDA9-W!N)}^bzce@gIfI;|vjsZ}4KL0UALjIx$0Q1VIV_6uCeP7D9?L z9#kiiR}gH8%-;h@s9`;(H5-dFsJrUHh2SO!drWM@62WY^FGkWr{h|Mz4)%yR(%JYM zQ;JcL^Wnir2f+#VU*qvpYhgHLc5B&Zqh^+eL_r=>Jn++&n--WdDtwwKn#q!Wyz<(T98xnLWkbxhj;13U)n=KH;j`9%LxhT z2N;Fe72C&Y#&Yg^z8YI{vimYJKLxy7<{T+d9}pY*4A4jDb{~czn7RbI__?;jIoNY7 z+zCTQ=}X;Wqs>aGlYnD$kibNTZe?u;N~R~kU(`DSwodZt zG+qtvEg8#|j=DC~4>;2?jN$GUrG;DeDXO_Lsm6Yp8!EHZAd6v`|2OTlZfbeXj7vU3 z^7*^8e;vT@=x@3~paCQS=ED^>_^|cZNdboSA5xIfL6vjgFr5nwHpx-_t^@x>a4e~4 zsc6FCH}v^czyPrnVc4f0K@}K}co%6!*;sOYX~S%IVa=N$JQd-3v9%)wdPGNZY7Q(B zLB?Y6#HQPWMeTR)Bf3MF7m*tTgAryq-LYM<$)P|&PKXGbADA@2rzt0hSnhOIr8ThV zDm;@A&`NluW(I|Cx%C)vsHgAhWBVB-mmKXaWj-eD=^JZ9acW}^@W<}fm?i@8KCLFoDn2f%;!gcP%jtm=ipgxB?XYH87?=UJIr$mOJYT zroM-5->{Md`#S%HNnCVT7_kcMp7DgN2CA$!V6kGd$A?`x%tI)-hVF@M>4oUh@BMfT zM}TvH3t>>=^R>O)KKt=uhJBZ>oaiN2xf)CU(AT<@*^UrA+U#(o+>#u)Pvtd8^Z`}S zy{!1{=Q#uRW*-;p@d~^s?^&vHR7j5M-t0S8_b`(i)p#Bam-dx$PZfCIT^rW{UPEFW zBWSsG$B~aKw8EE&SRUD1naTJ^F)#cscPCB8lQ*( z_iTi=Z7(psROCFjol5WS`a%Q1}j(AFF0lgO2lY_mg)5yB&C#t8r+{^Fy zf^@|eVb@?`6wdZ(`4U>1>u2sd(R5^sfK)Dqf7skm>P& zEa7_`PJkI#(2QX`H&H27>d*f16O|Q9MqUmF-qFit79kYC=4}=vwt$%U_X=^sdh$U~ zE|6n1loa7VMj&kon*4_03eIJkR53WO%*zOBC~f#&Z8&<^dSziq$)-sd1 zFjnFf7{#n}H2z5ST72f?6Oj*m58sPEP2bfC6n|f@{c*4*0+boUeB{~Rx$|=F%GQP_ zVCQA|5xLRZT8!w!av^5#<%e?d5UexZjWZ&P>96Igq=$A>MZfGUU2|$YqVWU1qn=N& z{Fz$LAKV;)v4&~ka!EH>+En<~dY^J0Gi7~ zT^R6*c5@vO&$oJtP-u`E4XInW( zS>7zG>+-Btvatg5PPCcE2F(&1YWrw5&geuhQU6Z9EWeyyKp0;m%$Nxx6<2SzALtdi z;rjll3l=ZrC!9vVrf8SGeR%`!ap!At0q z)tTV6mJ@NoCmm^RO`WY>)f30*349XXj$0DEco={ zQi%Jbm?u3V7wCppO^TjPCELy@(KcB4Y)s$YNN!1XbZSl=a^_gck%}lV6* zMSbu@M+@jbY;!jrjLepm5cX(L#jSaq6fY`nHd8XYKzdPAVUtYo`3Gw^;yvA-Qwh!% zXV1bseMjHH&H26qv4qrcdrKmmTV2tB+G)%rMK`K9+%&^|$m+q3^}jh0XUfXxdWbB| z32L9sw3ipO-!#7*>nj6XQ#i_Xz(96$sFti*0l)AS~Nl+`M z_keryILn=ZMX?VhSx1AXioo+QHXsV=Qe`Wz*01|3O>@|*=hQfz9%H|AhRcvVIHgu? zsPbS+uq1F+2SM@z7N#-{o-ZbueGSIav6Ll|sP>!t^pwYTpSx1%zLjRn%I>FO@NzaD zb@ryF0!ANtfhLf+**$ zZ^ipv?w{5OpZcXzo~S$@sbg;t8JxF6lcQXX|3%9MfrkcoZiXsg49*bNtSAD96iklP zUBz7gv|_&>Pw|Dose#c@xgp$Z05SRL>_cvdx!;Tmw{@!t^y=9-E##@Ja`|TBHSlE6 z`p!b;S4{K_@452yijThL6p-<1p|6~oze1HP;MdU0ZvT1G3vqIF7-3yiHT~l*7j`Ih zKVEmVDa$kOBA1s%_=`^$W7@d;L7vDvKLrSvy!Gifs851*1-HzUH36Dapq?$ryAq->zPJ){e3q6mlD!Z` zpT`lI&K)f=g;(iLHA&uuKMGc@$(wnPTa8+#YCHJ`Hj4^8^y+-^ z`ZW((j(GkjO+FDMp}h6itpy^iv?7eCFPt9{6ZJKwgub8gH4(}hkftq$_kTW;MUYF} z(9nD5o;-Lp@^T|}`h#bNrtJFmFGt^Ss~Yq>v`P3_+L0~1Km-)(L~+EY7teQ#9q;=3 zRu3c^KkdzXf#FJ7sKUDr5j~lG+fpb99x=~|%NjysWH%{vXgJl4+sHMfJm-tmWrv6) z_RAhrXn4J?#xbYXb~zo~tj%|c z_qDjs;p@J>YNj0ENz#=tefG`F8@xkv;@*SDZRZzWP8VJyBk{6;PK~zy-YiV`tkLq6f^g z=O_rh(HmZybKTd{g@zRitUk&;0s(=U+oizMWo`YDquP}wYqNrDN6#fETXFAK7=EIu z21m=lEMf;-Qt2q5vms;sE??w}j`5Ivi|BUZn;R4g_()_4An2ua-vv3%U_g~0l?3u8 zH#|A&)?OU+>-YEl($kHPSL(~z{_tyH_x-%(yR7%eeh1B?{FpN(v{7+ynt0!>gbrrI z3V?~7Si|9}Uin$t-I?s*+Zw&xy?f?I-aJSsa==<(07h32Jm(b#Dd^1ms`KyQV>t^t zwyBpZF{qrjw21rA9h_fbb!M=NBiF`MXV|{8MnIoM!hIJmbmg`q659S<0~AlT>-Zc? zda{yw3R=aPc-QEm-IW(rAL*-)mxNJUFeyLb_v{_EP5jE?Mhe6*zkf4#yjM359R#HZb_{#l%2;D8su@6TKF+{kORJVd8IqsV}C3H&yEcx=575GM+eQIBjQGlyiU6SDSz*F%| zM*3Rt(jeu7mCK<4lx z&6&|xclgfP5%b(M$_c@isr2IRPj?(rv&ue+DW*u*tnU}g<^8t#K~=rvfCZIJY~f&Q zvB%DwDlE5GeRRxNF)Qb1hJ%-TMPJqO?d`$704Djz6Y%~2xtRrOu$+?YbV|^d69Ox? z7qd`q{mHVh1}^mKecJ_s9TtIrs7V!p+2NeQIw^r*aU{_dkQyhWP^HCWN7K+~z>NxL z^{Yyuv?{Si%{QdYW768pwXgp2f5Lj({~^6n$y9`yc!*@ED?PK{iRoL7zqVU;&Mx5g zYG%#DAHG#hw0&zTdu{rztf{Mr!E!Or zzzeXr^s)F7=?vong%J0fQ8?4O46gL)WJ!Y%$;l5{w$poUzC3em_THk<4M@xZzLR|k zFV~1+r7t|dn2puf_vjdAbe2E+7%X0O_9?f+{4l}+r`s0h08U~wQjK1<(bP%wR_mOo z1O^S$^Jfl_YVVq}rw?xx>D3Q@e&!CVg(mn^Pa1sOiH%gT?4yD{pHX(RaXI75z@Lci z+M#^WldSa|&cDQurwV>tHPa62un`Fde6sUK{)r>-s*Uh#QE&@T1~2nt%E3D7VE;x) zx794kU8nJie`xs#DeGm9!>z@myj=T2eFX1-WBs4S+fpTsRQZSYklJ5-Ou!PZ)jmr>kW~($4_##z=z?^eDheQtRh! z?7~V?SU*4OH%S_kj;sA}ju13xNVl;r=Jfm=)y^9e5g9(Ob{c#DN(NS9o862JoN;`j zCo%IKt1y$khfZr9(?_{0xW<> zG0)NYrMbM!Qj|{r1}nZk=YfQ2l*aru+6j7Aa(NKhlUsQ#bsGk7Z9|3hcNqu2fZP@` zQO`ed99~aUc&=Z-nLl6I+wH?g{ciEo373^tx2%2@*t`=Kz4<2Ggq*1`y!0esPZZf) z;y$yS+y}7#3bQXce<1}$Kw=eG(dohN|EzyT*xxJl-wnkw;VXGN>E-IGZRh$xSPTl} zNHoY$x>1S}*q0Ie(y0Y}>qkCt^vUhZ5#^o>90JT0%_r8s%Kn_;6_dRt<1J1Z!)`DU zQo)yez{bV`E4y(HyIXR=HgZbNe0#E89@kJ`DB!Ph!FY{BRktBzybh`{PFje`TCqw* z76%5gI;NR!J6Iv^2<;renHAz-^M#LV7?tB)g`ucsD=#`Ew+~-=?RrA*gxO?=f4NX`rk11`9Pe$cB?d z9Ew;pYwV1ZCygoV_$Ht}fT}_^kgG+#(vj-gy7vo)fr4a&TW%apfQ}!szz65COz*I_=4K+C z^4WS5AXR6xg`k_%os*WGQ~_H0yDJ`r#nBt*@-WwRxdl;{sNpj_fil%ey!*YNga45j zwj*z(3-8-@2UL7=h95(0ub%n19ZNJ8{8{`Lf#E!Q?1j&aocbX{Uo{jPKXz4Y3o{;V z&kLrA3roMZ4yU!2{({KqFF~*2r|h$B=qNFZ;In>U5!0PudE?`|X75>8-WXdvBlOS* zSDmaNZEyF=zBZS052I5l?_>iCR$wGu1NOVhdXMpAWU$<;O~L|BEiZXT#oo^wPv|QN zQ`Eft1a_647zguLziRBX9(>fglR(kn^;Ix;{j62iFyG)<(HYTKxzkVOmsotJYMM(^ z$UeuDse)BhY-+M;`bQp6eK;8xcdG?nU_IO3I(^=;Kv){2$-r(P#VHnEmIPke8EZVF zF}O?hRU;OHz^=CDZ5`fFT2`w~6C&Pwh1n1K5n`1iA>aw=?WixCv=Hp_De7TQu%{~2 z+H-D>4UpP8V(X8IPctg(dfM`4>>(?lC>g0!i9HM6*78uXu~`#bly4{KC(StRCPj(k z+v5I_^@%B=YzG4y%FRuo*o2q}^>6D)p>NuP1=N5ZR`lFw8$n+^8b zSdD{iRWS!HUCcV3b1g8Wl!iA^zbsN3qy0rb>EbG}*DKn&)gIV1zGE4E*Bn^cx1bGX zq~co3l5HLHvK=A{vE@qCil9a=v3h4kDKybLFeKI5PdQlWUe?kWL)v3wzWPpOfpk84m-_L>9s7MEO_uW*6m(;-3blsg!XI*C{@!rn{b5!rDXFXd4umdO)a!m{G$@@M6!Qhcc zV4Z+lMDZC|58!;|CWy9o2r7|gl&tKJI%H8W&Q>%1ozGxnz^QTm!Bpk0nxI(u@yH>n z+~I10`X1`*T@$f_J;Ao@*am=xDBl%j^7yi{Fl&!Xw-YiBnVztQafch01tq$c1`E^7 zMc1=;#YzCpqXD6EuTABizU;_^+(GB*4XOGmlQu1rm(nY%q9VvIgw?F=8K?;R;(WJL ztvENUW9Uk{zX~U}G%_NTAFpP$5%%Bn$u?TE6X@>o8B#s}) z`pMO3c>y=_wExHTf(`8!x7J6kbuI0s>a@6pE!+IO>N5qX3`Z4ll$AqJ|30 z++D&?!GN<%qwJmy=O~U@l^}(HO>o18uq}AOFzh^VsYF{|dQ}%nw zpKO2o>c3-om-jCSw$(9W!-JD?5(O5pwGefLEZH+)1d57c%%ueH-NQ$p3yS_+pL>$Y zs`80|Rb)_ih<=2EyBTGvelu3kD}0mWfemuu8Ea$O9*ud2 zbEi$2&Fct6hfoo$;bO7%hwoiRU~AQPF^PZ?5<$WT+N!Wz(!u!UK zThfp{;Iyh!3D!8ytK6fcGe4x-N(V${eB=qj-2my>oS2K##T2|E^e9hkhuR+FK zu3t)@ge^%m-)gJ&1B zO-MaB(Tc;!X^Rfg*k8*K6_kUmM zqs{&OsftpfN>Eb)O$_I^Jn-dBi~aQY^da9<%2Q**86Z#Vn>WJuMcd9L2F|=B01S_} z-+ex-O%^mWy8zU-!dG7J9_Po%H@@}@awz+>_v~Kh$(>sS^?!}suhMefZs`8s=*drPX{tJf$O*B3ph`V2$}yZ-hP2n>kzqG)a+fP8{WP@}>k!0;$p$RFru>wtaTJ z{njqIju-uxySRHqAk6y{`cnm}(^CmWhc196*k+CN51=6CaNYlR*T@SSg~ivB?2AIX^p-)S7fwP@qE7% zuNYBN)-=BRzc_(6^2R>rb|yXBaBOI+gi;EZ-gC)8{n%cqwEvQvW$sthzfX(ttnM%O zDR>_)WVNY#qCs`~@WRU9!i7_P9pw>4;IaH{8_2ueZ%)U(VdmFBKKL=mD3=9By+<;N zBIp3w@pm&=l!2(Y#SU+l^i_kL&%!EG=(9IDu3+mWg{Ycu9Zzp+t$kt@aYe-HT^5-Q z6W+j5Lf63>1>%Qw*4cTok<{nkS0g zqJ@POU3oAO6CY05P48{6=q{)0UvRDyC9e@x$N>$~SZ8Jr9zRfv1Er`VYnc+Z$ggdE zwTTkHWlP_Q<^?ypnxuXX6A>&JIbYtNrSL6<`Jy}&pr74;XkS5>(fY>=egy3ipB;X-&z7?ZAPl1bfcM}aDz^Rs zK+qFoMdHK2HDQT)eO)qk*WafXW8k=gy3x5%eC(A`Y(M zOO;^jE8=?A+};c4i$Pi>9z@U`8&48tfNKF{2(HhSLBYlUdS-%}KOiuL#Eulc5&&^Bzz3HFn^34pxlQG3AtR%gkq=2hj7nZ0Ckp_(vAPciH>Wr}-zGhT)z2>t{EJ#;M!cO-GH-TPe6lZCgD2 z{D-IsYMWB(#4ul;$Eo1vOZd8QPGOHwy3P35r&I0l3JCKpJ`qX24W#J*nn|Ia4+7LX zl3^g}j5FqW7KmYT6@r2mK_n6*H^0{|RuzH9(DszsLpsDcpr$ z$jwe6L*;TL1r6epV?JGR14~`bHiw*oO|ydc2RwvPMRMK3d$Bnabc80FkTkDVGu6Bo z!weRvnr%`yW@t{>t-2IBQ+w^dcdJ6_cH^(zZ0;giGGe}{Xc10y>JxIr>FXpi z`4>^;-hm`*#=ocsULI+gl*&y>Z0hUin3dl1k~@zpj$R)*=+&BExX>K17|l$>(9D{J z`7ZGP4c8G!;z~)zoAUw$-7wil&1Tl?BshmJBEe9EbkAi7dm71l9Kh}$EbmBU6sh6F zY>OiinjHO2Y%!#S*>w$x;g)|Ib2XBU7AO8mC5@20uwW(W$%bU%3&$Et0*bgX= z^W1~~3nJYqw*T^?u@FI5d>hiA`en6Km1$gxoUi-&u%w(z%v*Q|V6HcY9;N4}f9ffT zM3kqNq-QQdOf6^=5Im&Q{TCB%jZ`Qf5*Lnc`>T*0>9qc3L}xXmwc-C4#jx0jH2n@& zi0$|3o0@BoMzsMM5Q5ECh}C}&(%2-Sph)5uQKBOLG^Ap1gra5*1j2?u1TJM z7hqw0{9UAeG5q|ymPn$E?EWtSGTjX%ws%N)iXL+uM#`-7^?y;5Y!v^onnY$%T^7$z zJw(8KjoJ^$QT#P@K27w?=vDg#!~!S z&o9WHnu`DALPU_}d_<09P-K{csY7OuBfv%yWzlHekaYtbsp&x%(?du@P+ikrK%sdF zUaV%O(J}K$L?UtyKV%MwwIz`@^dTRk4Egh)hO9yg&v5i#BM*xZq^PD`CUzev;eTo- zg89O=#MAWohH%I@TO{LG|C`8&BW;_@Kly>wS4ae3k=RvqV;-T2FmJm^^G5XaSOOVx z?o#_}#<>wCH*(NT&yKAr|5Cj zgb^|h0HzVG=(yky!9h2Yg}C*9H^k;B;ds#ByA7KWPf{1(R8=ly)7LW0Xr9){qe2fJ zERgzR)JPecDj-(o`?jM)ro(H7;kumj#6(z6OL;%zL~#rLLPYoLbYp*z{neeaQyTx30$5SSLF_N@ME7zXKJHNsiy0 zBblL!ap69GI`FvyaU0oI*h+Zj{W&JF19UYBj55rYC4$_|N=`A!PO^Q)5$Cv7SwR(_ zim~eW?aIo^EUmd-Sj&8Ymv}=&Srk{tO0@Gw-V1Rjsfu$YzO^lDrrUXj?R;G7B$%cr zo`TioFf1b#@sEqk%Jxew6g+jhJ?}fHlUApoj6uRR6Ct&`az-ZP_(D57ezC47Ca?(C z#ebe)20!s?^@2=pbiQ~O#>v2;R!}47dFia@&7-HM_j~?@NL;E9!W&f7Z33+S(}G|$ zR~r_l*!jyvi-Grh{ClJH;3?sVXm$coTHJX3@xt58033#0g2}$;LEuml(}W-8l)=UC zbt+#0dXxZ`Ie=AtXKqaFxqJb4-1+%B>HJKvEyU!9xZT91n8XyobF6wB-Ky$4Iy%PS zE6mw;fNS{jU9j#0NmGp$AuK&$!$rFyQu*EZLmD@$B-L$W9JH<018)dodu;y|ATjfx z#1y%{hTiRQVh*6R9SO+IXQ-#wEj%|3+%qJ(dG0b1Ey|M1LjCVt0%<_ZT$CN3 zPoEfn+T=JzNlMk`TY{;7DR@$A-yfxC zOc@@Qzp$FA&kH1tvZIJ-1mW#JHnxItQ$^K3%_WC}2pvquStjSoKo;<=+<||Vy*AZY=$r)WB;o4{lu!e>+1&r zw}~!~AQ!PN8f&U1w*$uY*?q-z-MwzvfOEqGQF=5%VPTR!3q=bLB)qGi0O1nF1r6y@j2ddR1GD%S4dztYMwq>zES7d;@i$N&t+hzs{ zLu6za=1$~d9&u{hH#L21yuw7;n?s*AawH-Ltk#EX7L3>H#>T}Zt(Y%Oa{&2B%HD*u z0u~9xRsi=K7B65OsdaSV|1%A{3Tb2`?-6&r>sT2`gOi<=?u}Y{tP5W&ChkJgV~AMS zu}QGmd2iXIBHBeG5uOd!aRlFrg@uKk0akXlZEHk|j6N|nFpAsbG6eUzO^m@l$0IHd zwjMJL=Y@3F6~)BHs~1MRrI`#^a)apjt-fg*k2+U-(dJ^^zKx8W~*->5?($sRJ+@XL#I<&{?#USc#niPX&d#2DiaE@-BR zip?!BW&8NQOj(S?mVSb%ZsVBnAB2&(s`yk2z?f-l-#q6TKYt2y?=lg`AbrhQMr9K* z3WZMDy@XLx_%64J(P_()sq{^_hv~7aCijkSvOlhEe^ch2i$UBQebX2a6^w_5ojEW+ zGMe1JCbYpA8VRP1zF?X05;`1PI#}PqE}-UdiszIkAnJ$+12|;FGJ!?PDNBRBU8Wx!*>4)sY@Ls% z=6ezT=Vx#xRBLR|TM(W7+=7)!jov*m8uzhr#|gt@@>A6C=k$v;j3N;S0?z{6p>4xs zf*jh@z1;6wTCi7&b>Av*-G1TY3v16}4LeL|tiY#!nICK&Mn5(ts640=XUec%zXbiU z#4(-!j3B8T)1Lk+k=`5J<7u>;Z%ekO8^fokJ)iY=E8#G}!4E?z$p^!BmUwFvwIF?FTNrAnb`@pCO|GCkRc&6!=wi zBV}*jC~H}o5j(FEr~+8yAaDrC{;G+Be2>I!vloQ3+lcJ;b95V$JX(VQ=usUG2&z{4j5;6}|0$3zqEm*v*B#BYj;0U8r zugqIe+y5BI8WwB2h!Wwfoq#A1R;#+*x?i4qoN6|WU92zWG@+3KlN^SPW%bXjY%nMn zD0?i{hzErXm0E+)dlu^ooB%l?X)x?CAHl|<>CXB_-p21-WWWD} zSG!-VIm8epd`cJK;BiWgt9!58gp-(nwm+P*E!NcR2?fG%^^8|ad@ zRX848q+xFo8zh{KXcwUH6K-070$quIPfEffJ>k=dMA;DZ2f{WR5< z6;MXsr7tX^UGDR*+@cI_NJ%tTk=eOo>Y$Vzan_Y69p+A9Z3!g`bB{JI$R%^<^%aTo+~P&u@x{l-<_i#%U@?I^0S@Nrzx_N`NL_1jTs`C}?l)f3 z(r@pWwF2dS>k7&X`ebhuL_z4P)JBV@tm!px6%)dYyu601vxi zrdnP`9uM=L6Oi#J(-K6$?B4{~dB3);(ru1BxAW4+TZW@f}O}9GnDE zr2qX{C8Ow6Tn5Wb|1L2geSzkA{zV5GhnhX{ctRc zJs&v45w5NJn`i>q>58Dhoe8&hL77kQq^MKtp1K*iX6vV^?%fV_&#U&OxOBb{s~f(t zuNlwnL6j$BG@o`7K*GJREQ?vfG=SyUAABP$w0D2d6W}9bYr&=9LkZxJ9wvqo_0kW5 zd->gT^z_9G7;O8G^!7i$v3yoH2&^D9TogZ*TCbB=tSQJv_zL%*9}WZ5p69w_(s*z}SWik(p@kA@KZ4(e?qY=I4$GjE*SXTdx|W*Y9@4{zULriZ z-{?)cp3upYZ6>{u@Hd|J1>r-G>Qhx?y}NbtxSD%U;C02=+MTkadu%Nw3A>OSr=BMw zlFfHo4n6|xN$v*mhQJ;^f|DO2^d@olxtb3x6<0#3Dnx@!e1Qi^@A{Uer)Xc~Vc)kD z|1I7T(dCJbqENJrqow?OwUJ;le+~~3O%j(-_UhM?_tR}5WI1%9nAUi>GJcRIW9aGL zYk$BZjv`MJA?6kr)}xa0l|PC|QAm1W4ioW=aENoY<4m*mHVD@0h3TqXKRa;xx7oxB>IsxtG5@|V|%ZY2JpmO#Wr;)}$;0-}j- z&CymYl)^|{R@Yc65*L0?)Cp`Aqc{048#j|rUxrZ6t7?uKl)x?_@cS9h-E z4Os%Rksk7V*-L10aGT(|DyaRbL?LHgT@X z>JBR|h)bisivT$3Ax@TvfX_d|E{gOfg5Lp&MAG_1QJul(M|^F(op|25uS*ae!apVU zD;}|75l0JYO>=x`YX>+BlK(qY_A27UP=er6)|i=y69MVKcpF!iRh1bpDPDMGA{_Ao zdL4QleC{^pTNH-6?ek&7M1UdbVQK*p3lkr3X;h6pUQy^vF&KJu$*2LRN;IyaDeOu~ z!4v$(aBn5l1FSL(f5@U(H@vWPi5cPqiR3x)1+1#Wsh}o{YhR+9;US{gRp$c7Rv%J( zaH(t&Vd~y^rvB&%v49S?5k+ zysrXNYV+Ic&&CpjH_sg#uoCqdhx<%uI$C~Y(EmN4*h0I6H8DI%Op;1kV@FTr-rEnB zDq&)RiD-gTcl1m&{oQ>axEDN;U$kLdhUqmwEfu*@q?NMz5MJIzY7(GdIs3`^xpSxbQMxcQy#qhJhlWhg(2ekz2s^x zEY#~rDfjEJd|6Lv-pS&Fc%1SF95%&*$fVhn{w=^WAaYuNAfX&FFWJ=;bcyhbs1JWG(Myg**NA^C16`&m zu|zELYgwcgAq?s1WsO?aC{md~{k^PFsANH&T-K;%jY3KV%9&-2LLmy;sb!7&7mX@C zX4N?VE&xo$W${9jC6sf^;`J}$wJcK08nv9JqnM9{Q?4~9c9B63uR&is>iXFNFKk-1 z@N^Y(Q;YB&pM@?c2bbLmAE=|;uyAS_=rTK7q3&>7A~YBOp(bCs5CtrL*UT07cZ4s=E_Jiom1Uqy(dW;#Wt#q>#ga$M zBDJihOA5&R#bu3}r^H|WFKg7YM*T%ZW-tH0tWmfaKFWOn&DEHI=|FGl`)o^mjy6v$ zznlLW2jQ-i{i}>E1Dz-DU;Z!CbXlbSA|kVwmo@62$OKj;ShiCCNU3F!`bR=7i`24_ zUP@Fnxmc|T-vyYtcUilZA{oK8W$jwlE&_2_{BT*L7N-r-m1T`u)+i!@SnS9DPa1VF z#aI}8w2gsQj|nC3IN3^$lo5>Pm4AIAYvFFX7*R>iF9ThsDTzp+|6CR+bh;qUE^E}X zMj?a(?ZmQ1p-~0p+_FY3YZQuDAWto86p|*WXO=Z;S))(|gY@LGMv+ovk>AT2wX9K# zhz9ZLWsM@H2l_8eH1buE<+>rVCWKDC|DOtHC=^&mAB8Arr*TsbUW~8+RHOtjsgBi!xBf!y1xW* z2rn&b)Urkq%EO|^|JxdciB>mAy4@kZS$NB*d(4=IZP+V=llVIkFL}&__HAXG{jaN#K+9kOx0i|VKZFPdggZ|Vs$uJ?#}>UbPFJPRAvVf5 z-(22hmRmr0_n%{7r%kuNWHZF6^hW(TIeUZ+u&dEe=v-uTuYCupJ?y4rI^OwjAno|< zdLiK!V+5Cl?`}H!39(NCcHdEMzoRvWh1+}GdV;ynAGT&kRCT%gBK=kP z#XW436MMl=gNA!u;8#hgqLB6W`?@3gIMbwIZ|nbxCN0+R1VPw$UX%NJhts3euOS!4J0B7z1v&HRE z=jt>b?fk2io2Qs=AC&Ge^x9ii;YOu7w!UFX^qx$6#*SkCGY-R5YvWEQ8yBXluY-Kv z{3@g|Ky1^3jn~W2Pf#>N>~nHi;_)}9AHIQQUs=39J6CRKI&u1$k?S$Oqgm%OG=KLP ziD|9XJenQz%qS_pE%=PJmrtdsjG_f}HM zs8z+QCRDNVAxL@nwa5*z)=C+jdza4F@782j#%mUY$>4P*DOQZ)d{DCVj0!!V2kkn zNMw@caC!<1m^DuzXFQ*GK&Dx7PczH(;lTf*PP53hpy(U~u~(v41FR4%s;r@r`CEoY zo^N{n^03!*(Z)EF?M5GCQcE-t*n;<8i@Q*4_LM9T6ns$DYu88b7#=_7PN_V|cubNb zQ}0G1Rzda~jeA;kSv41_IDp^yWHiGw@ey$8?2)=~i7Xp$USIdKEo*FC)l)`>&?eN< zi83J^H=xxiFV7*tVinp83f8)MEw3WNi3azV8YPm z^!1ldT~~7$*ieat0k+hRMnZuwdR6h8UeMp6dDZZA^J-h8;~8%eIst&;?9k%33ULv$ z`zp^A&ph7FZKF_9QS@7nJ$-WgQ2Wjt6f!{Fu_zNzGlfMa;#iik+oLrTe$635s6aDSz&hIOwDNtl-^ipG=GQ1!t?k1m_Wvqd58{e?=YM-8ro%- zU{TscqbvOV`8P7OUAcI2{g9uWW6r=FOU+v8EyESmf8qA3LWEHOQ^z}^;mN`b@I3Mm^dpmnC~)r)fd!bs6k5iiwi}ip z>0>E&uU19iL1`M>hc(*XwV&2_=rH)C;4L z*xmxglmh2uR^ml}Cl}`LwE6Vj;o*!9bv!gfR$BNrem~7o|P1h7Mid?Y<=!XK~v)ccwr5~U!8r-Gs(ed^izGPH>z!C z>e=aWUY%%r8>#7?b@5>$i9&bulL8TQ0|vCPmT()ixjRB&sD{y_W}L~#X+|%<6XCoZ z?&vpcG+as$;{JN^4g!6i)-;L{NMMhJzjm3MM^PXfq>=`=hTC>~DI{*Lif9 zc`<#?w`X$`Z21sZrCyXF#(3fq!W)A*?X^N-1$IJQBqv!c>`;fDihxc}M8^@!-@-!f z-H}_>c-U(-R@9tj+B2MjwheA;Hj2tZY_|bpuT{l&eTaLx4>08ywOz5aqi@zoXu>t+ z;LItps1oC%u;Gx-tn5Vp6tsPAabhUALpU3;FaWFHFG!?%lzsHawcW$gFHgEXqGC+- z``97oA>VirP5vK7qFGQQrZ>G$#KIgEQj2AD5`GEl2u0hsC54Aa4{{tEYh3fa&#o&W zi7h$V6@}O6Dus!Il^6seI@4#?QyU5LYOX#se6JWyBL+~@15H@9@G*g9?8ZYI)8WYJ zjl;O9OT5FUd=dZQj>gpk4XWHWF{Lr?h_De%a->!frCM;gL`KK#{!BO^P}T>+7KY$^ z?3O&1%zai$CRX}E9zoC1(vm0qY>Pq22Idrbk^|hfU@hO zYADh*uxvnfn!SbsZ8GjC(EtSXxjo7%b-qfYAXI+dy6uX$pK;+TU*ifimZ1g$kUMO{ z$eQKgq~#1%PhAziHvRRU)A!N*TUjrP*@4uU#lo2O=nQTGN)yL>{<|l6YqfKPihkcV zfdQkCF6x>S*E7>T8w)Yy@A2qCm@Q0D+D(*ll8f7;(09&PW8BjL`>%&Qy(Me(_|@Pc z+ZR_K;@HUd_Ye3EHQ5g+54|hT;F%g+v$iMuvDo*^J?7ntp7+Wgq6|}vI?Ps682k6I zXNRzB!pB9&C%&^}cpSIZykL>cqM;XBch++Ew7Se`%eyErB~W1=gZ+Uq(1iAQ5xThd zhu4Exa)@2VUqPu+fO%K}o88V4H+lVC`K6z_Wj*WiIg1Yr42@gm#F(wB-cC46@rL))h1{_~z+%_xMUFaq1jEBwl`aG)`YGfK_p;(^Ux(v$YVW)^sFi zfLhCITRD})RHs0#p`L%O_G62*`%)UVIVx6V=L(&@dW@{492z*hIykov4{$S(YKBiM znumV=9+(c?CZ^v2!7uy+-xHm#Dh~=5j%5Va(m0Lp|6!l$?^ru@NX4eVQ~S3pRcy#Z z)uT-&-9M*SdeG;e@sg~rLuBkYDO7vRIWxv8q08E$un_Zo$>W?z2w8Wfef0`M->CFY zb;VS}Og!@68AY{H60EFq`>(m)nfg!##>+S9K3cZYgErsvD=lI@>%*vl2S7gLAn+xA z1AY?#vu}l8Sw}O4^@4U@uV%XbLyJnL=jm790^hInZ@lqa8VV4q(u@`cK}N2RhMqFS z#jA3b-&ORxm}5zX`R@8?P5MQ?y3ce>_OEpL6t;ixzw;(tvAx&ZG{(47=Pqk(5GN@o zTFcPvQ-K`N4B>5MC)UA^i`QtdIxatupF`Y9CTad%0IY+jid1rqM8YQ(&W>~yxjhec zG+grn8~JN)r1)v^adf>SGbJ@g!i8u7s9`i51=<MrZs?{ozAF&ATTLtW-Oh?nit1 zZR4h>Z&LQ0CpXyYf8rR@A7FS$RoD^D0NMAE`d|znT55^2fdkMVGdjW4YAKSw@s3h) z-6;XxNQeJe!{X~QB#VU&cDAufC!bumgL7!|eI4%U0K&3QqSeB%Odv&BIYWtSC#1-r z=eJ%cyvWjXe{@ECD$^^aR>Q|asH*M2IfaKIHxsdiS8jMqpVj^9pGry+^!!Z&nEXVm z$7hx31#<<~`MhgUY--o_y9O~7^?NkaZ8fm?)K!?=bY=LAwI3oo!t6#(YEJ=DoN_Mt zY~yq_W-kOy_r_attv7h;7FnWECELR-dPvjNN9jQC4txt@qc@9hXo44hX%8U5TD48x zG>LV<>a5)L>295yV?wH0=5+MgEyV`LW9yy5r^yEW@h0o^N39<}82OH%tutB>AiNID z^>YzC(MH+NrSkeC?JW9D`eYW561qdQMm7|0J2pvC$E0uk;^6^}yZZ!J*c~DTjHN*n z-ydf|;R_10?7VhU9||~HPUWNXkt41|!=bp#BzgVUwX>ulW)b!|8l>~xaF{af_Gr@! zU-IpW2IXfBETPLUdjct3C*EDr@ibNaFF2DjI`eF7%TZ!@Ad0U?V^BccCQvbxeS5{S z=X?(g{C;eXG5?9k%?7X59~|A;@5WK*)E(6vZs`>CfOYUFULE+Lscfg3E>WirZHRTd zL%u*_RJqDf#9^h!w;9)beCKH5%QdPx78 zhP)VtoB|Pkqg3&X=s?FjG-;Y+pSpu!~z!RMqn;l>4nNhJe!13MT@Z{l_$=3&+=gRu7yM)8&B-Z#8oB>YZjYM5>eh@+ON zJy=kra79`VQaXK2$iBg^zhCvEcy0Z|ufxB@0(MFwa31PIfeZ1{QWs9Y@{Bt-G}w@q z=uCY(>ey6}=f@KB+X*?>*qJHyKgXs8D_4&6Gs^74e>X2sc)8-z$emv|R(-g1kwVar zl(1@Ij3^vWeCl~(Pq3`bFOL4aP%0yNcc*nbsUK{y`|7k7N)5kJ)?=MuIi>Sd_3h9@ z3jfZ_jzZJ2>8lch)}0q6g^vqFcvuKq`s`8r>gnSgA$xuDj1|LnLX*C~naO>WL*)BQ z6|F+>wsgZ$92{XMRJry@O_JG}jI?CfhrBFpznSm5$sLihh1a8u0j(0v%2S%I26&VS znX?z-nItG}zW+**q_m>%kvV}qX29*^KYhJwUFL5!G4q};<}tw=YMZB$_Uk!kK0eIG zuPX3V*}5CA_#VNQ&Th2fxWI6sQn$rn3A!wRlPQ<*cU*1v?l@*uj$*!3=J2SrTZT*+ zmxrgPWxr{7-`LdD$V*AHc9D#@gLzkqvdRo3EhWA9n#AQh53?fdY{5+wWfJzJj+%PS zVLW--`N$qWq0*B)p1E{n;gYZW7~EFS>1idQm#j@n>RlD<9-gv;! z-_txrDAc|#CCt!GxomX5_u!CDZKS%~QFHz9w@@d-HqdFJpaUr!nK26i*s++1svpH* zI7sPN^nMNwot>4@CshZ%n!mgrV#W34w_EnG_H4>1JyP_mMzb6L7*Qv~oVJxiXel(r zxhHsUsy47;xLyl>QIYQ|d3iK$9UXq`?^5r`7H3JdyWHE-`aZZc)L&9{OzlKNE5CYk zMDSQ8t4Y?l%3#B!Kt4;mi}%g$iM9=lwC|keFi=ykGA@7k!-mlVs<-=3aAcEK?h0;Lrlc%4-_89Zu;`QumGN(zhL~)wU$n$bfrcNFxgIGdO4G`FNHdk_XP2 z8mHnu=pHDY&`kzy)A-S!>}|uhf=-eTA)kY2%8mE;yh4BN!K`u{>Nf>8?w8$ZaoB*T zavdKtqelvh(5@!3#1+OZew~P-Z1pF3r1yiSW+1+xmZ2x(aa={;AlNlbo8$24E*X#S z!?2WVZ<|B68(w-UWEd+;%Bm01D6GO1!Y=*{tVW3sZYqCyO@T2;E=<4{D($v7<6j&l z_?YAK?J=KFKI5;X#KQ@qV!V?hhWGOxAjRZZ$K-A<*FvgQARzE@E!;smp@Xq#6RPv} z1Q5Qgg(3X#V*6B>(^0>_c*n@=)Gla4zX#t@^KOk^C!HWtXcu%)vy!#<^;I*^;wjve^lv|nZ$LoQM}s(hgagDKPcj{Wrxg}k z=aors)p^r(!@5%P$dtZXDdUIifc^&pKrMiz(d(kb12`rQV-5Krp0Y=%81D;ID>NHV0rIFSa zF@k7;+666O;YOl0n}K{a9^Cu;**H1fwUZIWhEnyPWu3Fv3@g+nx)qlmh|J4V`nAXA zP~H6`V;UCjs~se`LKt~8@K`>L=7wINRVOhVo}M~mUmsb-A4;Scw7K7}qHecO0TiQc zN^^7~$C*jtnZ%&U%5Z+3=M~@U_OPr#%B~w8NH|p*HC3(Ue7%zt00bE_qIeJ}2SlZ- z$6tARSb8wRS`biiSI{u6*;1gXLHGrJThrhg2%8j|Dm>;+D*u>=XeeG#2NbOM)$a&^ zZJgrR6Rx1;Qe(c6o5%5^l3NkWUgP>uEECoicCgjCd8wAAH4}pdx#{J! zz`XOkq*@iI1?2AgBa108^{H%2(kd?|9t9i}S(0Z1I*i_hO>ua$k|e;aRYQc|KKLG# zQLl?|XwoPS4ddk@}Mw8qpufGIj0K2d^9apdLvTr}~cr!DERW(VBB#Y=jieTA#)SR?-#`jHW03VJC zX|rsnAF^(b>=BS&2?VXn8&`Z~;KA?F>Xr z62m^N8fhg30Rgv*=JBjN0gXU6IWddubLxy)+STr1Ez-OtqE(+Lp$oSFxL~rn#$BYA zQkVrz-EV1X6y&vU#?~kM4gkj8dB@vnkpNufkCP78v`W&Y;apt82tsz}DaYX~nju$n))PgN)5S$als?m?EYr0J%dAyP*kz%WAv+Hhzp`ZA3BGnBaHp0mBS(YY1wx7$gLdq5c)4BQy(BE~K z2as^uOirKB`1)}1HG3F6yTIUOFL=$F>iWG$i2;QRI8u{`DTY8nn!??is3-_a8Z3~A+Qn<6c&~m zljQx$I=Dc}!2pqqC8FkMhU~yheO`Mh;sl3;pEZ-mmCW4Kv-b3rUR@My0jK#^RXj-f zb?6l$pMpBj+#QA%HZj;XSbxS&U>~z~)YDUJg;L+NugDHXL;|bO2!ri{4S06)#5!_m zda7}_z5-QCIVfIG4A;clUTQgOOv-soAlg)LS+I2(DFgrvskdGJzAp8YJ-sN#k-{lH zvC!mJ*bhhJ-inXgu6&n0Vou63C|nQCIkH?8C7f?y7<<4a8}5a-YGI7 zHKI5kG#8A!1XJ9e12~9CZ&}6!SqGs9eVO`C@2q3#iRx%;Pi=h|v|_7aGy+BWN|YGG z@aEp%qq6jk8_O*sJg}J}bWQQvV<`s6ZLs(W1 zv@8@CEt*w0JvnhG@3eF)51bk`!MJVa z57q1*p(2eWF)V1BWym*Evgh&lv`n@iH4S2W+=r((+sIaUj`H%y)Q+j|CqKy_AC1?^ z?aq|%KdEg_B62qH+AbV^IAbe^?mtVhQ8{NE4nIoCOt z>jTWJz4nSbfA_uC?9HYR((THWBVVre^~TaOE*M#}t2?|wCd;^Wo6*ZioxK!?xA~@f zroZ!(tNfK$y7O19DrEN@1hXW>By#iSH-UM6YvKoljhBbDvA>;zRAV^34Xy@^|Hb3OG_7lXY2)LuHS)Zcmza zwdbBtxdn#|=5M)&l;@r*It$Q0l1xpbD15UdNHC>C_l? z)sXY}2j%Wm*NAH|3a<1$Xa5|ayBb4;0<9$+1?gdZeXkSpbj%N9@nqbXri4UQy$5jb zMMgYYeR1@4Ja75-gOYdj$|D>E&K~?3A4N3`$Tr=$L)`ZdIMG+6x#S+!p5NML=H4?R z@+3xsN-gy6Tk_}DnYg=#faz;x&n_F@h0F@4(-_tWkH@+$?^~qx8f@%U-y*#_;4|cu ztNyzFlhVk7((b^UuU3WH^LYx(t-AGpE)Vk_7cw0T#;-q-V-Rw8@wV}~_#bBhaw_;< zH_z;Tp1M4>HIJlJEg{IDHbySxSFzf>5_c=KeTJWbfqN>AwKH!|hnYZSeoZ!Su& znA|sMrYx@u{?n);EH*GeBTEk0C-T8go`1vX>1v}>#9YZk>8Rxxv8!F>4t!Daw~t3x zKbFa~G-k{FP(6KeG>S3tL&;KWu6SQkq3d#Zp33L5Vpe+l?^;Hx%yn5cQ0}}08)|au!w{!~od#U}iN`5YU*PN}Hk6&rSLjd8diae6yOZSArpMi1)~;S& z6pi!V0})H7Fr_zK{T_q7)u$xxS6dCIzy|PI+~Mi?dcTvAWpQ6Q4|- zJRx34V?fBg$-gqg$Z1XS7&}F>;RWu}#sN#kIxidp?EvR~Gf@SG*oQ9q9fefEhVET| z5(!D{c5BbJ1_bea*10LXCQUtBVAe_(8fr{U8n$>l83K!vve7QL!De<_bU zbC2`#;UUWT>^*0nSe}w%NP4fu@d?@D9(zp$2}qgK8MJF%6)Xs{>nm8@eUYmhW@f+m z_3NjxKm8;!LB;mZi33LRyZTPyy9cs;JZKxcX^IbZOx$gjW#C!NLz2~+^}TQ-|MMRG zju(I8H_(K53anMhKXy`=30iD2_!4smNu=iV_*A!3#K5(TH3{iIyU{$zxCh^YBkoyX zWggDYYUqUuu@H1cdC`(ySZT((jPl)gC*&y!zCxj#&_(E*L^hOa*{KePc;@jJ|Unm zvtb$|HuUPwLOc7xKl+BOjWKkd@+#mc<>{Dbywf_tuV&=AuUsR>MP#jssS5;E$d<5i zq=_rtsubau&w8iA-2Kx@rosi!yA?*ub1UL?nh;EIL3lt%2x-BJmN z1RnQz#a?mm)s#+=uV2lO$?w`p$lHU-k74Rq=?0Lf8`*Z;*;0wpQ#w;b{JM{z@LO{h zcVa((J@-NLsd>X){jXymMqO(@xm$N;mf71)w(b#(+9V8?OY^v^FHoQ-5CLhq<_Gsg z+Vc@~P`~2x!QhM(Q>PaUAHAjzcQM>g#$OLft)kWom2{T>@WQwRlNgJnVd%dIo;vpK zXPsAU{)tbvO}~5{n#S3mBi^AIq{R^;<=UIB)~gw&oV%r*7JGg&s<^E@ihV`&l3mrz z_9XkB^tPIrN6_6rKuoeoM5 z?beFGPx7E0kEas4r|y((lxGR(pPr(#up)5Pxk8m zsU%oJOZeUct1P$Jy7{?@kJ4Z#-hP-T!r8f}=DZCtlZ`TQ|H&qupb^V<%Jf2{ zT&aHY%-q6-&)*V~V(Xn`2h{5tE`H;>)ILU_8#}uH{k>Qpjb}o8{IrJCz$!2I9y`x4 zQec+MR}okT*UOywahLMh5|lDXyQ?sBHYb)^Zi5qc3L7x|8? zCc3sa4R$xT6>LNulN!qj=ySixO~8{;rzTquu+N#uu&RpXugbZoVk$Q&-C~lhu8npW8l|?MhGKj^t~} z@Gq1hGHa(!2WC}ioOf#PG?_IJRoSNeP-CS`8#hz6{7{N73+EQ)i#Uh1lrawdm@&QZ zj0m58C8-#&TSd(CXCQ4AWwGUxC`nU02_FH=it3t}jFk;G|jbH>UgZj&YAv zi<7C|=}}&LDejdVbI;!En$Ahvs~!t`#0=S^zUms#^yiomw72Kp z-I!?{=-g^QVGG9sSIuDIjO55-g>bpG&`d18hQ)))FG*?d_Wlyhe;Fey;n5IA{RMN8 z)Sq=!zX|Rg-2jtA*xL-QNqL3A;$UtXpM$|GVs474!{pU5x4ralvi~RxLI1=6`X>he z!~odHe`4_exEN?&faChT{wW9pSl-(TRr6Oz8ZJSdfZP+%Ly+nOW?%yMCiljyZ=`uv z=hTkGp5YKmS)1j;27Bt^its_RzSx~YS6`fT3v^Y&pg4V!M{K}Zz%AgP5>Lb5ekeI|6f7^WqCMEs~h$vv^=3%n-1R}r#XBh zZh?Bvqvq@CX1f%GEVH!!{%qC0euDk0*ho8`1N)`X2RyPHgDe-%L8>*Ve<*VFp{^or zzHps#LBo9HmAI-AQGTy~p#(;G*<1N9KKmD+?Uf2lHUEO%|8toHguDGD0zQd>b!5ri zmhC|c-k*osFmQyY9>cjZ4DBEYnfvO0np*kSDMkwJt^7yRemBQm(G<~jn zenkPFjydx`-9UgsZvBtkfwL%GFViohy0-?5*#34`nbtcoO=sY=w*Fx6=O`FBtG``4ikd zQtc+nxvvrZ<;?xhImDm*KHt2f|37uyN#C){;>yZOLDsj?ic-@W1uirJ;`!~{f0E+& z4~VZMmj%W3VMYXM14hCHITg#zzd9)%oruP*@jNBqDVi9HfYZT<=jYkryF}Q56v0JP zl{q?Yb920@4Sm#B=GnlWcdwut#`8RCXnA%LzU$@x2J8aTiin$=iKf&tthd({hP#SS z$_?FFu`752KS3k##kVT8JAU9@0=ZFperF4}9-S)KR4bvg;XH~&_qxfk5JHqaD{hIL?Q+GW-J)FyYIFTBk+tU8&wqg)AZ z@);&Iyw0o`dVKx@6iRs?}u&u}oyX?@AOwVsq`2Azv z?VQc+QAs*yG#knLT{V1e&%& z|H1L*65vr_7z zf^2Is)24-%aslc6Q#F4s3uzhc2+tz^N(Lc>wCI+^I9s8FQObQU15c`49tCX?ZfF|` z4-38-qen)6`{VB-4c-E(isb>_Z}F9Yq=(R@1Tr4s%e(fh+O`UoBsHZ(>8w?d0f4uVs>WsJ*+^MZ+>T`Omsl) z50U)iPM#Yj*PzLb@tA5q)X1vegB`?C#=h1$L%6l&Bi9&LF;d7e5VJAT)J+@EI`T^n zZ|d$xEGVsi_Tc~d>;xoT$DRjHe4ogcNIsfa!6e^Z%ZZL0N;N+eZ_H9Gz&mNRI0^rl zHR|h$Xk&*H_iyMMe(;4G-(vyBjoP++$F7r9WzvEeZo`ygTEk4%^C|-tn|`U>^D(jX z$AGcR!-KZ|B0P(J`|w^)XDom<;r?8c2KbO#p?tgc84+mF53S-Hzh(6BIOLx+&! zQ3$(7S~kyu|I_}Si~JrRBP&ilgXs{T7_gk8A3_*s2%d2p`{@J5X-&OzRx^%DasXYC z%Taw7_4mK)ra+F~wL$k@jW?3u0mm2%ayrNOyYY&8@_XoCCSqjP-BtYMIL4tNsHrV} zLM;`|^^POI1B#8d`1}##^Ju@YXHaAY$GjujLeF3PJu9$e*yA5sO}NR;Swgz!E>s0_ znrY6lYYN>l-hWiuO--eL(VGh0@1DDd=Od4!O&3&1{QFezB^Ui=dBfKaekaTB@!#(- z*X;4%Hvnd-;kxHB8|ZNn*JLNrQ*G8r`0!pZ})|M_^HYRTRC}yT(NBq%r9u^cS~mIQm8i<3MFv!{M>HGsM{b=~x|EZ_k-^Wt>eU>T1FD`kx#$=uNi$J1# zh)?`|oZoug`|-bNsQAZv*D_DoYfRF6{`usy|J-VL;dedhcx@_d{}*jWlwY()Q+aT3 z@OSnI&?%B|&(64Pg#V}KKfg3?RSgcD<;Kx$6l-DBC@`v{*uNBaIiUj)!)j@(&2*4v$%Gd^|)WV3C#LI?81F7&HJvgp0B z7B{ZM3^shnm!@k^dDO2o960helBm7tgq)=hAmr3%Bk+{9+dQQmYty@i-f7-<;?C`E zT;n#RSY3&qG2|R)VArbqk{{{jzQGamv8Arbn)0nqf2rq`t8?%4(?&^0I?3?M+m44# zUwml1r2~6!ByDGvS9a8$W{uU(m z15!U$Xg|av+lN<^Rbk}%nThw!=!!<_z;%awgQ_IM&GSuzuIy)yFaJdP2Y zVGp`2GObtpIxOx)YhiD-Y))KOK+V6NQhL<c+TpbM8uskz}RgFlREketnOm2@`x{ zCs-8fMqSQ~1DInbmcaJp(Qi&)7`Xy_qLHDhR-C3ztZo~3gV^l?M<&0~10o4-CA;r# z4khSJ&G7Gr?Q(N_DD6z{?r01N{YkZblt0dp# zP;S`i1so>-C?=_0q!@jaPpeMrR=9K4bdH8uzuwV#d^6$dy1+W3R*t#$RI4-nsa+ml z5MQfI4-_U#ol%t`=G!k7{#DGpQ^POjI6X&dXmsc9n#HcGE&l7Rirr0jAukB=`BAex zGjuLd^)qW@*Toxi-oXo!B}R=^jsoji1s(%=sD8{DCDHx#;27T$#}-gbBjdYW2DJo4 zdPsMiLyLZXs_5EHNo)ar<70fcgV1#yNf$IM`}s#vL5hgGlw4}++j zO&ZYWWE45XLCQ!?{Jf?@L1)_rG%v~r`mv`uda=L#)hxyre&|;3^gM2@^lBdqEdyCW zMr8UnU0|)Y|7!%<=)I$ny_1UI22W>lN(gEHKJ&41#KsXVqpx>2re0?}&8F=AP)GRk zej|smM}VZ$1pP)3Ls(ksnxF?$J7rgz6x~rFmAg(09bH(#r%W{nE#}6PK8Kk&F<%TQ zpMGWWW|q?n{ zT%5Wwt};kaXRg9+-_C?BM?INEeL23FG-RWeP8Fn9ln6Iq*t_TKQM{{M#7iX#%95y_eoS#;A)(~uRLj)| z>|LSG;zwoeZ!KuszALj;O;K;BY&hGLsJQvYr@d)4ZGhPfZ(1*){M<1u{DDwI1fE61 zO-2+_iF>E)l3vzCnjEvBP@>U$u&v&{UF2NlkvEL35W!%$x5}~KIP&u&=|Q)PQ-n6L zWaMcT6SEJ#-tc=}aw0G|_{%d}*36^HHOl}*Ak7Q=0uDp*S*u-PR|#aPu{}HPHux8v zRzvpn6}jd(2&b$So*ZnIrJ}4n(UY7r`-T64X{Q!W-!syS3={Z7C)&hV`_|R*WTzPA ziJg|tKe|~)%F}U^GxFwhn4*Ls?p}>ggtb@|^RPYhWE37TKd}=AJ`Zj0k&$-%WmAbwu%QpLH;aDT#l&0)JmQO~-rZ7~!aMj_x z_9voZLR<@v%Ql*eU{VooMogx{&P(TCg$qsFhHV1>MsettTkcQ?)f_ay;vy z_PSK@SL;5>vwyb!ZTjesN@qsB<2gNeo${ZrsZG>&c6=hrE@@z)mARWJ-nLwHbCIgc z8}OiE1?cQ}J{+TE zc&f)^;Qj5c_e7zyLgS3qTZ(rH%&od{RD#vim%^RL{otS#aZ*>gqVCvap=_4r;2n-E zFFcDQB}O-cT<;w^?w+DKAHr$9XTG2!DP-ejOW(oCkn{NrR@OkiN)$hqUd$S6VT_Qc zS0<+wj|Pp*=7je{E$W$%VpUrQ6J?DNw(GH1vC!*yHeHINsqQGh zWMZ(It!nm+UIkF*0!(j1hS6iv@53$Vi&a!Er#c(me`L)eA7lueZhV@Pt1NS4=}ADy zXMA1bPq-BGOd5iwrLFT3!Ww5TcGBsz({-ZI*b(412*;k-`#D|S7R@BhjuOJH=B~yJ zR3x-xM+NBhB00lKpEm2n@n9s(oq)gXVgF{wZ`nput5p^+_NQd+LJb`Q}s&cOLXLA z&r`smclcXi2G>kTMGcvjx9f6#_ZfJGe$l{m^Hh(hqosO)y3Xg zStDSh6=2U*3z0j&8Pwqy$n6Md=Zs>#(0z^=Y`WPrU8}xBTy7i&okX>LGDO0_X}1Q8 zjxllMgjR==hZgCHt{2=<#!aPE%@kP0lRoX}xZM!pIQWWp19l-&Z8lrUJi@k6Z0%34?yZM#p;S68JK&wDgG2ju*d1P&6_ z$2za9WhvA>>aFwrc-fnT?QVr`b5{a0Mbz`{!UikP6=2pO#9WB76Mc{fYwoi6ipyNm z>(?iHhcL5QMq)5Q=9|!tLZWJIli|7e2(R(H;xN%ti8G*|LYR8lp?1+U`ZJH6zQ)kD zhZwVWd}SEa&@g@(9m!PNhwXHBSM-T5N^QiRGRIBj6B>>=sl*EgCJIJaTr%JMVMq z=yd;>!xb3|%Y6+OHAMs)S`M>~IcCU~hggf=%!Na{mMD&tiP@y%M_3XNrwLgFuQddY z=8LW0sXKqP5o2yaF4gv&QKD*R?Z?B#FVvbQtIxLJk@Y@*elxBNzgDI+JdN`6`Mb=( zUm>z!-E~y`I38u|lt@tYJi=yD@U_;W33v6^f$@fmIt|-n#(t`;e89Zu%G)X3Y4jO> zLdX+P9_Y_{2+Ry>R8X@|k0g70oWjw*orY2eyxDC}K{%a~o@_lGD6vkgT2G6g9k{;O zdoJTPM`M~3Kuj)hr=Y_JxkDD{%Pso!*H<+v= zYubwy9F>LC_?6oNcxa4fbR-ThELDE1S}YjoHeGM4@<>RTIn`alOa0iTk_ir`jdPK5jFRCvUQHWT&4vZuvT=~@Wf=$ zzGGLY2$r_5Ja1KEgri`_TVk}N!^wz;*~qYjwkO{=32Htd=Q>;UbUNT9$Bod{t`vNN zxGD`dm5cpwC|@!#4~K3JGkG-mmV}16kJn%*d`>kVDFm)G{} zexBrl3+2azuX9VuLA^P2RI!~`PH^!b;G?kUByr$)Fym9XmHyn<^MKoy{KDb1E@mp% zE}tvTEq9Ry6>LeK)N`Q_Q9Tbi6^Wrn%?%Rpq*gjvbo=JST- zbYHKDyXyL;7~%D4{#0k3_JoH?O?yv%m2|-=3Zj5T!o$0}qKi?>IxcP?ML<)M3?l18 z_6u(U0NEqhD5mVtf6C!_D>?4|PB9oB*D?V{@&0q6Nf z9!gd{@yZ@G!}&r?y_50f0CQ%+y#Q89Te5(1dOQiIiM!obD)~0Im(J!&dZN}Lz)MKB zJ)6XFlpk=UM|tuP@TOu|a9G_cfVvGA_2hbAs}Q+6pvtx!~=t-@+v_1dm+(a`(irJ4B4 z+sDQpe4QeozSqF`rRHw5$+!Cz$BVV#EZCavv+8IW0(1NJc|A_XNE))>0roY(rSq7D zY^i01q>fzpl)L4Its)<@6!(P3BdX0W%Mtscr;0>aM|L+h&2j4=aX%kGMJ7}$u$Ewv zGc7d~XLp|+!tP_^=sa>a|AU&O)i!_Zq_HuF3b4n?LM4Z$WumaCE8026sVYje$%03EeAmYsar%Q>+SoFEx<#XfTIo|GlL|Z?|tBu2kEUv~j*VUFG&c2QdS` zpbEb_hl`*`vhUeIUT=;mV~=D4h_|2iVB8Lg_o@`WmpQUDJ?o+#UC?kKTc``GbbP${ ztsnCCR@~Yac&Ii~2~OIWg7nF1pVsHo!^Vo=0=ez#D*1WG7CH-Z1eteNr7L7!B}WFV zyOG^apW(e-$7;7t@+F0N+J3v_`FyNs#W~)O=L>{V-WEih^z?$m##*y7;X|pLi6J+e z?^C%Ny=Z?_@rC8_mCxkrv660+BNbF>3UKg1H-H zu(Lwd>7?zB6H&8O;fR<$>`dpxVf#q3 z%Cc+REt}Vge_VpFI2I?WHY8{B25G`Rmu`YN*6VEtj-is4LpjE8fVMN$h1B}bpS zK_sO~5S2k1q7iCbvVH1h@dJEw=M9JL0Ot3u$ekkXDc8`@~5NiZN6JQM(gr?gL1Zd6RTwT`(OQ8gWvWdh=fW zN~Y%bzLydX6oy`qh{&2vdhLO6YY1`qGo|Poq3%%$9ifjN%|65tTv#g8OQlE0RHn#; zl1p?CqV)Jk9SoBO%u1y?wwe{xBo5`fb+Lq}ZQI?A*4xg$pn5WnWt`zfe)xegCc#MY z{R3C}GAb4w7qdLg-o>pv^a`s)&-luNrL}IB$o}C6d1LwYgseMeu&`^EG2A_oL889+ zI;y~Lt={u(mAF=AM&bcEeTwV%`ps2{Ft|3v$WCdQ-(f6>rjS8c(1K=%HR^4ZPt|0# zQDrpr_Erh*Cu#CxlY?i97wzhZ54Qr8^#~53Xy`uvcuKqgk@1f>93d49 z(m6b@+D9^5o7!wQmI@!7nxi=9d*K-2%MTI6m-9BE@Wl&bI=v*+3`Wv7z5SFPxu0+w`9DuQKPYNE%h1fNPBw<^le5i&b2?&YosnDd88U3ppyaEKSn%a*l(r9X@+CpKEaC0@#Xr3Q&O^Bf)I#c zrXM2Cn*v9>sKR|a6l^)d`G5)PiSk;zo#gi9nUjOht8g?4ZLpVGww|6{vm@sz9Y(vg zSKGh^yg5;RspASliUKKkXn~x(Evkb2<4t0wItssv$XxGzg97REFZUfd8z~lGT=dfI{1w)-J( z3?F4svk%)7F08MSHVs6(fi+h=9n4>&tW6#YDy?`ji+p5je$fqrr)RSO$*o%{@x5KX zVEW>K+`K`o_o_;|Xi=;TnPQ@OT5!Et&`YN*8O1v%RqRxV?YBMXa{WjIO=7(+UYu?2 z>(g-G<>iLodGI32;I!d0_Jt7b;>(m7+#tE?RvFwQ^yE25vmgnCBo`z5K7F**pueCv zQe^hE{i@ul$U5@pZ@SkDzdRymk?D>OdR~IWk$}DUTU0Qn6pfd9_369klHoOrt>vyz zh-_vd1$(JOs0r2Qz(LbZVH|r6WT0m{r-h5ctD(`C#I(owj6QNqcs;|C7jyDF-P;#! zwl+s|XpMz`%lJ|4ch!Rs-VNElcT8C?NpV}983FXJC!~IF-`*Ln^?2oq0YxuxS4y9n z`=UPo*Z_&&5AvVrf+l_W^3F!iTdbE-t^E>y?a8j{r&Ll|X_3dglR6RmV}(hR4MuMQ z)*Wet%@3pUz9$Ug3%cGm3>mh49&>D3b!s75Z;BYzkTz|SFZCfoL1J|oU>NnS{W3?V zT_`Oxhwl?Cn8KEND5rlvp`_z=kg<>Y!0`qSfEZrhgOLMVrmjo=6p)9JxDbK}WKqL* zudLlYyyB6iN`@Tk<@0zMCEt$6^TZRh6w%@bct9VUNrdB&QB>K8p*wee9-`lh9hFsM>Pl==$i1xTf=UXOO=WWcYU!f-{Ryx zGJJio2UwHgf|GaLOtV~$Ww*~IJynbbIY{z^Zb|r{PqxQjiHP7Tt0;Hbu*l_~)0BSy zga`X<&A!0L(|KtK-WgQFedrztM+7La$X*tOra$yg&b@_<8~a>0C)v&ST-N?eXlnue zkcLLVsT4WF8)O6de!{))>t5cuaI`=xw8X5E;F5gZds--C%gK3jW}!k@7!Xas)xKT! zVi0~`_Fbz!4>I|bU_TIW*4|qF4;9h0jrZcWuqYXjQpSdkNPZYq5{z-W7BHKX$gn5DOf?mEGizq6}NVI<#tdwMP@Rw zMuXKLM_MmkiJOsJFY%?S-q}Ypcspxl&K)e_@f_FaLFNuJKwQ z=*WeB0^5lG&N^SmykWOe1K@3MRX+n&(lpo|4r{}tA*lIrdpiv?dW=8A3YY6!uV{Gt z)!YYkHjSFq6Fj zn!_kvXbBm}%La)wD+!c3P_VrRIlULQKXqXQXlxD~yuk&nv5j25!5l8?r{YVElJAAK zB!delVZ*Wr62fI1%11jTIi_kTE|^9G_kw?xF|1=dgxk?A(Oo&=!rtj&s=y_%5tN|Ohj!ON%|@!p4`RZJ3zB21YypQ0>- zrNS@Q7dI!xM-8*06ixG3Xn^%=LunUc|28mXuGJnATVm))=r1-9|KTG#ZF(7>pQ8*E zt=e~@GWx^C_u%zUNLtt(^B7h>q8j3B5XoZg_sG>#*Z0jLKdjkUf911Z-t6jAkPnX^ zr12QPP&!73EW*?N+H?iXJf@?swzm9T81K&h4hoO#evdF7yc2~G)V}d;CF$>G$%|6j zz#tGC5?>p9r$`u=Aj`Sn(HxYzNdRm|FKi+M!Y!{GJ4=Qlm*layzW~9Yy;Vo|U{;L4 zS9{ek%xP`hFPg%Zo}GOJFXYTP$@R*rNMB3o=|vV4Uo0Dqbw%GCCy!iwAokSy3C)NW z0A7fbDeC`W2l~rW`~@s%7|TB}@w9Npys8{jAI|90`W#$Lg$2WslR!56WLO`XB~?dy zvhruYgtfj5q_?w3I!yH2C)9e+&dGHNIlSe(0;*>l$*+%d*~+ ztC1M^EL&u*YE>0*00Kl*%@AS@Z1{aIzd@XPYk&_7V1)%>ly^oD;tr2;VnRJ=mR;;eT-dyF0BMMO`LYAn z5c!VdK0HShcJuUP?gFRelJw@W%Q?2b9D+0tBm!wb6#*jm!9f^hq6=zyYnD7Qlz!qC zeCWf?fvvFA$V6Kx+>m8b4-b5E9-0_yU1d6$RM?Rn;%lSSjIv!y9oU*u_9y4HInBgj zndrHw_M=x67({3)!tzcR0j5@TTg?Ikq_YnADBe`rTN!-cvia1y=-w1bl#~LyAv%* zlDZm+uYuDH)&=3E2yD+V+J6)nk9Ir&z3TX^T-hZA`lgA5!76_8hbP(Nnwm6nNBD1F zug;R7fyO>H-E!jHSL{xP6hoPi@CXi3$l7Z11$=lInTJ`5VDpvTHwUfQn9Nc4VK z_pcPd%mqoT7^3^_54QCo#Q9QXEP6B+f5~tVAQSjFoyc{J-V}wP$fQcoU5+C|9R}&{ zhN%oe#i=!)?mF%FJjMh~H4dyWZAa2C;85@kuUi^!K15r+#8Mx&&Q&O$)1j{fK3aJU zY+HXXzYFJcVaW%;0U%l`3=|8{Tw|@}^4-gW5F+kkc;@8x@~y(lM3c3E z8z3>p4h_>5sC|YB&z_9YhE;tVi3ds*zrRKe8_F9ufkpisjHmNiCQwL08-|!bHSOiaUdyC6%Trjl1l(hQ3apAp9a9p{ zq1=n_N6Y8VoB0GKMn~d+uoS>!DvhpNf*tX+pYhE^bP9Tf5QGxR@b>ZU4{lym*D|-5 zq~i8vMi3(=c%s0(vJ>HKDLf|Qr97zUYHU-2`9W%TMKkJZlx2kCA)uJ5u$VuS{kf#y z#T>P!jQWV682@s3O-bX5=n>#vQzqdAGcb)k`$C)~FR26*9rKfAjK8$nAL5_pen0A>IkxVb-Hfv_K8eyLAxs zN-ydlq9fQC%QEZgF1|HEf-lu%tuyRmpm_MGPk%kUFnO>EfYJw;G?4B~fC12DYGmC~ z24n>!(%W0Zaw)Z^myCuZxv<_mv9aU?Ei7SB56Z<%6IQVx>cWKPBWd}N(%B9i-)rO? zXis(<$RgO$#mW{+*?+=;9n$3b&5;4p{c>d2gn>^Zz82f`T&hCi-E=#W!v>{lY-ln< z4`k!^$E0*($9o$x%NM=9{SXxxC^2<>IwY_C`X~Z*CQFn~4Lx;$GJ&rD>K)}K4O3Qn zLJm*j0(pa5X`-hO=|9&bKuiDum^!k<9~Kp}Ntudbh$ar9BGC)R0LVm14L82nFaTsZ zfm#2hzkgy{iS(M{u=COxw&b*yjqrXs1d+jD5<-^F-W7Do`23AqZ9&rQN^fCds@Pl7 zz!*`6oYCo1i%;K&pXNYmQ#Ls2Z3sXTeCaY^kbn@c7}+l`bdB~Bs#oyG70%ow9basz zmx<;hRAr+@mZ`#)SEdi`I3SYb4O|2vu1m&^kC)Nwfi&4jk70EqfSS_8OjiBf6Cv{= zsqZOIl-6DLh9tvLv>FbAU1)p}VkjFIS{qh=H0UBgiC_fY8xg=c?>RP(0%7o#h4*O33B^E$4}!*O?^LDF?<(ACrH3B4X2IZB0%de20T;8N^rfcfqW9diK#lYJXQ3v{hbuLj^vYAl^~K{DZom3=SqmEpy0fvl;0e1_=!mjMwJ5aa4GRF?Z@S*>RRz2dFed22KAD0GXsAAd|6+PcXgra^)L!3f75;KjQo_cd1VXdZ(U*Sdu*4J6OD zvh#`h+Bv)RQddDh;@%w^&#0n_~?2^N#dBp+QUMA_6u=fMDaAR}v3ci*}v^^=$lo&0-+^Dm$z=j^kyg~O5U`|WI1=viWOSX+X zhzw(1P^!(NT_{{lm5YX|+46ap`7U%N=%>#SVGGbxkdQjJoxO%~2S@I6klp~$8_5Hk zile{cGYrnHF8y9g4!p^ZMjwu-0OFl?6fOvNXPd=)Mw3|{5+l|?qL6~Le;78`2#Vfv z0!k;pGuEc!x&d*Oz*c;Mvyz9Bf7uE+q(zq&R0?&xmuiK|tBU8xFiAP6MYITf%%Qs2 zphwWvVTu(fQqdPGeDmNU3P)uqUAk`q6h!15z#1%f0^0C?$}RFY6`=+y0_jhDM;O-c zIl!oBFb^_O`{JM9DLl@Br8{}lM~3{c$Tv1<`joW6D)X8*3I&`1F7r7MCQW@<6JN&? z?f*H(@0&9y*YoJ(wfOG}c;sgKbge3Yy2$tF;4`)7)C1qI>cviHlUb^ch09dcDZc+~ zMaIUH@)qTP;_|j>-M12#*N*O-vlt`MSHdRFQIquq{MY3ROS~7__aV(V4xe*SHMk4**zM8cmINp5 zK)~pkM$j$$UM0wv6$g4be4W6pZR^oo49p~iY0|*^#v;vUtok4|z+;F7>2RT3-!Ol7 ziOxA;>kQub^%HX@gev>mTQ860FetxheG-1l_l8+8t@}b$S@Hwe7qM`1n%n})77*BS_iBjb$j_ukcWpVd;ORogk|a4 z5rmA28SBRVvq=wEr}Vh`U0 zgh7>f;s{(Q{=_6>MItAn5Lnw$70P}yAuy(6BonGgg$?D8_YO5GFupDRxZg3l zI1kXx@{s{d4=xT3yt@3Ic$FkQyc5G#6K&7>tpHKHe{RmjJO+puU3hEHfn2kG$A?5I z3B19}Xhbt=p8%3Vh#sddhOBd^Cv^Kkrzait;myBIpbX&7cyFp!JlM&gil)_zpUUhd zfep!cD>?=}c=!cN+xjdFVHD5I1>)_+C;9(Gs%*!n>y-@c7U5D41qfE=dvRhPE zAznqK*(!T|i$Q8oY7qmLeu)+K)o%>|C9xdv;Wtn&z{%wbg1#i+8*1-NYyFt|N}0)1#GyNWj-P%JxY_OXGD2LW8(P8V6`tM8ckP?rbTGi_G! zAdgQ)PT$A+eJ#X5Cupus%HH7j|MtjiE%WxXD0a|Hs0UVv_*W{}V+uOr`9ky)TBTIU zL_mysX1hM3hGDgL*=Q|4#RsSIO!Tvo zIeFnDfXFu5H#9$U1`Ja2=x!lsBM^jAUyYrw6p_(Cw7_B^vN++CKC+di1BN?c$O)GH zDaZamVAw@Ldu1UO@fhVPX{fd`@_>L1Y7p7n`dq^8Ly$7O1e%2HH)0rB4@-dnzw+Vt z&9bxt0!cDp^v9ZT=SiBkPG~wiO%8g7SZqC*>=~quStXx&UNbeLb)u>3NVm}Eo(Wr? z=v@jme3|TBaSR%5KP7C)Aje2Wy3#IB}+R zEGPvCSmxAIb;Hg3-=`rUbQu+2fv*BYn|8-R3dh>p-keK3#5{VW?TPa7<)SwH^z}X= zYqdGE^ySa6P4Mi4b+mX)<>)?5aksO4BpG*Ke3W}8<@wrW|NXwklP|?@>)LBS%0_Vo z-}K@5Q6HQx<>wDjm%#}$V$KHe`?H%rQ*l)HbdSN5KovwaNNz$r$kenr?@qSX&(AL@ zlD``(0d)erum?L5!%2p1+R*nnYH{{* zaj}mfpkvS?ZGhX5Y51NF@)`6gJIbCw4HtTO_Lt?z)=>QEpHUVHv4$Pnd@}nSpYBVs zgI8Z+i9aA{9CCa1DaS(nWmqkk<&n3r$(5k3e!?Qa`gU?qd(%>ZQ|@6ft9OlapE;e-+u-!@EO!iF%%d)!Ttb`5F5cAP71WFU`A~yvx%EFzl-JZQHPdpwx6OP zTgG#>Z4n!h_DO=->JpZ9#7O71Esv>c+orquCi#5O3z6Ddm`q3k<<9ePtLt#9*HN9* z8mH8&TxCfPK#R}+SI%0%(n@ODS5DNj>YA}5mhEu^mr?mP=>VJDliHqVTQnXq!X}QY z5OyFzmvR^Hxdn~SxF^(O6V6>?HF`IuOhPz01i|UAo@*| z!AF6EX3Y`NW0p;x2Uo+pe5xo_jWiN#V2<*NS$D$OLaSMTE(8w8tn6Yl9UPjk+K?;6 z$4_i`lT9(YB+p7`I;8XdA{|BB*k${)4?Btnj$E&}>%cbV%$m+kaQ8&;2x@i|lgnOn zyKRLPx#!CynL3}`pPfxIqtjO~&?3Nr){bHH!MR1TcW+RgAOw4p24qrQ)RC z%~bawxUk9;6#^fYU;8-B2tW45BL`;TO=Xgu42l_vR#KWrnI&R68i!1&=Z{kOs)fc) zWM3#Id7d$sOi4`HB9nM`^=9D%D={-A&ugM5%17c&dd*656hbFXe-`&Em{rD@+%^=P+-11G1eUSFcT@AgI7VWW$334j{` z_92*)7o?k+w?oaPYXlq4%ildJpfI^^R$uH;a58^rLBHa4l$o#C6DL8u!tPmSY#YiJ z!3B<>#N8~HwrQ))3X{Rz84z>LY^*lN5%Mo?Ay?w~-PpGzK*SMT>t5N{(-X@_%M9{3 z#`f((FaU264On^r3^cm@4#DB9u?P4mup5}JZ^Ho!Lq0C{Q~8CDoQj2T`{KUxesh2w z9G5Fh)ej_&%oBdE$Z(F@#@IF2l?Gj{ZLSIJ^^c)0FSxSywbGI|^Bdu_yX8>=>M`a6 zeNw&&=Jj5$=s0#IBNhY5i~8Ku!t_vv<)wbUeS|Xy(wv+vJ8EAxYl)E}?187=T7COI zMFFBlwgWy&KW3cG-~(^%AbVpc`!~-SBujs*o2gcAII!g&5YW4My=Pb~Me8A1ys1sy z>OKt#MH0{p7HZ0>_tY{6+-`p)J$$5PUx}4?bFo8ID#t9J>XXI}s^jK117Fp<7qI&? zHPmZLahF18I?Kjh74<~Qei(e{%LAXK8`G0EAmNq90#Z;4qySEA#&ZFYN0)f7?Ke#I zXMSQiR9X&^9nCl3-Q^P$%yl|pQy=GS+s(|gEm{C^Yr_$`*EmHZ{rH^IkLUhw;k6v(pfEbFG86jtOmVaB`hu-zP0Io z3t{oGHA7en*YG8(U#>p)RTdmhN7GB2b1r0n9e(Btf|+&N6@FTY0P&`sTIj8xsOCSZ zJ=pjOm^CxsH5^Fg9b8X!Z@uvJ6aB;`KslNm3pgrCzrXNa z>NWeJmQJK{SWP3Fi@i`y+wP&)Z%G`!tSd4ti(a@(Q{*(i-R8~1WIi^09gQ-xv00)| zqY~d_;0Wq*C)NI1EQk(sm;qx=p4#d5;s%?GnKr7zth55S-x^i`&t*Oo!uBrQP7A>p zVK*cm+!n62c8qX;U~k~Gt*6l`!`hc`!-}4IUO*88*8mUd_8nlS;S1WQ@kyN=61n-4 z2xvHt5~C>q?Eue!V;?p*Yi8i3B&u%81BYK9UdkV5tIl-FWTWNDRI<6s_G)3>4%|Ug z1cR;GM)Hfu(1te_(|$6CYP@U-f-btJhFQ?GR7K0xL}$1KNNuO`|(Uo zuU8*L8Y~^!(+&q}dx{bEb!Vjm6+{lyQAa74->*KAHBv`(eSM#-r^zy789;pkE-33C z8zKW!BTUcfp4*3RqLU`&w?~rBYm=NvUp%f$(G}{=)v1B1KfPXGxKj z6(YA~heWc;7NKm?RZ@vEvS&7Bgvh=nqsW$(71^s~ulJnq)xF-|@AtmvAMW?w?-|c| z#^-sSGuFn=3>=hV>0E@kVs7s(W_!B*)!ZI?Xl^q!MUf4kTctp;o!b#$B=~%Y7ymtT zPX5*LsOz~`%^yXDDh_(3b?0YHf2`%J`~7D<0W2^98EpuDxpP?d^qp#PskNh7DoB`q>6O){zr5cGRqpcu{kVgGo1;8_-mOs?;_XqRIu#B{+24JZel~kw~q? zBgZuuA{A_kZU&h(SbqEvlk*vL-ULBd$u~IKiLs%vu?8a%PQLGi-S6ghRGjXuQs1XL zcWt9|S5&&h+i;=T#8yKgM0iDwV}=hFKToHAKk(HBO{sB$=I+Qn2L_a?{QTI*qXeI{ z?gt^bLnkSpSD#Ie`t>~DlSOVcaG86m?SxhgjbrPDwe#m6{IYa_*nL4 z^^pU&?Z8YH5hiGHk{jQbKWxFcxq_ZQ_}4D3?Xgt~-{x&sotVPk=}9apZB_l|A2}OS z)~U|+!-!@Y&; zWi;n$5aPIrWd|&~OYfXAEJQ9sE;m`Mi|eOIXWsd>Yl{xQtC-3ds#z*DGlPQIQio_pk~y>GXs zHD9E;)7O@I9>?mWrSNMBr=)>LN0dGq?LXUcYU8SE3VXmy#M!>yFnMIN#a1~?F!hqA zXM&p7F11E+up#Ms3Ofp=`QQ5YPc6q2fJ=hq9O}&y)JndzP-;yKUr6v$RJS_eW=K*c zQ`B<)Cm4#-WDo4sOsTJ%1Pe(=^J9^lY>^CkGv0ZVt@Db*SEKAM9Xc(so7Tfq$KxsH zd{AHEi%6khr;bwSLgFjO0Px*~tdU57kWXB4m?_mIsfFag3}xnS~NpD>%4Q8GZCy zfA~9M?*>UJq9O)fuRB&GU#c^0x;|W`(#j@qJ)!5}-os|YtS=icjhz8IZ9yOr2@b+3 zw#fQxQE(HE1%4`)>gfT_4W;4A3(WggUxi;Z%lY!e)hni`zHk3;Arp&Y-}TJWhDT7V zpN0TM;2GPke&e5d4CnIDOfz1Nva|GdWXj3Ibga;LIeFSR| zVVdXDXi`m10yE8o+DfJv=%j-0UD(Mix=pj)iFy7O{a+?^kA#rNnSi%`EO%rZRLFER zb=+Uc-m-BZd%GUDwRB%LO!e!~Yw|o^NvTfb^pd*vJM48w^@?xDt>`$FkhxnHa7Nwf ze^6 zsmQXiCE4C>N+UE2tZH7J-7*F(maflki5Gj{=suTM#`Da)z)MGtUl-`flWYV1BL+Vx zS1>>pEk;Dz8D9VB0Gro3y$#a3B6W@X!fo2l9Uk2jQVgpef2*lgd;p~#sQ;d=Y%X+c zS5v6c$35E&7B3p%y^?DScfNr=kTX^Gy4393 z_U6d*t2N`T<1S^(Cxe;5;VHE@YvpXZ^nyKxZd1?Qm`&C_VGzEu@69g;z?5`~4TB#m zwpMOAkS+bCW|)8deo;Yp7n4^;4aa?w&Yrwd|9$b)uB-#*vv!)@yqr_VuaxWcarPhm z_(rGXaZmT1Qa8Uk=7q2dNrvZ^)8&z_2jY^Z8Mj#8O+;shGMlJ!7NzyI4EI!7-tpqh z7o2-8Wov$5OX!4c#}Tt*^AAW1`hi-_ks5!rANW?Lj^|J*0ZyaaLfKpT=G*a)9r9dv81;Dh-8fDyzn{s9wB9W z*U@}=z6{O0ch2c8!wi`gHy=hV-js@va#p@5D#-6*xONdJd{NFayYm1=F8lT7XGgkT zaSSTxxzf|ZLEHWsG@}lT{B>cYGt;j@#uHneM9U|~?;+<#WO3nNG@$&)>M(0G{7Ypa2GTi&4R`x{~O#^#(2Gp^&Ihx&k{QHmrAQOY$MFTL3&f49F z#*1aF%P0pfywK_}>G?eL(A0v&YC%uviMpvT@v>6o*+tisFC+Iy>RTQ6bk>#*cuhBg z=v-&epN}vXGjP$VbwC2R;&E@IoJhgnrYQKuqefs#ULZrj4X2GW*zP>%ppYQQEB#As zmi?4=Q3Xd=?hz-I1ENwy$M1I6-_(9djy^sql6A7|J>1^T!ZvC**uS+dQta)%J_$5U z;;pednUOt=5or33FhAbSUBV_`zW%5T6ht@YcQ1l)L&PAMZrXBfw>W_{qZqD`r z^c{r=HpoB$Z|FOSabnG<_VvnE5TOsZ?59nl%>wFzg#&gMXkSp$uE5Fqcm#C);lH~6 zKEajK2;f8ocPDHUnaam5Q< zW5Sx(2P`$)Vqb3*7u+*jdokNDl>=N|ly!aNiS6%DD8~Pe1b1dn=JMPGrjpT4#$vnS z&}^7c`)&>qI7m`e0Z{Q)$AYsr!Qg|gAq>4D> zfFFe2gSbQFaMda(czcdv0q_m)$nSt}f5!!WjG;o@I`|%nyzSWklMX?Cf|}Xx0{Dcc zM!*{KoTNMZ^&gx*0>STx+p6h*{X3@tu{(I}Uh2@R&!du36~Ze_;B(d-bfB=+a6JPY z3*OJ)nXV#a;RY0ONhH|plPU=Ck%GPP-%%_nK{7bCNKtLab8wCx}t@V%*yth2ys0C0ZYLEtYDVxI6w2#`@n zaYt68T?v(+KxP4(%ab}}vMd}Pu3l~3n~ghk{9v3IC{<BR9QCQ`Ykatw<7*hhpjUu>V<{es|S?(|v40_CaxLPeaj1YXSoAqRSd zJHw)nQG%4k_J;11FkZ15-q}_uA}mfokiz{D?z;^mnxI5Z{+D+zB+g9vf+_INz!r!n zbR8vs6NCFy$42xPVZw|(mD9DU=ZlZKp{)N@4E=P;J`hok!Egv7YUZ{yNFQOX~Kw= zp(6uIvI`}cPi#G+z3pOf7HI4WjKvK|j6WJIwT1ScjSC{~bFEgai_@c>8ceUJ`&K7Y z>94~Y1}*~KSeFaEQly|wwgrgQ?%ZB5%W7+mHfvs^2q|3Ps&G(r*&z?kMYR4BDBc}f zg@(_Ppb%ppwf<_dT$^40yh!tStYeyl8ujGObeZp25Xf520DSVd6|DNUs7Oe5gQcz5 zmQf~GM9}tZ>)@|DR=+~@fYI!cl%C;Q-~~#lkNGwlDmTn2yg@Z&+3q!Xte$6}Nf26M zLOxGw+xt$sE5TB*URba`n(|KT1mhLh{egWjUw{-W$F^zV1FHr<)K(>CO680sa+f%O!2vKMFmFg@9L{Ct`r8KxwkL2^gg7}X`?BGf1D3nNy z5KgTLTzWC~0B<#O;5H1yn=CB+W@|)_WJIXu_tIea7Wlc3FU83ZteG1L+aPTSsQy>P zsK8Nz1+h+rch&%%DIt3y1=GDf0s9!vHLn88F_8$K??v<#c6i$e-}slRG*qNu)f-g` zLk4C9a5k=>jR@p#Lk5RDH{|(;6_=ZEkVGg72+n{HGo)D&^c5GP@sYRb6#sWf8S@oH z;+k=@loPXaJr~`!g_Fx36z2ibmw6716@x)0O=SDic)7BiP2nt<=(RqCeM+Ai^2i+(5XCpL(&S^E&)n}H-kZX z2>#;q4)(;Y0m8kwBZ&T`6K{GhBUthF)rN`$a;hOW$i-~G_;=cnk2(I^vV;r=i!9WNgR9~} z>G_AT#)xgx_kopTOWZ z;EIuY1Y}~zc%Fe78R)gtQm#{54BuK`zPgA3mBhD~}H&sJ7xIACHC_Gp+rD{Q{u z7@Zg@-kpEzp?UA^O}JXa%L9)xy9IGKT4Z)tB(&$=05zU7{R$9$6H%Te_<(BS!-L#- zpH{@)sD4|x^Bm|`OyBp4dw&LK16Z~IAA_jS-w|8X>L4hfDQB(a8|}H$n^$;1O$KLq z>nbWdk4O$kOM9-5jy66Z1urazb|LPUg{J@5N8u}Yn26yAbU;6${kD03;2NaW`e9(! zx{u-oB-7;?f?gucnPKfBH=r-}=Yut3>4Ycpd0K@DxUXsj%15hkX3w;nxU42*dlXue zpqt6=;$e%&OnKS8^LY9qzwC#Mi4+V2eCgy*P)nGAw`0j==t2NhM1>!Gt?l!_s0ve& z3;hbDJRyil8w|f=i+d9k>8fy)1iu&%6WQtOG%?|))8H5L-z9)-p?G_vig;<4b@@jT z#0oZ5|I8=2IRW1Y@As#@HSCE5h6^bJ_UFz3Sc(*+BdB+J{G&0lHwtyUB#@^EPK0iM zj}rfipZvM9)?9?d$alL5N(|3!Rd7VexAErZzy=gtA=^>K$y?C9fCYmlC}fIMB;soL zV)7@G-i5+%(=-TtMq&Bz(jV}ZFdUT}JWcl!oNjfP@)k&e6Vj9iD?%(CS`t4ka;Ai7 z8Qn3EjGZWx(*P{-GJK4GALUBWTr30@k^G7$CtH)H)<2jd{^jJ)()qoUPN!56%#J=l z7{@2VII;=j_$O}&MB*6>OND1rwuj;fP=)l#du#YsQXC_0`{g%ek~p*w-mU3?n9LU$Y% zkv3f30&^X%3tJ%enRNY7Xn##ut%GnHq2&Y4!-H(MVw2p4(}LVmDEImGzhN5)9v6P*36>l2w0WLjKJW(38cJ9P=ZKvSX#s)5V}v1vD1x} z-Z6qlvvA8eQb26=?-&sUC{b{`a9i)tftmXRbRtsX3kvb$NCOIKYjag&4zskUXKQ68 zZvd_dg$47xU5+1F6l{l?{JB2e4|?Oq{MaSwZQMi(srIinIrtY^|M z5_?`WPxhA7Msz8FHN*p^K$=>GpF;K%2@n0!$YZ0;Pw9WY>tbz^aTb@tuQ8rlJz5mo zi8uw=C@2`WKD@q*m)OB!$WzI&^cd6qGY54{WP6q#n)H+wOnH{6Z0Htb)n@k@(uYw# zFXF2ncXFOpvx;4oHjZfP-1@?PUF3lnWAn1|rV07$M}aY^P&*_?wqKm?0sh zZRzE&kEctRK5!U^`OBK0p8-1L%3S(iQ=K-J=l&<3LUFNj8FZX|Zptt?aA3byWAI}x?)$zAEPkbh#8R?uOgBzE;X zn6XCYMVbA==mP2I0`+C zc=cHC+Em!6((R4XFHOO56C+vsVV(vtDWH4QOEh@nT=SM-jS%KS1m)zIn|MVIvvs%B zZG%?TyQ`hYTvNS=+JB9FA#-1AcsDQ;lf$bFY^B5iR4VJ^@6NRFc5jWi20YK;7>wFF zJG(V|=zwZXUVipd=EPT7FTKMm`^iQ4Sf|!Wm6lJQk#bsd&wg?LzE9lc`&NINBXUk` zrHr4VMGes5Esg3o0@pKr`KLnhV(1=O)R`_Y?PT_tr_@rp7+g`9S}svqz)AAw{O6t@ z`CuE2BdiPiA*f=df^lWz{w5KFtHaE1zpMSmfvItE?Mv_50?gm|>m~kc@fwt_0&(${9!EmFs{-swtN{b`9Sh3@M_WsD2E5y*;_{ec{e+=b=7l zX5m3Dig$&t)TG$`-Yr|2ETZF32I+4B~r#uPNiRMvZo`vS>F{**uL&o z0fYRpIu?*3ThI$GHHw!(R^$)3OgcT|dqvduljY6$nJq}r{h5VX3#+Wn3XD-Ff7@!0 z57YkX&XUp|557LZq&n&BN4{X5_9{rf2W)ck#@kh_G6C~kB3O8wUBwb<_cIg>>ACki zc7b)1?Uyb_p8S1Y>YZuox#EO}*Bt<*Q9j#)n<<@G?}qTuLPlj&O~uBw<4Pi+NnE#; zwoZE=S%hCoW}n6D?(+^pC$Cc2ckDsYMdPJy>NH_~1a@cwM0KjSucqW=c5J6#n{#gQFRlX}F$wJw=4f&Bhw(R8LC+?##GQFXGcqH4GMrx6Yp*_%nH}W) z_Wf;x&(QLS!#6Jjy3}M8il1Q5!wA-psu9n%Y^wg-fuCzbJs(7iT^Ct;^9!SV*7?j6 z?_AeV=+9i!X0@43JOUVp6e3CsFm_+2)Q*mVwH}R($7|mvSMGMmPOe&1kxGeVjPJd4 zIOI%kWj1|g8LM#bddjm|`7mPp8;Rc?K~gmaPl(bT2A-N=!*<2s7BXUTBm4FijQ?1& zC>y*t`#w6;vTGvU$Xj$X{l)dAW?#U$q$td31kds_4obPU+6XH~8IWem^?48fY)>4V zo`@&AGCx+oby%FD#||7ejmIh2{RD6Vw#oeUB(|NAL=e>^M{zp028{SM z%^%A@?OVPFwfr|b4y^ty0<5`27&Q*ooL@GEhDhjpPCE0iP$SST` z0NeFP0f?lbo&MmaCB(5wqTS-Hy6#b@mQAM%@_WB9CImm8@Os6dNHG+BVL@9$nRU~j z1-PUa{FNTy^%pY%9J0~&2mTGiZ+Ad;vZuU2|2~(^!bP3-g4^|dEU(&+zWOof0(lGNhDcro&GFRArPjyJt3Y$?$GJd=62OoWm@Yus ze@_oV54@N!LOj7u;om6@#|q2XeSnINgx+LRf@8@Mab*?$J4=Ph>e)`|sn>pL9V?7^ z;3gI5W<$HO;5M_+4ct;gJ3KC$dqRL}wPWRmYyAcl`&j1VSfw}T=gmJrwTBeuv%Jp` zOxKvJG4DC)RI(K}*HEh8Fk59p(|5Jz8AD`c@oLvM3YP=a9kQ(7ZddK^&|_r=cp8F# zrB)8;Ox_dV4e7_|HGkQ?+KV9;=3x&u_IBoHrkmrL_+2mntZNe$0V~;-UH%I;(fWkJ zdm&2DMPXS#kHXbNCP?` z`k8W_AkG;1|Nd6U(yDfC)sibO|FdI1`)B?D25+UCj{RQ5`qyga$E=@~@S)IW2<}Pw zBEin5YvEJ9yMfCXo+1x}oZvp&;En4anBVkMYLxT@+m_zz)LL>mvAz;^PnSVM=jtL; z32UbQhtK@Jw14bFj2*I6>Y8zUh#gp`t|! zHuvqs4ot4()5@K{z$yPerL!?Hj|@V$GD0Mnzmz~HryX=cMvTM>zJAUbx1MG=(DUic zE8<99lVu6IM7NNR67-C0mU@v?=B@p+`h~ZFWeJDDmL?{Cms6*C7tVi#6iRe;68p2R zY{s3()ps>3oygU}48kCRy?Bp$If}qQ78ruJg9{#)tJ)29lAbB~TA^Vxt>-SiQN+-D zr!1QCwjzm0q^_->wnM#qa$`mN*WR^dAL@19)RQ1F4k5=hh(rVyMjAJuH*U|3uzt>0 zzgXWg^7cN@fw7eamg5&V$&w>|oUZ)hdeIP8G`S1qCj$iyDd!dl6J|s3199#(HdSGo zcCC%%ZN-*-h7HEJut={^C!;SdRbS}Vi-EOYI`rubj!rpwj;!w5CGJ~XCO25cFP|0b z2ca1q*e?OGR<$?<5nj_!C#pS<6RR&b)3XO3e4ipS(j4IrI0sv6WpXIR@(?Zr0(irc zU_6C~@_9@tT@3I%1eOT_^@OszQP>m9YdgEO^7t52#p9WV>(o4n%ct|ENp;g>AIb`R zCYyivLyx(0V!f<4fde=IC#;r7CL7d1U|JruK-M+Vuxf}v{$puv(hIAl zpf1|%h}$+u7;-q23{(ofJYO+gI%3o`L;H_PbVdC;qu8g|)TYcoYPv#+8|0Om>e zwY;pJN+EJ%(4SWaBb9mfz5D$%_KDQ{VM*j5_W-lY0S4)3B92v9E{VP`m}3#;U0}=0 zcWsN!sH1uPBqZ~U+L2a@4(bpWhjO-_g`f;NV2BX!3C+R&W@Yf&n)jiED z*PL!>j?L$v&~M1-{uz1m_gFo<%~$c)yHIq#1eDWD2+Caf7s^q#bS}H6y15!07A70? zTtQ9dc)GS`PB?>m+GyDHjHU^-+PkrtzQ!&$5a>Gqx(}la^$KOhOp90C9mu2<+sjgt z%L7c3f?gh+98f;Fc{fv+dF8}rP*?{Qpa@5iT=2_ZykL${<%Dc{tMHB~;{GQ=N_plB za{Ir9-Um~&XHqG5(&?9=8ii@bwbzE4qn2$Y-pAi1)MZWPUWM=j!dyBp%k=g7mPM=K z0$L7}-{~Ce!4E}*zqO4tYb|u>eBx^PyB45go$n%8F{` zAC>$0CL;sO`J%Pw0_R2Gx2rU;E8hqKVzdK(8QKvfDE~h~ZZ5iq-TODSOiD@?MYR`G z0dIUrG@XgCZ7i&>+R6%%aP@|*CqNuCtUgg@-$fR`UTT|tspgw;=;D&JSlY|gk3DJy zm^s;}0c3+4-?~KFC0;c2Z^gIFp3{-n_79~%HPPeMos1}f5c32=;Bdg)6Q9MvGTU!! zn-ybsPQH11+MoMQ^mM!8!rWe|Z=a5XFp7f`P*WIknec;Ronm@c+7nq%xl%cOzgv2X zvZ&OWn8-xCMT(VN@O1#_H=YOIs?QGZgz}%QqLq^uS<3YrE0eJ`a$H z&r6T3HETsd8+d4Ev~c(gVcaKn^Z=(+wO}Mke2a*vYv?uM$ z-x*72GjcmrU80MJqCAG++Bw?z})$MLRX|%Hg}MzrG-V{n32PE`?m7!`a9BAu)Pm`p*#%?Rw4b&XCx{|cSYwey^V&Vd917y6nDfE-f;FbyO2V7X> zUWKKt{jTE`H16+SS*Dlse}0wkl8iRX;@YfEJPd>wA1=l2-9e>Lg5Ccc&w5=>3CC{n1X6az_1C7CC6|~_owt;?(&ljFt_kQy$|?TE2AJ|4 zDpj1IQgQ3fh;wO0E_d>V8^1R#W6V7D-2@m{VyFQJ70PrL8$u5XpQatUIV^WYm%53n zLwzq~+OZouy##qeD7#7epI!nG5s3yO>)ZoiInZUO1{6xyPo!RNP|C~qBAHshp0%Ih zXXFn$;#f<0%{l|54P+h1;auhi2w-h??{Ru*&RBsvNSG` z=%N*We)kZ|paOVJTN8Iq#9Ta|fnZqV>jwql$u=>uk8^dedDKBJsUE!c*KRoMa97{{ zvu+d{dr8y_B$y8R^3onB|Gx#+u&L2>b6(GO0sa8(wc(i#twLZ9y61Q@pyL^E=b>OY zrY6b17U?&vQU2(*jX4NJ#_b0*JM0Q&R(yhY-x>g@V5|)u0G1*6)V%~@G`zG!7?tag zO*~C;NFoN?`n@dGvRJfq6>WEMV?R1EaZ601q??KR3>{IBgvXo^6#7sCPL zc+b@*Vr&N7wX>f9uLGiC)SuBEDNJE2yyK4|n1Bb{nITt@(JDo%SIEIA-026M%xo%P^s>(e7ygvfjW9Y2)#lunB=~ z=XQ+RL3?`-E*oG*k1YcScEB1)iZxSg`QDq33ZySn__dlFxFV679I#@6`1K@{EDMei=>96jXrqKlGx@U> zCASN~$?zx)Ei!@0F5={9`wN^X**g&Tk>H~sY?%b-dVn$2G$Q3QA@JslWLTSo*A{$$ zv+O{I_H!($>T{W`nBfD%%{&<^9}e-4FVI-;`rk)3JXEWHf%S}45TJh1U&QVvqB$(o^;w@L0LhYbGs01?nYrqqeD#*%3^W_OEaYWG(VuG zPB$Dr%tj3TnCGK-6Tl&ihjOSF#rGA;(AAKXQCxpwz}9PIojdKEU`vw=-M+k?Z-BIf)4_N>Mkb(H7A+?^V5N@_lo zI0gCT+XZiC7;D+;wa`)*IGc`lIBG+R3J>nkyWWI&Cx*8DMve{Caa%Qgy-ZxWLiAoLB2oiwtI zx+%7Bao4b;Sd9DQxrn6?AO>~=o)DP8RUT>ON`vW^yoJ&hgyee$j$X!<)$RoVnQ$ex z&_L52zkK95a%OK+{o1egXYO8%2U73OQZ4rmIvfyFV>u0s)-`|R#BZ$kuB{&5g_0NlyYvb3+^OtuejAfHg^z+rr~#6o(8PV+`vrn3$2~xgWq(DO13kMt z-#`2T=`pOe3xocM_`Je)NC+ z!30jb{S8&4Os6{DO_6T8x}UlV zR0sC5t}SH`z(&{l`z!WHQ*Hm4uJh-6SL86qmp`h?ur>$IGJG}_9;#oTt!wheViH0(XJP_Zx*d@iIO1m=wQ?cSQRNv zVF;;x=YHtF4^H%?`@G=xe2&^hy1W>eIW`7(y-`!3>0P!rd&e(hT*D!mL-3VryCIc3 zh`vSYN;3XuwW1R)BcBHyvi^M5I&eokx*D&nPF~#aQ*Iww#+_%U8835UWmsY;PXi?&z=*$rO8wm=m}*>UGbKz#AaV=LjOJ`rnSK|L z)WtlvcFj7_V;$sT8jLvCUvfm_Pg+-P%yRx)muid@eu0jx$o8S!b|-A7_t3SQ`q+wo z?r{GA7Kh{nLLr#xIAed^8fqy_Mdrxjq|Ec3LC4nS#;Ij5 zcsTRYlI;il{-SE+CK^m!n~S||7v*lY_jUCK>t4T`fo_}!toTt4`U9zzBWljx`)K4{(FXs@s!o+n#Xph!kqK!>I zx^}4pVy9QgeKYYy^mb}u(G69NH+S*9K%D_*yIvkB2DC8{=ch%~Ny(_&b3DWdTcY*x zJTDM*G<_wT^`+K}cA zm>Wc40+F$%G)-P9?wAc3#fWQU{QVlf z+x>c5F4?jlM3}A^a3(2}lsgvj@1o%c#CewvsJh%f=y1E#Lwk6V<5F>O=&qxxCcBz~ zv;c+5)qvPzTWou}7u@ECh;(z`|}oaClAFrVte1K{SL&(6) zW&R+Ci3@i0D{^A7;nhnAg9_P*kDX2h+gqX+Co|v~m_DJmQZFBRK;upy`;2MkRqgKg zzsIbxKp&ybKrRcv(n>DlljENCxD&HKD8da6WNU71M;1QbmuT0i| zeill!{L~!iE2TPkco0SBQ!=!~p(oo`0V-~cyoq>{1ahT43fm5aTp*HKJdgT3>@*53 zgeW9|9P!sxBi4>mU%-WwsQ7}7;tUX>0wv?OFL)z76jnrZV+c+#ij575qj8;!fMazy zq>rI?Ngo)k>YbmT?_36h0!5<>kqaA@Fl$#88Doj{6Ag#KPchxd@xD2aJkH1$IlM}K z;2f=X6l8=i1Hr%+b`Nv$q>-@m?y)mf*p1WIbC6@nxz>!@x`v?9#>h`WkW2Q;OX^$9a9=O^CWm>58qAf5*~ag3_9nwGus`iX+% zvOe*9salA72`pbn%C@#~2|) z8K(#!XN(5~1w~cC|L*WK0=KG%BoBfoX{j;`7l7&lKc@#KsE-|`ZsZB%Ap()3q=BIH zD`7b?JR~T|PcX=|7g!kBk{aXt-$nx|u-{-`0hk>0?aHJNKDeQwtZ^JAB32vDo-PgR zRzN`G9F-~A2l;F$?e=ZBn+4`FkIElSN4q@d;a&d}yk|L7*8I2`aAi1$(*Q^K0lZep zPfgj62CtxM$C3fB1aumbt?=9l z;XQx?hY*aJ%OLpqj7cK$(*et{#NqwmTC(f7EzCz%@531mA)GMI4Xam;p2k5AMDf=e z7pOh%>fYZr-{kSz(?etnEp%H24AB&Fg`DMpMy3~Ta{)>P2V@6WCct5`sQ4g;Zcus8=ouh5BG9h4UE{DZ5ldhHnb@)H`xzT#PTC9*S?^6`er=n@X>ldTFXqNH_@u+6`axkeV!t zDeR~)y@%){lvPjmVVLyNWg1I7U)rJ@X&Lyvd~)N@TW?1Wk@o`+eP{0hIs~ z@hR2@@THjgnbQw{bhzftUwX*~g&Q2*4ZrqGMq)~_$Q^EW5@Fp>$0>dV2f$N;0L#ew zM|zuTLG><~dETh9b>&a?{j2myi{#dD0>jn)H{O~`UyH;+?{i(*x4I}x5p0QBvw3rv zu|&XHanj6XylT*4b2-&n4AhDZMpPHtIB4qYm=$<11~m!_H0s+(M})m7DFF=ZGEU*M zJ;_LY*&(*s9oP*!B#@&w3uZs~XnJ`@(V3@G@EIyCquR#?aoaG987!P?3l>mYV3AYx zIF3h^a?E-IRq<107VQ~!6dIkqwNS&`yG&l%*P_M+4kfyzrG2u_l#)5QoJdL{F0lrv zp=!8ItG8W;@{Mz+V;vGwLNOp1x8-Ee#I0c4QwzLKfB>*1%{9k&hz~|5lU%t?UJb-r z%E_{tLDY7cH4i1@0>^jW zX*K&oVd)Z}jZmG+2g8zDmk7%iyID3Fjw2{+Jm}+$ziew zj4XuQ5#}5ensGjq#%yTyvA?8*>l1KdPyAUZ+^&1RYh7Pd2NDD{QUt<$UjB@z+!HNR z_u(^e9Rkv1e3`$g<&efB8_TPU3>69vaAv^iEkJQ#j+;oKu>tQ}0XpxPHcxKkj7TtEseJEF=a zg1~&h#=CLjtxnv8@^=Th!!4jAZAFWJW<#TsZy#glU6NY|Ug`GDdTz9yWdd(#!%Fk$ zMefVm1nuGSW=_fRI`}n>lB*EqePnUb`fxY|j|^!ze{3KzSMk9wT4l=AyX$OF31VI& zBq;VLtJwQFZ!d^~fP(ErU^LozUuEq_t$OgJ$q(_2eQ%Dt$wteA&Mo& zsOqV>GUz!lf=G0Pi-X%zjv4|6l+tYkUvvpr3c0hx2dDu}5mDkHiWyWJhHrfN80Pa@?kBry-!n1yr0eErO(!EMS-R+JxJn z)PNxer3ve)hl3+BYDt@R;Z{npGl)lw9bQeb=zo!IJo@Am^H6J9e=+HthJV(9&icn5 zjQ1yKD0qHX(TEBFnmqzB1&B$;6tyQ_TUc^wX{Xibp)UR&BUc2L1e*;N2G#ZCs!@`1 ze@WZBuw6q*1W@LX)cJN%!x;q5b}n*Tq$tU2wOcBP1xiGMQ31+b)K1wCZ!)Ro1$wO| z^_~5A%fnhKO!H>yes`B8KRK+|Ga4Y>;F?Ii5IAV#$y>B7YP5G#l1(f>%LS4(HG2p9 zAl!2;7=QrP^miYos>rv&=34}e3m>(G&=Wv>oU`X)%oKiB=G2302#%ott0DsA);v}v z>DMzX|5W!bdwRr}51jkbGIw@JsW~*Fs;F~AM?zlM=e zT=eUBX!^rHY=aMztH%O+M}F44tGKBg()Yw;rBf~Ld&=rfjI(!t`qUWJwJ>(RO@Hoe zP|qo-uKX@(@6*ygbk$_nC4t7oR_GV$LEO@rP`mHd6p$atiHfs;FVKl8s{nH*isG;d zch`Sm6SRKA)*kTA`^Qk8F%j)&Pn-RIvjKe06{}(Yhhi&+8yV`-wkuXCEmJ{vy#O63 zU~ARiIFCDQWQiJFl5J6A{M>TE##hNufFBo)o7$uYHm|A{y42Uit?5kT3(^?xbm@%W+rRDNJq!?4)6XIUe)oS*KYnAWuPuh z5>#&j4~P)sPsNlu{Z5457O75ZQ1Oz5WQe z$tNTSZ+_&2EYp^zkKED|Q2VZUU_7bl+N~eqc03GmQGdp6eVpAox%{nW@)HjrC2|iQ zU3l2pdKqosxG|%YgxcMm4U%@!kPp5W7ov(xl}1gU=$Cxz6}M~apqt-S z6XA)fAbO1D;KBMFbGY@tMJ9Eng@St9L-+5*zb|6@hm}NNnP0(Bj%t=O4%u zEDbKI<|=-R5j0JLT&0OoDdu<}tAKvd7P|aL)M0oI$jku1JbIy*VZ4JlAaq{?>O8-% z5*ydjRW<0-EPsON$)-~lL5lf*z%-AIyvo(9TSB3=mbR{as<;S;YbXF)OH>Qo)6`r| zYy))QwXgNpL?CV*IE5QB#g=~0uR}|flwE~x{zK7a6utguz|vOK0`xMgubq`Lq7db@ zOdl9wVr$)FKyjb{{nV?MgQ?GKkHlT{nU#T5{{M$4R46Gcsr0Y#lRuvhh8NAp zmAjeG>u74)^2gP^6ldM}aD5p)>< z14SJFh@qlql#AjqS|$Cq9Y;O%wp(JO2qk(PtcmGA^U+Yae=85jQb5A z|3*5!swv;_t4Ger3%Pc?Xx6&$J`m$9s>l$%TY8Qu`!a|vqOa@{;5M%yzNf`IH701h z6q&!~>29Obtx=-mGrm|Irs|(vcaBQ$#~J%0=H;v7uS+~^xuBWDhyu-yDbCo;TC6*a z$!U>FS|!nS50S1iM`TM6+PtL1qx&&?B?Ten_aRrLQGJuk&ZbM}Eu`pgQKcY1LVh@M za9{K*e?WK*s43|l?wKF(VT~VTlskA#@5Hfpdu}pp^4)5}2CmvA6#!)yg`fh=^XQEO z)}v-B8573_uD*J(A|JI^$LX@~?JfEK0mXaneVemCTBhwKc+~~s!n1unjO$=@0?8wB zTg?rL{$AaSr4SWDGj7JfzM7^;AME_xFq@%i=dUe@c~n8 z07ENr+t36Upf3c{q$$gj__0EeUXrkN$#EzIjG5z4QAZrSgmWk|*@LYX86 zXDsLLHY5kE9)7L!v$(aP%^xez>T{Hb?gj|>I2Vb0Ty*NhW(XPplTry2QW+NOt-btC z10OkwF8p$tXn=4R<_+E&^#J!ap67@bip%mGDwC~@pEWsRBo1uEBqI1XdvSC$`J<|seZ zNGB9nRgnO;1D3n)&Q5-EBy9Z{#H6th2zDG%IGTs#BHB=|M zm=FYVor{AFEL$o!3r?8<<%#A4C>(3Cu1jiWn03o?dHUorJ!K%tu*+k$fPpbDl5P?$ z*WQIxj`!M*8Sg#yyUxCngywug!&49cUR%HGx_Xg9YR^bQ?UMwnK_8M*0Kk>YYDGe_R_vtV4dnztolaxrjOvei(&jRRRDud|59YdFP34j>r zq}+UFfiMk6Et^pj*3ei=>8_eE2jVi`0F*`3d$JJsc1fa@h^3Dv_Bs$di@<(0J%Y?& za3>zs`WuEIPyh z6zvjq@27G-SxEvwO#MZG-~*gPTDq47FCDge63wuDy%-Zmlat*1qe!rdg6E^g8Kc0b z?%Z6&qs>Kn-k7h&dglNmvEkz33ifmSQ~@Dx-F%`KYb?|gu5;`Qner*^Bmw_(azH6~ zVg@`$aCk1|5Qd#7paU!a(xWC_%6NP33_GZReET%o*W0>ra zw-+2~l9f6?j4)$*zsFQ?iy z2hhP)81A(W`DHFi70UI9oubHtDfRLQ;#R@*4__V3sxLS?VCI9=xSPd?z8xN{ha%pilA45Ir_DuH%Y3T?Ks5FNdL*5hlr z)X>#MSJoPO_qD{ehuJUvR~HoL^%;LjIgDRtc;e@*+IK70+O|Xr>}os&Rt#VQO?Bys z*izUvt!r)_#LXSV?L1(vMpC|?+?5EQ2_gB0fHoBo&Hfb35sRu{P1xSeQ{QKuSo`>* zo3^a&hgR)70P6}NXQ_g*pu!&`GZA0A+vN!er$EO63AVAJL>k} z<-}uyD9QTsGd+yCzkV+-cRU_%SGdsoROi9zG1`&Wf!@WSvutmGFNNjwG$h7K-7%%P ze;0oFI37uXrMsp7W&!hw86bWn2(@G{4Pw1i;9yl4x1IoKJHzyxa^9>iT=f4MwK8tN}p z?mvCcjg9ZVeL)6=4!_Xmw$AjYX2H*pR0fH!8 z5dnVx`wv|e0WYJ@ynIk;FBbEfWlV3elC=E5GxAFx`jng7DH;;8?>(#^f68YgPIbBb zWEJxtVc4gDZh)HRBrCj#Bm~J)h3W&68`Py@uD)NdnvDBs1T7+}3qoggK_xSK%zj7C zXUgzdn#})B6vAh~c#?>ap=klE{CLO1S;?mQr=0}tdICGE)1I|dYuht(e=r5sX2tr< z@?i~UzMrX=gaFeRS@ASh?Q0_8!y{6OAU0}TO#K~&t1r@nJ?Z+m25?-k{eK&9xRLvM zFDpApM>l{>+}fz*AAjBozGn)=uL^H13*V6VAIiQvkm~RM|D}?&q*6vkG-YKAxs?>j zC`B3B*(1A*+aQsQG9$Nz$lfbAvXY&>u8{1NmGwL4b-CC3^XdKl{=Vmry6!!%v!3hm ze4g`qp6ra7`tQ+}W77>fsF!&X04kJed3j?uujZtnadAIXXl7_rTYJt{M*wkeiC`kof)tXy*vR2gaql_}ZUIII7o5MJFHezdL zD=Xo05;yyhBa^moB%9=162LvYfG_;fC@x;rDelIR<04z*%Cb32U{gol{cGb0OZvp7 zv5?yirB7o6p7}(?M!xoFv0z8}Bm?pZ;@BM2*TZfp_j9v18Wh9aLOBv?`ydF{F9Y2~ z0v|w74(?eJ%Nkpv1@)3-lmDeI`>8 zYBfXja1+|n`o#eEG{8W24W>6Wm!tH$+#_L&C!f7HYO|ngy&0u3`=SLE_T61>R;i`@ zC$AQsoP{(*O0Me_WdQc(6CO~&CSMk+R%~cy99D$UFga^98xX98t~UGt2DICN8gu4Z zIVF9dO&**S&Xc$p9$_Ka?I>W{^>uXzfsOaEoUL+FTfKFvKgxB)yyN~)WZ zKs6&)#NA=oA`}E@wb4RU{dmW-5fLn$^cQc>i62whc#aYf=;>_$Wa%TjW)R@-Nw&Ot zht_9;hgi1DurLtBo_~vcE|~fexJYlj_`#2CZ`bzuVNGl?+?quThS_8{_Jip6{RCI_ zoxiL2y9}R99Xuz@xh1;&+zpfjoJ4j%RM7(NRf5m3K{X1_78Ebf3xin*q=0S(%xDf795t)H6& zLDlg?f1Sob_UAQr5o#Nhw!Hkh0g*j0!vh<}F}RFw(B5C9gD8Ef9anA5kyd_^HFf&3P$o91D(F?Y@Db8*T`w;Pl2n;xB6f7=5+Zg=>fn7UJq~V=Rv{@9Cso) z%)I3@mr-f5!Y*8&q|WcKFou+>lHkX1zKS{YP8_bSI5`d88Tdj6TCiLAsWAd(+}p+j z3>a;K5h3de_#h*T zrM_LWFuTqjp>Paz(V)QIJha%6BHzRB^WhoPi)qo-nn+rCJCH_Ll(s>7Oagkm;IJLZ zai5@@7=dee$oAg_BPOE5fC8(>RsV|_4h*k2oxKe!=oRL80we=a(5=o?Y!|Hi$+5ss z340I?5o$y558wPW_vYM;+rbh)Y#pJh3F8EwZ%-}y7=n4vQ#9V6X-Q2p zXP>(?F>PA7aRWm2ylev@I>`8`Vva{In9Ruk~ari94J`+ru>3 z1?#}{8*>-gw;@#nrxIri+TGz#x5)`Rr9FwMA&Dc1h~JG>ZC+qTVhVMnSb~i6R+y)( z56!w#YC?I_ssp@z_JGvU+xu2kiP00tAs2SvKqM}H3TYYq^+uz=0qx!|f|P)esAGs7 zfcw=$HxZm>T&ZTPF0fXg9!#v$trVTRpDO>h>Zho zpX@Qu-^;-Q`fUn=psw`}@C|rq#JwOWi!}up%vGHaU3;%FhGma!usG!TGYV&-_T(i$Td2 zLm_m;jPRYwaSZ!597B^+yvRuXV$+4GqfoZT$=Cg^H>+#*DD7+K82)pR@oyX=TsCe0 zaN+S?D+A@R1upRDu`XZ;@zyhnRE*518AOn(^3#(D-WM=GEMQh3CPyrP6SP-A(6Eyu zE5#`-OXrpdV)8+8K&9b7ptBh1X>NS}kk_LKiK(#DG7Z-rcSj+OQkJJnZ*D>=;B=Ft z{*+dR6@sH~u}$>$kW6gBp6f2SC7bh2Am_K10pthg*+|wKT7p(9N(F9bo!p^bES(F; zMas&z>_;S;S6-P3K+1!S``qp4n(Uz7Dq#1gRC+s<*BCNC@g7dp>ShM%VhDC={oU(L zky#*OX2_(4;Phv^IB#g#C&|CPC3nWe`5x4ID`Te;Z?MQ!2(qlauB?v*H4c0L;)5UqBcUQ+`IU8MnBh}6 z9yoBsDpSBh{|3qeZD{6C_#5(1tA=2dhj;CKXBW{ll*h98n#I7_7=$Q^W)oqwj_vN~ zxy=z%{J{b)6@q;Z7ajnUhVzB7CxB)Is{4PiY@mKp=%TS^|1=@4K)%PHwTX`#Fe-im zDsW`$oe0&9>4N9npuI!bS`Fxz7iYM;DY`5&r9Qb{cErH1LBZ+Fj{JPTwsP2BvYZpm zQJ6zS)53~c)CAU7J!)cUPs@?ta_2Q7^m3pns{PXrmfnfJw{H)X1I=@Jw8iP1g3i;g zGuyaFD1b;imb}_t{WHs-9yM38f42_MK;dOI`lpxoFP+hHPP!mHlz;2b#)Q+L!D*DP z7>;jrsDcQ84)i3TOnIR(?9KI1KKu`s_2v^W4uBT~vzf7`e0jYGr;t}x5lz1`w>w4! z5?o*BD+7MU9tCCxtc)C8D&;kBw>c>I{*PeKfmkNJF7m#UWLy19iG%dIG{i>`x|mo; zG_@brN70y3rJdH8+nLfPW08vuMz2-D+F-Av&Euq_oY6KMWhW`yBG9pywx-HCoohEz zKU;pq1pathC)a7UBet{BXu?C{pT7XdBS?6iomRdBZc|$)!vwwou!?pnuu%xc;OT{_ ztFlKV(7G{z!sOBOle1Z~H-k+kV%Y+7jq9gD{MZ`p->sT3bvL3Zt<<)ZnQNX7v`bKd z^u&li^}UKKyiL26mj*tE-SO9UpDK>Xb$|}!>VCN;BY*3CRVODX zGYdo-Hd5468+xFlaU`dF^{X|Y|0@ath7)1)hFBA0ijF$gHbOO`pHG~1IbknoMfNx3 z48l8kJG9C5nRR~|R<=9}S1sZ$_VbzACeaY8PHvt=fMVAy2Iw|2ITrIB__8paU zf-1VCF;%bpnx8AbF@M4_fGUy<@*Tkt0j?HWb~KMVzCUB+z|LLzE8f%+jo>HTR$yrn z-0^GMpQ$PzGIruXn?SOgN$<~={oTd^y9;vy7$;2S%y;qBhkV>A`dHH8KtX!{>)txh zu%8l2vS(-1M6pch)&fC-eV^I7YzWo%Q>$ZxAY}C;n>{M}IAs8kuh56F(-rxfrQ zO1)AFWLISG$ds|!^LDhhL=XRVREvJst=>A-wb?7<@G~wDssy|(MfODgTNW@!+Gd^? zq{c~54=B91J`d_&jDspV&9M4FHHG|{i0jJ1i7~opAEHlDX@j3JGXInMFXE~X$$b=L zj0RmDYXCAsuyvu}UTIL+=uqz#e#AayUza@_bNd2X0oH*xQ;e%OIaO+Iz}!IT5565| zLdhd-xNLM~@|ig&1O#icbgb*u$J(RGknm5Sjzj8l{v8V;2lU!xe)L-haM#b;K71}n zd!goHVCeZQa`Hs`cr%fbhU zY89#)3<1~;%CJ5w&~n)8OM)6{+}1DnCQ-(bBHy07p5+5>?>+@G9T<5^=C@e3xlTIV zWrJ<-LU$T*=O38#Z)Br$L3Ih%mwje^j$1A1y9ZIz10TRLl5?BT@)WTJ0Xv}{b<%;l zoU49r!m_^>2tjH{1WHPb0BX)I@SQ7hy-N$9Mreua(D*8X`HK(yBF2*p#sQ9l?GcM@ zh8(a)I@OIoXj+?%^L$c8pZ^@Vl0l{jZ(84p_<`?(Km!s}kRYMuilI7p>DC3SMMwvqKMP7{YZ+CND z2pY!%hYcM6KO(^f3~P&76{B5lkIWAsTVRsB>^UGJLjwQWWKn|Ar(Nd#u02$3dGUYB zN6a$4?ru>MqJf-K0@X71%}6BnWQ^Rs--CAm2^aeu2c%G&UDk(UKDT^H&;x^PJnTrx zzk=^y5eNR^P+pXzHl4T+Z@r^h#DaQO%n|NbdPkhwzQbdEH4NbS)_SN6T7#{P?D{Vp zjhyWLW*gm`(Lp$J54Mh-B&X2qC0~ZI9*00BP6MQE23sKrH0?|Xb)ZID;@&G$%lrWj z!gr3pQw&m!tUkNa%K-PTV-!%us~K=Ik!#~e8N2x}KcB_lqSLK*x1xn)TJ*c;$%t0cmaxY>e7GL>P+ZyC_U=~}q1oPPRc3uT|LzsMvLRC5mLN)i@nO~x5$HL@DiCLz@<12-uf(gy_(P4R(?Bt2otHu zeo=$n%d_CqXu`|goo9dg(@t^s-K}0%Yx{s8IzmAtXzZllaA}g(+9v!((KVEDaLz>E z9%0LUm@4z;GVEqA`O-H}KZ)lxq}+~(W)ld+A#-fD^NPO>l?y zie|j9Eb3o6*mSw6H=;9OBaa`{?pB*o6|j>!TK`9hmU=|VTYE!6R4kjU*CIKn&%G- zkgwrOaH>y7-)d7+)z|Ng3zYgwbK8)fNX+Q}jtZhZA($dy;53`a)Fm%Ds+y^w1K@V6 zU#Q{rRRah-QlFAyYeY(dA=uluf>WF@I`(GInVtvFO|Ks^`+EO=41Ct z#w@UBPEbMBN36fSOzb7WvX{;mn?hPY92#p-(XUoRQ7mR3YQM%fcjT)?G>USWs@R=? z3ssX*!Dp*^G@m$55SUwMe4)m$a%dLr8FQVsW}Q7)ozm@0$Mm{O{m8WsffmDx$#pZ; ztiE|y>1O9TJc78!?X{w6i@avitnUE~M2C*ZWcSJL*$J%TGtXil_iKV*6uE6V(XMur zli&OcmIAR5WE9vCfT=licAH$}i7}UAnL3sDa>e{k;=XgwI~SUlmuq_Ux}6Hl%d=)= zr-yS`^%WIGomg>d43R9%mUxC=a+P@iY=u~49hER8<{hGcN{zhg;l6fSv86m3d(!vxqIDUtipeiNy zd-I&CUUX5MEwND(5Wz(O+|DB~I0fF9CkLQcVKuQL=5`!Njn!3^(Kn~l-)2&jaaYSW zUZY*Oirri8WkOvFSbriiG->$8W-M^@tT8cwhU`TB4UE8{XRx6d^g=$g$^VMgA%xLG zOb>4XcU{0dQKwCugYkK?cZRitio?DJI{AIjF;aOA!K}*1`v;RYD^N6-jGzvfU=cY{ z(mE{x;Wn|L+>m9uY%rUxko4Zed_cn}fG14)e5IQ52T)`2I!Xie{Ebl{K$YnjP!DYz z7;G2{kRMZjWqiU18B8hNbKU)aH7z8^(G#XLYzKk1^2>#(k*ZAbttB_c)u*+7?dIft z?Q!Uab|eeByJfVAgP7joL<0A*aV;ouwJ;#w47B2wfOa{r@WM(D89N(A8;Fa51z z!DqiVMZedni@vu@Z7!caNT&??E23qFK*DK*MQR!IDJ%iGxyOv zq7%(R$pO+Kq3cF<@zuFKFcO5|4wEkVTJI07!s9^@{*lYv!J9p)yKO>uv z^ZV4BnMX z^2f0!*4;gCJnj60aO#{CkKRjF+u{!wFF|b}_!jcF;E4r3)9yV!bYj<-UJdGZH8?ij zGf9|mhAfC^*%2icw>4UhKg~9Uy88WlL--5U7tJRM>tNS|0SK|Az!ERq8aDHews;1(@8nHuZXSSL>4uYu5sOZ)w zx0P499%?H+Zp#-&cYmF9Tkcu`&KzDAY%w<9W?=RnAEf*EhjVx!VfvM$PjFUA7#I-& zSy^yfl60UpxUn)M1@^7{>cKwcdr1O7y%uw-b=znl`_0w*Q^#@LylgR?XB978@o-1H zZ9ot>tmz@kD*^8HtKvG918#&zQ|p778NHM*+z7(@y=V2P`Kf6@a1xzwM)@HLU}+Q* z{4ymZ6sx-Lhi^ovL>=|!GaN=vy9O`EwoNtskM54S zhVy1W!DQc?3!b$Lp34mW*!&gDm+irmZ&drtHoizQRKMGOAEI}`fzWpU1DEVt01dR6 z2HdIYw&BYU`|{Yb)@}Rm;d{0zgWKZoukM)E2!}qR0^aE1yuNOyJ3DE547$ zN8_SLViYsf=tian8xnuK6HSG@hRrA!lbCCgTJg_)#En9asw&a_?@vl?g=EIu<0~!+X(U8FG-5KDg~pFBlK~kG<*9) zvLcizCnX(5$k|VKm8b*!3k&uU>de+(4g`p$&W`j_n_-SEbNXj(WCpfECXl;Hu7GRQy13+Z`{L z(_cs@W`3zVl6YU6yi5OeOZ6dVr@LNjZ02WC1*8ir7+u7J=9U0xRrCMW)St=Ud+!l< zq!6~+a7g6Kt*q58XrLh=QGW$U8JZUW!OIjjnYk_hJ~)Gg^8p3ZRY;!e^HqZd@NtPH ztCUCS?oSeu#cLBPwSO$z z@t?!MqLx{o8creVB5cs&%*5O4ldFwm*hv8b961JyBZcq?@nXQ!0xuOyy-HneV3U-V z)@oy`jjaFA2&=iKD>Ko)FItiNE_syLxID9TdyQ2P?56=CBK!q0KJw=8i(Ri|=l(1n zSd#^|Vsxc_TGmz?!8aDFnz}743;eIED+9Q5Xv$`-ODva-{puP5e|6if_@$ABY7&Xvb#?L@M&8X0Sv)DdUMHA%gDN7(VV^#eIVQEy7Y+Y_$DXp~ zHKFkb@EK2r)GuiNV{T0Wkc+t2&COnyIJa>lw0FQcvxQ8g%R)8CTV#hH-QNA_v{dOD z+gucZY=e(dc&uqpw4XH|%?+(xpH{X@Fs`;$lRqvqwfY&^PgX0`|1m|ABz+QtDMkE$ zw~~x5)cp&q)-D!hWRO^_)Mj-=NtAKvYfLPiOyWMQ>bjxWuc=6jOgIRD{EQL(`vV6bg#5lKO!NkP!?!c{^2#{@{!1nGBc%98(E#;_0*u$Y$6ju z12uY%9qh2v883ZwP+j zKy4`qrkE+tZ0|O231nm96k6}tIglXG$lsv_Lrf zz)f2NLsL_kV{YD(M-U_;+o5YnYWW_MHp;Sshw|RSh^Epr&@^+F1AoCE-24I1CGH?; zvE; zraP+(U0dS!_&}6vh;`U){niL|#~3a;T3+Bt$2pZeudhCbF!nM3 zopkBeZzJsx2Z&`D{6`>i03*=8k3G*;1PikhyRUaCxpxcx_Uvmfx_e>N`MM~!<8TGrlU@J6}pKn`~%qECFqb?DPtYy@4`^6DRVY|)bQYy zSenZgM@r32c7kz<)PZ;)Znu^{G88I{6o%de($iQ2Hbc;I@gx}sB?awVQ!E&EAJP`i zSzFQ;jvVQ^_4p~t>p!2v)`suQjeQ1rlDQss&+?D%S)#6?K`Ve^jmk;h7{Qv%gHHrW z(!aHq6wf?HI&D}cL>~4y8(H50d*_Z`h=&+`9!QFtf%d(|hn1HXQdp(<#BOa#0cwos z)e~Te_12ghYr77y`2L5~lZC-JZ0!LAt4mVB-?i7x;52y#+2jZRO^S+8t3#y2DDL4J zl<~Xtc8#F8P0Z-VKB$8|naOT z?;7sLT18z4&9sqx&h${S_RV_Iu%@{&3C*kL1@ZdcFdh%C7zBz4JK=bK!^hi4D7M@- z0Xjd2gtt_TAmk?&LgN2P#O%VzBbeLvf01iU`nt92{F`*b0cntg89Cg%=HJJ(pl}c! zFrIgSZB1hxsxyMP8ke>-5spI6-WtGo+WPA`7i(b@h&~9I3H5Y{e-Ntws~Lh8{>lA4dS&{B0D&WPh14+5F91A^S zpP&Z7-GJPWam$n0#OBV*2EIukElj)`SuM@EeQaFA*1L;k_M^9MgWLlf?}Zk)evSGG z9>hZPV_R-2ja-?d03QinB-81EI;EfvD2uFr6UkGP&00T|S)sMKc@zY~5Y0(OTI^m| zEclqEy9T9j-0P&VUhpz-hW<7z+=Ydn?AEtnjWOv^E^qe#Lsu4(5RT0{3qW3}0P=Pz zS>+?{UrI>AiZx$1p||yCAalS{M%QVih5Ew3nwK3D21UVlN&-+$bN->O=`|eV9^v5t zVad}YO9_1#Szq!H_3UW6>(M*+LV?2M+W?z7)w6y!^!2{M`{ThRVZqv0l0jEb9eG^u zYoP|g>#=nDI=e}J;O^sn2Q6~#+6?vGl4tBoO z+J#0}WTr|r%M~AYjI6+8tvS)arejL;*eMzXo+FP0(g6eX_{kEVM^3GCXP%ak;f<=< z-7{#ohdvzM42=*10T1Ct|IrF_>KZ7ic}#{D+M;BM_`5pYtt&uHy5QUIHdzzXdy|*= ze=}vj%E>s0Zx>meN)y#7oSr*eMq+t^TeDp?q3edamps&e6W%q?uKz1vX<;6DqA?wV`oUmh;X||Og_49qRjur7$u!A>>SF#;)?vfW_a!an|sA%#y`k*=3RUXa{YZ9%dg#~dYVr#pYu zPKUo3lCO)$QLcNGhWzNN34mSheM83J57sa^K4jer{UlRl)%?Ndj!Ba#vWY|yopls3 zC#%DY?@+Y^pHuSu8%4_2_2)r`2Zy|Tc;xHWf(Ll1zb>o=d0~wL$qW0Y`Bx$N06DnY z85$F`?A2Q{BUrL<;w1l1Eck?#$U2wd=9_;4I;?S+ z!s8A-)^^rvf4lJ#&2xI91YeeyEzZ8ehCvV2XjYU(=Dmap(|`|A2@o&#amOT=(W48P z&N-Ee9y#H0{C0w&-bUBARQ>yR5$x}9bxfshL6kPGkq6OQN1n=>e_91ToX|^+PDjCo zBd5*!erdS0WB6Xgl#h&9I|ptE?PI$>P%z|N!8tLGc_|E1WJCZewhwjQKC>jbVX`Fp zX6IzmeLn_#rOZLsw-+)D?I%U3`SywhoheHOJ`i|SkGS%LuVh$NvzcuEHIfG?K@19K zn5?eZIo;JFII+)t^RMY77vI8-s)=LU?(}sI)i)Nm7~TmS($BO^<;s)jrFX z!{7VBh2!e0K{SMNsja}Zisv;?zuU?^duq(lkBwUA?sj{v#(`=?2Ha27ZNt21{(Y9U zmNN9w3E=&uSyaUHSn8C1$rNUJ8+dNl74u>9q?C7~W933k(Uaa+ix#iXAC?Oimtpc% z*dfxpIU_sv$jq$|Bf>+|SEQUH;{I&w=gHrB7;*5I%>rlsrE8VDxzU!DUV~}(z1nhZ zc`@4KXk_56H`1L3mS%X>LmYXEA9aG-Fn#+|JqG#|1G6mRs}D`f|7>$}s^?w4%l!Do z@)h^~QL1A~Lsw13(mM?EwM@kJ;b{1&RrPmckMUEt`PpQ#_Y-OO8|=t_U0IxUGYB3h z28Ph@arXZO%|Y6^wvovOQyt;U;Q<~4lacl|OHH?m>UEkb8yrphraw&mP!TGyn{nT& zQp9I2c1kFGOn5MSpNvw!WMf~Kosh=hi0QZu0GsXriKZQ!SkkQRQJou)_PA}GD8YWO zBG*hVPG@&B;qrwY(b+ybTum2cQbRvlS_~CD)bw~^q0*hbK=CH+)YwU}APWnsWrUv=lveHwbbb0NYDw1 zp{V$RSCW;3@$H&XpS=TTwfzV0;C5s_90&-%m)hT|l^#7z+oDqXYrFWao;>ys z-NLz7ekH_>%Rd&XvivP7AbU_~`=6wOhY!7%1{1W3DzpgP16YcY7^}i;B;h>Q&jm;_TywfxhJ9#f~w+Yj4K&eJsl{U&vh3Rub zvrQe0(@(RkHz9LZNxn&*B^j~Cydc8`2&b@Bz`wscbJ|&rD%#51getDJFr#;?N?i6r zYW9P3g0^3G1m5IsN=j>t_HP{O(uz?1SQr3fVZaZu^7e_uzmppOFeTP39(Ka~(#THm z_ly4wfnXF*wds!EIRzY`>}R@ztchT@6Vt3pOPTH5t&-l>pOmTvAYXIiTI^XB(QJ8-3)5E-p4{q4a44%&#+g$|_hUEVOXjqBpqJ+tO8rH@>Cm}(t`0wRg82|iOl%t9s z|J10MH2r5lF<3-D_$b}`K<|3F2w+pG;gE`zd^JuvGIo7Fp7-{3s_A=$1VLNh{056Y ziElXHs6blFyF8Sw4SnD0`()1>_x(t_aumTF1PX~*MAo_4gX@sZG2t9O2sC76n)NeN z_n3{^hUJ!mSn2oIt*0ht&qrJ$^yPae$J`R>Z>W4yllIfqd8}@>Me<|3Y2Ed75WDFs zt?-7nLO13yw`7EwV*g}p_VY=6wixqMRjO$G$o5&Kn~Rr?6SQ%f+zJ7iLkq(!jg3Oa zaW@exhBUa$buKXH-v(FGav|xZfcdO+YkYX4UG|W<+tEH-wi-4E0$a3`w~X7~n;#iK z1RG!gyibYKx4K_8NwaqLS4Li4$_?}_xwyo8Z#JktYxgEZ&`a%@8MZiyLTyKC{DRK$ zAG^0a>F|vmv=NLHkS%DhvJI^0)h*^ZJZ9T(F6LLpu3pLBk$0hPWzf(}0KuQ>T#Jp3 z`%Mk6xyi`E#f8P+X>0oge9CR#-JcIrBHAABEF~r0VBAt24&GZRkC}-*QJ>4tJRhqc z?P;0L>Uha+M&tw4LAxFh8#^IJjkFs_Yz%6ZjIYfd&M)g+rVHV?ldKyY;F6P?$?7T3 zQt{vl&Dp_{jflk8wSy7}i`}b^RRSa0r|C#hpzN^yVZr%ecPw+*d*f9H3A-NNl;1!4 zM&34DFc_FLkH1@Mr9GysH*Q(cS=ySY#nUdNd();ozGcXJLkDjS2<;dtoTo%a)2eE} zZk4R;EFB){`u^+uwPM3)HHR!iuO{Uq{jX(su||#U>W#zG=kJg12Yc3d$*^EKqdDW% z0qI2C7Fioxh&<(5I7^$y9YhsVF8wL_@~(1O`TeI|EvHU!TPjXvEV`cf?ay{y%dBE1 zU%s@$7Q{KilmH46kGF|=6Ft#qKA$e<^5ILKpEq6ZGhG|akK%aS6JwmD+Jp-$r=QcsDKEm4C@BVgNFr@^MbcK8 zO?su4ABb(^Bfb{2Oe{^`;NH;6>DJIrc<_KSK5sx%U7zXWsR3_!kn&?u1`K8{YEZ0M zG9^4PF!C+e9%c1;gU$N-rW>+8ADM4pUx|(()=k<87#wkEweIXYFd$m=soKBi#>Op` z%)TL6@c8N#$1uRhSjlCHV*88v>TS0cCwa}xZW7~j^4s&hx_AyzJKP5Hb0lc<( z>i%~;7^VGSsHF@D4%2v-ID{P8&J5kHPDF4#V}ai7r~Dq(fPsC)K`e-J^NBckA!e z!XxuBlP_assX%5ID1EN=z1X2Y=S(8EvfOTVFm*h%F!J^~y`)is(~B?eV5WYB^*}#h zu2WpCRuz4m@7B!gX5+3SX3>0C%SmfHKV0-(UR$9y>80ayON{3TmGa$LH8b1C81Z9= zW{jPMJNT*Q;aRYUywGlA;np+u$m)_o3HA|i9rT#q5OPQK&3~ui)w{#11 zT5ENlF3gC}CU^^fym6v;c|NS6a8A72qA%0E&<2FtL}FsVWfEUmcbQ8&D^z{Oy?m(J zm~@$7&$KU_IsA${ZByur;&_2tS1+r|9iCFikY%W_`Z5WBVw9>rli4|Ihvv4u_dRNF5uCa@ZvGO9$7EBwebM1sN^I zCG$P3m&F3r-xQoIgLB6Sq5|os(!7|@X7B4OovU=|go>VUpMVM=F1_MwCj|RgP$IOz zS!K*mx_8U>B&%=dICk+}-1IWRJ9R`R+2v-z=C&3EU-_2luBWNB+|x2PEJAn8hdZx) zS}OW%`EpC{p3z2QWb8nUPQB`#QMj>22W)5;`_U5i)BF9aziyCmR;bY67&1Ejx{^0W zCzM;tHJy^xYe4NwyOCy-Fg~Ikn{|ez!rXtwE%T)W0BU<*0L_vWV)w;mzEg-MZ2JPO z%$Ue!nIogm%uE*N1QrQ z7x}69Jqo9Ydhtv%TgyD=JzeZckuoKqcnMpedK|4?*yQ>{{aZg{O_z&JCgG`Lgw>w> zSuaeFM;%O@zBS)%ju^?Ld15u43q z(+O^eEm_UWX_Jf3LO)LO#<4_|Iv0mVxa5m>PTHzBp1QKAGB-8Fp`md@@IYM8b8+oI z{LA{_Bs8;%3b>o5r`q#tIN`}$THp`tO|D$55>=0{Yg3u~(WmtH?+N;vIHnHe(?1%S z^I_W6^={6xFfwv%=e+_Ysir>Q4lc?lfRJYMxq{X{{~ZfwD5&@ffYkVAf;g~u#hoiEA0<+Bq>e2k3Q~QelG63!^cn>dzQU6eyd_r& z@!-WVZA!&daMs{kx4ym5hRqvSRW3i#AY`7ur{MZ@uhXsUvMV4iO%O4^x?dq>KiG(R z5FS^M=ORcG@yH7^y=}YQh@P+0lehL`n}6lZgeg{j>A;Q44Si4i{JgItU)t}tLt$iB zZq+t1!vVo9OQRY`b|Tn9FhjoL&qYBMQY&{4e4jxsfY%b=-Tv-=`(5nyDWTc$8)p9E z#RV7Yr8~tVl9;X3>sB}x=ksS8I(q@pZ2P!r5d2qu52I}_E(i}(9cIKU^^{zEa(&=T z_FYa_phftKo$TSYZJoCwZI}8fUD=jKQ3v%00e~ zt`E0g13!H-ewkwx0N|6#1TY|+KX%2Qys@1=&1mRQl*Z=|8u8}3e;6&=u&n~3vketW z-+sGG=2N&&FT4NT&j$G$aMM*m34F`5hd)@iUbcMYS$*zH%D}<0r&Pw9b+{dUwQ53S zPflY_*3YI5>^LURvJJslLSV^t;+|9YyAj3KVec&?k)2p(_NVGcuPqcy2Jj5Os|4b7 zaRdSjyt9Jgwo7*>U&U2!2A~&sqpRS`P&VuqQ1loZKp-T+-9=L8+jZuqK{?6akXuyw_VM+Q_|M zQgkAR)SkXCE0I(COt>-n+wMfE`u(=vMT zobRQMIi}c|Ax{cDeuw65W=1V-V0+E40;R^Ttp|8)xTnJnjh%z+mybd03)I8C7&O8b zC#Lq;fIoG&&U`+rmw2ZWXB>8EEJE$)oah!S(!tZ-HI!Zsfq>SJ8b!$t~ z$-_msGe=Hb6d-sSRfsN+RcmafF`P;~;j|50MP|<}`?%XD zBfeViLrfV0XP>g2Wx6Sw=2cR`Upvv(lH^ViY>GcJQty1wosr=lGe^Uxuup@vNpm9U z8N&(yC-?vu`ZV|#_9BoEx#*7yN)-YHn{Geh(fuLqow-4;OixTV-KAlr5M=3Jn4SNi zvH_^%!VBj9x%;xssdfzA3_ZzJ?{679V;b4|R{5htaSo8=U5zL?)DBcNDjBkHI2^ju z#&RVJ089X_R8iZEF&XT<5@0I?$kT8PKxV7%;>X4Ek(C2Ga7i9#@{_iuUn}CI#ONtp z|8`yZ+o>`Bu1%I&R(mc~e*rA&3$(I!o-~yZ`{~)Zy%S=TA&CP!kVgHf8)qJE$^#$2 zJ})g?LYMoo8CFeTD8EpV#Yk60OmT`i)SjY?m%RL9crW+``B+}x3jdcrq6zGZHYEyZ5hOb= z&&7F3AXiLqpNYabp(CDiuVV<@+dK0-Zfc&Ul;J8?2zcYKuR_?}`I$2P;TfRejKca4 zUA%-46tk;58^sp)PRw(D=iJdij}vC9+IqI~PD}|>{C&>1{Oxd@IK3c(?S#k7)#^wXN#J$(u;NI17zw0#FiraUjEe&EVQ?@t1bNOof$cK8wq|$N7*ZZp+#|!Cg zDNa{S$1YhL3M*U!^W_1+o9`*dhlDRF3D&TiZ;p9R!>U*gfa0g0t5q4AN# zYm)6EM7*U>Wak~F0*$A4;O z*(*35F-=XM;Wy1@RK%D&U~RY6u{cKC3Ty*O5F+*Z-l9d6U-m&?K06et5WK3*5`#&d4?`JXRbcPffg z?z^&&xeC%cd36x5W$a^-2WYJfAqpx!76=*Hs;p=EDy}t|zNk~qck`Hd+~Q1PozEd@ zn?<={TLv#9K=h}Kc_9dB$Q(L}>5bcrCCDhrU^UTmX`$2-g|8WVfTP{OUy@FuY7N)D903&hyfwaj^t4GDDVSC#cf7e^p z+Xw~jb-K9(E^XGn3=PitUcEP6qT)A|yxa%1_ZXchZY-re9+frH5pQo!MNphn%i^9) z`!mJ@GP?rDT>G*njs4vNCYq-Oru|>&SRN}L_%l;Zalw+OTRFk*=5EP^Guyz{JMKAZ zaw>s-Fmp2$dGz`wbLp&>So~=>xh*ySm;kmj$1vur8W7pCgjFDl9KiLK9WmU*X&Be~ zHEYAmG^gg?gd{Jlf9q(Q!A#84OdtryhP(Qs!NT*Ym2jkJ&_)UwXs9Cc-9Z#dTuxk4 zcff5Vzb*r8lN9AjWwi8lzwe@#!uER@y+79+e;bM%40U;l8L=L{IlDZRX}jbMRxC`k z+5$MST2mYZ{%Y#+mnU!kx%Sqco%?pIY=@At&e89{`M@OQaqtcGwkO5h3mo_Fg*I|6mW~%M0$lee2(D9Kwf6`vN%zmnlH#Ca!apTn5IeCjC4M)6jfX z{;b&1OQUBPwwWR|k2)4kz~dgT%VZJlH}8&~XFvVr_I(Pf78wL5Jo_i{DF_N!n%D0J zAPc$i+j04>Md**c=L2@!!=$#}(*E@+_WOR1P&+!c1klN6G=TOirn@%jZv8c}QdT^X z%H~8ZjJGwv+SB_1DD8Lkc{O<;;E3qT-Q;02PTjU=r#Dx{v@YEl!5j88gUK_iu`K!C zxx*hQkMbA?KNn{DEXSU7|3&89-Zta=5BGJQ4=romhy<8uaiaSCcfAA)1i!u5V+xlN z7-oJxqZ#;3@l@c+_EgJ`edj*IKD(9S;i_NKJ};QIE9gHdjM%r^`;u+?PL5fm{Y-$q)V8kg`6&qgQ~}d3lpWv5ERou_!FFyvVTi&6gIx zTJLb>g|WruUlZWc25S9h74&QH2W-x5IkSO2pQuxD+^9~)K0YAN9y@C^>(5eN_Q%H` zP)yAS23x+U@>ZokWP4^{6@6r*K|V!X@)(`oS+F6x6Zp{~C&py`Jr(yo2swJf+~bIz z#Wa6#{9ECvZB~9hY4m%x0=El_vw^wZCx6A{v+0#dQ_;ZrvwRzD%4d^qALOEbCNw!; ztJlWe1n?cQzaMod$CFGYDWf=+7hdhSbDtJ`5AKGRKo6;6~z)Udnsf7b3gJ4mDmeVVpB2{ zPnAj)-*sB>thrd!S@cXUTWPcV-C+c868`dx626pWbhy@9=}pw~apha=B0_iCj+xzJ6YaUP)c6xDRZ{+z zHqZ+;5M_OA?T_##Rx{b&(hWP%kXg;DKyn50dp`SpSV2xr%>yg|F7Ic{wjENy#t<$9 z7Dhw}GO#C~W%Is1OS#aw!ve4s{xtAWLU^XLBRy#l_v~{NN`VqV6cBzU8NYmP6%Yhc ztJRO=xH|6lR^whE@ah2*{)InUomG+m()FS;nLv#$+s14a<#5`J3kV{EOmE@?A@ zwDDJKfvAGgu8101f+&MD6$1u*aXqRA3sFY$0xso4qu~;kF^f3rP1~S4cE9J)Q{{TOyFriV0P2tE>Qq z5=65aw>NIQTJ{{Q?p@$o0pH=Co6ElTO$d(8i3_|9U9;+QUo2(l*y|BtP%Z}QV2jb7 z;9vl2M-!Lns2r!hKC~uLO3<3b$jwn}TgO{$>V&U-B)tUi1=)3GOuKYpC>4=RVtcJm zI0%E9xNH#Zfh@1FrBy+v`M$Rc(}Nl)f+xfwNFf%k%cwjdUdq6S1^6A}RV;Q=CL%0P z{GjD5_zqk*31kVl^J9tCZILZI;8(~XbOCF^slucAhWBbSK+#-~sI)-jr)s8DcM?F5 z1ZcJn#R~-mr*m&~6%i?efuX0be*Tn)QaB#6|16m{x}ZDr!2g;4u877<>^EamK`UGd ztBN0Mci;)|^uPo+fbQ8&)3U04{e0VpHO0lh6l^GgrBVRA?X3h-79UHLpT1Y_2BUkfi@FYi-E0?MyuM-wMH`uw5|Z;#a({Vo$A}b^PF=Lpjf0c;L8C0QN@c<)L!(w7g5N zd>XYvNe{13gTpul8%wjB0S_JziSm!S{SMSb2B2y?HM#E!;$>Z*{knlb-^;XpC;UYa zS}wofr(A5_6XLSbmIvBtUqbtiXWt9ny4Ygj-TtI&MEi>;{?sNmUS4s)WNP=m(S)-j{t*jS^wQp-LUg2A5p)aCNF&ha=1305HTF7-YhEf1VKQzY6;VtxKwa z7li@>{3-QMCKq06OMKPlf4r;WB2{FgmX5z8ju}k7a7_cI^>Fuqm_s=T5P^$@p=A4A z3g6{oiFOC)>3NX z&Ho=|*BwaZ_x~TIkV2AFNLgh>$d21YW@MDiWQ0ie>V`zaEPExPWK{ONY1n(ODA~!% z%KDx2T<-1j{r35OexLJ4-FrRHbKd8D-tX6Yob$X76f?B%od#p3jou?n;hz!H(({nt z<{RLCxm`ExU?xdU78gg%*UM^RfFe*ma@EcKWit4*_wF{;s(2E56L2I8m?Yrj!|qMX zTTa|HxOaf5;;fwWC3Z_;a3$$YU_MPPzM2JkazeJx?JRL$7$ClN5 zN9EMrhiwM!2W(t(yGeXMallR@q2S3kQ6EYIE*;u@&H-#=NgtuO(6k>(pvic-T0GZ0 zyyZc1k>tfg;EFgl=(Rj0eQ1M9H0m)Sao(oX?Kdcd@n_2G-^2$lCl&m$3Rd6*nlDtM z3Yk1~3!{A+`c43zgdD(_lRwEF#&>BAMGSXR;0?B`^ariKFpsf}VfMBk1~AWr(_FU= z?KM@U200o}UU=$xZnK_IpB&hBDfcmWK)x!p6I@PCOXLjM^glqRZSp-}26GGERFv1s zZMpdMeoSe`Y0izvhD&*Zn41M3%G9vvn4~p22{QK1uv0XVi>u z`*a7ETlTX!oskv*_6KQx1{^00W4x70q&Xw_j%@)h40sW2i);HFSOQmJ_TlNPg}!(P zwDnvh$y`d!tRRi0&yXYSL30iE{Q$CjKh*(}AGSvCZ3|H)L*~`M@|UdQIeHqs<>=8E z`b0+(%rd>e-MerKk3b!#Idi|91(8XvhVcFhx-D_cI}hYKf?+$JEyuy@uxEjsZQJ!7 z*|d#;N%xLH@6(pJJSP8}M#Z!>7vdA&h>2da2h)LNhNIs>D#^L6oeeo$x8ktwq&iH4 zOliPnT)94ugXGq=4*DB8g7SPUOnU&nBNm;;e6l(qqE(#Sboq$hVLcMm9KM0dj%ZAR z5RgBBNq88VN<`=&C&^(R{V-PjU|;O~rW!Y(JlS+F=MfQ^Qh8RUDGzIus*4zbUL>f{1| zJAQ`c>*f+L_dfP_$}3PNyaZWGeMy5o<+bX-9f^Hzu*p-7k>zqJMJ{v(4aMuEF28dD z>V?(?xPdSZ)RX9MzjKe=ui8UmFdFZ^S_eSm_uWhCPG6&?nzXQ}4xf@(mx7fB^vk?j zHi+^#f^dTgP(dqtXx#@f9Xcm6IPSAHR5UUvOh-DEfU+Q-PF0&#BI~!(1W$gT^FHAN zoHLQ(3{{1i;&YuO#$v_E)c`#(oG|V5q^Lwnq8a%rd-E5{O5_FfXX~Fl%mb9A5GtN|7Cg~pDp`toNOY3y$Gq{j04yENQUq-}} zbmu?~7O9bA;OA7RC+fZni2vw{?CNRBP1Fe6A+RunxC1g&VgyMLABzGtdFkOVves;l zN!9pm_TKZ{la-*Tn0>i?3}8uyD74L3^OMZRd%$~F8vXNt)o5B1 zkxXb}ZTIWli`y6Zb-~ZzD|lSKGuU!+xVC|l=G}sW7!+d!rwR#SICqYIGTAfo+797KM#&Yl=jYB~)G&Zy!#T>Z&f8;Gan&Vi!~Q@fWdrU9*hZ4z;afBX8zo`M~Qw z(BAr}D~7;R?F5l1$`FZ~8nc}vz*qi|E`bLV`Y&sF7_FD|3I^?w!{QHt65i-NNf${T zx~8An>hBAfvA%90^Rf09)INk|ADRgG&^$NQe2Kb1`1TYw$$sQy;vw+2>Wx6G51V!~ znAiF6H?l~Lwo?JtN%E1OJ$UTk?M!g{502^$)PAtuhgNy9J~L761$3s)^t5hjnx32r z_|s;{hS6KC6bcP&tzRDtvjhuZ@jD5AOij!*!4(s=^72iq&8#a|UC#r*MRN7^oI&Xu zQBQuDI$_=5g=kzaMc(}5-a5>&m|>7S1wsmQ#inO@BCkDCcHa2`N*ES8FH>IlW(3Zfou3bp~=BdmzkQypYzJaU0a_PZWXw%fh95R`JznX zMY&$XjE|w+UwiyN*@EYGD}x0~j8lKFV(qyG*f%(y4MuRAXsbA0@RAs7Q9a~_h=Y7J zj|^df?oG&Xf&C{>Z6Z6uB8cgdZ+Q%U+K#S##f5v5Wht4VbI>P(Mspu7a*Vxjkg42G zcQcvn-QADiIs6JUz`X@j@CPAl4WCe|@)<1~DIKmFojOc1am* zS1XFOKiU*1S~@3m`K9ihdSL$4wJWVCm>OpjJZJ3yaBZp94O7MzBW$=BTedZ&K#KZl9G8 zCTMgX#|1_4E_a?&MGgHP$p!+;K@($1N5NAsx3M*rbED`@3F?g<7? zdv+dRN#{NNi36Yyf{cNnmq#8$ifm$Rj_DFBkTbFZI+Cq8Vzn-Gl9rlVj*?9PPm7bVRT zn4Lr7^}E}v7n>ukRvf)R$|L+PePk-^vu}N zbw7LLGqwY%d!Yl;;p?PZ5fd*M!{(YfMZ-VEvhtQUf974kn)_H&e+D^N75O{p|A?1UPf9y$xTSR-Hz*Siqo8Z>w>z~JE0e3`D?<-`1 zT%nDtT4=zAp0%I374(JFqe%n|sUftPh>AbVHsc(d1ZltuqdMziO<;$d_{*j}Y$9MG zCX5C2PY(N+5W58OMjmId*JTzwY*Xi0phdw*Lya8+Q9l3K&*Pd$&#ODNeWQ}M%ig68 z<`aiNgZ2v39{*Is^9WU`r3N{;2RROH2*0{X^P7`vZ^TeKJIn z>lCfW`(<;P!HbU)dH3d15`P>%H5{iTd*iSAkK_&H!bjFlv_ zLfswo)j;MBW8hX95{RT&7O<3?cwkRX>Y(D$rqBa>x}FR>gG+p%im1ebkZUJrUS6gf zn`3+P&}HMTC>FJTGXn>xmm3+q`)aYta<6h4?;9@q5#w7y00|NAjXgj0cLZ-~g;cHW z-yC!|9NJf5u&Y=dsBZC28RXOy9e)OkHq0tq15SB))k$_l{cLX8i|^49dECYY%sNlN zpKhYlL0-1^$D4emqDkB*yf9d!@O_5<%ez^QN?X#z;G@SClD-L{Q{lf6_u-D7y5O7e zK>WEmKZayZ7Yy|g@ISHNKY%i}ED|tTMmGo*6Qcmce`oEGg!-b=Q27M?KWR(N zYw>nq0#zEpjOa_7U=STKoZ{26o{TS3D+A9|)7kNP>oJJtT2T*qkUmjgg^#v1$}>y5 zVEP;y5yW`s4>pdc>OP5|SK@X30SPeB1trF5IJc zFkV!5canctXwcpq3FyZjfNSy@`^W}QJodNmsT2LFe^E~H$iX;n2>>yqN%0_dxMJ9` z=M&gXUFuJxbsz4~oPs+vkvHwN0Yh8;C4aiHg;x$Y)MfMV@`DGygA48VZ?%)qu?W+2 zXCw`4x=U8$>2QK{7##%G@$3E&`@>&G5A~=lTmYIe$%#$^EFrH-5ZVm zL~*E<7;9iuF$RaEz6G&oP~sTe?^p;>dE^1D?>nWm3A00K8PTe-pNI^reB+j-ZLMxB zd_0sB=$t;t(qd0D9zq^Y0heRvKzTO}8Gd7^N*LTnc=W?L5`8uY-BzOk@T}$J^$eit zsTYs7gxg|E_ZjnC%-6d(3bbZ&KXmWz-^`@MuscndJ82hl%$yaP)^fl2f)8)qtU2vO zNmvp=@Q{~dX43yIJnl)j8(@Y3M64&iYMVZa=0B=dG+B3Q*x~v&%g&qH2p5IB9_8mk z)@=^FTl#^tO+H)C2O_&qi66@lVTejA^vq*;i;=TA1!kBB5Q1G2dzUC;{_x%_dN=Dk z$j__(Meq7l8n2hIksusF!N7-IQ{0@93uy6mcz><`xXdTXg}B9=cDy5Dzyl%Evj9vz z4%4QLv(s~5Xh`(KfY>D5tL3&Cw-K6yO&y zfxM;TW86`MG>0i$IZv-mbTqxpKPP*7mD&C-z<{678&L^4y`wLM#HBPtO@AmmaNrRc zn<3sI*$yGjCNK>|gcVQXd8LOym+t7wgtIBb=y^?}(+?+dZG)-&-;@K`*WU>dEFNqs z!uBQAq-{(f_~J3&wYyT(%OG8%;xgx_Y~wN zWwb8>4{ElCDJEP832wv|EbT4)GO@oE_UZfjB1Rknsk%Y4dkgY%TEc{$6-=Y`I9NS} z3&nA~=`=q^7bZjeBQD*+xDMoey(iS2vjdC*Cbhq$GKZU^)!I4jhD%_Pmx#$kkDvaZ;8|4mH`TkgxDBPOfCl)93 zJf3Z}Sq#bFzfhe2E;8Jq;@oDIJyUvap8|Qj}uzh=99Hm#=vKf(|;;lZ()e6R2 zj8Qu1W2;F+aY%bKvPgr3=E&$Q7rS358*cZJgBR6-D*!!2v`5nK$2>{zRj zOZ&WW&*n8ZKQ>{5v}L#5H*nsHxs!l~moC0kNW@J^(n*YelQw5|MT>KYWw2?Pzh)4e zk~l_K{~%__odyYeC~bA_r8?YJ%CFkhcvlhT?xII{ZU#$ISG?cMl-Cmebw0IvixCW++(_KfwW5C z_VO+^l*JK~=D?o!R$8}&9o&lO$0|MA{FAl3F4h{1LE20^KjQ^3pUHfSA7!Q|bB`$s zeep~8`j|B0EOvY4L(0-#EfJey0g&Ry(5@{nJ_Drk+?YxseRI>DN}^qK^5D3=?0&5{ z3$Y4%Wr`r-U2iFXH+P*D>8!LaiUi#y56_PGw3#x$I4Z;Q&`aF1{URlxS$`9hjjOMR z&M3XW((ZHD5_`>iaC}Rb@*VXnnnBwEVMxUYNc@0>Dju*RBBWQ=w4E(4v&9ztVQ(g{ zPWweZ;DC0JmdIJTSOy9n6nhro+0A^=MV&6<#i^&Nr)E06FUx0IW^OwRP?zF3py^hY zl9c&PP}i$Y>q{5gWU*aq%IDo7J7zAFHBPn|uUKzJ%5iGUh;uzy8#+JM<5SRjXYFLi zs=y_##RtQ$4N`&C$9n6=Z^q8W*A~8s&LWZQV>LgSGxKY^S}bSK;PiH0Wj085z#aJZ zC)tZ~a5%-Wv*zHJ+s~bWn|4`fZABwH)93-Gzt?!ip2G8Qr6lBX3&{@eWV>hp7S3^x zo5B6jq8DBr>QcD&;NBBn$4k_~6?4%+yfKnNMjUbsxETv=aC%5*2z#;|vY-GCBTP;k z%X#T(YieGj3r#IM*hq@#JS+*mMM?g+Y1Fgorlr^g>SCSI@B%zbo!|@l^nKyPf~u1y-Hc5SDiXoVT;HCFSlJsZUIh;mikd09Pe(3 zEAV9%Q?L;<-1}HR0#It>3>3?GuRX(;Fxup*vbM?_y?R9@A3qh!U&d4V1-{m6Dtne5 z;W76}AEfXOc$9w}%)k(=*~6c5sjedUXXK4*t(;FdD6TqBy9SVI1llJyZf&jnx%rC9 zwAd`qEwJM;2)uBKv8qs(8nX$l;cu&2oRMz$GU<>=QCyk9C-9;EN|&^(p_9Ai+9!@e{*%VbVvaJh17q5+SUN9xF!~3rzlm% z*rqW*W!>FBw6H_(x&WU1eki;3QGn6dYe#gzUF|ZkOjS%^lG*7PSpmvARY!z9Vbnus z*ge%LImDcrTQca2vzQ6x$lFtq5oH?fiU%6Ef|+52?rcs>dURBrj=Ql91!G;o^g=q^Mk`N^{I-jpLz94h5fQo z08oJ8N*YGu(!1om^f(a7$zOBK?ybsP#r<0YjKHKI!!_x^u@b)!xCL2mowd0!dp5oM zCd2JWfOSF*+0G|N0HzhyIPYnCmU8YrcSdjV#Dd6O&-gj{TMqlJ{Z6`tm?XL{T?*x# zW;#z+r%bDqlCQTu2{>5_WvO{NyZ-*8k(efn(Xvs$n}HM_OsDMcrRVAGm6qX1KB$LL zJ1?4wPJv1=LURhpci8Ef_&BemmFSHpG-4Stw;pw7X94m#sK-mNZ zV;gPlHbx8z%E_g05YB2~5GfV){`xt)R~xkq9)jTwWTxt>*OxBJ)}q#B;e)SZyP}Uyy*Ii3j!7ohz2HdQy%>0GmYQlXcwb@g(@=_+h*WfL z)u3FsNY>dclKcyiX-%A}G5)J8=RS#DnA@fl4h;#wDBcp_vN4j{{$58$?9Nt3G+m_E0Zeze?z4QIyr%88U&0iD&OZm==BpxngbaKmD4s zAL#NKMIbucMWjS3tXw(55;OCzx!ka)y%AtBbtaI^QNP_JG&;8n+Z-OP#AdG{w>qi@ zUWZZfNRmX@o`QpfFSgA+5PdJU#vl`9FDYN3_wIm>T~yJQ^ zm+$Y7rR}~JT(%(pD1AaV+WZW|t0uBG##q_9v0rUI9edyJp{avR$882>1lWdbw;){G ztQ;~T0$vK=P69l`-2^rgL4vK10RREY%R;QSgn`wTUcDIJ{QbC(=?`Zyk?QMRnx1a? zbGMbV%B|jg^W}V^6mAwdV-}-sEqPGy?R5ibGaT2x88N{=oy3ltz-I^q(1@eD;c6XP zV|BOMX2v0EdqZ;$j-C?U$JZ!uaAaqgpXJo%cW-r&(i8`WyfSAbQwDP41qKDouPJIovxB^X^#`Du;M~c!9EYJ)NHuaX0WIxsOhPF>X^ z0+b-&%DRh$o2L4lD(ODJ%%@$7P1~2#R%g$b7)@^G_NscSW-6(J?y9h3N_Ygv_{#Ss z=D>E2>*I-J>D^C~N>FxySSqu7fpO?)q?fm)rFq}pw4Gn}l1b;%m)MN2Ja_%3yz{mf z>GT#lS*9w%M)%}RfEEr=r0(X5`2I<3Cg1alijw1ywF=+qpo}mM2v{LdN4khYi`b25Losaf(S2k6D%D-6lOAR4pbnrX9=^pe!7SVp)9urfp)exVN~< z=-koi0+C;1t~C)ek?!GZ%WtIFoqlwm^X*hDkp8um&y%9K4LFa7fmeb%dBgR*b*SRy z=HVfLrIWRj+KC395QgZpiDd{ zZET>hpV?r(EAMTXH^fDnF?1Y&T(DE}wRj^TW6HFDNm~x7pnw-v+$|n!Q6Jd@ z=t_tO^JiNANZEZX>Ow(P3}!Iv`kj-!#j6YzJSw%~2ZF$QsF`+19JmTNUz}_2zyI)B z(@tZ_6SNoyKUEL0L*d-XiI>dB>s2KEkNART$K|X$L6e1J1Ita)E1zv6mO{<8PUE&+ zb=8#>=7{vD@D^mhMR%$*$>?%P5by`l>LK+}Z;O{T!k+E^Bpb&@$s@<0@^PDXVkJY$ zhrGJ(X9brG9#RUV>Qw*_l)FZoUC0%9G45jvd6(Llx4aFqAHM$W5w-^8_>eEaw*>fk zdb#`Bx)AOr>bn-ycWaYhdPd69xbQF@=Zv&z?OT4WjK_;EX~tFnne~G87l%Li9(er< zlm7-6cXUa_|I(0Q>&@5Xlna6#U`6L^tn_VsRm|LeE+8!|^}vxqaFS`Ew`?H+Ko6AA z${<)R8TY-#&uXpp%;ICy0a5i&V5L-7^RN5wFM132`ba5A3YA{cSqy(R_03ho*uE