diff --git a/cime_config/testdefs/testmods_dirs/clm/SaveHistFieldList/user_nl_clm b/cime_config/testdefs/testmods_dirs/clm/SaveHistFieldList/user_nl_clm index 81b7c43b11..3e29214769 100644 --- a/cime_config/testdefs/testmods_dirs/clm/SaveHistFieldList/user_nl_clm +++ b/cime_config/testdefs/testmods_dirs/clm/SaveHistFieldList/user_nl_clm @@ -1,3 +1,3 @@ hist_fields_list_file = .true. hist_wrtch4diag = .true. -calc_human_stress_indices = 'ALL' +calc_human_stress_indices = 'ALL' \ No newline at end of file diff --git a/src/cpl/share_esmf/lnd_set_decomp_and_domain.F90 b/src/cpl/share_esmf/lnd_set_decomp_and_domain.F90 index 5a2536f63b..187c9a8b51 100644 --- a/src/cpl/share_esmf/lnd_set_decomp_and_domain.F90 +++ b/src/cpl/share_esmf/lnd_set_decomp_and_domain.F90 @@ -69,7 +69,7 @@ subroutine lnd_set_decomp_and_domain_from_readmesh(driver, vm, meshfile_lnd, mes ! local variables type(ESMF_Mesh) :: mesh_maskinput type(ESMF_Mesh) :: mesh_lndinput - type(ESMF_DistGrid) :: distgrid_ctsm + type(ESMF_DistGrid) :: distgrid_ctsm ! This appears to be local but is used later in lnd_import_export type(ESMF_Field) :: field_lnd type(ESMF_Field) :: field_ctsm type(ESMF_RouteHandle) :: rhandle_lnd2ctsm @@ -84,7 +84,7 @@ subroutine lnd_set_decomp_and_domain_from_readmesh(driver, vm, meshfile_lnd, mes integer , pointer :: gindex_ctsm(:) ! global index space for land and ocean points integer , pointer :: lndmask_glob(:) real(r8) , pointer :: lndfrac_glob(:) - real(r8) , pointer :: lndfrac_loc_input(:) + real(r8) , pointer :: lndfrac_loc_input(:) => null() real(r8) , pointer :: dataptr1d(:) !------------------------------------------------------------------------------- @@ -149,6 +149,7 @@ subroutine lnd_set_decomp_and_domain_from_readmesh(driver, vm, meshfile_lnd, mes end if call t_stopf('lnd_set_decomp_and_domain_from_readmesh: ESMF mesh') call t_startf ('lnd_set_decomp_and_domain_from_readmesh: final') + call t_startf ('lnd_set_decomp_and_domain_from_readmesh: decomp_init') ! 
Determine lnd decomposition that will be used by ctsm from lndmask_glob call t_startf ('decompInit_lnd') @@ -163,7 +164,7 @@ subroutine lnd_set_decomp_and_domain_from_readmesh(driver, vm, meshfile_lnd, mes ! Get JUST gridcell processor bounds ! Remaining bounds (landunits, columns, patches) will be set after calling decompInit_glcp ! so get_proc_bounds is called twice and the gridcell information is just filled in twice - call get_proc_bounds(bounds) + call get_proc_bounds(bounds, only_gridcell=.true.) begg = bounds%begg endg = bounds%endg @@ -197,8 +198,10 @@ subroutine lnd_set_decomp_and_domain_from_readmesh(driver, vm, meshfile_lnd, mes gindex_ctsm(n) = gindex_ocn(n-nlnd) end if end do + call t_stopf ('lnd_set_decomp_and_domain_from_readmesh: decomp_init') ! Generate a new mesh on the gindex decomposition + ! NOTE: The distgrid_ctsm will be used later in lnd_import_export, even though it appears to just be local distGrid_ctsm = ESMF_DistGridCreate(arbSeqIndexList=gindex_ctsm, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return mesh_ctsm = ESMF_MeshCreate(mesh_lndinput, elementDistGrid=distgrid_ctsm, rc=rc) @@ -255,13 +258,52 @@ subroutine lnd_set_decomp_and_domain_from_readmesh(driver, vm, meshfile_lnd, mes end if - ! Deallocate local pointer memory - deallocate(gindex_lnd) - deallocate(gindex_ocn) - deallocate(gindex_ctsm) + ! Deallocate local pointer memory including ESMF objects + call from_readmesh_dealloc( rc ) + if (chkerr(rc,__LINE__,u_FILE_u)) return call t_stopf('lnd_set_decomp_and_domain_from_readmesh: final') + + !=============================================================================== + ! Internal subroutines for this subroutine + contains + !=============================================================================== + + subroutine from_readmesh_dealloc( rc ) + use ESMF, only : ESMF_FieldRedistRelease, ESMF_DistGridDestroy, ESMF_FieldDestroy, ESMF_MeshDestroy + integer, intent(out) :: rc ! 
ESMF return code to indicate deallocate was successful + + logical :: no_esmf_garbage = .true. ! If .true. release all ESMF data (which can be problematic if referenced again) + + rc = ESMF_SUCCESS + + if ( associated(lndfrac_loc_input) ) deallocate(lndfrac_loc_input) + deallocate(gindex_lnd) + deallocate(gindex_ocn) + deallocate(gindex_ctsm) + ! Destroy or release all of the ESMF objects + call ESMF_FieldRedistRelease( rhandle_lnd2ctsm, noGarbage=no_esmf_garbage, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + !-------------------------------------------------------------------------- + ! NOTE: We can't destroy the distgrid -- because it will be used later + ! As such we don't do the following... EBK 08/01/2025 + !call ESMF_DistGridDestroy( distgrid_ctsm, rc=rc) + !if (chkerr(rc,__LINE__,u_FILE_u)) return + !-------------------------------------------------------------------------- + call ESMF_FieldDestroy( field_lnd, noGarbage=no_esmf_garbage, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call ESMF_FieldDestroy( field_ctsm, noGarbage=no_esmf_garbage, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call ESMF_MeshDestroy( mesh_maskinput, noGarbage=no_esmf_garbage, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call ESMF_MeshDestroy( mesh_lndinput, noGarbage=no_esmf_garbage, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end subroutine from_readmesh_dealloc + + !------------------------------------------------------------------------------- + end subroutine lnd_set_decomp_and_domain_from_readmesh !=============================================================================== @@ -331,7 +373,7 @@ subroutine lnd_set_decomp_and_domain_for_single_column(scol_lon, scol_lat, scol_ call t_stopf ('decompInit_lnd') ! Initialize processor bounds - call get_proc_bounds(bounds) + call get_proc_bounds(bounds, only_gridcell=.true.) ! only_gridcell since decomp not fully initialized ! 
Initialize domain data structure call domain_init(domain=ldomain, isgrid2d=.false., ni=1, nj=1, nbeg=1, nend=1) @@ -469,6 +511,7 @@ subroutine lnd_set_lndmask_from_maskmesh(mesh_lnd, mesh_mask, vm, gsize, lndmask character(len=CL) :: flandfrac_status !------------------------------------------------------------------------------- + call t_startf('lnd_set_lndmask_from_maskmesh') rc = ESMF_SUCCESS flandfrac = './init_generated_files/ctsm_landfrac'//trim(inst_suffix)//'.nc' @@ -569,6 +612,7 @@ subroutine lnd_set_lndmask_from_maskmesh(mesh_lnd, mesh_mask, vm, gsize, lndmask deallocate(lndmask_loc) end if + call t_stopf('lnd_set_lndmask_from_maskmesh') end subroutine lnd_set_lndmask_from_maskmesh diff --git a/src/dyn_subgrid/test/dynInitColumns_test/test_init_columns.pf b/src/dyn_subgrid/test/dynInitColumns_test/test_init_columns.pf index ade2e6d955..79ec506f90 100644 --- a/src/dyn_subgrid/test/dynInitColumns_test/test_init_columns.pf +++ b/src/dyn_subgrid/test/dynInitColumns_test/test_init_columns.pf @@ -66,24 +66,24 @@ contains ! The first landunit is neither natural veg nor crop call unittest_add_landunit(my_gi=gi, ltype=istwet, wtgcell=0.25_r8) - call unittest_add_column(my_li=li, ctype=1, wtlunit=0.5_r8) - call unittest_add_column(my_li=li, ctype=1, wtlunit=0.5_r8) + call unittest_add_column(my_li=li, ctype=1, wtlunit=0.5_r8, add_simple_patch=.true.) + call unittest_add_column(my_li=li, ctype=1, wtlunit=0.5_r8, add_simple_patch=.true.) call unittest_add_landunit(my_gi=gi, ltype=1, wtgcell=0.5_r8) this%l1 = li - call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8) - call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8) + call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8, add_simple_patch=.true.) + call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8, add_simple_patch=.true.) ! This column (the second column on the landunit with ltype=1) will be the target for ! 
some tests of initialization of a new column this%c_new = ci - call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8) - call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8) + call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8, add_simple_patch=.true.) + call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8, add_simple_patch=.true.) call unittest_add_landunit(my_gi=gi, ltype=2, wtgcell=0.25_r8) this%l2 = li - call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8) - call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8) - call unittest_add_column(my_li=li, ctype=1, wtlunit=0.5_r8) + call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8, add_simple_patch=.true.) + call unittest_add_column(my_li=li, ctype=1, wtlunit=0.25_r8, add_simple_patch=.true.) + call unittest_add_column(my_li=li, ctype=1, wtlunit=0.5_r8, add_simple_patch=.true.) call unittest_subgrid_setup_end() diff --git a/src/main/abortutils.F90 b/src/main/abortutils.F90 index 8afa4ef195..dd51dbd303 100644 --- a/src/main/abortutils.F90 +++ b/src/main/abortutils.F90 @@ -108,7 +108,7 @@ subroutine write_point_context(subgrid_index, subgrid_level) ! use clm_varctl , only : iulog use decompMod , only : subgrid_level_gridcell, subgrid_level_landunit, subgrid_level_column, subgrid_level_patch - use decompMod , only : get_global_index + use decompMod , only : get_global_index, procinfo use GridcellType , only : grc use LandunitType , only : lun use ColumnType , only : col @@ -124,6 +124,7 @@ subroutine write_point_context(subgrid_index, subgrid_level) integer :: igrc=unset, ilun=unset, icol=unset, ipft=unset ! Local index for grid-cell, landunit, column, and patch integer :: ggrc=unset, glun=unset, gcol=unset, gpft=unset ! Global index for grid-cell, landunit, column, and patch logical :: bad_point = .false. ! Flag to indicate if the point is bad (i.e., global index is -1) + integer :: i, j ! 
2D global gridcell indices !----------------------------------------------------------------------- if (subgrid_level == subgrid_level_gridcell) then @@ -201,17 +202,12 @@ subroutine write_point_context(subgrid_index, subgrid_level) write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': local gridcell index = ', igrc write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': global gridcell index = ', ggrc - write(iulog,'(a, i0, a, f12.7)') 'iam = ', iam, ': gridcell longitude = ', grc%londeg(igrc) - write(iulog,'(a, i0, a, f12.7)') 'iam = ', iam, ': gridcell latitude = ', grc%latdeg(igrc) else if (subgrid_level == subgrid_level_landunit) then write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': local landunit index = ', ilun write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': global landunit index = ', glun write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': global gridcell index = ', ggrc - write(iulog,'(a, i0, a, f12.7)') 'iam = ', iam, ': gridcell longitude = ', grc%londeg(igrc) - write(iulog,'(a, i0, a, f12.7)') 'iam = ', iam, ': gridcell latitude = ', grc%latdeg(igrc) - write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': landunit type = ', lun%itype(subgrid_index) else if (subgrid_level == subgrid_level_column) then @@ -219,10 +215,6 @@ subroutine write_point_context(subgrid_index, subgrid_level) write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': global column index = ', gcol write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': global landunit index = ', glun write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': global gridcell index = ', ggrc - write(iulog,'(a, i0, a, f12.7)') 'iam = ', iam, ': gridcell longitude = ', grc%londeg(igrc) - write(iulog,'(a, i0, a, f12.7)') 'iam = ', iam, ': gridcell latitude = ', grc%latdeg(igrc) - write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': column type = ', col%itype(icol) - write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': landunit type = ', lun%itype(ilun) else if (subgrid_level == subgrid_level_patch) then @@ -231,18 +223,45 @@ subroutine 
write_point_context(subgrid_index, subgrid_level) write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': global column index = ', gcol write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': global landunit index = ', glun write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': global gridcell index = ', ggrc - write(iulog,'(a, i0, a, f12.7)') 'iam = ', iam, ': gridcell longitude = ', grc%londeg(igrc) - write(iulog,'(a, i0, a, f12.7)') 'iam = ', iam, ': gridcell latitude = ', grc%latdeg(igrc) - write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': pft type = ', patch%itype(ipft) - write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': column type = ', col%itype(icol) - write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': landunit type = ', lun%itype(ilun) - else write(iulog,*) 'subgrid_level not supported: ', subgrid_level write(iulog,*) errMsg(sourcefile, __LINE__) write(iulog,*) 'Continuing the endrun without writing point context information' return end if + if ( subgrid_level >= subgrid_level_landunit )then + if ( ilun > 0 )then + write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': landunit type = ', lun%itype(ilun) + else + write(iulog,'(a)') 'WARNING: Trouble getting the landunit index' + end if + if ( subgrid_level >= subgrid_level_column )then + if ( icol > 0 )then + write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': column type = ', col%itype(icol) + else + write(iulog,'(a)') 'WARNING: Trouble getting the column index' + end if + if ( subgrid_level >= subgrid_level_patch )then + if ( ipft > 0 )then + write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': patch type = ', patch%itype(ipft) + else + write(iulog,'(a)') 'WARNING: Trouble getting the patch index' + end if + end if + end if + end if + if ( igrc > 0 ) then + write(iulog,'(a, i0, a, f12.7)') 'iam = ', iam, ': gridcell longitude = ', grc%londeg(igrc) + write(iulog,'(a, i0, a, f12.7)') 'iam = ', iam, ': gridcell latitude = ', grc%latdeg(igrc) + else + write(iulog,'(a)') 'WARNING: Trouble getting the gridcell index' + end if + call 
procinfo%calc_globalxy_indices( igrc, i, j ) + if ( (i /= -1) .and. (j /= -1) ) then + write(iulog,'(a, i0, a, i0)') 'iam = ', iam, ': 2D gridcell indices = (', i, ', ', j, ')' + else + write(iulog,'(a)') 'WARNING: Trouble getting the 2D gridcell indices' + end if end subroutine write_point_context diff --git a/src/main/clm_initializeMod.F90 b/src/main/clm_initializeMod.F90 index 4530fda860..8505276174 100644 --- a/src/main/clm_initializeMod.F90 +++ b/src/main/clm_initializeMod.F90 @@ -222,8 +222,8 @@ subroutine initialize2(ni,nj, currtime) !----------------------------------------------------------------------- call t_startf('clm_init2_part1') - ! Get processor bounds for gridcells - call get_proc_bounds(bounds_proc) + ! Get processor bounds for gridcells, just for gridcells + call get_proc_bounds(bounds_proc, only_gridcell=.true.) ! Just get proc bounds for gridcells, other variables won't be set until after decompInit_clumps begg = bounds_proc%begg; endg = bounds_proc%endg ! Initialize glc behavior @@ -283,7 +283,7 @@ subroutine initialize2(ni,nj, currtime) call t_stopf('clm_decompInit_clumps') ! *** Get ALL processor bounds - for gridcells, landunit, columns and patches *** - call get_proc_bounds(bounds_proc) + call get_proc_bounds(bounds_proc) ! This has to be done after decompInit_clumps is called ! Allocate memory for subgrid data structures ! This is needed here BEFORE the following call to initGridcells @@ -424,7 +424,9 @@ subroutine initialize2(ni,nj, currtime) call SnowAge_init( ) ! SNICAR aging parameters: ! Print history field info to standard out - call hist_printflds() + if ( .not. use_noio )then + call hist_printflds() + end if ! Initializate dynamic subgrid weights (for prescribed transient Patches, CNDV ! 
and/or dynamic landunits); note that these will be overwritten in a restart run diff --git a/src/main/decompInitMod.F90 b/src/main/decompInitMod.F90 index aa575bd787..166b58959b 100644 --- a/src/main/decompInitMod.F90 +++ b/src/main/decompInitMod.F90 @@ -1,9 +1,11 @@ -module decompInitMod + module decompInitMod + +#include "shr_assert.h" !------------------------------------------------------------------------------ ! !DESCRIPTION: - ! Module provides a descomposition into a clumped data structure which can - ! be mapped back to atmosphere physics chunks. + ! Module provides a decomposition into a clumped data structure for the land + ! model with gridcells assigned to clumps in a round-robin fashion to processors. ! ! !USES: use shr_kind_mod , only : r8 => shr_kind_r8 @@ -12,6 +14,7 @@ module decompInitMod use spmdMod , only : masterproc, iam, npes, mpicom use abortutils , only : endrun use clm_varctl , only : iulog + use perf_mod , only : t_startf, t_stopf ! implicit none private @@ -29,8 +32,7 @@ module decompInitMod integer, public :: clump_pproc ! number of clumps per MPI process ! ! !PRIVATE TYPES: - integer, pointer :: lcid(:) ! temporary for setting decomposition - integer :: nglob_x, nglob_y ! global sizes + integer, pointer :: lcid(:) ! temporary for setting decomposition, allocated set and used in decompInit_lnd, and used and deallocated in decompInit_clumps (Can make it allocatable) integer, parameter :: dbug=0 ! 0 = min, 1=normal, 2=much, 3=max character(len=*), parameter :: sourcefile = & __FILE__ @@ -52,6 +54,7 @@ subroutine decompInit_lnd(lni, lnj, amask) use clm_varctl , only : nsegspc use decompMod , only : gindex_global, nclumps, clumps use decompMod , only : bounds_type, get_proc_bounds, procinfo + use decompMod , only : nglob_x, nglob_y ! ! !ARGUMENTS: integer , intent(in) :: amask(:) @@ -69,9 +72,12 @@ subroutine decompInit_lnd(lni, lnj, amask) integer :: n,m,ng ! indices integer :: ier ! error code integer :: begg, endg ! 
beg and end gridcells - integer, pointer :: clumpcnt(:) ! clump index counter - integer, allocatable :: gdc2glo(:)! used to create gindex_global + !--------------------------------------------------------------------- type(bounds_type) :: bounds ! contains subgrid bounds data + !--------------------------------------------------------------------- + integer :: i, j, g, lc, cid_previous ! Indices + integer :: cell_id_offset ! The offset for the starting gridcell number for this processor + integer :: begcid, endcid ! Beginning and ending cid's for this processor !------------------------------------------------------------------------------ ! Set some global scalars: nclumps, numg and lns call decompInit_lnd_set_nclumps_numg_lns( ) @@ -83,58 +89,26 @@ subroutine decompInit_lnd(lni, lnj, amask) call decompInit_lnd_allocate( ier ) if (ier /= 0) return - ! Initialize procinfo and clumps - ! beg and end indices initialized for simple addition of cells later - - procinfo%nclumps = clump_pproc - procinfo%cid(:) = -1 - procinfo%ncells = 0 - procinfo%nlunits = 0 - procinfo%ncols = 0 - procinfo%npatches = 0 - procinfo%nCohorts = 0 - procinfo%begg = 1 - procinfo%begl = 1 - procinfo%begc = 1 - procinfo%begp = 1 - procinfo%begCohort = 1 - procinfo%endg = 0 - procinfo%endl = 0 - procinfo%endc = 0 - procinfo%endp = 0 - procinfo%endCohort = 0 - - clumps(:)%owner = -1 - clumps(:)%ncells = 0 - clumps(:)%nlunits = 0 - clumps(:)%ncols = 0 - clumps(:)%npatches = 0 - clumps(:)%nCohorts = 0 - clumps(:)%begg = 1 - clumps(:)%begl = 1 - clumps(:)%begc = 1 - clumps(:)%begp = 1 - clumps(:)%begCohort = 1 - clumps(:)%endg = 0 - clumps(:)%endl = 0 - clumps(:)%endc = 0 - clumps(:)%endp = 0 - clumps(:)%endCohort = 0 + ! Initialize clumps + + call clumps(:)%Init() ! assign clumps to proc round robin cid = 0 do n = 1,nclumps pid = mod(n-1,npes) if (pid < 0 .or. 
pid > npes-1) then - write(iulog,*) 'decompInit_lnd(): round robin pid error ',n,pid,npes - call endrun(msg=errMsg(sourcefile, __LINE__)) + write(iulog,*) 'Round robin pid error: n, pid, npes = ',n,pid,npes + call endrun(msg="Round robin pid error", file=sourcefile, line=__LINE__) + return endif - clumps(n)%owner = pid if (iam == pid) then + clumps(n)%owner = pid cid = cid + 1 if (cid < 1 .or. cid > clump_pproc) then - write(iulog,*) 'decompInit_lnd(): round robin pid error ',n,pid,npes - call endrun(msg=errMsg(sourcefile, __LINE__)) + write(iulog,*) 'round robin pid error ',n,pid,npes + call endrun(msg="round robin pid error", file=sourcefile, line=__LINE__) + return endif procinfo%cid(cid) = n endif @@ -171,77 +145,111 @@ subroutine decompInit_lnd(lni, lnj, amask) endif lcid(ln) = cid - !--- give gridcell cell to pe that owns cid --- - !--- this needs to be done to subsequently use function - !--- get_proc_bounds(begg,endg) + ! Get the total number of gridcells for the local processor if (iam == clumps(cid)%owner) then procinfo%ncells = procinfo%ncells + 1 endif - if (iam > clumps(cid)%owner) then - procinfo%begg = procinfo%begg + 1 - endif - if (iam >= clumps(cid)%owner) then - procinfo%endg = procinfo%endg + 1 - endif - !--- give gridcell to cid --- - !--- increment the beg and end indices --- - clumps(cid)%ncells = clumps(cid)%ncells + 1 - do m = 1,nclumps - if ((clumps(m)%owner > clumps(cid)%owner) .or. & - (clumps(m)%owner == clumps(cid)%owner .and. m > cid)) then - clumps(m)%begg = clumps(m)%begg + 1 - endif - - if ((clumps(m)%owner > clumps(cid)%owner) .or. & - (clumps(m)%owner == clumps(cid)%owner .and. m >= cid)) then - clumps(m)%endg = clumps(m)%endg + 1 - endif - enddo + !--- give gridcell to cid for local processor --- + if (iam == clumps(cid)%owner) then + clumps(cid)%ncells = clumps(cid)%ncells + 1 + end if end if enddo - ! 
clumpcnt is the ending gdc index of each clump - - ag = 0 clumpcnt = 0 ag = 1 do pid = 0,npes-1 do cid = 1,nclumps if (clumps(cid)%owner == pid) then clumpcnt(cid) = ag ag = ag + clumps(cid)%ncells endif enddo enddo - - ! now go through gridcells one at a time and increment clumpcnt - ! in order to set gdc2glo - - do aj = 1,lnj - do ai = 1,lni - an = (aj-1)*lni + ai - cid = lcid(an) - if (cid > 0) then - ag = clumpcnt(cid) - gdc2glo(ag) = an - clumpcnt(cid) = clumpcnt(cid) + 1 + !--------------------------------------------------------------------- + ! + ! Do an MPI_SCAN to get the starting index for each processor ---- + ! [Doing this both simplifies the code, reduces non-scalable memory + ! and reduces execution time for loops that run over all gridcells + ! for each processor.] + ! (The following few lines of code removed about 50 lines of complex code + ! as well as loops of size: ni*nj*nclumps, npes*nclumps, and ni*nj + ! that was being done on each processor) + !--------------------------------------------------------------------- + call MPI_SCAN(procinfo%ncells, cell_id_offset, 1, MPI_INTEGER, & + MPI_SUM, mpicom, ier) + if ( ier /= 0 )then + call endrun(msg='Error from MPI_SCAN', file=sourcefile, line=__LINE__) + end if + cell_id_offset = cell_id_offset + 1 + procinfo%begg = cell_id_offset - procinfo%ncells + procinfo%endg = cell_id_offset - 1 + ! ---- Set begg and endg for each clump on this processor ---- + do lc = 1, clump_pproc + cid = procinfo%cid(lc) + clumps(cid)%ncells = clumps(cid)%ncells ! This line will be removed + if ( lc == 1 )then + clumps(cid)%begg = procinfo%begg + else + cid_previous = procinfo%cid(lc-1) + clumps(cid)%begg = clumps(cid_previous)%endg + 1 end if - end do + clumps(cid)%endg = clumps(cid)%begg + clumps(cid)%ncells - 1 + cid_previous = cid end do ! Initialize global gindex (non-compressed, includes ocean points) ! Note that gindex_global goes from (1:endg) - call get_proc_bounds(bounds) ! 
This has to be done after procinfo is finalized - call decompInit_lnd_gindex_global_allocate( bounds, ier ) ! This HAS to be done after prcoinfo is finalized + call get_proc_bounds(bounds, only_gridcell=.true.) ! This has to be done after procinfo is finalized + call decompInit_lnd_gindex_global_allocate( bounds, ier ) ! This HAS to be done after procinfo is finalized if (ier /= 0) return nglob_x = lni ! decompMod module variables nglob_y = lnj ! decompMod module variables + + !--------------------------------------------------------------------- + + ! Get the global vector index on the full grid for each local processor's gridcell + g = procinfo%begg + do lc = 1, clump_pproc + do ln = 1,lns + if (amask(ln) == 1) then + cid = lcid(ln) + if ( cid > 0 )then + if (clumps(cid)%owner == iam) then + if ( procinfo%cid(lc) == cid ) then + if ( (g < procinfo%begg) .or. (g > procinfo%endg) )then + write(iulog,*) ' iam, g = ', iam, g + call endrun(msg='g out of bounds for MPI_SCAN', file=sourcefile, line=__LINE__) + end if + procinfo%ggidx(g) = ln + g = g + 1 + end if + end if + end if + end if + end do + end do + + ! ---- Get the global index for each gridcell and save the i,j indices for each gridcell on this processor do n = procinfo%begg,procinfo%endg - gindex_global(n-procinfo%begg+1) = gdc2glo(n) - enddo + gindex_global(n-procinfo%begg+1) = procinfo%ggidx(n) ! Change this to gindex_global when ready + call procinfo%calc_globalxy_indices( n, lni, lnj, i, j ) + procinfo%gi(n) = i + procinfo%gj(n) = j + end do + + !--------------------------------------------------------------------- + ! 
General error checking that the decomposition data is setup correctly + !--------------------------------------------------------------------- + begcid = procinfo%cid(1) + endcid = procinfo%cid(clump_pproc) + call shr_assert(clumps(begcid)%begg == procinfo%begg, & + msg='decompInit_lnd(): clumps(begcid) begg does not match procinfo begg') + call shr_assert(clumps(endcid)%endg == procinfo%endg, & + msg='decompInit_lnd(): clumps(endcid) endg does not match procinfo endg') + call shr_assert(sum(clumps(procinfo%cid)%ncells) == procinfo%ncells, & + msg='decompInit_lnd(): sum of clumps ncells does not match procinfo ncells') + + do lc = 1, clump_pproc + cid = procinfo%cid(lc) + call shr_assert( (clumps(cid)%endg-clumps(cid)%begg+1) == clumps(cid)%ncells, & + msg='decompInit_lnd(): clumps(cid) endg-begg+1 does not match clumps ncells') + end do + call shr_assert( (procinfo%endg-procinfo%begg+1) == procinfo%ncells, & + msg='decompInit_lnd(): procinfo endg-begg+1 does not match procinfo ncells') call decompInit_lnd_clean() @@ -266,47 +274,32 @@ subroutine decompInit_lnd(lni, lnj, amask) subroutine decompInit_lnd_allocate( ier ) ! Allocate the temporary and long term variables set and used in decompInit_lnd integer, intent(out) :: ier ! error code + !------------------------------------------------------------------------------ ! ! Long-term allocation: ! Arrays from decompMod are allocated here - ! TODO: This should move to a method in decompMod - ! as should the deallocates ! ! Temporary allocation: ! Allocate some temporaries used only in decompInit_lnd ! ! NOTE: nclumps, numg, and lns must be set before calling this routine! ! So decompInit_lnd_set_nclumps_numg_lns must be called first + !------------------------------------------------------------------------------ + !------------------------------------------------------------------------------ ! 
Allocate the longer term decompMod data - allocate(procinfo%cid(clump_pproc), stat=ier) - if (ier /= 0) then - call endrun(msg='allocation error for procinfo%cid', file=sourcefile, line=__LINE__) - return - endif + !------------------------------------------------------------------------------ + call procinfo%InitAllocate( clump_pproc ) - if ( nclumps < 1 )then - call endrun(msg="nclumps is NOT set before allocation", file=sourcefile, line=__LINE__) - return - end if - allocate(clumps(nclumps), stat=ier) - if (ier /= 0) then - write(iulog,*) 'allocation error for clumps: nclumps, ier=', nclumps, ier - call endrun(msg='allocation error for clumps', file=sourcefile, line=__LINE__) - return - end if + call decompmod_allocate_clumps( ) + !------------------------------------------------------------- + ! Temporary arrays that are just used in decompInit_lnd + !------------------------------------------------------------- if ( numg < 1 )then call endrun(msg="numg is NOT set before allocation", file=sourcefile, line=__LINE__) return end if - allocate(gdc2glo(numg), stat=ier) - if (ier /= 0) then - call endrun(msg="allocation error for gdc2glo", file=sourcefile, line=__LINE__) - return - end if - - ! Temporary arrays that are just used in decompInit_lnd if ( lns < 1 )then call endrun(msg="lns is NOT set before allocation", file=sourcefile, line=__LINE__) return @@ -316,11 +309,6 @@ subroutine decompInit_lnd_allocate( ier ) call endrun(msg="allocation error for lcid", file=sourcefile, line=__LINE__) return end if - allocate(clumpcnt(nclumps),stat=ier) - if (ier /= 0) then - call endrun(msg="allocation error for clumpcnt", file=sourcefile, line=__LINE__) - return - end if end subroutine decompInit_lnd_allocate @@ -332,25 +320,16 @@ subroutine decompInit_lnd_gindex_global_allocate( bounds, ier ) type(bounds_type), intent(in) :: bounds ! 
contains subgrid bounds data ier = 0 - if ( bounds%endg < 1 )then - ier = 1 - call endrun(msg="endg is NOT set before allocation", file=sourcefile, line=__LINE__) - return - end if - allocate(gindex_global(1:bounds%endg), stat=ier) - if (ier /= 0) then - call endrun(msg="allocation error for gindex_global", file=sourcefile, line=__LINE__) - return - end if + call decompmod_allocate_gindex( bounds%endg ) + call procinfo%AllocateAfterGCellSet() end subroutine decompInit_lnd_gindex_global_allocate !------------------------------------------------------------------------------ subroutine decompInit_lnd_clean() - ! Deallocate the temporary variables used in decompInit_lnd - deallocate(clumpcnt) - deallocate(gdc2glo) - !deallocate(lcid) + ! Currently there isn't any memory to clean up here + !--- NOTE: Can only deallocate lcid after decompInit_clumps ---- + ! TODO: Move the deallocate for lcid to here, after decompInit_clumps only calculates the local task end subroutine decompInit_lnd_clean !------------------------------------------------------------------------------ @@ -468,8 +447,9 @@ subroutine decompInit_clumps(lni,lnj,glc_behavior) character(len=32), parameter :: subname = 'decompInit_clumps' !------------------------------------------------------------------------------ + call t_startf('decompInit_clumps') !--- assign gridcells to clumps (and thus pes) --- - call get_proc_bounds(bounds) + call get_proc_bounds(bounds, only_gridcell=.true.) begg = bounds%begg; endg = bounds%endg allocate(allvecl(nclumps,5)) ! local clumps [gcells,lunit,cols,patches,coh] @@ -574,25 +554,53 @@ subroutine decompInit_clumps(lni,lnj,glc_behavior) enddo do n = 1,nclumps + ! Only do the error checking over the local processor + if (clumps(n)%owner == iam) then if (clumps(n)%ncells /= allvecg(n,1) .or. & clumps(n)%nlunits /= allvecg(n,2) .or. & clumps(n)%ncols /= allvecg(n,3) .or. & clumps(n)%npatches /= allvecg(n,4) .or. 
& clumps(n)%nCohorts /= allvecg(n,5)) then - write(iulog ,*) 'decompInit_glcp(): allvecg error ncells ',iam,n,clumps(n)%ncells ,allvecg(n,1) - write(iulog ,*) 'decompInit_glcp(): allvecg error lunits ',iam,n,clumps(n)%nlunits ,allvecg(n,2) - write(iulog ,*) 'decompInit_glcp(): allvecg error ncols ',iam,n,clumps(n)%ncols ,allvecg(n,3) - write(iulog ,*) 'decompInit_glcp(): allvecg error patches',iam,n,clumps(n)%npatches ,allvecg(n,4) - write(iulog ,*) 'decompInit_glcp(): allvecg error cohorts',iam,n,clumps(n)%nCohorts ,allvecg(n,5) + write(iulog ,*) 'allvecg error: iam,n ',iam,n + write(iulog ,*) 'allvecg error ncells,allvecg ',iam,n,clumps(n)%ncells ,allvecg(n,1) + write(iulog ,*) 'allvecg error lunits,allvecg ',iam,n,clumps(n)%nlunits ,allvecg(n,2) + write(iulog ,*) 'allvecg error ncols,allvecg ',iam,n,clumps(n)%ncols ,allvecg(n,3) + write(iulog ,*) 'allvecg error patches,allvecg',iam,n,clumps(n)%npatches ,allvecg(n,4) + write(iulog ,*) 'allvecg error cohorts,allvecg',iam,n,clumps(n)%nCohorts ,allvecg(n,5) - call endrun(msg=errMsg(sourcefile, __LINE__)) + call endrun(msg="allvecg error cohorts", file=sourcefile, line=__LINE__) + return + endif endif enddo deallocate(allvecg,allvecl) deallocate(lcid) + ! ------ Reset the clump type array for all non-local cid's to -1 to show it can be made smaller + ! TODO: Remove this when https://github.com/ESCOMP/CTSM/issues/3466 is done + do cid = 1, nclumps + if (clumps(cid)%owner /= iam) then + clumps(cid)%owner = -1 + clumps(cid)%ncells = -1 + clumps(cid)%nlunits = -1 + clumps(cid)%ncols = -1 + clumps(cid)%npatches = -1 + clumps(cid)%nCohorts = -1 + clumps(cid)%begg = -1 + clumps(cid)%begl = -1 + clumps(cid)%begc = -1 + clumps(cid)%begp = -1 + clumps(cid)%begCohort = -1 + clumps(cid)%endg = -1 + clumps(cid)%endl = -1 + clumps(cid)%endc = -1 + clumps(cid)%endp = -1 + clumps(cid)%endCohort = -1 + end if + end do + ! 
Diagnostic output call get_proc_global(ng=numg, nl=numl, nc=numc, np=nump, nCohorts=numCohort) @@ -685,6 +693,7 @@ subroutine decompInit_clumps(lni,lnj,glc_behavior) call shr_sys_flush(iulog) call mpi_barrier(mpicom,ier) end do + call t_stopf('decompInit_clumps') end subroutine decompInit_clumps @@ -744,6 +753,8 @@ subroutine decompInit_glcp(lni,lnj,glc_behavior) integer :: gsize Character(len=32), parameter :: subname = 'decompInit_glcp' !------------------------------------------------------------------------------ + call t_startf('decompInit_glcp') + ! Get processor bounds call get_proc_bounds(bounds) @@ -969,6 +980,8 @@ subroutine decompInit_glcp(lni,lnj,glc_behavior) deallocate(start_global) if (allocated(index_lndgridcells)) deallocate(index_lndgridcells) + call t_stopf('decompInit_glcp') + end subroutine decompInit_glcp end module decompInitMod diff --git a/src/main/decompMod.F90 b/src/main/decompMod.F90 index adf85fa5b7..b3206997c0 100644 --- a/src/main/decompMod.F90 +++ b/src/main/decompMod.F90 @@ -9,7 +9,9 @@ module decompMod use shr_kind_mod, only : r8 => shr_kind_r8 use shr_sys_mod , only : shr_sys_abort ! use shr_sys_abort instead of endrun here to avoid circular dependency + use shr_abort_mod , only : shr_abort_abort ! as above use clm_varctl , only : iulog + use clm_varctl , only : use_fates ! ! !PUBLIC TYPES: implicit none @@ -46,6 +48,8 @@ module decompMod public :: get_subgrid_level_from_name ! Given a name like nameg, return a subgrid level index like subgrid_level_gridcell public :: get_subgrid_level_gsize ! get global size associated with subgrid_level public :: get_subgrid_level_gindex ! get global index array associated with subgrid_level + public :: decompmod_allocate_clumps ! Allocate clumps array + public :: decompmod_allocate_gindex ! Allocate the global index arrays based on the input endg for this processor public :: decompmod_clean ! Deallocate memory used by decompMod ! 
!PRIVATE MEMBER FUNCTIONS: @@ -67,7 +71,10 @@ module decompMod !---global information on each pe type processor_type integer :: nclumps ! number of clumps for processor_type iam - integer,pointer :: cid(:) ! clump indices + integer,pointer :: cid(:) => null() ! clump indices + integer,pointer :: ggidx(:) => null() ! global vector index on the full 2D grid + integer,pointer :: gi(:) => null() ! global index on the full 2D grid in "x" (longitude for structured) + integer,pointer :: gj(:) => null() ! global index on the full 2D grid in "y" (latitude for structured, 1 for unstructured) integer :: ncells ! number of gridcells in proc integer :: nlunits ! number of landunits in proc integer :: ncols ! number of columns in proc @@ -78,6 +85,11 @@ module decompMod integer :: begc, endc ! beginning and ending column index integer :: begp, endp ! beginning and ending patch index integer :: begCohort, endCohort ! beginning and ending cohort indices + contains + procedure, public :: InitAllocate ! Allocate memory for processor_type arrays based on nclumps and proc totals + procedure, public :: AllocateAfterGCellSet ! Allocate memory for rest of the processor_type arrays after the gridcell bounds are figured out + procedure, public :: calc_global_index_fromij ! Get the global index for the input grid i/j index on this processor + procedure, public :: calc_globalxy_indices ! Get the global i/j indices from the global vector grid index end type processor_type public processor_type type(processor_type),public :: procinfo @@ -95,11 +107,14 @@ module decompMod integer :: begc, endc ! beginning and ending column index integer :: begp, endp ! beginning and ending patch index integer :: begCohort, endCohort ! beginning and ending cohort indices + contains + procedure, public, pass(this) :: Init ! Initialize the clump values end type clump_type public clump_type type(clump_type),public, allocatable :: clumps(:) ! ---global sizes + integer,public :: nglob_x = -1, nglob_y = -1 ! 
global sizes on the full 2D grid integer,public :: nclumps ! total number of clumps across all processors integer,public :: numg ! total number of gridcells on all procs integer,public :: numl ! total number of landunits on all procs @@ -114,6 +129,9 @@ module decompMod integer, public, pointer :: gindex_col(:) => null() integer, public, pointer :: gindex_patch(:) => null() integer, public, pointer :: gindex_cohort(:) => null() + + ! --- Only public for unit testing + public :: calc_ijindices_from_full_global_index !------------------------------------------------------------------------------ character(len=*), parameter, private :: sourcefile = & @@ -121,6 +139,179 @@ module decompMod contains + !----------------------------------------------------------------------- + subroutine InitAllocate( this, clump_pproc ) + ! Allocate memory for processor_type arrays based on nclumps and proc totals + class(processor_type), intent(inout) :: this + integer, intent(in) :: clump_pproc ! number of clumps per processor + + integer :: ier ! error code + + allocate(this%cid(clump_pproc), stat=ier) + if (ier /= 0) then + call shr_abort_abort(string='allocation error for this%cid', file=sourcefile, line=__LINE__) + return + endif + ! Initialize the values to something: + ! beg and end indices initialized for simple addition of cells later + + procinfo%nclumps = clump_pproc + procinfo%cid(:) = -1 + procinfo%ncells = 0 + procinfo%nlunits = 0 + procinfo%ncols = 0 + procinfo%npatches = 0 + procinfo%nCohorts = 0 + procinfo%begg = 1 + procinfo%begl = 1 + procinfo%begc = 1 + procinfo%begp = 1 + procinfo%begCohort = 1 + procinfo%endg = 0 + procinfo%endl = 0 + procinfo%endc = 0 + procinfo%endp = 0 + procinfo%endCohort = 0 + end subroutine InitAllocate + + !----------------------------------------------------------------------- + subroutine AllocateAfterGCellSet( this ) + ! 
Allocate memory for processor_type arrays after the gridcell sizes are set + class(processor_type), intent(inout) :: this + + integer :: ier ! error code + ! TODO: Remove the data, and only use the subroutine to calculate when needed + allocate(this%ggidx(this%begg:this%endg), stat=ier) + if (ier /= 0) then + call shr_abort_abort(string='allocation error for this%ggidx', file=sourcefile, line=__LINE__) + return + endif + this%ggidx(:) = -1 + allocate(this%gi(this%begg:this%endg), stat=ier) + if (ier /= 0) then + call shr_abort_abort(string='allocation error for this%gi', file=sourcefile, line=__LINE__) + return + endif + this%gi(:) = -1 + allocate(this%gj(this%begg:this%endg), stat=ier) + if (ier /= 0) then + call shr_abort_abort(string='allocation error for this%gj', file=sourcefile, line=__LINE__) + return + endif + this%gj(:) = -1 + + end subroutine AllocateAfterGCellSet + + !----------------------------------------------------------------------- + pure function calc_global_index_fromij( this, g ) result(global_index) + ! Returns the full grid global vector index from the gridcell on this processor + ! Make this a pure function so it can be called from endrun + ! !ARGUMENTS: + class(processor_type), intent(in) :: this + integer, intent(in) :: g ! gridcell index on this processor + integer :: global_index ! function result, full vector index on the full global grid + + global_index = -1 + if ( .not. associated(this%gi) )then + !write(iulog,*) 'WARNING: gi is not allocated yet' + return + end if + if ( .not. associated(this%gj) )then + !write(iulog,*) 'WARNING: gj is not allocated yet' + return + end if + if ( (g < this%begg) .or. (g > this%endg) ) then + !write(iulog,*) 'WARNING: Input index g is out of bounds of this processor' + return + end if + if ( (nglob_x < 1) .or. (nglob_y < 1) ) then + !write(iulog,*) 'WARNING: Global gridsize nglob_x/nglob_y is not set' + return + end if + if ( (this%gi(g) < 1) .or. 
(this%gi(g) > nglob_x) ) then + !write(iulog,*) 'this%gi(g) = ', this%gi(g) + !write(iulog,*) 'WARNING: Global gi index is out of bounds' + return + end if + if ( (this%gj(g) < 1) .or. (this%gj(g) > nglob_y) ) then + !write(iulog,*) 'this%gj(g) = ', this%gj(g) + !write(iulog,*) 'WARNING: Global gj index is out of bounds' + return + end if + global_index = (this%gj(g)-1)*nglob_x + this%gi(g) + if ( (global_index < 1) .or. (global_index > nglob_x*nglob_y) ) then + !write(iulog,*) 'WARNING: global_index is out of bounds for this processor' + return + end if + + end function calc_global_index_fromij + + !----------------------------------------------------------------------- + pure subroutine calc_ijindices_from_full_global_index( g, i, j ) + ! Local private subroutine to calculate the full 2D grid i,j indices from the 1D global vector index + ! Make this a pure function so it can be called from endrun + integer, intent(in) :: g ! Input processor global full 2D vector index + integer, intent(out) :: i, j ! 2D indices in x and y on the full global 2D grid (j will be 1 for an unstructured grid) + + i = -1 + j = -1 + if ( (nglob_x < 1) .or. (nglob_y < 1) ) then + return + end if + if ( (g < 1) .or. (g > nglob_x*nglob_y) ) then + ! NOTE: Log output commented out so that the subroutine can be pure + !write(iulog,*) 'g, nglob_x, nglob_y = ', g, nglob_x, nglob_y + !write(iulog,*) 'WARNING: Input index g is out of bounds' + return + end if + j = floor( real(g, r8) / real(nglob_x, r8) ) + 1 + if ( mod(g,nglob_x) == 0 ) j = j - 1 + i = g - (j-1)*nglob_x + if ( (i < 1) .or. (i > nglob_x) ) then + ! NOTE: Log output commented out so that the subroutine can be pure + !write(iulog,*) 'WARNING: Computed global i value out of range' + return + end if + if ( (j < 1) .or. (j > nglob_y) ) then + ! 
NOTE: Log output commented out so that the subroutine can be pure + !write(iulog,*) 'WARNING: Computed global j value out of range' + return + end if + end subroutine calc_ijindices_from_full_global_index + + !----------------------------------------------------------------------- + pure subroutine calc_globalxy_indices( this, g, i, j ) + ! Get the global i/j indices from the global vector grid index + ! Make this a pure function so it can be called from endrun + ! !ARGUMENTS: + class(processor_type), intent(in) :: this + integer, intent(in) :: g ! gridcell index on this processor + integer, intent(out) :: i, j ! 2D indices in x and y on the full global 2D grid (j will be 1 for an unstructured grid) + + integer :: global_index + + i = -1 + j = -1 + if ( .not. associated(this%ggidx) )then + ! NOTE: Log output commented out so that the subroutine can be pure + !write(iulog,*) 'WARNING: ggidx is not allocated yet' + return + end if + if ( (g < this%begg) .or. (g > this%endg) ) then + ! NOTE: Log output commented out so that the subroutine can be pure + !write(iulog,*) 'WARNING: Input index g is out of bounds of this processor' + return + end if + if ( (nglob_x < 1) .or. (nglob_y < 1) ) then + ! NOTE: Log output commented out so that the subroutine can be pure + !write(iulog,*) 'WARNING: Global gridsize nglob_x/nglob_y is not set' + return + end if + global_index = this%ggidx(g) + call calc_ijindices_from_full_global_index( global_index, i, j ) + + end subroutine calc_globalxy_indices + !----------------------------------------------------------------------- pure function get_beg(bounds, subgrid_level) result(beg_index) ! @@ -141,8 +332,6 @@ pure function get_beg(bounds, subgrid_level) result(beg_index) integer, intent(in) :: subgrid_level ! ! 
!LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'get_beg' !----------------------------------------------------------------------- select case (subgrid_level) @@ -182,7 +371,6 @@ pure function get_end(bounds, subgrid_level) result(end_index) integer , intent(in) :: subgrid_level ! ! !LOCAL VARIABLES: - character(len=*), parameter :: subname = 'get_end' !----------------------------------------------------------------------- select case (subgrid_level) @@ -224,8 +412,18 @@ subroutine get_clump_bounds (n, bounds) #ifdef _OPENMP if ( OMP_GET_NUM_THREADS() == 1 .and. OMP_GET_MAX_THREADS() > 1 )then call shr_sys_abort( trim(subname)//' ERROR: Calling from inside a non-threaded region)') + return end if #endif + if ( .not. associated(procinfo%cid) )then + call shr_sys_abort( 'procinfo%cid) is NOT allocated yet', file=sourcefile, line=__LINE__) + return + end if + if ( n < 1 .or. n > procinfo%nclumps )then + write(iulog,*) 'Input clump index out of bounds: n = ', n + call shr_sys_abort( 'Input clump is out of bounds', file=sourcefile, line=__LINE__) + return + end if cid = procinfo%cid(n) bounds%begp = clumps(cid)%begp - procinfo%begp + 1 @@ -236,8 +434,34 @@ subroutine get_clump_bounds (n, bounds) bounds%endl = clumps(cid)%endl - procinfo%begl + 1 bounds%begg = clumps(cid)%begg - procinfo%begg + 1 bounds%endg = clumps(cid)%endg - procinfo%begg + 1 - bounds%begCohort = clumps(cid)%begCohort - procinfo%begCohort + 1 - bounds%endCohort = clumps(cid)%endCohort - procinfo%begCohort + 1 + if ( use_fates )then + bounds%begCohort = clumps(cid)%begCohort - procinfo%begCohort + 1 + bounds%endCohort = clumps(cid)%endCohort - procinfo%begCohort + 1 + end if + + if ( bounds%endp <= 0 )then + call shr_sys_abort( 'bounds%endp is not valid', file=sourcefile, line=__LINE__) + return + end if + if ( bounds%endc <= 0 )then + call shr_sys_abort( 'bounds%endc is not valid', file=sourcefile, line=__LINE__) + return + end if + if ( bounds%endl <= 0 )then + call shr_sys_abort( 
'bounds%endl is not valid', file=sourcefile, line=__LINE__) + return + end if + if ( bounds%endg <= 0 )then + call shr_sys_abort( 'bounds%endg is not valid', file=sourcefile, line=__LINE__) + return + end if + if ( use_fates )then + if ( bounds%endCohort <= 0 )then + write(iulog,*) 'endCohort = ', bounds%endCohort + call shr_sys_abort( 'bounds%endCohort is not valid', file=sourcefile, line=__LINE__) + return + end if + end if bounds%level = bounds_level_clump bounds%clump_index = n @@ -245,13 +469,14 @@ subroutine get_clump_bounds (n, bounds) end subroutine get_clump_bounds !------------------------------------------------------------------------------ - subroutine get_proc_bounds (bounds, allow_call_from_threaded_region) + subroutine get_proc_bounds (bounds, allow_call_from_threaded_region, only_gridcell) ! ! !DESCRIPTION: ! Retrieve processor bounds ! ! !ARGUMENTS: type(bounds_type), intent(out) :: bounds ! processor bounds bounds + logical, intent(in), optional :: only_gridcell ! Only return the gridcell bounds, other subgrid info assumed to not be set yet ! Normally this routine will abort if it is called from within a threaded region, ! because in most cases you should be calling get_clump_bounds in that situation. If @@ -275,6 +500,7 @@ subroutine get_proc_bounds (bounds, allow_call_from_threaded_region) #ifdef _OPENMP if ( OMP_GET_NUM_THREADS() > 1 .and. .not. 
l_allow_call_from_threaded_region )then call shr_sys_abort( trim(subname)//' ERROR: Calling from inside a threaded region') + return end if #endif @@ -286,12 +512,44 @@ subroutine get_proc_bounds (bounds, allow_call_from_threaded_region) bounds%endl = procinfo%endl - procinfo%begl + 1 bounds%begg = 1 bounds%endg = procinfo%endg - procinfo%begg + 1 - bounds%begCohort = 1 - bounds%endCohort = procinfo%endCohort - procinfo%begCohort + 1 + if ( use_fates )then + bounds%begCohort = 1 + bounds%endCohort = procinfo%endCohort - procinfo%begCohort + 1 + end if bounds%level = bounds_level_proc bounds%clump_index = -1 ! irrelevant for proc, so assigned a bogus value + ! Some final error checking + ! Always check that gridcells are set + if ( bounds%endg <= 0 )then + call shr_sys_abort( 'bounds%endg is not valid', file=sourcefile, line=__LINE__) + return + end if + + ! Exit before checking subgrid levels if only_gridcell is requested as these won't be set yet + if ( present(only_gridcell) ) then + if ( only_gridcell ) return + end if + if ( bounds%endp <= 0 )then + call shr_sys_abort( 'bounds%endp is not valid', file=sourcefile, line=__LINE__) + return + end if + if ( bounds%endc <= 0 )then + call shr_sys_abort( 'bounds%endc is not valid', file=sourcefile, line=__LINE__) + return + end if + if ( bounds%endl <= 0 )then + call shr_sys_abort( 'bounds%endl is not valid', file=sourcefile, line=__LINE__) + return + end if + if ( use_fates )then + if ( bounds%endCohort <= 0 )then + call shr_sys_abort( 'bounds%endCohort is not valid', file=sourcefile, line=__LINE__) + return + end if + end if + end subroutine get_proc_bounds !------------------------------------------------------------------------------ @@ -381,7 +639,7 @@ integer function get_global_index(subgrid_index, subgrid_level, donot_abort_on_b integer :: beg_index ! beginning proc index for subgrid_level integer :: end_index ! ending proc index for subgrid_level integer :: index ! 
index of the point to get - integer, pointer :: gindex(:) + integer, pointer :: gindex(:) => null() logical :: abort_on_badindex = .true. !---------------------------------------------------------------- @@ -445,7 +703,7 @@ function get_global_index_array(subgrid_index, bounds1, bounds2, subgrid_level) type(bounds_type) :: bounds_proc ! processor bounds integer :: beg_index ! beginning proc index for subgrid_level integer :: i - integer , pointer :: gindex(:) + integer , pointer :: gindex(:) => null() !---------------------------------------------------------------- SHR_ASSERT_ALL_FL((ubound(subgrid_index) == (/bounds2/)), sourcefile, __LINE__) @@ -479,7 +737,6 @@ function get_subgrid_level_from_name(subgrid_level_name) result(subgrid_level) ! ! !LOCAL VARIABLES: - character(len=*), parameter :: subname = 'get_subgrid_level_from_name' !----------------------------------------------------------------------- select case (subgrid_level_name) @@ -495,9 +752,13 @@ function get_subgrid_level_from_name(subgrid_level_name) result(subgrid_level) subgrid_level = subgrid_level_patch case(nameCohort) subgrid_level = subgrid_level_cohort + if ( .not. use_fates ) then + write(iulog,*) 'FATES is not enabled, so cohort level is not valid' + call shr_sys_abort(file=sourcefile, line=__LINE__ ) + end if case default - write(iulog,*) subname//': unknown subgrid_level_name: ', trim(subgrid_level_name) - call shr_sys_abort() + write(iulog,*) 'unknown subgrid_level_name: ', trim(subgrid_level_name) + call shr_sys_abort(file=sourcefile, line=__LINE__ ) end select end function get_subgrid_level_from_name @@ -529,9 +790,13 @@ integer function get_subgrid_level_gsize (subgrid_level) get_subgrid_level_gsize = nump case(subgrid_level_cohort) get_subgrid_level_gsize = numCohort + if ( .not. 
use_fates ) then + write(iulog,*) 'FATES is not enabled, so cohort level is not valid' + call shr_sys_abort(file=sourcefile, line=__LINE__ ) + end if case default - write(iulog,*) 'get_subgrid_level_gsize: unknown subgrid_level: ', subgrid_level - call shr_sys_abort() + write(iulog,*) 'unknown subgrid_level: ', subgrid_level + call shr_sys_abort(file=sourcefile, line=__LINE__ ) end select end function get_subgrid_level_gsize @@ -547,6 +812,7 @@ subroutine get_subgrid_level_gindex (subgrid_level, gindex) integer , pointer :: gindex(:) !---------------------------------------------------------------------- + gindex => null() ! Make sure gindex is initialized to null select case (subgrid_level) case(subgrid_level_lndgrid) gindex => gindex_global @@ -560,13 +826,79 @@ subroutine get_subgrid_level_gindex (subgrid_level, gindex) gindex => gindex_patch case(subgrid_level_cohort) gindex => gindex_cohort + if ( .not. use_fates ) then + write(iulog,*) 'FATES is not enabled, so cohort level is not valid' + call shr_sys_abort( file=sourcefile, line=__LINE__ ) + end if case default - write(iulog,*) 'get_subgrid_level_gindex: unknown subgrid_level: ', subgrid_level - call shr_sys_abort('bad subgrid_level') + write(iulog,*) 'unknown subgrid_level: ', subgrid_level + call shr_sys_abort('bad subgrid_level', file=sourcefile, line=__LINE__) end select end subroutine get_subgrid_level_gindex + !----------------------------------------------------------------------- + subroutine decompmod_allocate_clumps() + ! Allocate the clumps array based on nclumps + integer :: ier ! error code + + if ( nclumps < 1 )then + call shr_abort_abort(string="nclumps is NOT set before allocation", file=sourcefile, line=__LINE__) + return + end if + ! 
TODO: Allocate only a smaller size (clump_pproc) + allocate(clumps(nclumps), stat=ier) + if (ier /= 0) then + write(iulog,*) 'allocation error for clumps: nclumps, ier=', nclumps, ier + call shr_abort_abort(string='allocation error for clumps', file=sourcefile, line=__LINE__) + return + end if + + end subroutine decompmod_allocate_clumps + + !----------------------------------------------------------------------- + elemental subroutine Init( this ) + ! Initialize a clump_type object to default values + class(clump_type), intent(inout) :: this + + this%owner = -1 + this%ncells = 0 + this%nlunits = 0 + this%ncols = 0 + this%npatches = 0 + this%nCohorts = 0 + this%begg = 1 + this%begl = 1 + this%begc = 1 + this%begp = 1 + this%begCohort = 1 + this%endg = 0 + this%endl = 0 + this%endc = 0 + this%endp = 0 + this%endCohort = 0 + + end subroutine Init + + !----------------------------------------------------------------------- + subroutine decompmod_allocate_gindex( endg ) + ! Allocate the gindex_global array based on input endg + integer, intent(in) :: endg + integer :: ier ! error code + + if ( endg < 1 )then + ier = 1 + call shr_abort_abort(string="endg is NOT set before allocation", file=sourcefile, line=__LINE__) + return + end if + allocate(gindex_global(1:endg), stat=ier) + if (ier /= 0) then + call shr_abort_abort(string="allocation error for gindex_global", file=sourcefile, line=__LINE__) + return + end if + + end subroutine decompmod_allocate_gindex + !----------------------------------------------------------------------- subroutine decompmod_clean() ! 
Deallocate the decompMod long-term variables created in decompInit_lnd @@ -583,6 +915,18 @@ subroutine decompmod_clean() if ( allocated(clumps) )then deallocate(clumps) end if + if ( associated(procinfo%ggidx) )then + deallocate(procinfo%ggidx) + procinfo%ggidx => null() + end if + if ( associated(procinfo%gi) )then + deallocate(procinfo%gi) + procinfo%gi => null() + end if + if ( associated(procinfo%gj) )then + deallocate(procinfo%gj) + procinfo%gj => null() + end if if ( associated(procinfo%cid) )then deallocate(procinfo%cid) procinfo%cid => null() diff --git a/src/main/initSubgridMod.F90 b/src/main/initSubgridMod.F90 index be30dae009..fcff7d202c 100644 --- a/src/main/initSubgridMod.F90 +++ b/src/main/initSubgridMod.F90 @@ -93,6 +93,7 @@ subroutine clm_ptrs_compdown(bounds) if (curc < bounds%begc .or. curc > bounds%endc) then write(iulog,*) 'clm_ptrs_compdown ERROR: pcolumn ',p,curc,bounds%begc,bounds%endc call endrun(subgrid_index=p, subgrid_level=subgrid_level_patch, msg=errMsg(sourcefile, __LINE__)) + return endif col%patchi(curc) = p endif @@ -103,6 +104,7 @@ subroutine clm_ptrs_compdown(bounds) if (curl < bounds%begl .or. curl > bounds%endl) then write(iulog,*) 'clm_ptrs_compdown ERROR: plandunit ',p,curl,bounds%begl,bounds%endl call endrun(subgrid_index=p, subgrid_level=subgrid_level_patch, msg=errMsg(sourcefile, __LINE__)) + return endif lun%patchi(curl) = p endif @@ -117,6 +119,7 @@ subroutine clm_ptrs_compdown(bounds) if (curl < bounds%begl .or. curl > bounds%endl) then write(iulog,*) 'clm_ptrs_compdown ERROR: clandunit ',c,curl,bounds%begl,bounds%endl call endrun(subgrid_index=c, subgrid_level=subgrid_level_column, msg=errMsg(sourcefile, __LINE__)) + return endif lun%coli(curl) = c endif @@ -133,6 +136,7 @@ subroutine clm_ptrs_compdown(bounds) if (curg < bounds%begg .or. 
curg > bounds%endg) then write(iulog,*) 'clm_ptrs_compdown ERROR: landunit_indices ', l,curg,bounds%begg,bounds%endg call endrun(subgrid_index=l, subgrid_level=subgrid_level_landunit, msg=errMsg(sourcefile, __LINE__)) + return end if if (grc%landunit_indices(ltype, curg) == ispval) then @@ -141,6 +145,7 @@ subroutine clm_ptrs_compdown(bounds) write(iulog,*) 'clm_ptrs_compdown ERROR: This landunit type has already been set for this gridcell' write(iulog,*) 'l, ltype, curg = ', l, ltype, curg call endrun(subgrid_index=l, subgrid_level=subgrid_level_landunit, msg=errMsg(sourcefile, __LINE__)) + return end if end do diff --git a/src/main/test/abortutils_test/test_abortutils.pf b/src/main/test/abortutils_test/test_abortutils.pf index bf1f7babd2..6bb555f21b 100644 --- a/src/main/test/abortutils_test/test_abortutils.pf +++ b/src/main/test/abortutils_test/test_abortutils.pf @@ -17,6 +17,9 @@ module test_abortutils procedure :: tearDown end type TestAbortUtils + character(len=CL) :: msg = "test_message" + character(len=CL) :: add_msg = "additional_test_message" + contains ! ======================================================================== @@ -83,7 +86,6 @@ contains subroutine endrun_msg_vanilla_aborts(this) ! Test vanilla operation of endrun with a message sent in class(TestAbortUtils), intent(inout) :: this - character(len=CL) :: msg = "test_message" call endrun( msg = msg) @assertExceptionRaised(endrun_msg(msg)) @@ -94,8 +96,6 @@ contains subroutine endrun_addmsg_vanilla_aborts(this) ! 
Test vanilla operation of endrun with an additional message sent in class(TestAbortUtils), intent(inout) :: this - character(len=CL) :: msg = "test_message" - character(len=CL) :: add_msg = "additional_test_message" call endrun(msg=msg, additional_msg=add_msg) @assertExceptionRaised(endrun_msg(msg)) @@ -109,8 +109,6 @@ contains use decompMod, only : subgrid_level_landunit, subgrid_level_column, subgrid_level_patch use decompMod, only : subgrid_level_cohort class(TestAbortUtils), intent(inout) :: this - character(len=CL) :: msg = "test_message" - character(len=CL) :: add_msg = "additional_test_message" integer :: p = 1, l integer, parameter :: nlevel = 6 integer :: subgrid_lvl(nlevel) = (/ subgrid_level_lndgrid, subgrid_level_gridcell, & @@ -126,9 +124,31 @@ contains end subroutine endrun_addmsg_pt_context_aborts + @Test + subroutine endrun_pt_context_bad_pt_aborts(this) + ! Test pt_context with bad point operation of endrun works + use decompMod, only : subgrid_level_lndgrid, subgrid_level_gridcell + use decompMod, only : subgrid_level_landunit, subgrid_level_column, subgrid_level_patch + use decompMod, only : subgrid_level_cohort + class(TestAbortUtils), intent(inout) :: this + integer :: p = 2, l + integer, parameter :: nlevel = 6 + integer :: subgrid_lvl(nlevel) = (/ subgrid_level_lndgrid, subgrid_level_gridcell, & + subgrid_level_landunit, subgrid_level_column, subgrid_level_patch, & + subgrid_level_cohort /) + + ! Loop over all the subgrid level types + ! Skip the first one and the last one which are: lndgrid and cohort + do l = 2, nlevel-1 + call endrun(subgrid_index=p, subgrid_level=subgrid_lvl(l), msg=msg) + @assertExceptionRaised(endrun_msg(msg)) + end do + + end subroutine endrun_pt_context_bad_pt_aborts + @Test subroutine endrun_nomsg_pt_context_bad_pt_aborts(this) - ! Test pt_context operation of endrun with an additional message sent in + ! 
Test pt_context with bad point operation of endrun works, without a message use decompMod, only : subgrid_level_lndgrid, subgrid_level_gridcell use decompMod, only : subgrid_level_landunit, subgrid_level_column, subgrid_level_patch use decompMod, only : subgrid_level_cohort @@ -152,33 +172,63 @@ contains subroutine endrun_pt_context_lndgrid_aborts(this) use decompMod, only : subgrid_level_lndgrid class(TestAbortUtils), intent(inout) :: this - character(len=CL) :: msg = "test_message" integer :: p = 1 - ! NOTE: Also test without an additional msg call endrun(subgrid_index=p, subgrid_level=subgrid_level_lndgrid, msg=msg) @assertExceptionRaised(endrun_msg(msg)) end subroutine endrun_pt_context_lndgrid_aborts + @Test + subroutine endrun_nomsg_pt_context_lndgrid_aborts(this) + use decompMod, only : subgrid_level_lndgrid + class(TestAbortUtils), intent(inout) :: this + integer :: p = 1 + + call endrun(subgrid_index=p, subgrid_level=subgrid_level_lndgrid) + @assertExceptionRaised(endrun_msg('')) + + end subroutine endrun_nomsg_pt_context_lndgrid_aborts + + @Test + subroutine endrun_pt_context_cohort_aborts(this) + use decompMod, only : subgrid_level_cohort + class(TestAbortUtils), intent(inout) :: this + integer :: p = 1 + + call endrun(subgrid_index=p, subgrid_level=subgrid_level_cohort, msg=msg) + @assertExceptionRaised(endrun_msg(msg)) + + end subroutine endrun_pt_context_cohort_aborts + + @Test subroutine endrun_nomsg_pt_context_cohort_aborts(this) use decompMod, only : subgrid_level_cohort class(TestAbortUtils), intent(inout) :: this integer :: p = 1 - ! 
NOTE: Also test without either msg or additional msg call endrun(subgrid_index=p, subgrid_level=subgrid_level_cohort) @assertExceptionRaised(endrun_msg('')) end subroutine endrun_nomsg_pt_context_cohort_aborts + @Test + subroutine endrun_pt_context_unspec_aborts(this) + use decompMod, only : subgrid_level_unspecified + class(TestAbortUtils), intent(inout) :: this + integer :: p = 1 + + call endrun(subgrid_index=p, subgrid_level=subgrid_level_unspecified, msg=msg) + @assertExceptionRaised(endrun_msg(msg)) + + end subroutine endrun_pt_context_unspec_aborts + @Test subroutine endrun_nomsg_addmsg_pt_context_unspec_aborts(this) use decompMod, only : subgrid_level_unspecified class(TestAbortUtils), intent(inout) :: this integer :: p = 1 - character(len=CL) :: add_msg = "additional_test_message" ! NOTE: Don't use msg but do use additional_msg call endrun(subgrid_index=p, subgrid_level=subgrid_level_unspecified, additional_msg=add_msg) @@ -186,6 +236,17 @@ contains end subroutine endrun_nomsg_addmsg_pt_context_unspec_aborts + @Test + subroutine endrun_pt_context_badlvl_aborts(this) + use decompMod, only : subgrid_level_unspecified + class(TestAbortUtils), intent(inout) :: this + integer :: p = 1 + + call endrun(msg=msg, subgrid_index=p, subgrid_level=-9999) + @assertExceptionRaised(endrun_msg(msg)) + + end subroutine endrun_pt_context_badlvl_aborts + @Test subroutine endrun_nomsg_pt_context_badlvl_aborts(this) use decompMod, only : subgrid_level_unspecified diff --git a/src/main/test/accumul_test/test_accumul.pf b/src/main/test/accumul_test/test_accumul.pf index 423a0aea18..3da3d7b315 100644 --- a/src/main/test/accumul_test/test_accumul.pf +++ b/src/main/test/accumul_test/test_accumul.pf @@ -272,6 +272,7 @@ contains ! Verify expected = sum(values)/accum_period @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine timeavg_basic @Test @@ -296,6 +297,7 @@ contains ! 
Verify expected = sum(values(2:4))/accum_period @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine timeavg_reset1 @Test @@ -320,6 +322,7 @@ contains ! Verify expected = sum(values(3:5))/accum_period @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine timeavg_reset2 @Test @@ -341,6 +344,7 @@ contains ! Verify @assertEqual(spval, val_output) + call unittest_subgrid_teardown() end subroutine timeavg_wrongTime @Test @@ -366,6 +370,7 @@ contains ! Verify expected = sum(values(accum_period+1:2*accum_period))/accum_period @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine timeavg_onlyLatestPeriod @Test @@ -392,6 +397,7 @@ contains ! Verify expected = sum(values(accum_period+1:2*accum_period))/accum_period @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine timeavg_onlyLatestPeriod_redundantReset @Test @@ -422,6 +428,7 @@ contains ! Verify expected = sum(values(5:6))/2._r8 @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine timeavg_newlyActive @Test @@ -449,6 +456,7 @@ contains ! Verify @assertEqual(values(accum_period), val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine timeavg_veryNewlyActive @Test @@ -481,6 +489,7 @@ contains ! Verify expected = (values(1) + values(2) + values(5) + values(6))/4._r8 @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine timeavg_activeInactiveActive @Test @@ -516,6 +525,7 @@ contains expected_lev2 = sum(values_lev2(4:6))/accum_period @assertEqual(expected_lev1, val_output(1), tolerance=tol) @assertEqual(expected_lev2, val_output(2), tolerance=tol) + call unittest_subgrid_teardown() end subroutine timeavg_multiLevel ! ------------------------------------------------------------------------ @@ -542,6 +552,7 @@ contains ! 
Verify @assertEqual(my_value, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runmean_oneStep @Test @@ -567,6 +578,7 @@ contains expected_ts2 = (expected_ts1 + values(2))/2._r8 expected_ts3 = (2._r8 * expected_ts2 + values(3)) / 3._r8 @assertEqual(expected_ts3, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runmean_beforePeriod @Test @@ -594,6 +606,7 @@ contains expected_ts4 = (2._r8 * expected_ts3 + values(4)) / 3._r8 expected_ts5 = (2._r8 * expected_ts4 + values(5)) / 3._r8 @assertEqual(expected_ts5, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runmean_afterPeriod @Test @@ -618,6 +631,7 @@ contains ! Verify expected_ts5 = (values(4) + values(5)) / 2._r8 @assertEqual(expected_ts5, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runmean_afterPeriod_reset @Test @@ -642,6 +656,7 @@ contains ! Verify @assertEqual(values(5), val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runmean_afterPeriod_resetWhileInactive @Test @@ -671,6 +686,7 @@ contains expected_ts6 = (2._r8 * expected_ts5 + values(6)) / 3._r8 expected_ts7 = (2._r8 * expected_ts6 + values(7)) / 3._r8 @assertEqual(expected_ts7, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runmean_newlyActive @Test @@ -703,6 +719,7 @@ contains expected_ts6 = (2._r8 * expected_ts5 + values(6)) / 3._r8 expected_ts7 = (2._r8 * expected_ts6 + values(7)) / 3._r8 @assertEqual(expected_ts7, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runmean_activeInactiveActive ! ------------------------------------------------------------------------ @@ -730,6 +747,7 @@ contains ! Verify expected = sum(values) @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runaccum_basic @Test @@ -754,6 +772,7 @@ contains ! 
Verify expected = sum(values(4:5)) @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runaccum_reset @Test @@ -783,6 +802,7 @@ contains ! Verify expected = sum(values(4:5)) @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runaccum_newlyActive @Test @@ -815,6 +835,7 @@ contains ! Verify expected = values(1) + values(4) + values(5) @assertEqual(expected, val_output, tolerance=tol) + call unittest_subgrid_teardown() end subroutine runaccum_activeInactiveActive ! ------------------------------------------------------------------------ @@ -858,6 +879,7 @@ contains expected2 = sum(values2) @assertEqual(expected1, val_output1, tolerance=tol) @assertEqual(expected2, val_output2, tolerance=tol) + call unittest_subgrid_teardown() end subroutine multipleFields ! ------------------------------------------------------------------------ @@ -896,6 +918,7 @@ contains ! Verify reset_output_int = merge(1, 0, reset_output) @assertEqual(expected, reset_output_int) + call unittest_subgrid_teardown() end subroutine markreset_nopoints_nolevels @Test @@ -931,6 +954,7 @@ contains ! Verify reset_output_int = merge(1, 0, reset_output) @assertEqual(expected, reset_output_int) + call unittest_subgrid_teardown() end subroutine markreset_1point_nolevels @Test @@ -967,7 +991,8 @@ contains ! Verify reset_output_int = merge(1, 0, reset_output) @assertEqual(expected, reset_output_int) -end subroutine markreset_allpoints_1level + call unittest_subgrid_teardown() + end subroutine markreset_allpoints_1level @Test subroutine markreset_allpoints_alllevels(this) @@ -1003,6 +1028,7 @@ subroutine markreset_allpoints_alllevels(this) ! 
Verify reset_output_int = merge(1, 0, reset_output) @assertEqual(expected, reset_output_int) + call unittest_subgrid_teardown() end subroutine markreset_allpoints_alllevels diff --git a/src/main/test/decomp_test/test_decompMod.pf b/src/main/test/decomp_test/test_decompMod.pf index 8112e4c6ce..d5ed396f43 100644 --- a/src/main/test/decomp_test/test_decompMod.pf +++ b/src/main/test/decomp_test/test_decompMod.pf @@ -39,25 +39,11 @@ contains end subroutine tearDown subroutine create_simpleSingleDecomp(this) - use spmdMod, only : iam + use unittestSubgridMod, only : set_decomp_info class(TestDecompMod), intent(inout) :: this - integer :: clump_pproc - ! TODO: When decompMod has it's own allocate method that could be used here - nclumps = 1 - clump_pproc = nclumps - allocate(procinfo%cid(clump_pproc)) - allocate(clumps(nclumps)) - ! Set the procinfo and clumps values - ! TODO: Use initialization method when available (currently in decompInitMod) - procinfo%cid = 1 - procinfo%ncells = ni*nj - procinfo%begg = 1 - procinfo%endg = procinfo%ncells - procinfo%nclumps = nclumps - clumps(:)%owner = iam - clumps(:)%begg = 1 - clumps(:)%endg = procinfo%ncells + call set_decomp_info( ni=ni, nj=nj ) + call decompmod_allocate_gindex( ni*nj ) end subroutine create_simpleSingleDecomp ! ======================================================================== @@ -103,4 +89,101 @@ contains @assertEqual(bounds_proc%endg, bounds_clump%endg) end subroutine test_proc_clump_bounds_equal + @Test + subroutine test_calc_globalxy_indices(this) + class(TestDecompMod), intent(inout) :: this + + type(bounds_type) :: bounds + integer :: g + integer :: i, j + integer :: expect_i(ni*nj), expect_j(ni*nj) + + ! Expected indices is just the pattern on the simple 2D grid with no mask + g = 0 + do j = 1, nj + do i = 1, ni + g = g + 1 + expect_i(g) = i + expect_j(g) = j + end do + end do + call get_proc_bounds(bounds) + ! 
Make sure begg is 1, as that is assumed here + @assertEqual(bounds%begg, 1) + @assertEqual(bounds%endg, ni*nj) + do g = bounds%begg, bounds%endg + write(*,*) 'g = ', g + call procinfo%calc_globalxy_indices( g, i, j ) + @assertEqual(i, expect_i(g) ) + @assertEqual(j, expect_j(g) ) + end do + + end subroutine test_calc_globalxy_indices + + @Test + subroutine test_calc_globalxy_fails(this) + class(TestDecompMod), intent(inout) :: this + + integer :: g + integer :: i, j + + ! IF g is less than begg i and j should be undefined as -1 + g = 0 + call procinfo%calc_globalxy_indices( g, i, j ) + @assertEqual(i, -1) + @assertEqual(j, -1) + ! IF g is greater than endg i and j should be undefined as -1 + g = ni*nj + 1 + call procinfo%calc_globalxy_indices( g, i, j ) + @assertEqual(i, -1) + @assertEqual(j, -1) + ! If nglob_x is not set + nglob_x = -1 + call procinfo%calc_globalxy_indices( g, i, j ) + @assertEqual(i, -1) + @assertEqual(j, -1) + ! If nglob_y is not set + nglob_x = -1 + call procinfo%calc_globalxy_indices( g, i, j ) + @assertEqual(i, -1) + @assertEqual(j, -1) + + end subroutine test_calc_globalxy_fails + + @Test + subroutine test_calc_globalxy_fails_when_not_allocated(this) + class(TestDecompMod), intent(inout) :: this + + integer :: g + integer :: i, j + + call decompmod_clean() + g = 1 + call procinfo%calc_globalxy_indices( g, i, j ) + @assertEqual(i, -1) + @assertEqual(j, -1) + + end subroutine test_calc_globalxy_fails_when_not_allocated + + @Test + subroutine test_calc_ijindices_from_full_global_index_fails(this) + class(TestDecompMod), intent(inout) :: this + + integer :: gidx ! Global idnex + integer :: i, j + + ! IF gidx is less than 1 i and j should be undefined as -1 + gidx = 0 + call calc_ijindices_from_full_global_index( gidx, i, j ) + @assertEqual(i, -1) + @assertEqual(j, -1) + + ! 
IF gidx is greater than grid size i and j should be undefined as -1 + gidx = ni*nj + 1 + call calc_ijindices_from_full_global_index( gidx, i, j ) + @assertEqual(i, -1) + @assertEqual(j, -1) + + end subroutine test_calc_ijindices_from_full_global_index_fails + end module test_decompMod diff --git a/src/unit_test_shr/unittestSubgridMod.F90 b/src/unit_test_shr/unittestSubgridMod.F90 index 531f0b041e..c3531a253f 100644 --- a/src/unit_test_shr/unittestSubgridMod.F90 +++ b/src/unit_test_shr/unittestSubgridMod.F90 @@ -40,7 +40,6 @@ module unittestSubgridMod use shr_kind_mod , only : r8 => shr_kind_r8 use decompMod , only : bounds_type, procinfo, get_proc_bounds - use decompMod , only : gindex_grc, gindex_lun, gindex_col, gindex_patch use GridcellType , only : grc use LandunitType , only : lun use ColumnType , only : col @@ -63,6 +62,7 @@ module unittestSubgridMod public :: unittest_add_column ! add a column public :: unittest_add_patch ! add a patch public :: get_ltype_special ! get a landunit type corresponding to a special landunit + public :: set_decomp_info ! set up decomp info in decompMod ! bounds info, which can be passed to routines that need it ! Note that the end indices here (endg, endl, endc, endp) will be the final indices in @@ -168,25 +168,45 @@ subroutine unittest_subgrid_setup_end end subroutine unittest_subgrid_setup_end !----------------------------------------------------------------------- - subroutine set_decomp_info + subroutine set_decomp_info( ni, nj ) ! ! !DESCRIPTION: ! Set up decomp info in decompMod. ! ! We need to do this (in addition to just making sure that the bounds derived type ! object is set up correctly) for the sake of callers of get_proc_bounds. + ! NOTE: + ! TODO: Use decompMod/decompInitMod initialization methods instead of doing this by hand ! + ! 
USES: + use decompMod , only : gindex_grc, gindex_lun, gindex_col, gindex_patch, nglob_x, nglob_y + use decompMod , only : clumps, nclumps, decompmod_allocate_clumps, decompmod_allocate_gindex + use spmdMod , only : iam + ! !ARGUMENTS: + integer, intent(in), optional :: ni ! number of grid cells in the x direction; + integer, intent(in), optional :: nj ! number of grid cells in the y direction; ! !LOCAL VARIABLES: + integer :: g character(len=*), parameter :: subname = 'set_decomp_info' !----------------------------------------------------------------------- ! For now, not setting up clump info, because it isn't needed in any unit tests. We ! may have to fix this in the future. - procinfo%nclumps = 1 - allocate(procinfo%cid(procinfo%nclumps)) - procinfo%cid(:) = -1 - + nclumps = 1 + procinfo%nclumps = nclumps + call procinfo%InitAllocate( nclumps ) + procinfo%cid(:) = 1 + call decompmod_allocate_clumps() + clumps(:)%owner = iam + + if ( present(ni) .and. present(nj) ) then + gi = ni * nj + ! Assume one landunit per grid cell, one column per landunit and one patch per column + li = gi + ci = li + pi = ci + end if procinfo%begg = begg procinfo%endg = gi procinfo%begl = begl @@ -201,6 +221,17 @@ subroutine set_decomp_info procinfo%ncols = procinfo%endc - procinfo%begc + 1 procinfo%npatches = procinfo%endp - procinfo%begp + 1 + if ( present(ni) .and. present(nj) ) then + nglob_x = ni + nglob_y = nj + else + nglob_x = 1 + nglob_y = procinfo%ncells + end if + + call procinfo%AllocateAfterGCellSet() + call clumps(:)%Init() + ! Currently leaving cohort info unset because it isn't needed in any unit tests. We ! may have to fix this in the future. @@ -216,6 +247,19 @@ subroutine set_decomp_info gindex_col(:) = 0 gindex_patch(:) = 0 + do g = 1, procinfo%endg + procinfo%ggidx(g) = g + end do + ! 
Set clump to procinfo + clumps(1)%begg = procinfo%begg + clumps(1)%endg = procinfo%endg + clumps(1)%begl = procinfo%begl + clumps(1)%endl = procinfo%endl + clumps(1)%begc = procinfo%begc + clumps(1)%endc = procinfo%endc + clumps(1)%begp = procinfo%begp + clumps(1)%endp = procinfo%endp + end subroutine set_decomp_info !----------------------------------------------------------------------- @@ -238,7 +282,7 @@ subroutine create_bounds_object ! object (if other routines want a clump-level bounds). (For the sake of unit ! testing, proc-level and clump-level bounds objects can probably be the same except ! for bounds%level and bounds%clump_index.) - call get_proc_bounds(bounds) + call get_proc_bounds(bounds, only_gridcell=.true.) end subroutine create_bounds_object @@ -283,6 +327,7 @@ subroutine unittest_subgrid_teardown ! Do any teardown needed for the subgrid stuff ! ! !USES: + use decompMod, only: decompMod_clean ! ! !ARGUMENTS: ! @@ -299,6 +344,8 @@ subroutine unittest_subgrid_teardown call reset_nlevsno() + call decompmod_clean() + unittest_subgrid_needs_teardown = .false. end if @@ -357,11 +404,11 @@ subroutine unittest_add_landunit(my_gi, ltype, wtgcell) call add_landunit(li=li, gi=my_gi, ltype=ltype, wtgcell=wtgcell) lun%active(li) = .true. - + end subroutine unittest_add_landunit !----------------------------------------------------------------------- - subroutine unittest_add_column(my_li, ctype, wtlunit) + subroutine unittest_add_column(my_li, ctype, wtlunit, add_simple_patch) ! ! !DESCRIPTION: ! Add a column, and make it active. The index of the just-added column can be obtained @@ -377,11 +424,13 @@ subroutine unittest_add_column(my_li, ctype, wtlunit) ! ! !USES: use initSubgridMod, only : add_column + use pftconMod, only : noveg ! ! !ARGUMENTS: integer , intent(in) :: my_li ! landunit index on which this column should be placed integer , intent(in) :: ctype ! column type real(r8) , intent(in) :: wtlunit ! 
weight of the column relative to the land unit + logical , intent(in), optional :: add_simple_patch ! whether to add a simple baresoil patch under the column ! ! !LOCAL VARIABLES: @@ -390,6 +439,13 @@ subroutine unittest_add_column(my_li, ctype, wtlunit) call add_column(ci=ci, li=my_li, ctype=ctype, wtlunit=wtlunit) col%active(ci) = .true. + + if ( present(add_simple_patch) ) then + if (add_simple_patch) then + ! Add a simple baresoil patch to this column + call unittest_add_patch(my_ci=ci, ptype=noveg, wtcol=1.0_r8) + end if + end if end subroutine unittest_add_column