#:include 'macros.fpp'
!> @brief The module serves as a proxy to the parameters and subroutines
!! available in the MPI implementation's MPI module. Specifically,
!! the purpose of the proxy is to harness basic MPI commands into
!! more complicated procedures so as to accomplish the communication
!! goals for the simulation.
module m_mpi_common
#ifdef MFC_MPI
use mpi !< Message passing interface (MPI) module
#endif
use m_derived_types !< Definitions of the derived types
use m_global_parameters !< Definitions of the global parameters
use m_helper
use ieee_arithmetic
use m_nvtx
implicit none
integer, private :: v_size !<
!! Number of variables packed per cell in the halo-exchange buffers
$:GPU_DECLARE(create='[v_size]')
real(wp), private, allocatable, dimension(:) :: buff_send !<
!! This variable is utilized to pack and send the buffer of the cell-average
!! primitive variables, for a single computational domain boundary at the
!! time, to the relevant neighboring processor.
real(wp), private, allocatable, dimension(:) :: buff_recv !<
!! buff_recv is utilized to receive and unpack the buffer of the cell-
!! average primitive variables, for a single computational domain boundary
!! at the time, from the relevant neighboring processor.
#ifndef __NVCOMPILER_GPU_UNIFIED_MEM
$:GPU_DECLARE(create='[buff_send, buff_recv]')
#endif
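!> Upper index used to size the halo-exchange buffers; buff_send and
!! buff_recv are allocated as (0:halo_size)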
integer :: halo_size
$:GPU_DECLARE(create='[halo_size]')
contains
!> The computation of parameters, the allocation of memory,
!! the association of pointers and/or the execution of any
!! other procedures that are necessary to setup the module.
impure subroutine s_initialize_mpi_common_module
#ifdef MFC_MPI
! Allocating buff_send/recv. Please note that for the sake of
! simplicity, both variables are provided sufficient storage to hold
! the largest buffer in the computational domain.
if (qbmm .and. .not. polytropic) then
v_size = sys_size + 2*nb*4
else
v_size = sys_size
end if
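! Upper index of the exchange buffers, sized so that the largest
! single-direction halo of v_size variables per cell fits in 1D, 2D, or 3D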
if (n > 0) then
if (p > 0) then
halo_size = nint(-1._wp + 1._wp*buff_size*(v_size)* &
& (m + 2*buff_size + 1)* &
& (n + 2*buff_size + 1)* &
& (p + 2*buff_size + 1)/ &
& (cells_bounds%mnp_min + 2*buff_size + 1))
else
halo_size = -1 + buff_size*(v_size)* &
& (cells_bounds%mn_max + 2*buff_size + 1)
end if
else
halo_size = -1 + buff_size*(v_size)
end if
$:GPU_UPDATE(device='[halo_size, v_size]')
#ifndef __NVCOMPILER_GPU_UNIFIED_MEM
@:ALLOCATE(buff_send(0:halo_size), buff_recv(0:halo_size))
#else
allocate (buff_send(0:halo_size), buff_recv(0:halo_size))
$:GPU_ENTER_DATA(create='[capture:buff_send]')
$:GPU_ENTER_DATA(create='[capture:buff_recv]')
#endif
#endif
end subroutine s_initialize_mpi_common_module
!> The subroutine initializes the MPI execution environment
!! and queries both the number of processors which will be
!! available for the job and the local processor rank.
impure subroutine s_mpi_initialize
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
#endif
#ifndef MFC_MPI
! Serial run only has 1 processor
num_procs = 1
! Local processor rank is 0
proc_rank = 0
#else
! Initializing the MPI environment
call MPI_INIT(ierr)
! Checking whether the MPI environment has been properly initialized
if (ierr /= MPI_SUCCESS) then
print '(A)', 'Unable to initialize MPI environment. Exiting.'
call MPI_ABORT(MPI_COMM_WORLD, 1, ierr)
end if
! Querying the number of processors available for the job
call MPI_COMM_SIZE(MPI_COMM_WORLD, num_procs, ierr)
! Querying the rank of the local processor
call MPI_COMM_RANK(MPI_COMM_WORLD, proc_rank, ierr)
#endif
end subroutine s_mpi_initialize
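!> Associates the MPI-IO data structures with the conservative variables
!! and, when present, the immersed-boundary, level-set, and lagrangian
!! bubble fields, and creates and commits the MPI subarray views used
!! for parallel file I/O.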
!! @param q_cons_vf Conservative variables
!! @param ib_markers track if a cell is within the immersed boundary
!! @param levelset closest distance from every cell to the IB
!! @param levelset_norm normalized vector from every cell to the closest point to the IB
!! @param beta Eulerian void fraction from lagrangian bubbles
impure subroutine s_initialize_mpi_data(q_cons_vf, ib_markers, levelset, levelset_norm, beta)
type(scalar_field), dimension(sys_size), intent(in) :: q_cons_vf
type(integer_field), optional, intent(in) :: ib_markers
type(levelset_field), optional, intent(IN) :: levelset
type(levelset_norm_field), optional, intent(IN) :: levelset_norm
type(scalar_field), intent(in), optional :: beta
integer, dimension(num_dims) :: sizes_glb, sizes_loc
#ifndef MFC_POST_PROCESS
integer, dimension(1) :: airfoil_glb, airfoil_loc, airfoil_start
#endif
#ifdef MFC_MPI
! Generic loop iterator
integer :: i
#ifndef MFC_POST_PROCESS
integer :: j
#endif
integer :: ierr !< Generic flag used to identify and report MPI errors
! Altered system size for the lagrangian subgrid bubble model
integer :: alt_sys
if (present(beta)) then
alt_sys = sys_size + 1
else
alt_sys = sys_size
end if
do i = 1, sys_size
MPI_IO_DATA%var(i)%sf => q_cons_vf(i)%sf(0:m, 0:n, 0:p)
end do
if (present(beta)) then
MPI_IO_DATA%var(alt_sys)%sf => beta%sf(0:m, 0:n, 0:p)
end if
! Additional variables pb and mv for non-polytropic qbmm
#ifdef MFC_PRE_PROCESS
if (qbmm .and. .not. polytropic) then
do i = 1, nb
do j = 1, nnode
MPI_IO_DATA%var(sys_size + (i - 1)*nnode + j)%sf => pb%sf(0:m, 0:n, 0:p, j, i)
MPI_IO_DATA%var(sys_size + (i - 1)*nnode + j + nb*nnode)%sf => mv%sf(0:m, 0:n, 0:p, j, i)
end do
end do
end if
#endif
#ifdef MFC_SIMULATION
if (qbmm .and. .not. polytropic) then
do i = 1, nb
do j = 1, nnode
MPI_IO_DATA%var(sys_size + (i - 1)*nnode + j)%sf => pb_ts(1)%sf(0:m, 0:n, 0:p, j, i)
MPI_IO_DATA%var(sys_size + (i - 1)*nnode + j + nb*nnode)%sf => mv_ts(1)%sf(0:m, 0:n, 0:p, j, i)
end do
end do
end if
#endif
! Define global(g) and local(l) sizes for flow variables
sizes_glb(1) = m_glb + 1; sizes_loc(1) = m + 1
if (n > 0) then
sizes_glb(2) = n_glb + 1; sizes_loc(2) = n + 1
if (p > 0) then
sizes_glb(3) = p_glb + 1; sizes_loc(3) = p + 1
end if
end if
! Define the view for each variable
do i = 1, alt_sys
call MPI_TYPE_CREATE_SUBARRAY(num_dims, sizes_glb, sizes_loc, start_idx, &
MPI_ORDER_FORTRAN, mpi_p, MPI_IO_DATA%view(i), ierr)
call MPI_TYPE_COMMIT(MPI_IO_DATA%view(i), ierr)
end do
#ifndef MFC_POST_PROCESS
if (qbmm .and. .not. polytropic) then
do i = sys_size + 1, sys_size + 2*nb*4
call MPI_TYPE_CREATE_SUBARRAY(num_dims, sizes_glb, sizes_loc, start_idx, &
MPI_ORDER_FORTRAN, mpi_p, MPI_IO_DATA%view(i), ierr)
call MPI_TYPE_COMMIT(MPI_IO_DATA%view(i), ierr)
end do
end if
#endif
if (present(ib_markers)) then
#ifdef MFC_PRE_PROCESS
MPI_IO_IB_DATA%var%sf => ib_markers%sf
MPI_IO_levelset_DATA%var%sf => levelset%sf
MPI_IO_levelsetnorm_DATA%var%sf => levelset_norm%sf
#else
MPI_IO_IB_DATA%var%sf => ib_markers%sf(0:m, 0:n, 0:p)
#ifndef MFC_POST_PROCESS
MPI_IO_levelset_DATA%var%sf => levelset%sf(0:m, 0:n, 0:p, 1:num_ibs)
MPI_IO_levelsetnorm_DATA%var%sf => levelset_norm%sf(0:m, 0:n, 0:p, 1:num_ibs, 1:3)
#endif
#endif
call MPI_TYPE_CREATE_SUBARRAY(num_dims, sizes_glb, sizes_loc, start_idx, &
MPI_ORDER_FORTRAN, MPI_INTEGER, MPI_IO_IB_DATA%view, ierr)
call MPI_TYPE_COMMIT(MPI_IO_IB_DATA%view, ierr)
#ifndef MFC_POST_PROCESS
call MPI_TYPE_CREATE_SUBARRAY(num_dims, sizes_glb, sizes_loc, start_idx, &
MPI_ORDER_FORTRAN, mpi_p, MPI_IO_levelset_DATA%view, ierr)
call MPI_TYPE_CREATE_SUBARRAY(num_dims, sizes_glb, sizes_loc, start_idx, &
MPI_ORDER_FORTRAN, mpi_p, MPI_IO_levelsetnorm_DATA%view, ierr)
call MPI_TYPE_COMMIT(MPI_IO_levelset_DATA%view, ierr)
call MPI_TYPE_COMMIT(MPI_IO_levelsetnorm_DATA%view, ierr)
#endif
end if
#ifndef MFC_POST_PROCESS
if (present(ib_markers)) then
do j = 1, num_ibs
if (patch_ib(j)%c > 0) then
#ifdef MFC_PRE_PROCESS
allocate (MPI_IO_airfoil_IB_DATA%var(1:2*Np))
#endif
airfoil_glb(1) = 3*Np*num_procs
airfoil_loc(1) = 3*Np
airfoil_start(1) = 3*proc_rank*Np
#ifdef MFC_PRE_PROCESS
do i = 1, Np
MPI_IO_airfoil_IB_DATA%var(i)%x = airfoil_grid_l(i)%x
MPI_IO_airfoil_IB_DATA%var(i)%y = airfoil_grid_l(i)%y
end do
#endif
call MPI_TYPE_CREATE_SUBARRAY(1, airfoil_glb, airfoil_loc, airfoil_start, &
MPI_ORDER_FORTRAN, mpi_p, MPI_IO_airfoil_IB_DATA%view(1), ierr)
call MPI_TYPE_COMMIT(MPI_IO_airfoil_IB_DATA%view(1), ierr)
#ifdef MFC_PRE_PROCESS
do i = 1, Np
MPI_IO_airfoil_IB_DATA%var(Np + i)%x = airfoil_grid_u(i)%x
MPI_IO_airfoil_IB_DATA%var(Np + i)%y = airfoil_grid_u(i)%y
end do
#endif
call MPI_TYPE_CREATE_SUBARRAY(1, airfoil_glb, airfoil_loc, airfoil_start, &
MPI_ORDER_FORTRAN, mpi_p, MPI_IO_airfoil_IB_DATA%view(2), ierr)
call MPI_TYPE_COMMIT(MPI_IO_airfoil_IB_DATA%view(2), ierr)
end if
end do
end if
#endif
#endif
end subroutine s_initialize_mpi_data
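!> Sets up the MPI-IO subarray views for the down-sampled output data.
!! Outside of post-processing, the local extents are reduced by a factor
!! of three in each active direction before the views are created and
!! committed.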
!! @param q_cons_vf Conservative variables
subroutine s_initialize_mpi_data_ds(q_cons_vf)
type(scalar_field), &
dimension(sys_size), &
intent(in) :: q_cons_vf
integer, dimension(num_dims) :: sizes_glb, sizes_loc
integer, dimension(3) :: sf_start_idx
#ifdef MFC_MPI
! Generic loop iterator
integer :: i, j, q, k, l, m_ds, n_ds, p_ds, ierr
sf_start_idx = (/0, 0, 0/)
#ifndef MFC_POST_PROCESS
m_ds = int((m + 1)/3) - 1
n_ds = int((n + 1)/3) - 1
p_ds = int((p + 1)/3) - 1
#else
m_ds = m
n_ds = n
p_ds = p
#endif
#ifdef MFC_POST_PROCESS
do i = 1, sys_size
MPI_IO_DATA%var(i)%sf => q_cons_vf(i)%sf(-1:m_ds + 1, -1:n_ds + 1, -1:p_ds + 1)
end do
#endif
! Define global(g) and local(l) sizes for flow variables
sizes_loc(1) = m_ds + 3
if (n > 0) then
sizes_loc(2) = n_ds + 3
if (p > 0) then
sizes_loc(3) = p_ds + 3
end if
end if
! Define the view for each variable
do i = 1, sys_size
call MPI_TYPE_CREATE_SUBARRAY(num_dims, sizes_loc, sizes_loc, sf_start_idx, &
MPI_ORDER_FORTRAN, mpi_p, MPI_IO_DATA%view(i), ierr)
call MPI_TYPE_COMMIT(MPI_IO_DATA%view(i), ierr)
end do
#endif
end subroutine s_initialize_mpi_data_ds
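!> Gathers variable-length vectors from all processes onto the root
!! process with MPI_GATHERV; the receive counts and displacements are
!! first assembled from the per-process counts via MPI_GATHER.
!! @param my_vector Local vector contributed by this process
!! @param counts Length of the local vector
!! @param gathered_vector Concatenated vector (meaningful on the root process)
!! @param root Rank of the root process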
impure subroutine s_mpi_gather_data(my_vector, counts, gathered_vector, root)
integer, intent(in) :: counts ! Length of the local vector on this process
real(wp), intent(in), dimension(counts) :: my_vector ! Input vector on each process
integer, intent(in) :: root ! Rank of the root process
real(wp), allocatable, intent(out) :: gathered_vector(:) ! Gathered vector on the root process
integer :: i
integer :: ierr !< Generic flag used to identify and report MPI errors
integer, allocatable :: recounts(:), displs(:)
#ifdef MFC_MPI
allocate (recounts(num_procs))
call MPI_GATHER(counts, 1, MPI_INTEGER, recounts, 1, MPI_INTEGER, root, &
MPI_COMM_WORLD, ierr)
allocate (displs(size(recounts)))
displs(1) = 0
do i = 2, size(recounts)
displs(i) = displs(i - 1) + recounts(i - 1)
end do
allocate (gathered_vector(sum(recounts)))
call MPI_GATHERV(my_vector, counts, mpi_p, gathered_vector, recounts, displs, mpi_p, &
root, MPI_COMM_WORLD, ierr)
#endif
end subroutine s_mpi_gather_data
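!> Gathers the local time-step timing average (time_avg) from every
!! process into proc_time on the rank-0 process.
!! @param proc_time Per-process timings, populated on rank 0
!! @param time_avg Local average time per time step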
impure subroutine mpi_bcast_time_step_values(proc_time, time_avg)
real(wp), dimension(0:num_procs - 1), intent(inout) :: proc_time
real(wp), intent(inout) :: time_avg
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
call MPI_GATHER(time_avg, 1, mpi_p, proc_time(0), 1, mpi_p, 0, MPI_COMM_WORLD, ierr)
#endif
end subroutine mpi_bcast_time_step_values
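!> Reports a prohibited case-file condition, prints the accompanying
!! note when one is provided, and aborts the run with CASE_FILE_ERROR_CODE.
!! @param condition The prohibited condition that was triggered
!! @param message Additional note, printed when non-empty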
impure subroutine s_prohibit_abort(condition, message)
character(len=*), intent(in) :: condition, message
print *, ""
print *, "CASE FILE ERROR"
print *, " - Prohibited condition: ", trim(condition)
if (len_trim(message) > 0) then
print *, " - Note: ", trim(message)
end if
print *, ""
call s_mpi_abort(code=CASE_FILE_ERROR_CODE)
end subroutine s_prohibit_abort
!> The goal of this subroutine is to determine the global
!! extrema of the stability criteria in the computational
!! domain. This is performed by sifting through the local
!! extrema of each stability criterion. Note that each of
!! the local extrema is from a single process, within its
!! assigned section of the computational domain. Finally,
!! note that the global extrema values are only bookkept
!! on the rank 0 processor.
!! @param icfl_max_loc Local maximum ICFL stability criterion
!! @param vcfl_max_loc Local maximum VCFL stability criterion
!! @param Rc_min_loc Local minimum Rc stability criterion
!! @param icfl_max_glb Global maximum ICFL stability criterion
!! @param vcfl_max_glb Global maximum VCFL stability criterion
!! @param Rc_min_glb Global minimum Rc stability criterion
impure subroutine s_mpi_reduce_stability_criteria_extrema(icfl_max_loc, &
vcfl_max_loc, &
Rc_min_loc, &
icfl_max_glb, &
vcfl_max_glb, &
Rc_min_glb)
real(wp), intent(in) :: icfl_max_loc
real(wp), intent(in) :: vcfl_max_loc
real(wp), intent(in) :: Rc_min_loc
real(wp), intent(out) :: icfl_max_glb
real(wp), intent(out) :: vcfl_max_glb
real(wp), intent(out) :: Rc_min_glb
#ifdef MFC_SIMULATION
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
#endif
#endif
! Initiate the global variables to the local values
icfl_max_glb = icfl_max_loc
vcfl_max_glb = vcfl_max_loc
Rc_min_glb = Rc_min_loc
#ifdef MFC_SIMULATION
#ifdef MFC_MPI
! Reducing local extrema of ICFL, VCFL, CCFL and Rc numbers to their
! global extrema and bookkeeping the results on the rank 0 processor
call MPI_REDUCE(icfl_max_loc, icfl_max_glb, 1, &
mpi_p, MPI_MAX, 0, &
MPI_COMM_WORLD, ierr)
if (viscous) then
call MPI_REDUCE(vcfl_max_loc, vcfl_max_glb, 1, &
mpi_p, MPI_MAX, 0, &
MPI_COMM_WORLD, ierr)
call MPI_REDUCE(Rc_min_loc, Rc_min_glb, 1, &
mpi_p, MPI_MIN, 0, &
MPI_COMM_WORLD, ierr)
end if
#else
icfl_max_glb = icfl_max_loc
if (viscous) then
vcfl_max_glb = vcfl_max_loc
Rc_min_glb = Rc_min_loc
end if
#endif
#endif
end subroutine s_mpi_reduce_stability_criteria_extrema
!> The following subroutine takes the input local variable
!! from all processors and reduces it to the sum of all
!! values. The reduced value is returned in var_glb on
!! every processor.
!! @param var_loc Some variable containing the local value which should be
!! reduced amongst all the processors in the communicator.
!! @param var_glb The globally reduced value
impure subroutine s_mpi_allreduce_sum(var_loc, var_glb)
real(wp), intent(in) :: var_loc
real(wp), intent(out) :: var_glb
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
! Performing the reduction procedure
call MPI_ALLREDUCE(var_loc, var_glb, 1, mpi_p, &
MPI_SUM, MPI_COMM_WORLD, ierr)
#endif
end subroutine s_mpi_allreduce_sum
!> The following subroutine takes the input local variable
!! from all processors and reduces it to the minimum of all
!! values. The reduced value is returned in var_glb on
!! every processor.
!! @param var_loc Some variable containing the local value which should be
!! reduced amongst all the processors in the communicator.
!! @param var_glb The globally reduced value
impure subroutine s_mpi_allreduce_min(var_loc, var_glb)
real(wp), intent(in) :: var_loc
real(wp), intent(out) :: var_glb
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
! Performing the reduction procedure
call MPI_ALLREDUCE(var_loc, var_glb, 1, mpi_p, &
MPI_MIN, MPI_COMM_WORLD, ierr)
#endif
end subroutine s_mpi_allreduce_min
!> The following subroutine takes the input local variable
!! from all processors and reduces it to the maximum of all
!! values. The reduced value is returned in var_glb on
!! every processor.
!! @param var_loc Some variable containing the local value which should be
!! reduced amongst all the processors in the communicator.
!! @param var_glb The globally reduced value
impure subroutine s_mpi_allreduce_max(var_loc, var_glb)
real(wp), intent(in) :: var_loc
real(wp), intent(out) :: var_glb
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
! Performing the reduction procedure
call MPI_ALLREDUCE(var_loc, var_glb, 1, mpi_p, &
MPI_MAX, MPI_COMM_WORLD, ierr)
#endif
end subroutine s_mpi_allreduce_max
!> The following subroutine takes the inputted variable and
!! determines its minimum value over the entire computational
!! domain. The result is stored back into the inputted variable.
!! @param var_loc Holds the local value to be reduced among
!! all the processors in the communicator. On output, the variable holds
!! the minimum value, reduced amongst all of the local values.
impure subroutine s_mpi_reduce_min(var_loc)
real(wp), intent(inout) :: var_loc
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
! Temporary storage variable that holds the reduced minimum value
real(wp) :: var_glb
! Performing reduction procedure and eventually storing its result
! into the variable that was initially inputted into the subroutine
call MPI_REDUCE(var_loc, var_glb, 1, mpi_p, &
MPI_MIN, 0, MPI_COMM_WORLD, ierr)
call MPI_BCAST(var_glb, 1, mpi_p, &
0, MPI_COMM_WORLD, ierr)
var_loc = var_glb
#endif
end subroutine s_mpi_reduce_min
!> The following subroutine takes the first element of the
!! 2-element inputted variable and determines its maximum
!! value on the entire computational domain. The result is
!! stored back into the first element of the variable while
!! the rank of the processor that is in charge of the sub-
!! domain containing the maximum is stored into the second
!! element of the variable.
!! @param var_loc On input, this variable holds the local value and processor rank,
!! which are to be reduced among all the processors in the communicator.
!! On output, this variable holds the maximum value, reduced amongst
!! all of the local values, and the process rank to which the value
!! belongs.
impure subroutine s_mpi_reduce_maxloc(var_loc)
real(wp), dimension(2), intent(inout) :: var_loc
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
real(wp), dimension(2) :: var_glb !<
!! Temporary storage variable that holds the reduced maximum value
!! and the rank of the processor with which the value is associated
! Performing reduction procedure and eventually storing its result
! into the variable that was initially inputted into the subroutine
call MPI_REDUCE(var_loc, var_glb, 1, mpi_2p, &
MPI_MAXLOC, 0, MPI_COMM_WORLD, ierr)
call MPI_BCAST(var_glb, 1, mpi_2p, &
0, MPI_COMM_WORLD, ierr)
var_loc = var_glb
#endif
end subroutine s_mpi_reduce_maxloc
!> The subroutine terminates the MPI execution environment.
!! @param prnt Error message to be printed
!! @param code Optional exit/error code passed to MPI_ABORT (or stop)
impure subroutine s_mpi_abort(prnt, code)
character(len=*), intent(in), optional :: prnt
integer, intent(in), optional :: code
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
#endif
if (present(prnt)) then
print *, prnt
call flush (6)
end if
#ifndef MFC_MPI
if (present(code)) then
stop code
else
stop 1
end if
#else
! Terminating the MPI environment
if (present(code)) then
call MPI_ABORT(MPI_COMM_WORLD, code, ierr)
else
call MPI_ABORT(MPI_COMM_WORLD, 1, ierr)
end if
#endif
end subroutine s_mpi_abort
!> Halts all processes until all have reached the barrier.
impure subroutine s_mpi_barrier
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
! Calling MPI_BARRIER
call MPI_BARRIER(MPI_COMM_WORLD, ierr)
#endif
end subroutine s_mpi_barrier
!> The subroutine finalizes the MPI execution environment.
impure subroutine s_mpi_finalize
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
! Finalizing the MPI environment
call MPI_FINALIZE(ierr)
#endif
end subroutine s_mpi_finalize
!> The goal of this procedure is to populate the buffers of
!! the cell-average conservative variables by communicating
!! with the neighboring processors.
!! @param q_comm Cell-average variables to be communicated
!! @param mpi_dir MPI communication coordinate direction
!! @param pbc_loc Processor boundary condition (PBC) location
!! @param nVar Number of variables in q_comm to be communicated
!! @param pb_in QBMM bubble pressure field, communicated only in the non-polytropic case
!! @param mv_in QBMM bubble vapor mass field, communicated only in the non-polytropic case
subroutine s_mpi_sendrecv_variables_buffers(q_comm, &
mpi_dir, &
pbc_loc, &
nVar, &
pb_in, mv_in)
type(scalar_field), dimension(1:), intent(inout) :: q_comm
real(wp), optional, dimension(idwbuff(1)%beg:, idwbuff(2)%beg:, idwbuff(3)%beg:, 1:, 1:), intent(inout) :: pb_in, mv_in
integer, intent(in) :: mpi_dir, pbc_loc, nVar
integer :: i, j, k, l, r, q !< Generic loop iterators
integer :: buffer_counts(1:3), buffer_count
type(int_bounds_info) :: boundary_conditions(1:3)
integer :: beg_end(1:2), grid_dims(1:3)
integer :: dst_proc, src_proc, recv_tag, send_tag
logical :: beg_end_geq_0, qbmm_comm
integer :: pack_offset, unpack_offset
#ifdef MFC_MPI
integer :: ierr !< Generic flag used to identify and report MPI errors
call nvtxStartRange("RHS-COMM-PACKBUF")
qbmm_comm = .false.
if (present(pb_in) .and. present(mv_in) .and. qbmm .and. .not. polytropic) then
qbmm_comm = .true.
v_size = nVar + 2*nb*4
buffer_counts = (/ &
buff_size*v_size*(n + 1)*(p + 1), &
buff_size*v_size*(m + 2*buff_size + 1)*(p + 1), &
buff_size*v_size*(m + 2*buff_size + 1)*(n + 2*buff_size + 1) &
/)
else
v_size = nVar
buffer_counts = (/ &
buff_size*v_size*(n + 1)*(p + 1), &
buff_size*v_size*(m + 2*buff_size + 1)*(p + 1), &
buff_size*v_size*(m + 2*buff_size + 1)*(n + 2*buff_size + 1) &
/)
end if
$:GPU_UPDATE(device='[v_size]')
buffer_count = buffer_counts(mpi_dir)
boundary_conditions = (/bc_x, bc_y, bc_z/)
beg_end = (/boundary_conditions(mpi_dir)%beg, boundary_conditions(mpi_dir)%end/)
beg_end_geq_0 = beg_end(max(pbc_loc, 0) - pbc_loc + 1) >= 0
! Implements:
! pbc_loc bc_x >= 0 -> [send/recv]_tag [dst/src]_proc
! -1 (=0) 0 -> [1,0] [0,0] | 0 0 [1,0] [beg,beg]
! -1 (=0) 1 -> [0,0] [1,0] | 0 1 [0,0] [end,beg]
! +1 (=1) 0 -> [0,1] [1,1] | 1 0 [0,1] [end,end]
! +1 (=1) 1 -> [1,1] [0,1] | 1 1 [1,1] [beg,end]
send_tag = f_logical_to_int(.not. f_xor(beg_end_geq_0, pbc_loc == 1))
recv_tag = f_logical_to_int(pbc_loc == 1)
dst_proc = beg_end(1 + f_logical_to_int(f_xor(pbc_loc == 1, beg_end_geq_0)))
src_proc = beg_end(1 + f_logical_to_int(pbc_loc == 1))
grid_dims = (/m, n, p/)
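! pack_offset selects whether cells adjacent to the beginning or the end
! of the local domain are packed for sending, and unpack_offset places the
! received data in the ghost layer at the corresponding boundary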
pack_offset = 0
if (f_xor(pbc_loc == 1, beg_end_geq_0)) then
pack_offset = grid_dims(mpi_dir) - buff_size + 1
end if
unpack_offset = 0
if (pbc_loc == 1) then
unpack_offset = grid_dims(mpi_dir) + buff_size + 1
end if
! Pack Buffer to Send
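! The send buffer is packed in a flat, zero-based layout in which the
! variable index varies fastest, followed by the cell indices of the halo
! layer being exchanged (see the index r computed in each loop below)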
#:for mpi_dir in [1, 2, 3]
if (mpi_dir == ${mpi_dir}$) then
#:if mpi_dir == 1
$:GPU_PARALLEL_LOOP(collapse=4,private='[r]')
do l = 0, p
do k = 0, n
do j = 0, buff_size - 1
do i = 1, nVar
r = (i - 1) + v_size*(j + buff_size*(k + (n + 1)*l))
buff_send(r) = q_comm(i)%sf(j + pack_offset, k, l)
end do
end do
end do
end do
if (qbmm_comm) then
$:GPU_PARALLEL_LOOP(collapse=4,private='[r]')
do l = 0, p
do k = 0, n
do j = 0, buff_size - 1
do i = nVar + 1, nVar + 4
do q = 1, nb
r = (i - 1) + (q - 1)*4 + v_size* &
(j + buff_size*(k + (n + 1)*l))
buff_send(r) = pb_in(j + pack_offset, k, l, i - nVar, q)
end do
end do
end do
end do
end do
$:GPU_PARALLEL_LOOP(collapse=5,private='[r]')
do l = 0, p
do k = 0, n
do j = 0, buff_size - 1
do i = nVar + 1, nVar + 4
do q = 1, nb
r = (i - 1) + (q - 1)*4 + nb*4 + v_size* &
(j + buff_size*(k + (n + 1)*l))
buff_send(r) = mv_in(j + pack_offset, k, l, i - nVar, q)
end do
end do
end do
end do
end do
end if
#:elif mpi_dir == 2
$:GPU_PARALLEL_LOOP(collapse=4,private='[r]')
do i = 1, nVar
do l = 0, p
do k = 0, buff_size - 1
do j = -buff_size, m + buff_size
r = (i - 1) + v_size* &
((j + buff_size) + (m + 2*buff_size + 1)* &
(k + buff_size*l))
buff_send(r) = q_comm(i)%sf(j, k + pack_offset, l)
end do
end do
end do
end do
if (qbmm_comm) then
$:GPU_PARALLEL_LOOP(collapse=5,private='[r]')
do i = nVar + 1, nVar + 4
do l = 0, p
do k = 0, buff_size - 1
do j = -buff_size, m + buff_size
do q = 1, nb
r = (i - 1) + (q - 1)*4 + v_size* &
((j + buff_size) + (m + 2*buff_size + 1)* &
(k + buff_size*l))
buff_send(r) = pb_in(j, k + pack_offset, l, i - nVar, q)
end do
end do
end do
end do
end do
$:GPU_PARALLEL_LOOP(collapse=5,private='[r]')
do i = nVar + 1, nVar + 4
do l = 0, p
do k = 0, buff_size - 1
do j = -buff_size, m + buff_size
do q = 1, nb
r = (i - 1) + (q - 1)*4 + nb*4 + v_size* &
((j + buff_size) + (m + 2*buff_size + 1)* &
(k + buff_size*l))
buff_send(r) = mv_in(j, k + pack_offset, l, i - nVar, q)
end do
end do
end do
end do
end do
end if
#:else
$:GPU_PARALLEL_LOOP(collapse=4,private='[r]')
do i = 1, nVar
do l = 0, buff_size - 1
do k = -buff_size, n + buff_size
do j = -buff_size, m + buff_size
r = (i - 1) + v_size* &
((j + buff_size) + (m + 2*buff_size + 1)* &
((k + buff_size) + (n + 2*buff_size + 1)*l))
buff_send(r) = q_comm(i)%sf(j, k, l + pack_offset)
end do
end do
end do
end do
if (qbmm_comm) then
$:GPU_PARALLEL_LOOP(collapse=5,private='[r]')
do i = nVar + 1, nVar + 4
do l = 0, buff_size - 1
do k = -buff_size, n + buff_size
do j = -buff_size, m + buff_size
do q = 1, nb
r = (i - 1) + (q - 1)*4 + v_size* &
((j + buff_size) + (m + 2*buff_size + 1)* &
((k + buff_size) + (n + 2*buff_size + 1)*l))
buff_send(r) = pb_in(j, k, l + pack_offset, i - nVar, q)
end do
end do
end do
end do
end do
$:GPU_PARALLEL_LOOP(collapse=5,private='[r]')
do i = nVar + 1, nVar + 4
do l = 0, buff_size - 1
do k = -buff_size, n + buff_size
do j = -buff_size, m + buff_size
do q = 1, nb
r = (i - 1) + (q - 1)*4 + nb*4 + v_size* &
((j + buff_size) + (m + 2*buff_size + 1)* &
((k + buff_size) + (n + 2*buff_size + 1)*l))
buff_send(r) = mv_in(j, k, l + pack_offset, i - nVar, q)
end do
end do
end do
end do
end do
end if
#:endif
end if
#:endfor
call nvtxEndRange ! Packbuf
! Send/Recv
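! With GPU-aware (RDMA) MPI, the device buffers are passed directly to
! MPI_SENDRECV; otherwise buff_send is first copied to the host and
! buff_recv is copied back to the device after the exchange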
#ifdef MFC_SIMULATION
#:for rdma_mpi in [False, True]
if (rdma_mpi .eqv. ${'.true.' if rdma_mpi else '.false.'}$) then
#:if rdma_mpi
#:call GPU_HOST_DATA(use_device='[buff_send, buff_recv]')
call nvtxStartRange("RHS-COMM-SENDRECV-RDMA")
call MPI_SENDRECV( &
buff_send, buffer_count, mpi_p, dst_proc, send_tag, &
buff_recv, buffer_count, mpi_p, src_proc, recv_tag, &
MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
call nvtxEndRange ! RHS-MPI-SENDRECV-(NO)-RDMA
#:endcall GPU_HOST_DATA
$:GPU_WAIT()
#:else
call nvtxStartRange("RHS-COMM-DEV2HOST")
$:GPU_UPDATE(host='[buff_send]')
call nvtxEndRange
call nvtxStartRange("RHS-COMM-SENDRECV-NO-RMDA")
call MPI_SENDRECV( &
buff_send, buffer_count, mpi_p, dst_proc, send_tag, &
buff_recv, buffer_count, mpi_p, src_proc, recv_tag, &
MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
call nvtxEndRange ! RHS-MPI-SENDRECV-(NO)-RDMA
call nvtxStartRange("RHS-COMM-HOST2DEV")
$:GPU_UPDATE(device='[buff_recv]')
call nvtxEndRange
#:endif
end if
#:endfor
#else
call MPI_SENDRECV( &
buff_send, buffer_count, mpi_p, dst_proc, send_tag, &
buff_recv, buffer_count, mpi_p, src_proc, recv_tag, &
MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
#endif
! Unpack Received Buffer
call nvtxStartRange("RHS-COMM-UNPACKBUF")
#:for mpi_dir in [1, 2, 3]
if (mpi_dir == ${mpi_dir}$) then
#:if mpi_dir == 1
$:GPU_PARALLEL_LOOP(collapse=4,private='[r]')
do l = 0, p
do k = 0, n
do j = -buff_size, -1
do i = 1, nVar
r = (i - 1) + v_size* &
(j + buff_size*((k + 1) + (n + 1)*l))
q_comm(i)%sf(j + unpack_offset, k, l) = buff_recv(r)
#if defined(__INTEL_COMPILER)
if (ieee_is_nan(q_comm(i)%sf(j, k, l))) then
print *, "Error", j, k, l, i
error stop "NaN(s) in recv"
end if
#endif
end do
end do
end do
end do
if (qbmm_comm) then
$:GPU_PARALLEL_LOOP(collapse=5,private='[r]')
do l = 0, p
do k = 0, n
do j = -buff_size, -1
do i = nVar + 1, nVar + 4
do q = 1, nb
r = (i - 1) + (q - 1)*4 + v_size* &
(j + buff_size*((k + 1) + (n + 1)*l))
pb_in(j + unpack_offset, k, l, i - nVar, q) = buff_recv(r)
end do
end do
end do
end do
end do
$:GPU_PARALLEL_LOOP(collapse=5,private='[r]')
do l = 0, p
do k = 0, n
do j = -buff_size, -1
do i = nVar + 1, nVar + 4
do q = 1, nb
r = (i - 1) + (q - 1)*4 + nb*4 + v_size* &
(j + buff_size*((k + 1) + (n + 1)*l))
mv_in(j + unpack_offset, k, l, i - nVar, q) = buff_recv(r)
end do
end do
end do
end do