-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathmake coefficients MPI.py
More file actions
191 lines (158 loc) · 6.35 KB
/
make coefficients MPI.py
File metadata and controls
191 lines (158 loc) · 6.35 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
import os
import time # Used for timing the coefficient construction
import pyEXP
from mpi4py import MPI
from os.path import exists
#
# This script makes an HDF5 coefficient set from some Gadget files in
# parallel using MPI. This can easily be adapted for whatever
# snapshots you have and could be run on a cluster
#
# There is a companion Jupyter Notebook called 'test_read_gadget.ipynb'
# that reads in the coefficients made by this script to check that
# they seem sane.
# Usage:
#
# Run command is: "mpirun -np N python3 make_coefficients_MPI.py"
# where N is the number of processes. For Slurm allocations, you can
# leave off "-np N" as usual.
#
if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Parameters
    # ------------------------------------------------------------------
    h5file  = 'RunG_halo_test'   # coefficient file root ('.h5' is appended)
    ctrfile = 'new.centers'      # text file of expansion centers (appended)
    beg_seq = 0                  # first snapshot index
    end_seq = 1000               # one past the last snapshot index
    nskip   = 20                 # particle stride for the center estimate

    # Get basic information about the MPI communicator.
    #
    # NB: we are not using standard MPI commands on the Python side, but
    # invoking mpi4py initializes MPI so it can be used correctly by the
    # C++ routines.
    world_comm = MPI.COMM_WORLD
    world_size = world_comm.Get_size()
    my_rank    = world_comm.Get_rank()

    # Just for info and to convince yourself that MPI is working
    print("World size is {} and my rank is {}".format(world_size, my_rank))

    # Switch to the working directory where the Gadget simulation lives.
    # Change this to your working directory.
    os.chdir('/media/weinberg/Simulation data/Nbody/Sphere/RunG')

    # Spherical basis configuration (YAML).  The parameter keys must be
    # nested under 'parameters'; the flat form would silently put them at
    # the top level where the basis factory ignores them.
    halo_config = """
id : sphereSL
parameters :
  numr : 1000
  rmin : 0.000011
  rmax : 1.99
  Lmax : 6
  nmax : 18
  scale : 0.05
  modelname : mw_halo.model
"""

    # Construct the basis instance
    halo_basis = pyEXP.basis.Basis.factory(halo_config)

    # Make the file list for the snapshot sequence, assumed to have the
    # format snapshot_XXXX.hdf5.  Change as necessary.
    file_list = ['snapshot_{:04d}.hdf5'.format(i)
                 for i in range(beg_seq, end_seq)]

    # Construct batches of files for the particle reader.  NB: a
    # std::vector in C++ becomes a Python list and vice versa.
    batches = pyEXP.read.ParticleReader.parseStringList(file_list, '')

    # The coefficient container; starts as None and is created on the
    # first successful group.  One could instead read an existing HDF5
    # coefficient file and update stanzas; for that reason the center
    # times/positions are kept in separate lists below.
    coefs = None

    centime = []   # snapshot times, parallel to 'centers'
    centers = []   # expansion centers, one [x, y, z] per snapshot

    for group in batches:
        # Skip a file group with missing members rather than aborting:
        # Gadget-2 restarts can leave gaps in the numbering.
        if not all(exists(f) for f in group):
            continue

        if my_rank == 0:
            print("file group is", group)

        # Make the reader for the desired type.  One could probably do
        # this by inspection, but that's another project.
        reader = pyEXP.read.ParticleReader.createReader(
            'GadgetHDF5', group, 0, False)

        if my_rank == 0:
            print('The component names are:', reader.GetTypes())

        compname = 'Halo'
        reader.SelectType(compname)
        if my_rank == 0:
            print('We selected:', compname)

        # Compute an expansion center from a mean density-weighted
        # position, sampling every nskip-th particle.  One could cache
        # the center array or supply it some other way.
        startTime = time.time()
        center = pyEXP.util.getDensityCenter(reader, stride=nskip,
                                             Nsort=1000)

        if my_rank == 0:
            print('Created center in {:4.2f} seconds'.
                  format(time.time() - startTime))
            print('Center is:', center)

        centime.append(reader.CurrentTime())
        centers.append(center)

        # Now compute the coefficients using this center
        startTime = time.time()
        coef = halo_basis.createFromReader(reader, center)

        if my_rank == 0:
            print('Created coefficients at Time {:5.3f} for {} particles '
                  'in {:4.2f} seconds'.
                  format(reader.CurrentTime(), reader.CurrentNumber(),
                         time.time() - startTime))

        # Create the container on first use.  BUG FIX: the original
        # called coefs.setUnits(...) while coefs was still None, which
        # raises AttributeError on the very first group; the (optional)
        # units must be set after the container exists.
        if coefs is None:
            coefs = pyEXP.coefs.Coefs.makecoefs(coef, compname)
            coefs.setUnits([("length", "none", 1.0),
                            ("mass",   "none", 1.0),
                            ("time",   "none", 1.0),
                            ("G",      "none", 1.0)])

        # Add the coefficients to the container
        coefs.add(coef)

        if my_rank == 0:
            print('Added coef to container')
            print('-' * 60)

    if my_rank == 0:
        print('\nCompleted the file group list\n')
        print('The coefficient time list is', coefs.Times())

        # You can call the file something convenient; the suffix '.h5'
        # is appended.  Only the root process writes the file (the
        # original left these writes unguarded, so every rank wrote).
        if exists(h5file + '.h5'):
            coefs.ExtendH5Coefs(h5file)   # Update an existing HDF5
            print('Saved the coefficients to an existing HDF5 file')
        else:
            coefs.WriteH5Coefs(h5file)    # Create a new HDF5
            print('Saved the coefficients to a new HDF5 file')

        # Save the center positions alongside their snapshot times
        with open(ctrfile, 'a') as f:
            for t, c in zip(centime, centers):
                f.write('{:13.6e} {:13.6e} {:13.6e} {:13.6e}\n'.
                        format(t, c[0], c[1], c[2]))