Skip to content

Commit 8ecdbdc

Browse files
authored
Bin a large network into a smaller one (#88)
In massive_star, this script takes a MESA-formatted file and bins the inferred network into a specified smaller network using pynucastro. The output file is also in the MESA format. The script currently only implements binning into aprox19, but it is set up to be easily extensible to other small networks.
1 parent 28b8245 commit 8ecdbdc

1 file changed

Lines changed: 91 additions & 0 deletions

File tree

Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
import argparse
import ast

import numpy as np

from pynucastro.networks.rate_collection import Composition
4+
5+
# Nuclei making up each small network this script can bin into.
# NOTE: the aprox19 list excludes p_nse; that split is handled later by
# zero-filling the 'p' column.
_APROX19_NUCLEI = (
    "h1 he3 he4 c12 n14 o16 ne20 mg24 "
    "si28 s32 ar36 ca40 ti44 cr48 fe52 "
    "fe54 ni56 n"
)

implemented_networks = {
    "aprox19": _APROX19_NUCLEI.split(),
}
11+
12+
13+
def main(file_name, new_file_name, network):
    """Bin the network in a MESA-formatted file into a smaller network.

    Reads ``file_name`` (MESA profile format), bins the mass fractions of
    the inferred large network into the nuclei of ``network`` using
    pynucastro's ``Composition.bin_as``, and writes the result to
    ``new_file_name`` in the same MESA format.

    Parameters
    ----------
    file_name : str
        Path to the input MESA-formatted file.
    new_file_name : str
        Path for the output MESA-formatted file.
    network : str
        Key into ``implemented_networks`` naming the target small network.

    Raises
    ------
    ValueError
        If the 'neut' or 'opacity' columns are missing from the MESA file.
    KeyError
        If ``network`` is not in ``implemented_networks``.
    """
    # READ MESA FILE
    # this code adapted from py_mesa_reader.
    bulk_data = np.genfromtxt(file_name, skip_header=5, names=True, ndmin=1, dtype=None)
    bulk_names = bulk_data.dtype.names
    # Initialize so a truncated file yields an empty header rather than a
    # NameError below.
    header_names = []
    header_data = []
    with open(file_name) as f:
        for i, line in enumerate(f):
            if i == 1:
                header_names = line.split()
            elif i == 2:
                # literal_eval (not eval): parses numeric/string literals
                # without executing arbitrary code read from the input file.
                header_data = [ast.literal_eval(datum) for datum in line.split()]
            elif i > 2:
                break
    header_data = dict(zip(header_names, header_data))

    # SHRINK WITH PYNUCASTRO
    try:
        # Species columns run from 'neut' up to (but excluding) 'opacity'.
        start, end = bulk_names.index('neut'), bulk_names.index('opacity')-1
        large_network = bulk_names[start:end+1]
    except ValueError:
        print("The given start or end species is not listed in the MESA file.")
        # Bare raise preserves the original traceback.
        raise

    small_network = implemented_networks[network]

    network_col_names = []
    network_col_bulk = []
    c = Composition(large_network)
    for row in bulk_data:
        c.set_array(list(row)[start:end+1])
        new_c = c.bin_as(small_network)
        dictionary = new_c.data
        # aprox19 excludes p_nse from its nuclei list; zero-fill the 'p'
        # column so the output still carries it.
        dictionary['p'] = 0

        if not network_col_names:
            # Freeze the column order from the first row; list() copies the
            # keys instead of holding a live dict view.
            network_col_names = list(dictionary.keys())

        network_col_bulk.append(list(dictionary.values()))

    network_col_bulk = np.array(network_col_bulk)

    # OUTPUT TO FILE
    with open(new_file_name, 'w') as f:
        def writeline(array):
            # MESA files use fixed-width, right-justified (40-char) columns.
            f.write(''.join([str(x).rjust(40) for x in array])+'\n')

        def replace_cols(data, replacement, start, end):
            # Splice `replacement` in place of columns start..end of `data`.
            return list(data)[:start]+list(replacement)+list(data)[end+1:]

        writeline(range(1, len(header_data)+1))
        writeline(header_data.keys())
        writeline(header_data.values())
        f.write('\n')

        col_names = replace_cols(bulk_names, network_col_names, start, end)
        writeline(range(1, len(col_names)+1))
        writeline(col_names)
        for i in range(bulk_data.size):
            writeline(replace_cols(bulk_data[i], network_col_bulk[i], start, end))
        f.write('\n')

    print(f'Binned network (size={end-start+1}) into network {network} and saved file in {new_file_name}.')
78+
79+
80+
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Bin a large network into a smaller network specified by user. Input and output files use the MESA file format.")
    parser.add_argument('network', choices=implemented_networks.keys(), help="The name of the smaller network to bin into.")
    parser.add_argument('file_name')
    parser.add_argument('new_file_name', nargs='?')
    args = parser.parse_args()
    if args.new_file_name is None:
        # Default output name inserts the network name before the extension:
        # profile.data -> profile.aprox19.data. rpartition (not rsplit) also
        # handles extension-less filenames, which would otherwise raise
        # ValueError on the two-element unpack.
        file_stem, dot, file_ending = args.file_name.rpartition('.')
        if dot:
            new_file_name = f"{file_stem}.{args.network}.{file_ending}"
        else:
            # No extension: just append the network name.
            new_file_name = f"{args.file_name}.{args.network}"
    else:
        new_file_name = args.new_file_name
    main(args.file_name, new_file_name, args.network)

0 commit comments

Comments
 (0)