From f0b0f268c5bd8f3ff31d22132fd4e8b3132d353d Mon Sep 17 00:00:00 2001 From: Melissa Rasmussen Date: Tue, 27 Jan 2026 02:22:47 -0500 Subject: [PATCH 1/6] Just opening and copying a MESA .data file --- massive_star/convert_large_to_small.py | 43 ++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 massive_star/convert_large_to_small.py diff --git a/massive_star/convert_large_to_small.py b/massive_star/convert_large_to_small.py new file mode 100644 index 0000000..87bffab --- /dev/null +++ b/massive_star/convert_large_to_small.py @@ -0,0 +1,43 @@ + +import numpy as np + +file_name = "20m_pre_cc_1335s_206_isos.data" + +bulk_data = np.genfromtxt( + file_name, + skip_header=5, + names=True, + ndmin=1, + dtype=None, +) +bulk_names = bulk_data.dtype.names +with open(file_name) as f: + for i, line in enumerate(f): + if i == 1: + header_names = line.split() + elif i == 2: + header_data = [eval(datum) for datum in line.split()] + elif i > 2: + break +header_data = dict(zip(header_names, header_data)) + + + +# OUTPUT TO FILE +new_file_name = "re-"+file_name + +with open(new_file_name, 'w') as f: + def writeline(array): + f.write(''.join([str(x).rjust(40) for x in array])+'\n') + + writeline(range(1,len(header_data)+1)) + writeline(header_data.keys()) + writeline(header_data.values()) + f.write('\n') + + writeline(range(1,len(bulk_names)+1)) + writeline(bulk_names) + for i in range(bulk_data.size): + writeline(bulk_data[i]) + f.write('\n') + From a95fbbb7ac304db8a693de8b486f43d9964c6bde Mon Sep 17 00:00:00 2001 From: Melissa Rasmussen Date: Wed, 28 Jan 2026 20:18:59 -0500 Subject: [PATCH 2/6] Setting up MESA species to be read by pynucastro --- massive_star/convert_large_to_small.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/massive_star/convert_large_to_small.py b/massive_star/convert_large_to_small.py index 87bffab..8322c5c 100644 --- a/massive_star/convert_large_to_small.py +++ b/massive_star/convert_large_to_small.py 
@@ -1,8 +1,13 @@ import numpy as np +import pynucastro +from pynucastro.networks.rate_collection import Composition file_name = "20m_pre_cc_1335s_206_isos.data" +# I have a branch in mesa_reader that could be used to make this code shorter +# https://github.com/melilly/py_mesa_reader/tree/write-file + bulk_data = np.genfromtxt( file_name, skip_header=5, @@ -22,6 +27,24 @@ header_data = dict(zip(header_names, header_data)) +# SHRINK WITH PYNUCASTRO +try: + start, end = bulk_names.index('neut'), bulk_names.index('zn66') + mesa_species = bulk_names[start:end+1] +except ValueError as e: + print("The given start or end species is not listed in the MESA file.") + raise e + +# this line can be eliminated with the PR: +# https://github.com/pynucastro/pynucastro/pull/1253 +pynucastro_species = [x.replace('neut','n') for x in mesa_species] + +c = Composition(pynucastro_species) +for row in bulk_data: + c.set_array(list(row)[start:end+1]) + new_c = c.bin_as(aprox19_nuclei) + + # OUTPUT TO FILE new_file_name = "re-"+file_name From 50c0bd1c6a7484c6de5314fb2fce10a3b7259a85 Mon Sep 17 00:00:00 2001 From: Melissa Rasmussen Date: Thu, 29 Jan 2026 15:32:54 -0500 Subject: [PATCH 3/6] Add aprox19 (18) --- massive_star/convert_large_to_small.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/massive_star/convert_large_to_small.py b/massive_star/convert_large_to_small.py index 8322c5c..4536fad 100644 --- a/massive_star/convert_large_to_small.py +++ b/massive_star/convert_large_to_small.py @@ -30,21 +30,26 @@ # SHRINK WITH PYNUCASTRO try: start, end = bulk_names.index('neut'), bulk_names.index('zn66') - mesa_species = bulk_names[start:end+1] + large_network = bulk_names[start:end+1] except ValueError as e: print("The given start or end species is not listed in the MESA file.") raise e -# this line can be eliminated with the PR: -# https://github.com/pynucastro/pynucastro/pull/1253 -pynucastro_species = [x.replace('neut','n') for x in mesa_species] 
+aprox19_nuclei = ['h1', 'he3', 'he4', 'c12', 'n14', 'o16', 'ne20', 'mg24', + 'si28', 's32', 'ar36', 'ca40', 'ti44', 'cr48', 'fe52', + 'fe54', 'ni56', 'n'] +# this excludes p_nse. We'll deal with that split later -c = Composition(pynucastro_species) +small_network = aprox19_nuclei + +c = Composition(large_network) for row in bulk_data: c.set_array(list(row)[start:end+1]) new_c = c.bin_as(aprox19_nuclei) - - + print(new_c.get_nuclei()) + print(new_c.get_sum_X()) + print(new_c.get_molar()) + break # OUTPUT TO FILE new_file_name = "re-"+file_name From af1ac09a0616149491fd9338ae08ae61c2e5ab57 Mon Sep 17 00:00:00 2001 From: Melissa Rasmussen Date: Mon, 2 Feb 2026 17:20:35 -0500 Subject: [PATCH 4/6] Output shrunken composition to new model file --- massive_star/convert_large_to_small.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/massive_star/convert_large_to_small.py b/massive_star/convert_large_to_small.py index 4536fad..7ad8cfb 100644 --- a/massive_star/convert_large_to_small.py +++ b/massive_star/convert_large_to_small.py @@ -42,14 +42,20 @@ small_network = aprox19_nuclei +network_col_names = [] +network_col_bulk = [] c = Composition(large_network) for row in bulk_data: c.set_array(list(row)[start:end+1]) new_c = c.bin_as(aprox19_nuclei) - print(new_c.get_nuclei()) - print(new_c.get_sum_X()) - print(new_c.get_molar()) - break + dictionary = new_c.data + + if not network_col_names: + network_col_names = dictionary.keys() + + network_col_bulk.append(list(dictionary.values())) + +network_col_bulk = np.array(network_col_bulk) # OUTPUT TO FILE new_file_name = "re-"+file_name @@ -57,15 +63,18 @@ with open(new_file_name, 'w') as f: def writeline(array): f.write(''.join([str(x).rjust(40) for x in array])+'\n') + def replace_cols(data, replacement, start, end): + return list(data)[:start]+list(replacement)+list(data)[end+1:] writeline(range(1,len(header_data)+1)) writeline(header_data.keys()) writeline(header_data.values()) 
f.write('\n') - writeline(range(1,len(bulk_names)+1)) - writeline(bulk_names) + col_names = replace_cols(bulk_names, network_col_names, start, end) + writeline(range(1,len(col_names)+1)) + writeline(col_names) for i in range(bulk_data.size): - writeline(bulk_data[i]) + writeline(replace_cols(bulk_data[i], network_col_bulk[i], start, end)) f.write('\n') From 76fcb6d539b1a5799b8c743536e4bbacf8b531a7 Mon Sep 17 00:00:00 2001 From: Melissa Rasmussen Date: Thu, 12 Feb 2026 23:22:03 -0500 Subject: [PATCH 5/6] add in proton, set to 0, in additional to H1 --- massive_star/convert_large_to_small.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/massive_star/convert_large_to_small.py b/massive_star/convert_large_to_small.py index 7ad8cfb..ece29bc 100644 --- a/massive_star/convert_large_to_small.py +++ b/massive_star/convert_large_to_small.py @@ -38,7 +38,7 @@ aprox19_nuclei = ['h1', 'he3', 'he4', 'c12', 'n14', 'o16', 'ne20', 'mg24', 'si28', 's32', 'ar36', 'ca40', 'ti44', 'cr48', 'fe52', 'fe54', 'ni56', 'n'] -# this excludes p_nse. We'll deal with that split later +# this excludes p_nse. 
"""Bin a MESA profile's large nuclear network into a smaller one.

Reads a MESA ``.data`` profile whose composition columns span a large
reaction network, bins the per-zone mass fractions into a smaller
network (currently aprox19) with pynucastro, and writes a new profile
in the same MESA file format with the composition columns replaced.
"""

import argparse
import ast
import os

import numpy as np

# Nuclei lists for each supported target network.
# The aprox19 list excludes p_nse ('p'); that split is handled later by
# writing a 'p' column that is identically zero (all protons end up in h1).
implemented_networks = {
    "aprox19": ['h1', 'he3', 'he4', 'c12', 'n14', 'o16', 'ne20', 'mg24',
                'si28', 's32', 'ar36', 'ca40', 'ti44', 'cr48', 'fe52',
                'fe54', 'ni56', 'n'],
}


def parse_header_value(datum):
    """Parse one whitespace-separated header token into a Python value.

    Uses ast.literal_eval instead of eval for safety (file contents are
    untrusted input), and falls back to the raw string for tokens that
    are not valid Python literals.
    """
    try:
        return ast.literal_eval(datum)
    except (ValueError, SyntaxError):
        return datum


def read_mesa_profile(file_name):
    """Read a MESA profile file.

    Returns ``(header_data, bulk_data, bulk_names)``: the scalar header
    as a dict, the per-zone table as a numpy structured array, and its
    column names. This reader is adapted from py_mesa_reader.
    """
    bulk_data = np.genfromtxt(file_name, skip_header=5, names=True,
                              ndmin=1, dtype=None)
    header_names = []
    header_values = []
    with open(file_name) as f:
        for i, line in enumerate(f):
            # line 1 (0-based) holds the header column names,
            # line 2 holds the corresponding values
            if i == 1:
                header_names = line.split()
            elif i == 2:
                header_values = [parse_header_value(d) for d in line.split()]
            elif i > 2:
                break
    header_data = dict(zip(header_names, header_values))
    return header_data, bulk_data, bulk_data.dtype.names


def derive_output_name(file_name, network):
    """Default output name: insert the network name before the extension.

    e.g. ``model.data`` -> ``model.aprox19.data``.  Unlike a bare
    ``rsplit('.', 1)``, this also works for names with no extension.
    """
    file_stem, file_ending = os.path.splitext(file_name)
    # splitext keeps the leading dot in file_ending (empty if no extension)
    return f"{file_stem}.{network}{file_ending}"


def main(file_name, new_file_name, network):
    """Bin the composition columns of ``file_name`` into ``network`` and
    write the result to ``new_file_name`` (MESA profile format for both).
    """
    # pynucastro is imported lazily so this module can be imported (e.g.
    # for its helpers) without the dependency installed.
    from pynucastro.networks.rate_collection import Composition

    header_data, bulk_data, bulk_names = read_mesa_profile(file_name)

    # SHRINK WITH PYNUCASTRO
    # The composition columns run from 'neut' up to (but not including)
    # the 'opacity' column in a MESA profile.
    try:
        start = bulk_names.index('neut')
        end = bulk_names.index('opacity') - 1
    except ValueError as e:
        print("Could not locate the composition columns: expected 'neut' "
              "and 'opacity' columns in the MESA file.")
        raise e
    large_network = bulk_names[start:end + 1]

    small_network = implemented_networks[network]

    # Bin every zone's mass fractions into the small network.
    network_col_names = []
    network_col_bulk = []
    c = Composition(large_network)
    for row in bulk_data:
        c.set_array(list(row)[start:end + 1])
        new_c = c.bin_as(small_network)
        dictionary = new_c.data
        # aprox19 also carries a p_nse ('p') species that the binning does
        # not produce; write it as identically zero.
        dictionary['p'] = 0

        if not network_col_names:
            network_col_names = list(dictionary.keys())

        network_col_bulk.append(list(dictionary.values()))

    network_col_bulk = np.array(network_col_bulk)

    # OUTPUT TO FILE, mirroring the MESA profile layout
    with open(new_file_name, 'w') as f:
        def writeline(array):
            # MESA profiles use fixed-width, right-justified columns
            f.write(''.join([str(x).rjust(40) for x in array]) + '\n')

        def replace_cols(data, replacement, start, end):
            # splice `replacement` over columns start..end of `data`
            return list(data)[:start] + list(replacement) + list(data)[end + 1:]

        writeline(range(1, len(header_data) + 1))
        writeline(header_data.keys())
        writeline(header_data.values())
        f.write('\n')

        col_names = replace_cols(bulk_names, network_col_names, start, end)
        writeline(range(1, len(col_names) + 1))
        writeline(col_names)
        for i in range(bulk_data.size):
            writeline(replace_cols(bulk_data[i], network_col_bulk[i], start, end))
        f.write('\n')

    print(f'Binned network (size={end-start+1}) into network {network} and saved file in {new_file_name}.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Bin a large network into a smaller network specified by user. "
                    "Input and output files use the MESA file format.")
    parser.add_argument('network', choices=implemented_networks.keys(),
                        help="The name of the smaller network to bin into.")
    parser.add_argument('file_name')
    parser.add_argument('new_file_name', nargs='?')
    args = parser.parse_args()
    if args.new_file_name is None:
        new_file_name = derive_output_name(args.file_name, args.network)
    else:
        new_file_name = args.new_file_name
    main(args.file_name, new_file_name, args.network)