-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcategorise_plasmids.py
More file actions
193 lines (174 loc) · 9.38 KB
/
categorise_plasmids.py
File metadata and controls
193 lines (174 loc) · 9.38 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
import csv
import glob
import json
import os
import re
import shutil
import warnings

import pandas

from opencloning.syntax import Syntax
from pydna.parsers import parse
# Lookup tables mapping plasmid file ids to human-readable descriptions.
# Parsed with the csv module (not a bare str.split) so quoted fields that
# contain the delimiter are handled correctly, and descriptions are stripped
# so they do not carry the trailing newline of the raw line.

# CTK kit: CSV columns (header skipped) are plasmid, <unused>, description, <unused>.
with open('other_plasmids/ctk/plasmid_names.csv', 'r') as f:
    reader = csv.reader(f)
    next(reader)  # skip header row
    ctk_plasmid_names = {row[0]: row[2].strip() for row in reader}

# GoldenBraid starter kit: TSV columns (header skipped) are plasmid, description.
with open('other_plasmids/golden_braid/gb_starter_kit/plasmid_names.tsv', 'r') as f:
    reader = csv.reader(f, delimiter='\t')
    next(reader)  # skip header row
    golden_braid_starter_kit_plasmid_names = {row[0]: row[1].strip() for row in reader}
def merge_syntaxes(syntax_file1: str, syntax_file2: str) -> Syntax:
    """Merge two syntax definition files into a single ``Syntax``.

    The parts of *syntax_file2* are prepended to those of *syntax_file1*,
    the overhang-name mappings are merged (entries from *syntax_file2*
    overwrite same-key entries from *syntax_file1*, per ``dict.update``),
    and part ids are renumbered sequentially starting at 1 so they remain
    unique after the merge.

    Args:
        syntax_file1: Path to the syntax JSON whose object is mutated and returned.
        syntax_file2: Path to the syntax JSON merged into the first.

    Returns:
        The merged ``Syntax`` object (the one loaded from *syntax_file1*).
    """
    # Use context managers so the file handles are closed deterministically
    # (the original left them open until garbage collection).
    with open(syntax_file1) as f:
        syntax1 = Syntax.model_validate_json(f.read())
    with open(syntax_file2) as f:
        syntax2 = Syntax.model_validate_json(f.read())
    syntax1.parts = syntax2.parts + syntax1.parts
    syntax1.overhangNames.update(syntax2.overhangNames)
    # Reassign ids of parts so they are sequential in the merged ordering.
    for i, part in enumerate(syntax1.parts, start=1):
        part.id = i
    return syntax1
# ---------------------------------------------------------------------------
# Main driver: walk syntaxes/index.json and, for each syntax entry,
# materialise its syntax.json (copying or merging from a parent syntax when
# requested), then build the list of plasmids associated with that syntax —
# either from Addgene kit spreadsheets ("kits") or from local GenBank files
# ("plasmid_files") — and write them as plasmids*.json next to the syntax.
# ---------------------------------------------------------------------------
index_file = os.path.join("syntaxes", "index.json")
index = json.load(open(index_file))
for syntax_entry in index:
    # Load the syntax
    syntax_path = syntax_entry["path"]
    syntax_file = os.path.join("syntaxes", syntax_path, "syntax.json")
    if "uses_syntax" in syntax_entry:
        # "uses_syntax": this entry reuses another entry's syntax verbatim.
        parent_syntax = os.path.join("syntaxes", syntax_entry["uses_syntax"], "syntax.json")
        # Copy the syntax.json file to the syntaxes directory
        shutil.copy(parent_syntax, syntax_file)
    elif "extends_syntax" in syntax_entry:
        # "extends_syntax": merge this entry's sub_syntax.json on top of a
        # parent syntax; the merged result becomes this entry's syntax.json.
        parent_syntax = os.path.join("syntaxes", syntax_entry["extends_syntax"], "syntax.json")
        sub_syntax = os.path.join("syntaxes", syntax_path, "sub_syntax.json")
        # Merge the syntaxes
        merged_syntax = merge_syntaxes(sub_syntax, parent_syntax)
        with open(syntax_file, 'w') as f:
            f.write(merged_syntax.model_dump_json(indent=4))
    if "syntaxes" in syntax_entry:
        # An entry may bundle several alternative syntax files; process each,
        # keeping the sub-entry dicts aligned with the file list by index.
        syntax_files = [
            os.path.join("syntaxes", syntax_entry["path"], syntax["path"]) for syntax in syntax_entry["syntaxes"]
        ]
        syntax_entries = syntax_entry["syntaxes"]
    else:
        syntax_files = [syntax_file]
        syntax_entries = [syntax_entry]
    # --- Plasmids sourced from Addgene kits --------------------------------
    # NOTE(review): assumes every index entry has a "kits" key — confirm
    # against the index.json schema.
    if len(syntax_entry["kits"]) > 0:
        for syntax_index, syntax_file in enumerate(syntax_files):
            syntax = Syntax.model_validate_json(open(syntax_file).read())
            addgene_plasmids = list()
            for associated_kit in syntax_entry["kits"]:
                print('>', associated_kit["kit"])
                associated_kit: dict
                kit_path = os.path.join("kits", associated_kit["kit"])
                # Optional whitelist of plasmid names within this kit.
                plasmid_names = associated_kit.get("names", None)
                # Optional per-sub-syntax blacklist of plasmid names.
                exclude_plasmids = syntax_entries[syntax_index].get("exclude", None)
                associated_plasmids_tsv = os.path.join(kit_path, "plasmids.tsv")
                df = pandas.read_csv(associated_plasmids_tsv, sep="\t", dtype=str)
                df = df.fillna("")
                for _i, row in df.iterrows():
                    name, addgene_id = row["name"], row["addgene_id"]
                    content = row["content"] if "content" in row else ""
                    if plasmid_names is not None and name not in plasmid_names:
                        continue
                    if exclude_plasmids is not None and name in exclude_plasmids:
                        continue
                    content = content.strip()
                    # Sequence must have been downloaded beforehand as
                    # addgene_plasmids/<id>.gb.
                    seq = parse(f'addgene_plasmids/{addgene_id}.gb')[0]
                    resp = syntax.assign_plasmid_to_syntax_part(seq)
                    # Keep only plasmids that map to exactly one syntax part.
                    if len(resp) == 0:
                        print(f'Skipped plasmid {name} ({addgene_id}) because it could not be assigned to a syntax part')
                        continue
                    elif len(resp) > 1:
                        print(
                            f'Skipped plasmid {name} ({addgene_id}) because it could be assigned to multiple syntax parts'
                        )
                        continue
                    else:
                        resp = resp[0]
                    # The assignment key encodes the flanking overhangs as
                    # "<left>-<right>".
                    left_overhang, right_overhang = resp['key'].split('-')
                    # NOTE(review): longest_feature_name is computed here but
                    # never used below — looks like dead code; confirm before
                    # removing.
                    longest_feature_name = ''
                    if resp['longest_feature'] is not None and 'label' in resp['longest_feature'].qualifiers:
                        longest_feature_name = resp['longest_feature'].qualifiers['label'][0]
                    if content != '':
                        name += f" ({content})"
                    plasmid = {
                        'id': len(addgene_plasmids) + 1,
                        'plasmid_name': name,
                        'left_overhang': left_overhang,
                        'right_overhang': right_overhang,
                        'key': resp['key'],
                        'type': 'AddgeneIdSource',
                        'source': {
                            'id': 1,
                            'type': 'AddgeneIdSource',
                            'input': [],
                            'repository_id': addgene_id,
                        },
                    }
                    addgene_plasmids.append(plasmid)
            if not os.path.exists(f'syntaxes/{syntax_path}'):
                os.makedirs(f'syntaxes/{syntax_path}')
            # Multi-syntax entries get one plasmid file per sub-syntax.
            if "syntaxes" in syntax_entry:
                print('>', syntax_entry["syntaxes"])
                plasmid_file_name = f'syntaxes/{syntax_path}/plasmids_{syntax_entry["syntaxes"][syntax_index]["path"]}'
            else:
                plasmid_file_name = f'syntaxes/{syntax_path}/plasmids.json'
            with open(plasmid_file_name, 'w') as f:
                json.dump(addgene_plasmids, f, indent=4)
    # --- Plasmids sourced from local GenBank files -------------------------
    if "plasmid_files" in syntax_entry:
        plasmid_files = [f.split("/")[-1] for f in glob.glob(os.path.join(syntax_entry["plasmid_files"], "*.gb"))]
        for syntax_index, syntax_file in enumerate(syntax_files):
            syntax = Syntax.model_validate_json(open(syntax_file).read())
            filtered_plasmids = plasmid_files
            # A sub-syntax may restrict which files apply to it via a regex
            # matched against the file name.
            if "syntaxes" in syntax_entry and "filename_pattern" in syntax_entry["syntaxes"][syntax_index]:
                filename_pattern = syntax_entry["syntaxes"][syntax_index]["filename_pattern"]
                filtered_plasmids = [f for f in plasmid_files if re.match(filename_pattern, f)]
            plasmids_to_export = list()
            for plasmid_file in filtered_plasmids:
                plasmid_file_path = os.path.join(syntax_entry["plasmid_files"], plasmid_file)
                # Silence parser warnings while reading the GenBank file.
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    plasmid = parse(plasmid_file_path)[0]
                plasmid_name = plasmid.name
                resp = syntax.assign_plasmid_to_syntax_part(plasmid)
                # Export only unambiguous (single-part) assignments.
                if len(resp) == 1:
                    # Kit-specific rules to derive a friendlier display name
                    # from the file name.
                    if syntax_path == "subti_toolkit":
                        file_name = plasmid_file[:-3]  # drop the ".gb" suffix
                        plasmid_id = file_name.split("_")[0]
                        rest = '-'.join(file_name.split("_")[1:])
                        rest = rest.replace('pSTK-', '')
                        rest = rest.replace('pSTK', '')
                        plasmid_name = f"{rest} (p{plasmid_id})"
                    elif syntax_path == "ctk":
                        plasmid_id = plasmid_file[:-3]
                        if plasmid_id in ctk_plasmid_names:
                            plasmid_name = f'{ctk_plasmid_names[plasmid_id]} ({plasmid_name})'
                        elif resp[0]['longest_feature'] is not None:
                            plasmid_name = f"{resp[0]['longest_feature'].qualifiers['label'][0]} ({plasmid_name})"
                    elif syntax_path == "golden_braid":
                        plasmid_id = plasmid_file[:-3]
                        if plasmid_id in golden_braid_starter_kit_plasmid_names:
                            plasmid_name = f'{golden_braid_starter_kit_plasmid_names[plasmid_id]} ({plasmid_name})'
                    plasmids_to_export.append(
                        {
                            'type': 'loadedFile',
                            'plasmid_name': plasmid_name.replace('\n', '').replace('\r', ''),
                            'file_name': plasmid_file,
                            'left_overhang': resp[0]['key'].split('-')[0],
                            'right_overhang': resp[0]['key'].split('-')[1],
                            'key': resp[0]['key'],
                            'sequenceData': None,
                            'genbankString': plasmid.format("genbank"),
                        }
                    )
            # Deterministic output order regardless of glob ordering.
            plasmids_to_export.sort(key=lambda x: x['file_name'])
            if "syntaxes" in syntax_entry:
                plasmid_file_name = f'syntaxes/{syntax_path}/plasmids_{syntax_entry["syntaxes"][syntax_index]["path"]}'
            else:
                plasmid_file_name = f'syntaxes/{syntax_path}/plasmids.json'
            with open(plasmid_file_name, 'w') as f:
                json.dump(plasmids_to_export, f, indent=4)
# Emit a minified index (name -> {path, description}) for consumers that do
# not need the full syntax entries.
mini_index = {
    entry["name"]: {
        "path": entry["path"],
        "description": entry["description"],
    }
    for entry in index
}
with open('syntaxes/index.min.json', 'w') as f:
    json.dump(mini_index, f, indent=4)