-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathutils.py
More file actions
282 lines (231 loc) · 10.1 KB
/
utils.py
File metadata and controls
282 lines (231 loc) · 10.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
import pandas as pd
import selfies as sf
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import Chem
from rdkit.Chem import RDConfig
import os
import sys
sys.path.append(os.path.join(RDConfig.RDContribDir, 'SA_Score'))
import sascorer
from rdkit.Chem import QED
import warnings
warnings.filterwarnings("ignore")
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
from multiprocessing import Pool
import torch
import numpy as np
import wandb
def get_mol(smiles_or_mol):
    """
    Loads a SMILES string (or an already-built molecule) into an RDKit Mol.
    Returns None for empty/unparsable/unsanitizable SMILES; non-string
    inputs are passed through unchanged.
    """
    if not isinstance(smiles_or_mol, str):
        # Already an RDKit Mol (or None) -- pass through untouched.
        return smiles_or_mol
    if not smiles_or_mol:
        return None
    molecule = Chem.MolFromSmiles(smiles_or_mol)
    if molecule is None:
        return None
    try:
        Chem.SanitizeMol(molecule)
    except ValueError:
        return None
    return molecule
def mapper(n_jobs):
    '''
    Returns function for map call.
    If n_jobs == 1, will use standard map
    If n_jobs > 1, will use multiprocessing pool
    If n_jobs is a pool object, will return its map function
    '''
    if n_jobs == 1:
        def _mapper(*args, **kwargs):
            return list(map(*args, **kwargs))
        return _mapper
    if isinstance(n_jobs, int):
        def _mapper(*args, **kwargs):
            # Create the pool per call. The previous implementation built one
            # Pool at mapper() time and terminated it in a finally-block after
            # the first call, so any second invocation of the returned
            # function hit a terminated pool (and the pool leaked if the
            # function was never called at all). The context manager
            # terminates the pool on exit, matching the old cleanup.
            with Pool(n_jobs) as pool:
                return pool.map(*args, **kwargs)
        return _mapper
    # Assume a pool-like object exposing .map
    return n_jobs.map
def remove_invalid(gen, canonize=True, n_jobs=1):
    """
    Removes invalid molecules from the dataset.
    If canonize is True, returns canonical SMILES of the valid entries;
    otherwise returns the original strings whose molecules parse.
    """
    if canonize:
        canonical = mapper(n_jobs)(canonic_smiles, gen)
        return [smi for smi in canonical if smi is not None]
    mols = mapper(n_jobs)(get_mol, gen)
    return [smi for smi, mol in zip(gen, mols) if mol is not None]
def fraction_valid(gen, n_jobs=1):
    """
    Computes the fraction of valid molecules.
    Parameters:
        gen: list of SMILES
        n_jobs: number of threads for calculation
    """
    mols = mapper(n_jobs)(get_mol, gen)
    invalid_count = sum(1 for mol in mols if mol is None)
    return 1 - invalid_count / len(mols)
def canonic_smiles(smiles_or_mol):
    """Return the canonical SMILES of the input, or None if it is invalid."""
    mol = get_mol(smiles_or_mol)
    return None if mol is None else Chem.MolToSmiles(mol)
def fraction_unique(gen, k=None, n_jobs=1, check_validity=True):
    """
    Computes the fraction of unique molecules.
    Parameters:
        gen: list of SMILES
        k: compute unique@k (only the first k molecules are considered)
        n_jobs: number of threads for calculation
        check_validity: if True, invalid molecules (whose canonical SMILES
            is None) are dropped before counting uniques; if False, all
            invalid molecules collapse into a single shared None entry
    """
    if k is not None:
        if len(gen) < k:
            # Fixed message: the two concatenated parts previously ran
            # together without a separating space.
            warnings.warn(
                "Can't compute unique@{}. ".format(k) +
                "gen contains only {} molecules".format(len(gen))
            )
        gen = gen[:k]
    canonic = set(mapper(n_jobs)(canonic_smiles, gen))
    if None in canonic and check_validity:
        # Invalid molecules are excluded rather than raising, so the metric
        # can still be reported for partially invalid generations.
        canonic = [i for i in canonic if i is not None]
    return 0 if len(gen) == 0 else len(canonic) / len(gen)
def novelty(gen, train, n_jobs=1):
    """
    Fraction of valid generated molecules whose canonical SMILES do not
    appear in the training set. Returns 0 when no generated molecule is valid.
    """
    canonical = mapper(n_jobs)(canonic_smiles, gen)
    valid_unique = set(canonical)
    valid_unique.discard(None)
    if not valid_unique:
        return 0
    novel = valid_unique - set(train)
    return len(novel) / len(valid_unique)
def average_agg_tanimoto(stock_vecs, gen_vecs,
                         batch_size=5000, agg='max',
                         device='cpu', p=1, no_list=True):
    """
    For each molecule in gen_vecs finds closest molecule in stock_vecs.
    Returns average tanimoto score for between these molecules
    Parameters:
        stock_vecs: numpy array <n_vectors x dim>
        gen_vecs: numpy array <n_vectors' x dim>
        agg: max or mean
        p: power for averaging: (mean x^p)^(1/p)
        no_list: if True, return only the mean; otherwise also return the
            per-molecule scores and the index of each best stock match
    """
    assert agg in ['max', 'mean'], "Can aggregate only max or mean"
    n_gen = len(gen_vecs)
    agg_tanimoto = np.zeros(n_gen)
    total = np.zeros(n_gen)
    best_stock_indices = np.zeros(n_gen, dtype=int)
    # Process both sides in batches to bound the pairwise matrix size.
    for stock_start in range(0, stock_vecs.shape[0], batch_size):
        stock_batch = torch.tensor(
            stock_vecs[stock_start:stock_start + batch_size]).to(device).float()
        for gen_start in range(0, gen_vecs.shape[0], batch_size):
            gen_batch = torch.tensor(
                gen_vecs[gen_start:gen_start + batch_size]).to(device).float()
            gen_batch = gen_batch.transpose(0, 1)
            intersection = torch.mm(stock_batch, gen_batch)
            union = (stock_batch.sum(1, keepdim=True) +
                     gen_batch.sum(0, keepdim=True) - intersection)
            jac = (intersection / union).cpu().numpy()
            # All-zero vector pairs give 0/0; treat them as identical.
            jac[np.isnan(jac)] = 1
            if p != 1:
                jac = jac ** p
            span = slice(gen_start, gen_start + gen_batch.shape[1])
            if agg == 'max':
                batch_best = jac.max(0)
                improved = batch_best > agg_tanimoto[span]
                agg_tanimoto[span] = np.maximum(agg_tanimoto[span], batch_best)
                # Record the global stock index of each new best match.
                best_stock_indices[span][improved] = \
                    stock_start + jac.argmax(0)[improved]
            else:
                agg_tanimoto[span] += jac.sum(0)
                total[span] += jac.shape[0]
    if agg == 'mean':
        agg_tanimoto /= total
    if p != 1:
        agg_tanimoto = agg_tanimoto ** (1 / p)
    mean_score = np.mean(agg_tanimoto)
    if no_list:
        return mean_score
    return mean_score, agg_tanimoto, best_stock_indices
def generate_vecs(mols):
    """
    1024-bit Morgan (radius 2) fingerprints for a list of RDKit mols.
    None entries map to an all-zero vector.
    """
    zero_vec = np.zeros(1024)
    vecs = []
    for mol in mols:
        if mol is None:
            vecs.append(zero_vec)
        else:
            vecs.append(AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=1024))
    return np.array(vecs)
def to_mol(smiles_list):
    """Parse each SMILES string into an RDKit Mol (None for unparsable input)."""
    mols = []
    for smiles in smiles_list:
        mols.append(Chem.MolFromSmiles(smiles))
    return mols
def sascorer_calculation(mols):
    """
    Synthetic-accessibility score per molecule.
    None entries, and molecules on which the scorer raises
    ZeroDivisionError, yield None.
    """
    scores = []
    for mol in mols:
        if mol is None:
            scores.append(None)
            continue
        try:
            score = sascorer.calculateScore(mol)
        except ZeroDivisionError:
            score = None
        scores.append(score)
    return scores
def qed_calculation(mols):
    """QED drug-likeness score per molecule; None entries yield None."""
    scores = []
    for mol in mols:
        scores.append(QED.qed(mol) if mol is not None else None)
    return scores
def logp_calculation(mols):
    """
    Crippen logP per molecule; None entries yield None.
    NOTE(review): relies on Chem.Crippen being reachable as an attribute of
    rdkit.Chem -- confirm the Crippen submodule is loaded in this environment.
    """
    results = []
    for mol in mols:
        if mol is None:
            results.append(None)
        else:
            results.append(Chem.Crippen.MolLogP(mol))
    return results
def _mean_ignoring_none(values):
    """Mean over the non-None entries of values; 0 if none are present."""
    present = [v for v in values if v is not None]
    return float(np.mean(present)) if present else 0


def metrics_calculation(predictions, references, train_data, train_vec=None, training=True):
    """
    Computes generation metrics for a batch of SELFIES predictions.
    Parameters:
        predictions: list of SELFIES strings (spaces are stripped)
        references: list of reference SELFIES strings (spaces are stripped)
        train_data: table with a "Compound_SELFIES" column of training SELFIES
        train_vec: optional precomputed fingerprint matrix of the training set
        training: if truthy, logs metrics to wandb and returns the metrics
            dict; otherwise returns (metrics dict, per-molecule results
            DataFrame)
    """
    predictions = [x.replace(" ", "") for x in predictions]
    references = [x.replace(" ", "") for x in references]
    prediction_smiles = pd.DataFrame([sf.decoder(x) for x in predictions],
                                     columns=["smiles"])
    prediction_validity_ratio = fraction_valid(list(prediction_smiles["smiles"]))
    # Pre-fill the per-molecule lists so the non-training return path is
    # always well-defined (the previous version raised NameError when no
    # prediction was valid, and produced length-mismatched columns when
    # train_vec was None) and every results column matches the smiles column.
    n_predictions = len(prediction_smiles)
    predicted_vs_reference_sim_list = [None] * n_predictions
    predicted_vs_training_sim_list = [None] * n_predictions
    prediction_sa_score_list = [None] * n_predictions
    prediction_qed_score_list = [None] * n_predictions
    prediction_logp_score_list = [None] * n_predictions
    if prediction_validity_ratio != 0:
        prediction_mols = to_mol(list(prediction_smiles["smiles"]))
        training_data_smiles = [sf.decoder(x) for x in train_data["Compound_SELFIES"]]
        reference_smiles = [sf.decoder(x) for x in references]
        prediction_uniqueness_ratio = fraction_unique(prediction_smiles["smiles"])
        prediction_smiles_novelty_against_training_samples = novelty(
            list(prediction_smiles["smiles"]), training_data_smiles)
        prediction_smiles_novelty_against_reference_samples = novelty(
            list(prediction_smiles["smiles"]), reference_smiles)
        prediction_vecs = generate_vecs(prediction_mols)
        # Parse each reference SMILES once (previously parsed twice per entry).
        reference_mols = [Chem.MolFromSmiles(x) for x in reference_smiles]
        reference_vec = generate_vecs([m for m in reference_mols if m is not None])
        predicted_vs_reference_sim_mean, predicted_vs_reference_sim_list, _ = \
            average_agg_tanimoto(reference_vec, prediction_vecs, no_list=False)
        predicted_vs_training_sim_mean = 0
        if train_vec is not None:
            predicted_vs_training_sim_mean, predicted_vs_training_sim_list, _ = \
                average_agg_tanimoto(train_vec, prediction_vecs, no_list=False)
        # Internal diversity: 1 - mean pairwise similarity of the predictions.
        IntDiv = 1 - average_agg_tanimoto(prediction_vecs, prediction_vecs,
                                          agg="mean", no_list=True)
        prediction_sa_score_list = sascorer_calculation(prediction_mols)
        prediction_qed_score_list = qed_calculation(prediction_mols)
        prediction_logp_score_list = logp_calculation(prediction_mols)
        # Average only over molecules the scorers handled; np.mean over a
        # list containing None (invalid molecules) raises TypeError.
        prediction_sa_score = _mean_ignoring_none(prediction_sa_score_list)
        prediction_qed_score = _mean_ignoring_none(prediction_qed_score_list)
        prediction_logp_score = _mean_ignoring_none(prediction_logp_score_list)
        metrics = {"validity": prediction_validity_ratio,
                   "uniqueness": prediction_uniqueness_ratio,
                   "novelty_against_training_samples": prediction_smiles_novelty_against_training_samples,
                   "novelty_against_reference_samples": prediction_smiles_novelty_against_reference_samples,
                   "intdiv": IntDiv,
                   "similarity_to_training_samples": predicted_vs_training_sim_mean,
                   "similarity_to_reference_samples": predicted_vs_reference_sim_mean,
                   "sa_score": prediction_sa_score,
                   "qed_score": prediction_qed_score,
                   "logp_score": prediction_logp_score}
    else:
        # No valid molecule at all: report zeros for every metric.
        metrics = {"validity": 0,
                   "uniqueness": 0,
                   "novelty_against_training_samples": 0,
                   "novelty_against_reference_samples": 0,
                   "intdiv": 0,
                   "similarity_to_training_samples": 0,
                   "similarity_to_reference_samples": 0,
                   "sa_score": 0,
                   "qed_score": 0,
                   "logp_score": 0}
    if training:
        wandb.log(metrics)
        return metrics
    result_dict = {"smiles": prediction_smiles["smiles"],
                   "test_sim": predicted_vs_reference_sim_list,
                   "train_sim": predicted_vs_training_sim_list,
                   "sa_score": prediction_sa_score_list,
                   "qed_score": prediction_qed_score_list,
                   "logp_score": prediction_logp_score_list}
    results = pd.DataFrame.from_dict(result_dict)
    return metrics, results