-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathelectra_predictor.py
More file actions
83 lines (69 loc) · 2.5 KB
/
electra_predictor.py
File metadata and controls
83 lines (69 loc) · 2.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
from typing import TYPE_CHECKING
import numpy as np
from .nn_predictor import NNPredictor
if TYPE_CHECKING:
from chebai.models.electra import Electra
def build_graph_from_attention(att, node_labels, token_labels, threshold=0.0):
    """Turn one attention matrix into a bipartite graph dict (vis.js-style).

    Two mirrored rows of nodes are emitted — group ``"l"`` (y=0) and group
    ``"r"`` (y=100) — one pair per token, with a leading node for token id 0
    (presumably the CLS-like ``[CLR]`` entry; confirm against the reader's
    vocabulary layout). An edge connects ``l_i`` to ``r_j`` whenever the
    attention weight ``att[i, j]`` exceeds *threshold*, with the weight used
    as the edge opacity.

    :param att: 2-D indexable of attention weights whose entries support
        ``.item()`` (e.g. a numpy array or torch tensor).
    :param node_labels: sequence of token ids for the tokenized input.
    :param token_labels: lookup table mapping token id -> display label.
    :param threshold: minimum attention weight for an edge to be drawn.
    :return: ``dict`` with ``"nodes"`` and ``"edges"`` lists.
    """
    n_nodes = len(node_labels)

    nodes = []
    # Prepend token id 0 so the first column is the special leading token.
    for idx, token_id in enumerate([0] + node_labels):
        for side in ("l", "r"):
            nodes.append(
                dict(
                    label=token_labels[token_id],
                    id=f"{side}_{idx}",
                    fixed=dict(x=True, y=True),
                    y=100 * int(side == "r"),
                    x=30 * idx,
                    group=side,
                )
            )

    # NOTE(review): edges cover indices 0..n_nodes-1 while n_nodes+1 node
    # pairs exist, so the last pair never gets edges — looks like it may be
    # an off-by-one w.r.t. the prepended token; verify against the
    # attention matrix dimensions before changing.
    edges = []
    for src in range(n_nodes):
        for dst in range(n_nodes):
            if att[src, dst] > threshold:
                edges.append(
                    {
                        "from": f"l_{src}",
                        "to": f"r_{dst}",
                        "color": {"opacity": att[src, dst].item()},
                        "smooth": False,
                        "physics": False,
                    }
                )

    return dict(nodes=nodes, edges=edges)
class ElectraPredictor(NNPredictor):
    """Predictor backed by a chebai ``Electra`` checkpoint.

    Wires an ``Electra`` model together with the ``ChemDataReader``
    tokenizer and exposes an attention-visualisation helper in addition to
    whatever prediction interface ``NNPredictor`` provides.
    """

    def __init__(self, model_name: str, ckpt_path: str, **kwargs):
        """Initialise the predictor with the chebai chemistry reader.

        :param model_name: human-readable name for this model instance.
        :param ckpt_path: path to the Electra Lightning checkpoint.
        :param kwargs: forwarded to :class:`NNPredictor`.
        """
        # Imported lazily so merely importing this module does not pull in
        # the full chebai preprocessing stack.
        from chebai.preprocessing.reader import ChemDataReader

        super().__init__(model_name, ckpt_path, reader_cls=ChemDataReader, **kwargs)
        print(f"Initialised Electra model {self.model_name} (device: {self.device})")

    def init_model(self, ckpt_path: str, **kwargs) -> "Electra":
        """Load the Electra model from *ckpt_path* and put it in eval mode.

        ``strict=False`` tolerates checkpoint/architecture key mismatches;
        criterion and pretrained checkpoint are dropped since inference
        needs neither.
        """
        from chebai.models.electra import Electra

        loaded = Electra.load_from_checkpoint(
            ckpt_path,
            map_location=self.device,
            criterion=None,
            strict=False,
            metrics={"train": {}, "test": {}, "validation": {}},
            pretrained_checkpoint=None,
        )
        loaded.eval()
        return loaded

    def explain_smiles(self, smiles) -> dict:
        """Compute per-layer, per-head attention graphs for a SMILES string.

        :param smiles: SMILES string to tokenize and run through the model.
        :return: ``{"graphs": [...]}`` — one list per attention tensor in
            ``result["attentions"]``, each containing one graph per head
            (assumes head axis is dim 1 of the tensor — TODO confirm).
        """
        from chebai.preprocessing.reader import EMBEDDING_OFFSET

        reader = self.reader_cls()
        token_dict = reader.to_data(dict(features=smiles, labels=None))
        tokens = np.array(token_dict["features"]).astype(int).tolist()
        result = self.calculate_results([token_dict])

        # Token-id -> label table: slot 0 is the special [CLR] marker, the
        # reserved embedding offset range is unlabeled, then the reader's
        # cached vocabulary follows.
        token_labels = (
            ["[CLR]"]
            + [None] * (EMBEDDING_OFFSET - 1)
            + list(reader.cache.keys())
        )

        graphs = []
        for attention in result["attentions"]:
            per_head = [
                build_graph_from_attention(
                    attention[0, head], tokens, token_labels, threshold=0.1
                )
                for head in range(attention.shape[1])
            ]
            graphs.append(per_head)
        return {"graphs": graphs}