import os
from typing import List, Optional

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import torch
from torch import Tensor
from torchmetrics.classification import (
    MultilabelF1Score,
    MultilabelPrecision,
    MultilabelRecall,
    MultilabelAUROC,
    BinaryF1Score,
    BinaryAUROC,
    BinaryAveragePrecision,
    MultilabelAveragePrecision,
)

from chebai.callbacks.epoch_metrics import BalancedAccuracy, MacroF1

# from chebai.result.utils import *


def visualise_f1(logs_path: str) -> None:
    """
    Visualize F1 scores from metrics.csv and save the plot as f1_plot.png.

    Args:
        logs_path: The path to the directory containing metrics.csv.
    """
    df = pd.read_csv(os.path.join(logs_path, "metrics.csv"))
    df_loss = df.melt(
        id_vars="epoch",
        value_vars=[
            "val_ep_macro-f1",
            "val_micro-f1",
            "train_micro-f1",
            "train_ep_macro-f1",
        ],
    )
    lineplt = sns.lineplot(df_loss, x="epoch", y="value", hue="variable")  # noqa: F841
    plt.savefig(os.path.join(logs_path, "f1_plot.png"))
    plt.show()
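
# Example usage (a sketch; the path below is a hypothetical CSVLogger output
# directory, not something defined in this module):
#
#     visualise_f1("logs/csv_logs/version_0")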


def print_metrics(
    preds: Tensor,
    labels: Tensor,
    device: torch.device,
    classes: Optional[List[str]] = None,
    top_k: int = 10,
    markdown_output: bool = False,
) -> None:
    """
    Print relevant metrics, including micro and macro F1, recall and precision,
    the best top_k classes, and the worst classes.

    Args:
        preds: Predicted labels as a tensor.
        labels: True labels as a tensor.
        device: The device to perform computations on.
        classes: Optional list of class names.
        top_k: The number of top classes to display based on F1 score.
        markdown_output: If True, print metrics in markdown format.
    """
    if device != labels.device:
        device = labels.device

    f1_micro = MultilabelF1Score(preds.shape[1], average="micro").to(device=device)
    my_f1_macro = MacroF1(preds.shape[1]).to(device=device)
    my_bal_acc = BalancedAccuracy(preds.shape[1]).to(device=device)
    print(f"Macro-F1: {my_f1_macro(preds, labels):.3f}")
    print(f"Micro-F1: {f1_micro(preds, labels):.3f}")
    print(f"Balanced Accuracy: {my_bal_acc(preds, labels):.3f}")

    precision_macro = MultilabelPrecision(preds.shape[1], average="macro").to(
        device=device
    )
    precision_micro = MultilabelPrecision(preds.shape[1], average="micro").to(
        device=device
    )
    macro_adjust = 1
    recall_macro = MultilabelRecall(preds.shape[1], average="macro").to(device=device)
    recall_micro = MultilabelRecall(preds.shape[1], average="micro").to(device=device)
    print(f"Macro-Precision: {precision_macro(preds, labels) * macro_adjust:.3f}")
    print(f"Micro-Precision: {precision_micro(preds, labels):.3f}")
    print(f"Macro-Recall: {recall_macro(preds, labels) * macro_adjust:.3f}")
    print(f"Micro-Recall: {recall_micro(preds, labels):.3f}")

    if markdown_output:
        print(
            "| Model | Macro-F1 | Micro-F1 | Macro-Precision | Micro-Precision | Macro-Recall | Micro-Recall | Balanced Accuracy |"
        )
        print("| --- | --- | --- | --- | --- | --- | --- | --- |")
        print(
            f"| | {my_f1_macro(preds, labels):.3f} | {f1_micro(preds, labels):.3f} | {precision_macro(preds, labels):.3f} | "
            f"{precision_micro(preds, labels):.3f} | {recall_macro(preds, labels):.3f} | "
            f"{recall_micro(preds, labels):.3f} | {my_bal_acc(preds, labels):.3f} |"
        )

    classwise_f1_fn = MultilabelF1Score(preds.shape[1], average=None).to(device=device)
    classwise_f1 = classwise_f1_fn(preds, labels)
    best_classwise_f1 = torch.topk(classwise_f1, top_k).indices
    print(f"Top {top_k} classes (F1-score):")
    for i, best in enumerate(best_classwise_f1):
        print(
            f"{i + 1}. {classes[best] if classes is not None else best} - F1: {classwise_f1[best]:.3f}"
        )

    zeros = []
    for i, f1 in enumerate(classwise_f1):
        if f1 == 0.0 and torch.sum(labels[:, i]) != 0:
            zeros.append(f"{classes[i] if classes is not None else i}")
    print(
        f"Found {len(zeros)} classes with F1-score == 0 (and non-zero labels): {', '.join(zeros)}"
    )


def metrics_classification_multilabel(
    preds: Tensor,
    labels: Tensor,
    device: torch.device,
):
    """Compute multilabel classification metrics.

    Returns a tuple (auc_roc, macro_f1, micro_f1, bal_acc, prc_auc) as NumPy values.
    """
    if device != labels.device:
        device = labels.device
    my_bal_acc = BalancedAccuracy(preds.shape[1]).to(device=device)
    bal_acc = my_bal_acc(preds, labels).cpu().numpy()
    my_f1_macro = MacroF1(preds.shape[1]).to(device=device)
    my_f1_micro = MultilabelF1Score(preds.shape[1], average="micro").to(device=device)
    my_auc_roc = MultilabelAUROC(preds.shape[1]).to(device=device)
    my_av_prec = MultilabelAveragePrecision(preds.shape[1]).to(device=device)
    macro_f1 = my_f1_macro(preds, labels).cpu().numpy()
    micro_f1 = my_f1_micro(preds, labels).cpu().numpy()
    auc_roc = my_auc_roc(preds, labels).cpu().numpy()
    prc_auc = my_av_prec(preds, labels).cpu().numpy()
    return auc_roc, macro_f1, micro_f1, bal_acc, prc_auc
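
# Example usage (sketch with made-up shapes): the function returns NumPy scalars
# in the order (auc_roc, macro_f1, micro_f1, bal_acc, prc_auc).
#
#     preds = torch.rand(32, 5)
#     labels = torch.randint(0, 2, (32, 5))
#     auc_roc, macro_f1, micro_f1, bal_acc, prc_auc = metrics_classification_multilabel(
#         preds, labels, torch.device("cpu")
#     )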


def metrics_classification_binary(
    preds: Tensor,
    labels: Tensor,
    device: torch.device,
):
    """Compute binary classification metrics.

    Returns a tuple (auc_roc, f1_score, bal_acc, prc_auc) as NumPy values.
    """
    if device != labels.device:
        device = labels.device
    my_auc_roc = BinaryAUROC().to(device=device)
    my_f1 = BinaryF1Score().to(device=device)
    my_av_prec = BinaryAveragePrecision().to(device=device)
    my_bal_acc = BalancedAccuracy(preds.shape[1]).to(device=device)
    bal_acc = my_bal_acc(preds, labels).cpu().numpy()
    auc_roc = my_auc_roc(preds, labels).cpu().numpy()
    # my_auc_roc.update(preds.cpu()[:, 0], labels.cpu()[:, 0])
    # auc_roc = my_auc_roc.compute().numpy()
    f1_score = my_f1(preds, labels).cpu().numpy()
    prc_auc = my_av_prec(preds, labels).cpu().numpy()
    return auc_roc, f1_score, bal_acc, prc_auc
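

if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the original
    # module): random single-column predictions against random binary labels,
    # evaluated on CPU.
    demo_preds = torch.rand(64, 1)
    demo_labels = torch.randint(0, 2, (64, 1))
    print(
        "AUC-ROC, F1, balanced accuracy, PRC-AUC:",
        metrics_classification_binary(demo_preds, demo_labels, torch.device("cpu")),
    )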