# This is an example of using nonlinear encoding on the MNIST dataset
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision.datasets import MNIST
# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io
import torchmetrics
from tqdm import tqdm
import torchhd
from torchhd.models import Centroid
from torchhd import embeddings
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using {} device".format(device))
DIMENSIONS = 10000
IMG_SIZE = 28
BATCH_SIZE = 1  # on GPUs with enough memory, multiple images can be processed at once
transform = torchvision.transforms.ToTensor()
train_ds = MNIST("../data", train=True, transform=transform, download=True)
train_ld = torch.utils.data.DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=True)
test_ds = MNIST("../data", train=False, transform=transform, download=True)
test_ld = torch.utils.data.DataLoader(test_ds, batch_size=BATCH_SIZE, shuffle=False)
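
# Nonlinear encoder: the Sinusoid embedding applies a nonlinear random projection to
# the flattened pixels, and hard_quantize maps the result to bipolar (+1/-1) hypervectors.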
class Encoder(nn.Module):
    def __init__(self, out_features, size):
        super(Encoder, self).__init__()
        self.flatten = torch.nn.Flatten()
        self.nonlinear_projection = embeddings.Sinusoid(size * size, out_features)

    def forward(self, x):
        x = self.flatten(x)
        sample_hv = self.nonlinear_projection(x)
        return torchhd.hard_quantize(sample_hv)
encode = Encoder(DIMENSIONS, IMG_SIZE)
encode = encode.to(device)
num_classes = len(train_ds.classes)
model = Centroid(DIMENSIONS, num_classes)
model = model.to(device)
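
# Training: a single pass over the data, no gradient-based optimization.
# Each encoded image is added (bundled) into the centroid of its class.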
with torch.no_grad():
    for samples, labels in tqdm(train_ld, desc="Training"):
        samples = samples.to(device)
        labels = labels.to(device)

        samples_hv = encode(samples)
        model.add(samples_hv, labels)
accuracy = torchmetrics.Accuracy(task="multiclass", top_k=1, num_classes=num_classes)
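
# Evaluation: normalize the class centroids, then classify each test image by the
# dot-product similarity between its hypervector and every class centroid.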
with torch.no_grad():
    model.normalize()

    for samples, labels in tqdm(test_ld, desc="Testing"):
        samples = samples.to(device)

        samples_hv = encode(samples)
        outputs = model(samples_hv, dot=True)
        accuracy.update(outputs.cpu(), labels)
print(f"Testing accuracy of {(accuracy.compute().item() * 100):.3f}%")