-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: Validator.py
More file actions
59 lines (55 loc) · 2.65 KB
/
Validator.py
File metadata and controls
59 lines (55 loc) · 2.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import time
import numpy as np
# Our validator class: scores a feature subset by running Leave-One-Out
# cross validation with a supplied nearest-neighbour classifier.
class Validator:
    # Feature subset as a list of column indices into the original dataset.
    feature_subset = None
    # The NN classifier; must expose train(samples) and test(sample).
    classifier = None
    # 2D numpy array: column 0 holds the class label, the remaining columns
    # hold the (min-max normalized) selected features.
    dataset = None

    def __init__(self, feature_subset, classifier, dataset):
        """Select the given feature columns and min-max normalize them.

        feature_subset: list of feature column indices (label column 0 excluded).
        classifier:     object with train(data) and test(sample) methods.
        dataset:        2D numpy array whose first column is the label.
        """
        # Inserting 0 at the beginning of our feature subset so the label
        # column is always carried along with the selected features.
        self.feature_subset = [0] + feature_subset
        # Setting our classifier as the NN-classifier.
        self.classifier = classifier
        # Select the columns; cast to float so the in-place normalization
        # below cannot silently truncate when an integer array is passed in.
        self.dataset = dataset[:, self.feature_subset].astype(float)
        # Min-max normalization, per feature column: X = (X - min) / (max - min).
        mins = np.min(self.dataset[:, 1:], axis=0)
        maxs = np.max(self.dataset[:, 1:], axis=0)
        # Guard against constant columns (max == min), which would otherwise
        # divide by zero and fill the column with NaN; a range of 1 maps a
        # constant column to all zeros instead.
        ranges = maxs - mins
        ranges[ranges == 0] = 1.0
        self.dataset[:, 1:] = (self.dataset[:, 1:] - mins) / ranges

    def evaluate(self):
        """Leave-One-Out cross validation.

        Returns the classification accuracy in [0, 1]: the fraction of
        samples whose label is predicted correctly when the classifier is
        trained on all other samples.
        """
        # The total number of samples (rows) in our dataset.
        N = self.dataset.shape[0]
        # Robustness: with no samples there is nothing to validate.
        if N == 0:
            return 0.0
        correct_classifications = 0
        # Our Leave-One-Out loop: hold out row i, train on the rest.
        for i in range(N):
            # Set aside the validation sample.
            validation_sample = self.dataset[i]
            # Train the classifier on every row except row i.
            training_samples = np.delete(self.dataset, i, axis=0)
            self.classifier.train(training_samples)
            # Test the classifier on the held-out sample; column 0 is the
            # true label.
            predicted_label = self.classifier.test(validation_sample)
            if predicted_label == validation_sample[0]:
                correct_classifications += 1
        # Accuracy = correct predictions / total samples.
        return correct_classifications / N