neural_net.py
import numpy as np

from backprop import Graph, Node


class NeuralNet:
    # A feed-forward net: a list of Layers plus a loss function, its
    # derivative, and an interpretation of the raw output (e.g. an argmax).
    def __init__(self, layers, loss_func, loss_func_der, out_interpretation):
        self.layers = layers
        self.loss_func = loss_func
        self.loss_func_der = loss_func_der
        self.out_interpretation = out_interpretation

    def to_graph(self, with_loss):
        # Compile the network into a computation graph. After the input node,
        # every layer appends five nodes in a fixed order:
        # weights, matmul, biases, plus, activation.
        nodes = [Node(None, None, (self.input_dimension(), 1))]  # input node
        edges = []
        for layer in self.layers:
            prev_out_node = nodes[-1]

            def weights_func(w):  # bind this layer's weights via a closure
                return lambda _: w

            weight_node = Node(weights_func(layer.weights), None,
                               (layer.output_dimension(), layer.input_dimension()))
            nodes.append(weight_node)

            def matmul(prev_activations):  # matrix multiplication: W @ x
                return np.matmul(prev_activations[0], prev_activations[1])

            def matmul_grad(forward_grad, prev_activations):
                # Gradients of W @ x: d/dW = grad @ x^T, d/dx = W^T @ grad
                return (
                    np.matmul(forward_grad, np.transpose(prev_activations[1])),
                    np.matmul(np.transpose(prev_activations[0]), forward_grad),
                )

            matmul_node = Node(matmul, matmul_grad, (layer.output_dimension(), 1))
            nodes.append(matmul_node)

            def bias_func(b):  # bind this layer's biases via a closure
                return lambda _: b

            bias_node = Node(bias_func(layer.biases), None, (layer.output_dimension(), 1))
            nodes.append(bias_node)

            def plus(prev_activations):  # (W @ x) + b
                return np.add(prev_activations[0], prev_activations[1])

            def plus_grad(forward_grad, _):
                # Addition passes the incoming gradient through unchanged.
                return forward_grad, forward_grad

            plus_node = Node(plus, plus_grad, (layer.output_dimension(), 1))
            nodes.append(plus_node)

            def get_activation_func(act_func):
                def activation(prev_activations):
                    return act_func(prev_activations[0])
                return activation

            def get_act_der(act_der):
                def activation_der(forward_grad, prev_activations):
                    # Chain rule for an element-wise activation: grad * f'(z)
                    return (np.multiply(forward_grad, act_der(prev_activations[0])),)
                return activation_der

            activation_node = Node(get_activation_func(layer.activation_func),
                                   get_act_der(layer.act_func_der),
                                   (layer.output_dimension(), 1))
            nodes.append(activation_node)

            edges.append((weight_node, matmul_node))
            edges.append((prev_out_node, matmul_node))
            edges.append((matmul_node, plus_node))
            edges.append((bias_node, plus_node))
            edges.append((plus_node, activation_node))
        if with_loss:
            y_head_node = nodes[-1]  # the network's prediction
            target_node = Node(None, None, (self.output_dimension(), 1))
            nodes.append(target_node)

            def loss(prev_activations):
                return self.loss_func(prev_activations[0], prev_activations[1])

            def loss_der(_, prev_activations):
                return (
                    self.loss_func_der(prev_activations[0], prev_activations[1]),
                    0,  # w.r.t. the target --> not needed
                )

            loss_node = Node(loss, loss_der, (1, 1))
            nodes.append(loss_node)
            edges.append((y_head_node, loss_node))
            edges.append((target_node, loss_node))
            # Points of interest are the trainable parameters: with five nodes
            # per layer, layer i's weights sit at index i * 5 + 1 and its
            # biases at i * 5 + 3; interleaving gives [W0, b0, W1, b1, ...].
            pois_w = [i * 5 + 1 for i in range(len(self.layers))]
            pois_b = [i * 5 + 3 for i in range(len(self.layers))]
            pois = [None] * (2 * len(self.layers))
            pois[::2] = pois_w
            pois[1::2] = pois_b
            ins = [0, len(nodes) - 2]  # graph inputs: input node and target node
            return Graph(nodes, edges, ins, pois)
        else:
            return Graph(nodes, edges, [0], [])

    def input_dimension(self):
        return self.layers[0].input_dimension()

    def output_dimension(self):
        return self.layers[-1].output_dimension()

    def evaluate(self, x):
        # Forward pass only: compile the graph without the loss part, feed x
        # into the input node and interpret the final node's activation.
        graph = self.to_graph(False)
        return self.out_interpretation(graph.evaluate([x])[graph.nodes[-1]])


class Layer:
    # One fully connected layer: a weight matrix of shape (out, in), a bias
    # column vector of shape (out, 1), and an element-wise activation function
    # together with its derivative.
    def __init__(self, weights, biases, activation_func, act_func_der):
        self.weights = weights
        self.biases = biases
        self.activation_func = activation_func
        self.act_func_der = act_func_der

    def input_dimension(self):
        if len(self.weights) == 0:
            return None
        return len(self.weights[0])

    def output_dimension(self):
        return len(self.biases)
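

# Usage sketch (illustrative addition, not part of the original module). It
# assumes the backprop semantics relied on above: Graph.evaluate takes one
# value per graph input and returns a dict mapping each node to its
# activation. The sigmoid helpers and the 2-3-1 architecture below are
# hypothetical examples chosen for the demo.
if __name__ == "__main__":
    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoid_der(z):
        s = sigmoid(z)
        return s * (1.0 - s)

    rng = np.random.default_rng(seed=0)
    net = NeuralNet(
        layers=[
            Layer(rng.standard_normal((3, 2)), np.zeros((3, 1)), sigmoid, sigmoid_der),
            Layer(rng.standard_normal((1, 3)), np.zeros((1, 1)), sigmoid, sigmoid_der),
        ],
        loss_func=lambda y_hat, y: 0.5 * float(np.sum((y_hat - y) ** 2)),  # squared error
        loss_func_der=lambda y_hat, y: y_hat - y,  # derivative w.r.t. y_hat
        out_interpretation=lambda y_hat: y_hat,  # identity read-out
    )
    x = np.array([[0.0], [1.0]])  # a single 2-dimensional input column vector
    print(net.evaluate(x))  # forward pass through the compiled graph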