import numpy as np
from Hmatrixbaby import ParityCheckMatrix
from networkx.algorithms import bipartite
import networkx as nx
import matplotlib.pyplot as plt
import time
import row_echleon as r
from bec import generate_erasures
from tqdm import tqdm
from cProfile import Profile
from density_evolution import threshold_binary_search
from sklearn.preprocessing import normalize
from pstats import Stats
import re
import sys


def permuter(arr, ffield, vn_value):
    """ Returns the set of values for the current VN that keep the check satisfied,
    given the candidate sets of the other VNs on the check (i.e. the negatives of all
    reachable sums). If those sums cover the whole field, vn_value is returned unchanged. """
    possibilities = set(arr[0])
    new_possibilities = set()
    for i in range(1, len(arr)):
        for k in possibilities:
            for j in arr[i]:
                new_possibilities.add((j + k) % ffield)
        if len(new_possibilities) == ffield:
            return vn_value
        possibilities = new_possibilities
        new_possibilities = set()
    return {(-p) % ffield for p in possibilities}
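
# Added worked example (illustrative note, not from the original file): for a check
# x1 + x2 + x3 = 0 over GF(5), if the other two VNs can only take the values {1} and
# {2}, then permuter([{1}, {2}], 5, current_set) returns {2}, since 1 + 2 + 2 = 0 (mod 5).
# If the partial sums of the other VNs already cover all of GF(5), the current candidate
# set is returned as-is.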


def conv_circ(u, v):
    """ Perform circular convolution between u and v over GF using FFT """
    return np.real(np.fft.ifft(np.fft.fft(u) * np.fft.fft(v)))


def perform_convolutions(arr_pd):
    """ Combines all the probability distributions within the array using the convolution operator

    Args:
        arr_pd (arr): Array of discrete probability distributions
    Returns:
        conv_pd (arr): Combined probability distribution after taking the convolution over all of the PDFs
    """
    pdf = conv_circ(arr_pd[0], arr_pd[1])
    for i in arr_pd[2:]:
        pdf = conv_circ(pdf, i)
    return pdf
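
# Added worked example (illustrative note, not from the original file):
#   conv_circ([0.5, 0.25, 0.25], [0.2, 0.3, 0.5]) ~= [0.3, 0.325, 0.375]
# e.g. the entry for symbol 0 is 0.5*0.2 + 0.25*0.5 + 0.25*0.3 = 0.3.
# perform_convolutions simply folds conv_circ over a list of such distributions.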


class Node:

    def __init__(self, no_connections, identifier):
        self.value = 0
        self.links = np.zeros(no_connections, dtype=int)
        self.identifier = identifier

    def add_link(self, node):
        """ Adds a link to the node in the first empty slot (the full-node check is currently disabled) """
        # Check if node is full
        #if np.all(self.links):
        #    raise ValueError("Node is full")
        # Add to empty link
        for (i, j) in enumerate(self.links):
            if not j:
                self.links[i] = node.identifier
                break
        return self.links

    def get_links(self):
        return self.links

    def replace_link(self, node, index):
        """ Replaces a link with another link """
        self.links[index] = node
        return self.links


class CheckNode(Node):
    def __init__(self, dc, identifier):
        super().__init__(dc, identifier)


class VariableNode(Node):
    def __init__(self, dv, identifier):
        super().__init__(dv, identifier)


class Link(Node):
    def __init__(self, cn, vn, value):
        self.cn = cn
        self.vn = vn
        self.value = value


class VariableTannerGraph:
    """ Initializes empty; establish_connections creates H and forms the links """

    def __init__(self, dv, dc, k, n, ffdim=2):
        # Check if connections are non-uniform
        if type(dv) == list:
            assert len(dv) == n and len(dc) == n - k
            self.vns = [VariableNode(dv[i], i) for i in range(n)]
            self.cns = [CheckNode(dc[i], i) for i in range(n - k)]
            self.dv = dv
            self.dc = dc
        else:
            self.vns = [VariableNode(dv, i) for i in range(n)]
            self.cns = [CheckNode(dc, i) for i in range(n - k)]
            self.dv = [dv for i in range(n)]
            self.dc = [dc for i in range(n - k)]
        # In the uniform case dv and dc arrive as integers and are expanded to lists
        # above, so the rest of the class only has to handle the list case.
        self.k = k
        self.n = n
        self.ffdim = ffdim
        self.links = {}

    def add_link(self, cn_index, vn_index, link_value):
        """ Adds a link to the links data structure """
        self.links[(cn_index, vn_index)] = link_value

    def update_link_weight(self, cn_index, vn_index, link_value):
        """ Updates a link weight """
        self.add_link(cn_index, vn_index, link_value)

    def get_link_weight(self, cn_index, vn_index):
        """ Gets a link weight """
        return self.links[(cn_index, vn_index)]

    def update_within_link_weight(self, cn_index, vn_index, val_index, new_value):
        self.links[(cn_index, vn_index)][val_index] = new_value

    def get_vn_value(self, vn_index):
        return self.vns[vn_index].value

    def get_cn_value(self, cn_index):
        return self.cns[cn_index].value

    def establish_connections(self, Harr=None):
        """ Establishes connections between variable nodes and check nodes """
        # In case Harr is sent as a parameter
        if Harr is None:
            # If we are creating it ourselves, assume it is not spatially coupled
            self.Harr = r.get_H_arr(self.dv[0], self.dc[0], self.k, self.n)
        else:
            self.Harr = np.array(Harr)

        # Our Harr implementation is different: each entry is a check-node socket,
        # so dividing by dc gives the check node it belongs to
        Harr = self.Harr

        # Divide Harr into dv parts, one per variable node
        # (dv is a list in the changing case, but all the dvs are the same here)
        dv = self.dv[0]

        # Checking for spatially coupled: only map sockets to check nodes when dc is uniform
        if len(np.unique(self.dc)) == 1:
            Harr = Harr // self.dc[0]
        Harr = [Harr[i:i + dv] for i in range(0, len(Harr), dv)]

        # Establish connections
        for (i, j) in enumerate(Harr):
            for k in j:
                self.vns[i].add_link(self.cns[k])
                self.cns[k].add_link(self.vns[i])

    def get_connections(self):
        """ Returns the connections in the Tanner Graph """
        return [(i.identifier, j) for i in self.cns for j in i.links]

    def get_cn_link_values(self, cn):
        """ Returns the values of the link weights for the cn as an array """
        vals = []
        for i in cn.links:
            vals.append(self.get_link_weight(cn.identifier, i))
        return vals

    def visualise(self):
        """ Visualise the Tanner Graph """
        G = nx.Graph()
        rows = len(self.cns)
        cols = len(self.vns)

        # For each row add a check node
        for i in range(rows):
            G.add_node(i, bipartite=0)

        # For each column add a variable node
        for i in range(cols):
            G.add_node(i + rows, bipartite=1)

        # Utilise the links to add edges
        for (i, j) in enumerate(self.cns):
            for k in j.links:
                G.add_edge(i, k + rows, weight=1)

        nx.draw(G, with_labels=True)
        plt.show()
        return G

    def assign_values(self, arr):
        """ Assigns values to the VNs from the input prior to decoding """
        assert len(arr) == len(self.vns)
        for i in range(len(arr)):
            self.vns[i].value = arr[i]

    def get_max_prob_codeword(self, P, GF):
        """ Calculates the most probable codeword by combining, for each VN, the initial
        symbol likelihoods with the likelihoods carried by its incoming links.

        Returns:
            codeword (arr): most probable n-length codeword of GF symbols
        """
        z = np.zeros(self.n)
        for j in self.vns:
            vn_index = j.identifier
            # Copy of the initial likelihoods for this VN
            probs = 1 * P[vn_index]
            for a in range(GF.order):
                for i in j.links:
                    probs[a] *= self.get_link_weight(i, vn_index)[a]
            z[vn_index] = np.argmax(probs)
        z = GF(z.astype(int))
        return z

    def normalize(self, arr):
        """ Normalizes an array """
        sum_arr = sum(arr)
        return [i / sum_arr for i in arr]

    def validate_codeword(self, H, GF, max_prob_codeword):
        """ Checks if the most probable codeword is valid, as a termination condition of QSPA decoding """
        return not np.matmul(H, max_prob_codeword).any()

    def remove_from_array(self, vals, current_value):
        """ Removes current_value from vals """
        new_vals = []
        for i in range(len(vals)):
            if np.array_equal(vals[i], current_value):
                continue
            new_vals.append(vals[i])
        return new_vals

    def initialize_vn_links(self, P):
        """ Sets all the links from a VN to the VN's initial likelihood array """
        for i in self.vns:
            vn_index = i.identifier
            for j in i.links:
                self.update_link_weight(j, vn_index, 1 * P[vn_index])

    def update_cn_links(self, cn, new_vals):
        """ Updates the CN links after a CN update iteration """
        cn_index = cn.identifier
        for i, j in enumerate(cn.links):
            self.update_link_weight(cn_index, j, new_vals[i])

    def cn_update_qspa(self):
        """ CN update for the QSPA decoder. For each CN, convolves the messages on all the
        other links for each connected VN, re-indexes the result by the additive inverse,
        and writes the new values back once all links of the CN have been computed.
        Repeats for all the CNs. """
        for i in self.cns:
            cn_index = i.identifier
            vns = i.links
            new_pdfs = []
            for j in vns:
                conv_indices = [idx for idx in vns if idx != j]
                pdf = conv_circ(self.get_link_weight(cn_index, conv_indices[0]), self.get_link_weight(cn_index, conv_indices[1]))
                for indice in conv_indices[2:]:
                    pdf = conv_circ(pdf, self.get_link_weight(cn_index, indice))
                # Re-index by the additive inverse so entry a corresponds to the other VNs summing to -a
                new_pdfs.append(pdf[self.idx_shuffle])
                #self.update_link_weight(i, j, pdf[self.idx_shuffle])
            self.update_cn_links(i, new_pdfs)

    def vn_update_qspa(self):
        """ VN update for the QSPA decoder. For each link and symbol, the outgoing message is
        the initial likelihood times the product of the messages on the VN's other links,
        followed by normalization. """
        copy_links = self.links.copy()
        for a in range(self.GF.order):
            for j in self.vns:
                vn_index = j.identifier
                for i in j.links:
                    copy_links[(i, vn_index)][a] = self.P[vn_index][a]
                    for t in j.links[j.links != i]:
                        copy_links[(i, vn_index)][a] *= self.get_link_weight(t, vn_index)[a]
                    sum_copy_links = np.einsum('i->', copy_links[i, vn_index])  # seems to be about twice as fast
                    #sum_copy_links = np.sum(copy_links[i, vn_index])
                    #sum_copy_links = sum(copy_links[i, vn_index])
                    copy_links[i, vn_index] = copy_links[i, vn_index] / sum_copy_links
        self.links = copy_links
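
    # Added descriptive note: assuming the non-zero entries of H are all ones (which is what
    # the plain circular convolution above corresponds to), cn_update_qspa computes, for each
    # edge (c, v) and symbol a,
    #     R_cv[a] = (circular convolution of the messages on c's other edges)[(-a) mod q]
    # and vn_update_qspa computes
    #     Q_vc[a] proportional to P_v[a] * product over c' != c of R_c'v[a],
    # with each message normalized to sum to one.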

    def qspa_decoding(self, symbols_likelihood_arr, H, GF, max_iterations=50):
        """ Runs QSPA decoding until the most probable codeword satisfies H, the estimate
        stops changing, or max_iterations is exceeded """
        self.GF = GF

        # Additive inverse of the GF field
        self.idx_shuffle = np.array([
            (GF.order - a) % GF.order for a in range(GF.order)
        ])

        # Initial likelihoods
        self.P = symbols_likelihood_arr

        prev_max_prob_codeword = self.get_max_prob_codeword(self.P, GF)

        # Initialize the VN-to-CN links (as opposed to the CN-to-VN links) to the initial likelihoods
        self.initialize_vn_links(self.P)

        iterations = 0
        #for i in range(max_iterations):
        while True:
            self.cn_update_qspa()
            max_prob_codeword = self.get_max_prob_codeword(self.P, GF)
            parity = not np.matmul(H, max_prob_codeword).any()
            if parity:
                print("Decoding converges")
                return max_prob_codeword
            self.vn_update_qspa()
            if np.array_equal(max_prob_codeword, prev_max_prob_codeword) or iterations > max_iterations:
                break
            prev_max_prob_codeword = max_prob_codeword
            iterations += 1
            print(f"Iteration {iterations}")

        print("Decoding does not converge")
        return max_prob_codeword

    def coupon_collector_decoding(self, max_iterations=10000):
        """ Decodes for the case of symbol possibilities for each variable node
        utilising Belief Propagation - may be worth doing for BEC as well
        """
        unresolved_vns = sum([1 for i in self.vns if len(i.value) > 1])
        resolved_vns = 0
        total_possibilities = sum([len(i.value) for i in self.vns])

        while True:
            # Iterating through all the check nodes
            for i in self.cns:
                vn_vals = [self.vns[j].value for j in i.links]
                for j in i.links:
                    vals = vn_vals.copy()
                    current_value = self.vns[j].value
                    #vals = self.remove_from_array(vals, current_value)
                    vals.remove(current_value)
                    possibilities = permuter(vals, self.ffdim, current_value)
                    new_values = set(current_value).intersection(set(possibilities))
                    self.vns[j].value = list(new_values)
                    """
                    if len(new_values) < len(current_value) and len(possibilities) > 1:
                        print("I reached here")
                    """
                    if len(current_value) > 1 and len(new_values) == 1:
                        resolved_vns += 1

            decoded_values = [i.value for i in self.vns]

            # All initially ambiguous VNs resolved and every VN has exactly one symbol left
            if unresolved_vns == resolved_vns and sum([len(i) for i in decoded_values]) == len(decoded_values):
                return np.array([i.value for i in self.vns])

            # No progress was made in this pass - return the current candidate sets
            if sum([len(i.value) for i in self.vns]) == total_possibilities:
                return [i.value for i in self.vns]

            total_possibilities = sum([len(i.value) for i in self.vns])
            prev_resolved_vns = resolved_vns

        return [i.value for i in self.vns]
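

if __name__ == "__main__":
    # Minimal usage sketch (added example, not part of the original file). It assumes a toy
    # regular code with dv=2, dc=3, k=1, n=3 over GF(5) and a hand-built Harr in the socket
    # format used by establish_connections (socket s belongs to check node s // dc), so every
    # VN is connected to both check nodes, each enforcing x0 + x1 + x2 = 0 (mod 5).
    graph = VariableTannerGraph(dv=2, dc=3, k=1, n=3, ffdim=5)
    graph.establish_connections(Harr=[0, 3, 1, 4, 2, 5])

    # The first two symbols are known; the third candidate set should collapse to {2},
    # since 1 + 2 + 2 = 0 (mod 5).
    graph.assign_values([[1], [2], [0, 2, 3]])
    print(graph.coupon_collector_decoding())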