# betterFIM.py
from utils import data, comunity_detection, mf_dcv, ic, fitness
from utils import xp, to_numpy, GPU_AVAILABLE
import networkx as nx
import numpy as np
import random
import math
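# Hyperparameters: K_SEEDS is the seed-set size; POP_SIZE and MAX_GEN control the
# genetic algorithm; P_CROSSOVER and P_MUTATION are the GA operator rates;
# LAMBDA_VAL weighs MF against DCV in fitness_F; PROPAGATION_PROB and
# MC_SIMULATIONS configure the Monte Carlo IC influence estimates.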
K_SEEDS = 40
POP_SIZE = 10
MAX_GEN = 150
P_CROSSOVER = 0.6
P_MUTATION = 0.1
LAMBDA_VAL = 0.5
PROPAGATION_PROB = 0.01
MC_SIMULATIONS = 1000
def evaluate(individual, G, groups, ideal_influences, cache):
    """Score one individual: estimate MF and DCV via Monte Carlo IC simulation,
    then combine them with fitness_F using LAMBDA_VAL."""
    mf, dcv = mf_dcv.calculate_MF_DCV(
        G, individual, groups, ideal_influences,
        p=PROPAGATION_PROB, mc=MC_SIMULATIONS, cache=cache
    )
    fit = fitness.fitness_F(mf, dcv, LAMBDA_VAL)
    return individual, mf, dcv, fit
def betterFIM(links_file, attr_file=None, attribute_name='color'):
try:
if links_file.endswith('.pickle') or links_file.endswith('.pkl'):
G, node_groups_map = data.load_data_from_pickle(links_file, attribute_name)
else:
G, node_groups_map = data.load_data(links_file, attr_file)
except FileNotFoundError:
print(f"Error: Không tìm thấy file data. Hãy đảm bảo {links_file} tồn tại.")
return
groups = {}
    for n, g in node_groups_map.items():
        groups.setdefault(g, []).append(n)
print(f"Nodes: {len(G)}, Edges: {G.number_of_edges()}")
print(f"Groups: {list(groups.keys())}")
ideal_influences = {}
N = len(G)
for g_id, nodes in groups.items():
k_i = math.ceil(K_SEEDS * len(nodes) / N)
subgraph = G.subgraph(nodes)
ideal = ic.greedy_max_influence(subgraph, k_i, p=PROPAGATION_PROB, mc=30)
ideal_influences[g_id] = ideal
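    # Global node importance (SN) scores via PageRank, used to rank candidate seeds.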
SN_scores = nx.pagerank(G)
print("Starting Genetic Algorithm for seed selection...")
communities, _ = comunity_detection.get_community_structure(G)
A_j_counts = {g: len(nodes) for g, nodes in groups.items()}
SC_scores = comunity_detection.calculate_SC(communities, G, None, A_j_counts)
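    # Initial population: POP_SIZE community-based individuals, plus two heuristic
    # individuals (fairness-aware roulette and SN-weighted sampling) built below.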
population = []
for _ in range(POP_SIZE):
ind = comunity_detection.community_based_selection(G, K_SEEDS, communities, SN_scores, SC_scores)
population.append(ind)
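    # Heuristic individual 1: fairness-aware roulette. Draw K_SEEDS community slots
    # with probability proportional to SC score, then fill each drawn community's
    # slots with its highest-SN nodes.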
community_counter = {cid: 0 for cid in communities.keys()}
community_scores = {cid: SC_scores.get(cid, 0) for cid in communities.keys()}
community_selected = {cid: 0 for cid in communities.keys()}
fairness_random_solution = []
    for _ in range(K_SEEDS):
total_score = sum(community_scores.values())
if total_score == 0:
selected_comm_id = random.choice(list(communities.keys()))
else:
random_value = random.random() * total_score
cumulative = 0
for cid, score in community_scores.items():
cumulative += score
if random_value <= cumulative:
selected_comm_id = cid
break
community_counter[selected_comm_id] += 1
if not community_selected[selected_comm_id]:
community_scores[selected_comm_id] = SC_scores.get(selected_comm_id, 0)
community_selected[selected_comm_id] = 1
for cid, count in community_counter.items():
if count > 0:
nodes_in_comm = list(communities[cid])
sorted_nodes = sorted(nodes_in_comm, key=lambda x: SN_scores.get(x, 0), reverse=True)
selected_nodes = sorted_nodes[:count]
fairness_random_solution.extend(selected_nodes)
    while len(fairness_random_solution) < K_SEEDS:
        all_nodes = sorted(G.nodes(), key=lambda x: SN_scores.get(x, 0), reverse=True)
        for node in all_nodes:
            if node not in fairness_random_solution:
                fairness_random_solution.append(node)
                break
        else:
            # No unused node remains (K_SEEDS > |V|): stop instead of looping forever.
            break
fairness_random_solution = fairness_random_solution[:K_SEEDS]
population.append(fairness_random_solution)
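    # Heuristic individual 2: K_SEEDS nodes sampled without replacement,
    # weighted by SN (PageRank) score.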
all_nodes = list(G.nodes())
if all_nodes:
weights_gpu = xp.array([SN_scores.get(n, 0) + 1e-8 for n in all_nodes], dtype=float)
if float(xp.sum(weights_gpu)) == 0:
random_weighted_solution = random.sample(all_nodes, min(K_SEEDS, len(all_nodes)))
else:
probs_gpu = weights_gpu / xp.sum(weights_gpu)
probs = to_numpy(probs_gpu)
k_pick = min(K_SEEDS, len(all_nodes))
random_weighted_solution = list(np.random.choice(all_nodes, size=k_pick, replace=False, p=probs))
population.append(random_weighted_solution)
    best_S = None
    best_Fit = -math.inf  # any real fitness beats the initial value
    best_metrics = (0, 0)
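    # GA main loop: evaluate the population, keep the global best, truncate-select
    # the top POP_SIZE, then rebuild via elitism, crossover, and fairness-aware mutation.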
for gen in range(MAX_GEN):
influence_cache = {}
# Evaluate sequentially
results = []
for ind in population:
            result = evaluate(ind, G, groups, ideal_influences, influence_cache)
results.append(result)
fitnesses = []
for ind, mf, dcv, fit in results:
fitnesses.append(fit)
if fit > best_Fit:
best_Fit = fit
best_S = ind
best_metrics = (mf, dcv)
# Selection (GPU-accelerated argsort)
fitnesses_gpu = xp.array(fitnesses)
sorted_idx_gpu = xp.argsort(fitnesses_gpu)[::-1]
sorted_idx = to_numpy(sorted_idx_gpu)
population = [population[i] for i in sorted_idx[:POP_SIZE]]
# print(f"Gen {gen+1}: Best Fit={best_Fit:.4f} | MF={best_metrics[0]:.4f}, DCV={best_metrics[1]:.4f}")
# Crossover & Mutation
new_pop = []
new_pop.extend(population[:2]) # Elitism
while len(new_pop) < POP_SIZE:
idx1 = np.random.randint(0, len(population))
idx2 = np.random.randint(0, len(population))
p1 = population[idx1]
p2 = population[idx2]
# Crossover
if np.random.random() < P_CROSSOVER:
combined = list(set(p1) | set(p2))
                # Sort by SN score so the child keeps the top-ranked nodes
combined.sort(key=lambda x: SN_scores.get(x, 0), reverse=True)
child = combined[:K_SEEDS]
else:
child = p1[:]
# Mutation with fairness-aware node selection
if np.random.random() < P_MUTATION and len(child) > 0:
idx_remove = np.random.randint(0, len(child))
removed_node = child.pop(idx_remove)
# Calculate current group coverage in child
group_coverage = {g_id: 0 for g_id in groups.keys()}
for node in child:
for g_id, g_nodes in groups.items():
if node in g_nodes:
group_coverage[g_id] += 1
break
# Prioritize communities containing under-covered groups
comm_weights = {}
for cid, comm_nodes in communities.items():
weight = 0
for g_id, g_nodes in groups.items():
overlap = len(set(comm_nodes) & set(g_nodes))
if overlap > 0:
# Lower coverage → higher weight
ideal_count = max(1, int(K_SEEDS * len(g_nodes) / len(G)))
deficit = max(0, ideal_count - group_coverage[g_id])
weight += deficit * overlap
comm_weights[cid] = weight + 1 # +1 to avoid zero weight
# Select community by fairness-weighted probability (GPU)
comm_keys = list(communities.keys())
weights_gpu = xp.array([comm_weights[cid] for cid in comm_keys], dtype=float)
weights_gpu = weights_gpu / xp.sum(weights_gpu)
weights_arr = to_numpy(weights_gpu)
comm_id = np.random.choice(comm_keys, p=weights_arr)
candidates = list(communities[comm_id])
if candidates:
# Prefer high-SN nodes from selected community (GPU)
candidates_not_in = [c for c in candidates if c not in child]
if candidates_not_in:
sn_vals_gpu = xp.array([SN_scores.get(c, 0) for c in candidates_not_in], dtype=float)
if float(xp.sum(sn_vals_gpu)) > 0:
probs_gpu = sn_vals_gpu / xp.sum(sn_vals_gpu)
probs = to_numpy(probs_gpu)
cand = np.random.choice(candidates_not_in, p=probs)
else:
cand = np.random.choice(candidates_not_in)
child.append(cand)
                elif len(p1) > idx_remove and p1[idx_remove] not in child:
                    # Fall back to the parent's node at this position, but only
                    # if it is not already in the child (avoid duplicate seeds).
                    child.append(p1[idx_remove])
                else:
                    child.append(removed_node)
# Fill if missing
while len(child) < K_SEEDS:
possible = list(set(G.nodes()) - set(child))
if not possible: break
child.append(np.random.choice(possible))
# Trim if excess
child = child[:K_SEEDS]
new_pop.append(child)
population = new_pop
return best_Fit, best_metrics, best_S
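
# Example invocation: a minimal sketch, not part of the original pipeline.
# "network.pickle" is a placeholder path; substitute your own pickle file
# (or an edge-list plus attribute file for the plain-text loader).
if __name__ == "__main__":
    result = betterFIM("network.pickle")
    if result is not None:  # betterFIM returns None when the data file is missing
        best_fit, (mf, dcv), seeds = result
        print(f"Best fitness: {best_fit:.4f} | MF={mf:.4f} | DCV={dcv:.4f}")
        print(f"Selected {len(seeds)} seed nodes: {seeds}")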