-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.py
95 lines (86 loc) · 3.8 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import networkx
import matplotlib.pyplot as plt
import numpy as np
import voting_mechanism as vm
import network_generator as ng
def iterated_voting(n):
    """Iterate sequential best-response updates on network *n* until convergence.

    Returns a tuple ``(total_passes, total_updates)``: the number of full
    sequential passes over the network and the total number of individual
    delegate updates performed across all passes.
    """
    pass_count = 0
    update_count = 0
    converged = False
    while not converged:
        # sequential_update returns the (possibly mutated) network, a
        # convergence flag, and how many agents changed this pass.
        n, converged, changed = vm.sequential_update(n)
        pass_count += 1
        update_count += changed
    return pass_count, update_count
def one_shot_voting(n, n_props):
    """Run one round of delegation on network *n*, then vote on *n_props* proposals.

    Returns ``(outcome, mean_accuracy, n_gurus, mean_dist_guru)``.
    Note: mean accuracy is read BEFORE the vote, guru statistics after —
    the original ordering is preserved deliberately.
    """
    net = vm.update_delegates(n)
    avg_accuracy = net.get_mean_accuracy()
    result = vm.vote(net, n_props)
    guru_count = net.get_n_gurus()
    avg_guru_distance = net.get_mean_dist_guru()
    return result, avg_accuracy, guru_count, avg_guru_distance
def plot_network(n):
    """Render the delegation network *n* as a directed graph (spring layout)."""
    g = networkx.DiGraph()
    # One edge per (agent -> neighbor) pair; ids are the node labels.
    g.add_edges_from(
        (agent.my_id, neighbor)
        for agent in n.agents
        for neighbor in agent.neighbors
    )
    layout = networkx.spring_layout(g, k=0.2, iterations=70)
    networkx.draw_networkx(g, pos=layout)
    plt.show()
def run_experiment_oneshot(network_type, n_agents, degree, n_props, effort, iterations):
    """Run *iterations* independent one-shot voting experiments and aggregate.

    Each iteration generates a fresh network of *n_agents* with the given
    *network_type*, *degree*, and *effort* flag, then runs one-shot voting
    on *n_props* proposals.

    Returns ``(average_accuracy, probability_correct, mean_n_gurus,
    mean_dist_guru)``, where probability_correct is the fraction of runs
    whose outcome equals 0 (the correct alternative).
    """
    accuracy = np.empty(iterations)
    outcomes = np.empty(iterations)
    log_n_gurus = np.empty(iterations)
    # BUG FIX: previously only the LAST iteration's mean_dist_g leaked out of
    # the loop and was averaged (a no-op on a scalar). Record every iteration.
    log_dist_guru = np.empty(iterations)
    for i in range(iterations):
        n = ng.generate_network(network_type, n_agents, degree, effort)
        outcome, mean_acc, n_gurus, mean_dist_g = one_shot_voting(n, n_props)
        outcomes[i] = outcome
        accuracy[i] = mean_acc
        log_n_gurus[i] = n_gurus
        log_dist_guru[i] = mean_dist_g
    average_accuracy = np.mean(accuracy)
    mean_n_gurus = np.mean(log_n_gurus)
    # Outcome 0 denotes the correct proposal winning.
    probability_correct = np.count_nonzero(outcomes == 0) / iterations
    mean_dist_guru = np.mean(log_dist_guru)
    return average_accuracy, probability_correct, mean_n_gurus, mean_dist_guru
def run_full_experiment_oneshot():
    """Sweep all network types and degrees for the one-shot experiment and print results.

    Runs 1000 iterations per (effort, type, degree) combination on networks
    of 250 agents with 2 proposals.
    """
    n_agents = 250
    types = ["random", "regular", "caveman", "relaxed_caveman"]
    degrees = [4, 8, 12, 16, 20, 24]
    n_props = 2
    iterations = 1000
    for effort in [True, False]:
        for t in types:
            for d in degrees:
                # BUG FIX: was `run_experiment(...)`, a name that does not
                # exist in this module (NameError); the intended callee is
                # run_experiment_oneshot.
                avg_acc, prob_corr, mean_n_gurus, mean_dist_guru = run_experiment_oneshot(t, n_agents, d, n_props, effort, iterations)
                print("Effort: ", effort, "\tType: ", t, "\tDegrees: ", d, "\tMean_acc: ", avg_acc, "\tProb_corr", prob_corr, "\tMean_n_gurus: ", mean_n_gurus, "\tMean_dist_guru: ", mean_dist_guru)
def run_full_experiment_iterated():
    """Sweep all network types and degrees for the iterated-voting experiment.

    For each (effort, type, degree) combination, runs 1000 freshly generated
    networks of 250 agents to convergence and prints the mean and standard
    deviation of passes and best-response updates.
    """
    n_agents = 250
    types = ["random", "regular", "caveman", "relaxed_caveman"]
    degrees = [4, 8, 12, 16, 20, 24]
    iterations = 1000
    for effort in [True, False]:
        for t in types:
            for d in degrees:
                # One (passes, updates) pair per freshly generated network.
                results = [
                    iterated_voting(ng.generate_network(t, n_agents, d, effort))
                    for _ in range(iterations)
                ]
                passes, updates = zip(*results)
                print("Effort: ", effort, "\tType: ", t, "\tDegrees: ", d, "Passes: ", np.mean(passes), " (", np.std(passes), ") BR updates: ", np.mean(updates), " (", np.std(updates), ")")
if __name__ == "__main__":
    # Politician experiment (effortless networks only): for each network type,
    # sweep the number of politicians 0..9 and report, over 1000 runs, the
    # average fraction of agents delegating to a politician and the average
    # fraction whose guru is a politician.
    for t in ["random", "regular", "caveman", "relaxed_caveman"]:
        for d in [8]:
            for p in range(10):
                delegated_fracs = []
                guru_fracs = []
                for _ in range(1000):
                    net = ng.generate_network(t, 250, d, False)
                    net.create_politicians(p)
                    net = vm.update_delegates(net)
                    delegated_fracs.append(net.get_fraction_delegated_to_politician())
                    guru_fracs.append(net.get_fraction_gurud_to_politician())
                print("Effortless, Type: ", t, "\tDegree: ", d, "\tn_politicians: ", p, "\tfrac delegating to polit: ", np.mean(delegated_fracs), "\tfrac guruing to polit: ", np.mean(guru_fracs))