# main_aggregated.R
library(here)
options(scipen=999)
setwd(here::here())
set.seed(18090212)
# Number of possible actions. This generalizes the n-level linear prisoner's dilemma.
# Note that complexity increases drastically as num_actions increases.
# Because the simulation is meant to be easily generalizable, most functions depend on this value,
# so it must be assigned globally (with the super-assignment operator `<<-`) before `functions.R` is loaded.
num_actions <<- 3
source('./functions.R')
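# The helpers sourced from functions.R are assumed (from how they are used below) to behave
# roughly as follows; see functions.R for the actual definitions:
#   get_type_names()                            - character vector of strategy type labels
#   get_actions()                               - vector of the possible actions, coded 0, 1, 2, ...
#   initiate_output_files_agg(...)              - creates the aggregated output CSV files
#   generate_agents(...)                        - agent table for a generation, with "type" and "payoff" columns
#   create_matching(num_agents)                 - matrix with one row per matched pair of agent indices
#   draw_num_interactions(delta)                - number of repeated interactions for a matched pair
#   react(type, opponent_action, mistake_rate)  - action played against the opponent's last action (-1 on the first move)
#   get_payoffs(action, efficiency_rate)        - named vector with "mover" and "receiver" payoffs
#   write_to_file(...)                          - appends the aggregated per-generation proportions to a CSV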
types <- get_type_names()
actions <- get_actions()
num_types <- length(types)
num_actions <- length(actions)
# Values of delta to simulate (delta governs how many interactions a matched pair plays; see draw_num_interactions())
delta_range <- c(0.8)
output_types <- "./output/db_types_agg.csv"
output_actions <- "./output/db_actions_agg.csv"
initiate_output_files_agg(types_filename = output_types, actions_filename = output_actions)
# The number of agents must be even because agents are matched in pairs
num_agents <- 200
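# Guard: create_matching() presumably pairs agents two by two, so an odd count would break the matching
stopifnot(num_agents %% 2 == 0)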
mistake_rate <- 0.005
mutation_rate <- 0.01
num_generations <- 15000
num_simulations <- 10
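# efficiency_rate is passed to get_payoffs() below; presumably it scales the benefit an action
# generates for the receiver relative to its cost to the mover in the linear prisoner's dilemma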
efficiency_rate <- 3
#delta_range <- c(0.25, 0.33, 0.50, 0.66, 0.75, 0.85, 0.90, 0.95)
simulation_weight <- 1 / num_simulations
# Progress-reporting parameters
message_percent <- 25
message_steps <- num_generations * message_percent / 100
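# Neither value is referenced again in this script; presumably they are meant for periodic progress
# output inside the generation loop, e.g. if (generation %% message_steps == 0) message("generation ", generation)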
# Run the simulations for each value of delta
for (delta in delta_range) {
  message("delta: ", delta, " in range ", paste(delta_range, collapse = ", "))
  # Per-delta accumulators: one entry per generation
  aggregated_actions <- replicate(num_generations, rep(0, num_actions), simplify = FALSE)
  aggregated_types <- replicate(num_generations, rep(0, num_types), simplify = FALSE)
  for (simulation in 1:num_simulations) {
    message("simulation: ", simulation, " out of ", num_simulations, " Time ", Sys.time())
    agents <- NULL  # NULL tells generate_agents() that this is the first generation
    for (generation in 1:num_generations) {
      agents <- generate_agents(num_agents = num_agents, all_types = types, agent_table = agents, mutation_prob = mutation_rate)
      matchings <- create_matching(num_agents)
      num_matchings <- dim(matchings)[1]
      actions_prop_generation <- rep(0, num_actions)
      for (current_matching_line in 1:num_matchings) {
        current_matching <- matchings[current_matching_line, ]
        num_interactions <- draw_num_interactions(delta)
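        # draw_num_interactions() is assumed to draw the length of the repeated game for this pair,
        # presumably from a geometric distribution with continuation probability delta
        # (expected length 1 / (1 - delta)); see functions.R for the actual rule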
        # Reset action frequencies for this pair
        action_frequencies <- rep(0, num_actions)
        # There is no previous action before the first move
        previous_action <- -1
        # Random selection of the first mover; the roles then alternate each interaction
        mover <- sample(current_matching, 1)
        receiver <- current_matching[current_matching != mover]
        for (intr in 1:num_interactions) {
          # Action of the current mover, given the opponent's last action
          action <- react(agents[mover, "type"], opponent_action = previous_action, mistake_rate = mistake_rate)
          action_frequencies[action + 1] <- action_frequencies[action + 1] + 1  # actions are coded 0, 1, 2, ..., hence the +1 index
          # Recording the moves
          # TODO: mcmovefreq[move + 1, gen] <- mcmovefreq[move + 1, gen] + 1
          # Assign payoffs for this interaction
          current_payoffs <- get_payoffs(action, efficiency_rate)
          # Mover's payoff
          agents[mover, "payoff"] <- agents[mover, "payoff"] + current_payoffs["mover"]
          # Receiver's payoff
          agents[receiver, "payoff"] <- agents[receiver, "payoff"] + current_payoffs["receiver"]
          # Verbose per-interaction summary (uncomment for debugging)
          # print(intr)
          # print(paste0("mover: ", as.character(mover), " (", as.character(agents[mover, "type"]), ") | receiver: ", as.character(receiver), " (", as.character(agents[receiver, "type"]), ")"))
          # print(paste0("mover plays ", as.character(action)))
          # print(agents[current_matching, ])
          # Switch roles: the receiver responds to this action in the next interaction
          mover <- receiver
          receiver <- current_matching[current_matching != mover]
          previous_action <- action
        }
        # Each pair contributes its action distribution, averaged over all matchings in this generation
        actions_prop_generation <- actions_prop_generation + action_frequencies / (num_interactions * num_matchings)
      }
      # Proportion of each type in the current generation
      types_prop_generation <- table(factor(agents[, "type"], levels = types)) / num_agents
      # Add this simulation's weighted contribution; after the last simulation each element
      # holds the per-generation mean across simulations
      aggregated_actions[[generation]] <- aggregated_actions[[generation]] + actions_prop_generation * simulation_weight
      aggregated_types[[generation]] <- aggregated_types[[generation]] + types_prop_generation * simulation_weight
    }
  }
  # Write the aggregated per-generation proportions for this delta
  write_to_file(file_name = output_actions,
                delta = delta,
                efficiency_rate = efficiency_rate,
                mistake_rate = mistake_rate,
                mutation_rate = mutation_rate,
                num_agents = num_agents,
                category_vector = actions,
                proportion_list = aggregated_actions)
  write_to_file(file_name = output_types,
                delta = delta,
                efficiency_rate = efficiency_rate,
                mistake_rate = mistake_rate,
                mutation_rate = mutation_rate,
                num_agents = num_agents,
                category_vector = types,
                proportion_list = aggregated_types)
}
message('done!')
# TODO: Create two empty lists with number of generations: one for actions, one for types
# TODO: Weight actions_prop_generation and types_prop_generation according to the number of simulations
# TODO: Add to the current list