"""
This file contains the definition of the environment
in which the agents are run.
"""
import numpy as np
import torch
import pulp


class Environment:
    def __init__(self, graphs, name):
        self.graphs = graphs
        self.name = name

    def reset(self, g):
        """Start a new episode on graph number ``g`` of the collection."""
        self.games = g
        self.graph_init = self.graphs[self.games]
        self.nodes = self.graph_init.nodes()
        self.nbr_of_nodes = 0
        self.edge_add_old = 0
        self.last_reward = 0
        # One entry per node: 1 if the node has been selected, 0 otherwise.
        self.observation = torch.zeros(1, self.nodes, 1, dtype=torch.float)
    def observe(self):
        """Return the current observation that the agent can make
        of the environment, if applicable.
        """
        return self.observation

    def act(self, node):
        """Select ``node`` and return the reward for doing so."""
        self.observation[:, node, :] = 1
        reward = self.get_reward(self.observation, node)
        return reward
    def get_reward(self, observation, node):
        if self.name == "MVC":
            # Reward of -1 for each node newly added to the cover.
            new_nbr_nodes = np.sum(observation[0].numpy())
            if new_nbr_nodes - self.nbr_of_nodes > 0:
                reward = -1
            else:
                reward = 0
            self.nbr_of_nodes = new_nbr_nodes

            # Minimum vertex cover: the episode ends once every edge has at
            # least one selected endpoint. No early break, because edge_add
            # must count every covered edge.
            done = True
            edge_add = 0
            for edge in self.graph_init.edges():
                if observation[:, edge[0], :] == 0 and observation[:, edge[1], :] == 0:
                    done = False
                else:
                    edge_add += 1
            # Alternative shaped reward based on newly covered edges:
            # reward = ((edge_add - self.edge_add_old) / np.max(
            #     [1, self.graph_init.average_neighbor_degree([node])[node]]) - 10) / 100
            self.edge_add_old = edge_add
            return (reward, done)
        elif self.name == "MAXCUT":
            reward = 0
            done = False
            edges = self.graph_init.edges()
            select_node = set(np.where(self.observation[0, :, 0].numpy() == 1)[0])
            # The cut value is the number of edges with exactly one endpoint
            # on the selected side.
            for u, v in edges:
                if (u in select_node) != (v in select_node):
                    reward += 1
            change_reward = reward - self.last_reward
            # Stop as soon as selecting the node no longer improves the cut.
            if change_reward <= 0:
                done = True
            self.last_reward = reward
            return (change_reward, done)
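
    # Worked example (illustrative): on the triangle graph with edges
    # (0,1), (0,2), (1,2) under MAXCUT, selecting node 0 cuts (0,1) and
    # (0,2), so the first act() returns (2, False). Selecting node 1 next
    # leaves the cut value at 2, so the step reward is 0 and done is True.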
    def get_approx(self):
        """Baseline solution for comparison with the learned policy."""
        if self.name == "MVC":
            # Greedy 2-approximation: repeatedly pick a random uncovered
            # edge, add both endpoints to the cover, then drop every edge
            # incident to either endpoint.
            cover_edge = []
            edges = list(self.graph_init.edges())
            while len(edges) > 0:
                edge = edges[np.random.choice(len(edges))]
                cover_edge.append(edge[0])
                cover_edge.append(edge[1])
                edges = [e for e in edges
                         if e[0] not in (edge[0], edge[1])
                         and e[1] not in (edge[0], edge[1])]
            return len(cover_edge)
        elif self.name == "MAXCUT":
            return 1
        else:
            raise ValueError("unknown environment name: " + self.name)
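
    # Note: taking both endpoints of a maximal matching yields a vertex
    # cover at most twice the optimum, hence the name 2-approximation.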
    def get_optimal_sol(self):
        if self.name == "MVC":
            # ILP: minimise the number of selected nodes, subject to every
            # edge having at least one selected endpoint.
            x = list(range(self.graph_init.g.number_of_nodes()))
            xv = pulp.LpVariable.dicts('is_opti', x,
                                       lowBound=0,
                                       upBound=1,
                                       cat=pulp.LpInteger)
            mdl = pulp.LpProblem("MVC", pulp.LpMinimize)
            mdl += pulp.lpSum(xv[k] for k in xv)
            for edge in self.graph_init.edges():
                mdl += xv[edge[0]] + xv[edge[1]] >= 1, "cover_%d_%d" % (edge[0], edge[1])
            mdl.solve()
            optimal = 0
            for k in xv:
                optimal += xv[k].value()
            return optimal
        elif self.name == "MAXCUT":
            # ILP linearisation: xv[i] gives the side of node i, and ev[e]
            # may be 1 only if the endpoints of e lie on different sides.
            x = list(range(self.graph_init.g.number_of_nodes()))
            e = list(self.graph_init.edges())
            xv = pulp.LpVariable.dicts('is_opti', x,
                                       lowBound=0,
                                       upBound=1,
                                       cat=pulp.LpInteger)
            ev = pulp.LpVariable.dicts('ev', e,
                                       lowBound=0,
                                       upBound=1,
                                       cat=pulp.LpInteger)
            mdl = pulp.LpProblem("MAXCUT", pulp.LpMaximize)
            mdl += pulp.lpSum(ev[k] for k in ev)
            for i in e:
                mdl += ev[i] <= xv[i[0]] + xv[i[1]]
                mdl += ev[i] <= 2 - (xv[i[0]] + xv[i[1]])
            mdl.solve()
            return mdl.objective.value()
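

# --- Usage sketch -----------------------------------------------------------
# A minimal, illustrative episode loop, assuming a graph wrapper exposing the
# interface this class relies on: nodes(), edges(), average_neighbor_degree(),
# and the underlying networkx graph as ``.g``. The wrapper below is
# hypothetical; the real project supplies its own graph class.
if __name__ == "__main__":
    import networkx as nx

    class GraphWrapper:
        """Hypothetical adapter matching the interface Environment expects."""

        def __init__(self, g):
            self.g = g

        def nodes(self):
            return self.g.number_of_nodes()

        def edges(self):
            return list(self.g.edges())

        def average_neighbor_degree(self, nodes):
            return nx.average_neighbor_degree(self.g, nodes=nodes)

    graphs = {0: GraphWrapper(nx.erdos_renyi_graph(10, 0.3, seed=0))}
    env = Environment(graphs, "MVC")
    env.reset(0)

    # Random episode: select uncovered nodes until every edge is covered.
    done = False
    total_reward = 0
    while not done:
        free = np.where(env.observe()[0, :, 0].numpy() == 0)[0]
        reward, done = env.act(int(np.random.choice(free)))
        total_reward += reward

    print("episode reward:", total_reward)
    print("greedy 2-approximation cover size:", env.get_approx())
    print("optimal cover size (ILP):", env.get_optimal_sol())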