multi_env.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 09:54:26 2018

@author: edward

Classes that can be used to run many environments in parallel.
"""
import multiprocessing as mp
import timeit  # used by the timing tests at the bottom of the file

import numpy as np

try:
    from gym.spaces.box import Box
    from baselines.common.atari_wrappers import make_atari, wrap_deepmind
except ImportError:
    print('Unable to import gym / OpenAI baselines; assuming the Doom environment is in use')

from arguments import parse_game_args
from environments import DoomEnvironment

def worker(in_queue, out_queue, params):
    """Run one DoomEnvironment in a subprocess, driven by commands on in_queue.

    Protocol: None shuts the worker down, 'reset' resets the environment,
    'depth' / 'depth_trim' return the depth buffer (optionally trimmed),
    and anything else is treated as an action for env.step().
    """
    env = DoomEnvironment(params)
    while True:
        action = in_queue.get()
        if action is None:  # sentinel: shut the worker down
            break
        elif action == 'reset':
            out_queue.put(env.reset())
        elif action == 'depth_trim':
            # trim a 2-pixel border from the depth buffer
            out_queue.put(env.get_depth()[2:-2, 2:-2])
        elif action == 'depth':
            out_queue.put(env.get_depth())
        else:
            obs, reward, done, info = env.step(action)
            out_queue.put((obs, reward, done, info))

class MultiEnvsMP(object):
    def __init__(self, env_id, num_envs, num_processes, params):
        self.in_queues = [mp.Queue() for _ in range(num_envs)]
        self.out_queues = [mp.Queue() for _ in range(num_envs)]
        self.workers = []

        for in_queue, out_queue in zip(self.in_queues, self.out_queues):
            print('Creating environment')
            process = mp.Process(target=worker, args=(in_queue, out_queue, params))
            self.workers.append(process)
            process.start()
        # print('There are {} workers'.format(len(self.workers)))

        assert env_id == 'doom', 'Multiprocessing is only implemented for the Doom environment'
        # a throwaway environment, used only to read the action-space metadata
        tmp_env = DoomEnvironment(params)
        self.num_actions = tmp_env.num_actions
        self.obs_shape = (3, params.screen_height, params.screen_width)
        self.prep = False  # observations are already in CxHxW order
    def reset(self):
        new_obs = []
        # send all commands first so the workers run in parallel...
        for queue in self.in_queues:
            queue.put('reset')
        # ...then collect the results in order
        for queue in self.out_queues:
            obs = queue.get()
            new_obs.append(self.prep_obs(obs))
        return np.stack(new_obs)

    def get_depths(self, trim=True):
        depths = []
        command = 'depth_trim' if trim else 'depth'
        for queue in self.in_queues:
            queue.put(command)
        for queue in self.out_queues:
            depths.append(queue.get())
        return np.stack(depths)

    def prep_obs(self, obs):
        if self.prep:
            return obs.transpose(2, 0, 1)  # HxWxC -> CxHxW
        else:
            return obs
    def step(self, actions):
        new_obs = []
        rewards = []
        dones = []
        infos = []
        for action, queue in zip(actions, self.in_queues):
            queue.put(action)
        for queue in self.out_queues:
            obs, reward, done, info = queue.get()
            new_obs.append(self.prep_obs(obs))
            rewards.append(reward)
            dones.append(done)
            infos.append(info)  # was infos.append(infos), which nested the list into itself
        return np.stack(new_obs), rewards, dones, infos
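
    # The class never sends the None sentinel that worker() checks for, so the
    # subprocesses have to be killed externally. A minimal shutdown helper is
    # sketched below; it is an assumption, not part of the original file:
    def close(self):
        for queue in self.in_queues:
            queue.put(None)  # ask each worker to break out of its loop
        for process in self.workers:
            process.join()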

class MultiEnvs(object):
    def __init__(self, env_id, num_envs, num_processes, params):
        if env_id == 'doom':
            # the Doom scenarios
            self.envs = [DoomEnvironment(params) for i in range(num_envs)]
            self.num_actions = self.envs[0].num_actions
            self.obs_shape = (3, params.screen_height, params.screen_width)
            self.prep = False  # observations are already in CxHxW order
        elif env_id == 'home':
            assert 0, 'HoME has not been implemented yet'
        else:
            # otherwise assume an Atari game such as Pong
            self.envs = [wrap_deepmind(make_atari(env_id)) for i in range(num_envs)]
            observation_space = self.envs[0].observation_space
            obs_shape = observation_space.shape
            # rebuild the observation space with the channel axis first (HxWxC -> CxHxW)
            observation_space = Box(
                observation_space.low[0, 0, 0],
                observation_space.high[0, 0, 0],
                [obs_shape[2], obs_shape[1], obs_shape[0]]
            )
            action_space = self.envs[0].action_space
            self.num_actions = action_space.n
            self.obs_shape = observation_space.shape
            self.prep = True  # Atari observations are HxWxC and need transposing
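            # Worked example (not from the original file): DeepMind-wrapped
            # Atari frames are typically HxWxC, e.g. (84, 84, 1), so
            # obs_shape here becomes (1, 84, 84).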
    def reset(self):
        return np.stack([self.prep_obs(env.reset()) for env in self.envs])

    def get_depths(self, trim=True):
        if trim:
            return np.stack([env.get_depth()[2:-2, 2:-2] for env in self.envs])
        else:
            return np.stack([env.get_depth() for env in self.envs])

    def prep_obs(self, obs):
        if self.prep:
            return obs.transpose(2, 0, 1)  # HxWxC -> CxHxW
        else:
            return obs
    def step(self, actions):
        new_obs = []
        rewards = []
        dones = []
        infos = []
        for env, action in zip(self.envs, actions):
            obs, reward, done, info = env.step(action)
            # if done:
            #     obs = env.reset()
            new_obs.append(self.prep_obs(obs))
            rewards.append(reward)
            dones.append(done)
            infos.append(info)  # was infos.append(infos), which nested the list into itself
        return np.stack(new_obs), rewards, dones, infos

if __name__ == '__main__':
    params = parse_game_args()
    params.scenario_dir = '../resources/scenarios/'

    mp_test_envs = MultiEnvsMP(params.simulator, params.num_environments, 1, params)
    mp_test_envs.reset()
    actions = [2] * 16  # assumes 16 environments; zip() truncates if there are fewer
    for i in range(10):
        new_obs, rewards, dones, infos = mp_test_envs.step(actions)
    print(mp_test_envs.get_depths().shape)
    print(rewards, np.stack(rewards))

    envs = MultiEnvs(params.simulator, params.num_environments, 1, params)
    envs.reset()
    for i in range(10):
        new_obs, rewards, dones, infos = envs.step(actions)
    print(envs.get_depths().shape)
    print(rewards, np.stack(rewards))
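
    # With the close() helper sketched above (an addition, not in the
    # original), the worker processes could be shut down cleanly here:
    # mp_test_envs.close()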
    def test_mp_reset():
        mp_test_envs.reset()

    def test_mp_get_obs():
        actions = [2] * 16
        new_obs, rewards, dones, infos = mp_test_envs.step(actions)

    def test_sp_reset():
        envs.reset()

    def test_sp_get_obs():
        actions = [2] * 16
        new_obs, rewards, dones, infos = envs.step(actions)

    print('#' * 80)
    print('#' * 80)
    print('--- Running timing tests ---')
    print('#' * 80)
    print('Multiprocessing')
    # pass globals() so timeit's statement can see the test functions above;
    # the original label said '1000 trials' but number=10 is what actually runs
    print('MP Reset test, 10 trials',
          timeit.timeit('test_mp_reset()', globals=globals(), number=10))
    print('#' * 80)
    print('Multiprocessing')