utils.py
import os
from collections import namedtuple

import numpy as np
import torch
from moviepy.editor import ImageSequenceClip
from numpy.random import default_rng

# A single environment transition as stored in the replay pool
Transition = namedtuple('Transition', ('state', 'action', 'reward', 'nextstate', 'real_done'))

class MeanStdevFilter():
    """Running mean/stdev normalizer with clipping."""

    def __init__(self, shape, clip=3.0):
        self.eps = 1e-4
        self.shape = shape
        self.clip = clip
        self._count = 0
        self._running_sum = np.zeros(shape)
        self._running_sum_sq = np.zeros(shape) + self.eps
        self.mean = np.zeros(shape)
        self.stdev = np.ones(shape) * self.eps

    def update(self, x):
        if len(x.shape) == 1:
            x = x.reshape(1, -1)
        # assume 2D data from here on: (batch, shape)
        self._running_sum += np.sum(x, axis=0)
        self._running_sum_sq += np.sum(np.square(x), axis=0)
        self._count += x.shape[0]
        self.mean = self._running_sum / self._count
        # Var = E[x^2] - E[x]^2, floored at eps for numerical stability
        self.stdev = np.sqrt(
            np.maximum(
                self._running_sum_sq / self._count - self.mean**2,
                self.eps
            ))

    def __call__(self, x):
        return np.clip(((x - self.mean) / self.stdev), -self.clip, self.clip)

    def invert(self, x):
        return (x * self.stdev) + self.mean
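
# Usage sketch (illustrative addition, not part of the original file): the
# 4-dim observation shape below is an arbitrary assumption for demonstration.
def _example_filter_usage():
    obs_filter = MeanStdevFilter(shape=4)
    batch = np.random.randn(32, 4)        # a batch of raw observations
    obs_filter.update(batch)              # accumulate running statistics
    normed = obs_filter(batch)            # standardized, clipped to +/- 3
    return obs_filter.invert(normed)      # maps back to the original scale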

class ReplayPool:
    """Fixed-capacity FIFO replay buffer backed by preallocated numpy arrays."""

    def __init__(self, action_dim, state_dim, capacity=1e6):
        self.capacity = int(capacity)
        self._action_dim = action_dim
        self._state_dim = state_dim
        self._pointer = 0
        self._size = 0
        self._init_memory()
        self._rng = default_rng()

    def _init_memory(self):
        self._memory = {
            'state': np.zeros((self.capacity, self._state_dim), dtype='float32'),
            'action': np.zeros((self.capacity, self._action_dim), dtype='float32'),
            'reward': np.zeros((self.capacity), dtype='float32'),
            'nextstate': np.zeros((self.capacity, self._state_dim), dtype='float32'),
            'real_done': np.zeros((self.capacity), dtype='bool')
        }

    def push(self, transition: Transition):
        # Handle 1-D data (a single transition) as well as batched pushes
        num_samples = transition.state.shape[0] if len(transition.state.shape) > 1 else 1
        idx = np.arange(self._pointer, self._pointer + num_samples) % self.capacity
        for key, value in transition._asdict().items():
            self._memory[key][idx] = value
        self._pointer = (self._pointer + num_samples) % self.capacity
        self._size = min(self._size + num_samples, self.capacity)

    def _return_from_idx(self, idx):
        sample = {k: tuple(v[idx]) for k, v in self._memory.items()}
        return Transition(**sample)

    def sample(self, batch_size: int, unique: bool = True):
        # unique=True samples without replacement, avoiding duplicate
        # transitions within a batch
        idx = (self._rng.choice(self._size, size=batch_size, replace=False)
               if unique else np.random.randint(0, self._size, batch_size))
        return self._return_from_idx(idx)

    def sample_all(self):
        return self._return_from_idx(np.arange(0, self._size))

    def __len__(self):
        return self._size

    def clear_pool(self):
        self._init_memory()

    def initialise(self, old_pool):
        # Not tested
        old_memory = old_pool.sample_all()
        for key in self._memory:
            # old_memory is a Transition namedtuple, so its fields must be
            # accessed by attribute rather than by string key
            self._memory[key] = np.append(self._memory[key], getattr(old_memory, key), 0)
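
# Usage sketch (illustrative addition, not part of the original file): the
# state/action dimensions are arbitrary assumptions for demonstration.
def _example_replay_usage():
    pool = ReplayPool(action_dim=2, state_dim=4, capacity=1000)
    for _ in range(64):
        pool.push(Transition(
            np.random.randn(4).astype('float32'),   # state
            np.random.randn(2).astype('float32'),   # action
            0.0,                                    # reward
            np.random.randn(4).astype('float32'),   # nextstate
            False))                                 # real_done
    batch = pool.sample(batch_size=32)  # 32 unique transitions by default
    return np.stack(batch.state)        # fields come back as tuples of arrays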

# Code courtesy of JPH: https://github.com/jparkerholder
def make_gif(policy, env, step_count, state_filter, maxsteps=1000):
    envname = env.spec.id
    gif_name = '_'.join([envname, str(step_count)])
    state = env.reset()
    done = False
    steps = []
    rewards = []
    t = 0
    while (not done) and (t < maxsteps):
        s = env.render('rgb_array')
        steps.append(s)
        action = policy.get_action(state, state_filter=state_filter, deterministic=True)
        action = np.clip(action, env.action_space.low[0], env.action_space.high[0])
        action = action.reshape(len(action), )
        state, reward, done, _ = env.step(action)
        rewards.append(reward)
        t += 1
    print('Final reward:', np.sum(rewards))
    clip = ImageSequenceClip(steps, fps=30)
    if not os.path.isdir('gifs'):
        os.makedirs('gifs')
    clip.write_gif('gifs/{}.gif'.format(gif_name), fps=30)
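
# Usage sketch (illustrative addition, not part of the original file): assumes
# a Gym environment with rgb_array rendering and a policy exposing get_action
# with the signature used inside make_gif.
def _example_gif(policy, env, state_filter):
    make_gif(policy, env, step_count=100000, state_filter=state_filter)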

def make_checkpoint(agent, step_count, env_name):
    q_funcs, target_q_funcs, policy, log_alpha = agent.q_funcs, agent.target_q_funcs, agent.policy, agent.log_alpha
    save_path = "checkpoints/model-{}-{}.pt".format(step_count, env_name)
    if not os.path.isdir('checkpoints'):
        os.makedirs('checkpoints')
    torch.save({
        'double_q_state_dict': q_funcs.state_dict(),
        'target_double_q_state_dict': target_q_funcs.state_dict(),
        'policy_state_dict': policy.state_dict(),
        'log_alpha_state_dict': log_alpha
    }, save_path)
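
# Restore sketch (illustrative addition, not part of the original file):
# mirrors the keys written by make_checkpoint; assumes `agent` exposes the
# same attributes used above (q_funcs, target_q_funcs, policy, log_alpha).
def _example_restore_checkpoint(agent, step_count, env_name):
    path = "checkpoints/model-{}-{}.pt".format(step_count, env_name)
    checkpoint = torch.load(path)
    agent.q_funcs.load_state_dict(checkpoint['double_q_state_dict'])
    agent.target_q_funcs.load_state_dict(checkpoint['target_double_q_state_dict'])
    agent.policy.load_state_dict(checkpoint['policy_state_dict'])
    agent.log_alpha = checkpoint['log_alpha_state_dict']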