Merge branch 'master' of https://github.com/Brandon-Rozek/rltorch
# Conflicts:
#   rltorch/agents/QEPAgent.py

Commit 9d32a9edd1
4 changed files with 160 additions and 10 deletions

rltorch/agents/DQNAgent.py

@@ -4,6 +4,7 @@ import torch
import torch.nn.functional as F
from copy import deepcopy
import numpy as np
+from pathlib import Path

class DQNAgent:
    def __init__(self, net , memory, config, target_net = None, logger = None):
@@ -12,6 +13,12 @@ class DQNAgent:
        self.memory = memory
        self.config = deepcopy(config)
        self.logger = logger
+    def save(self, file_location):
+        torch.save(self.net.model.state_dict(), file_location)
+    def load(self, file_location):
+        self.net.model.state_dict(torch.load(file_location))
+        self.net.model.to(self.net.device)
+        self.target_net.sync()

    def learn(self, logger = None):
        if len(self.memory) < self.config['batch_size']:
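Aside: the new `load` passes the loaded tensors to `state_dict(...)`; the conventional PyTorch call for restoring weights is `load_state_dict`. A minimal sketch of the save/load round trip, with a placeholder `QNetwork` module and file name (neither is part of rltorch):

# Sketch only: placeholder module and path, not rltorch code.
import torch
import torch.nn as nn

class QNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

    def forward(self, x):
        return self.fc(x)

net = QNetwork()
torch.save(net.state_dict(), "dqn.pt")           # mirrors what DQNAgent.save does for net.model

restored = QNetwork()
restored.load_state_dict(torch.load("dqn.pt"))   # conventional way to restore the weights
restored.to("cpu")                               # then move to the desired device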
@@ -57,8 +64,10 @@ class DQNAgent:

        # If we're sampling by TD error, multiply loss by a importance weight which helps decrease overfitting
        if (isinstance(self.memory, M.PrioritizedReplayMemory)):
-            loss = (torch.as_tensor(importance_weights, device = self.net.device) * ((obtained_values - expected_values)**2).squeeze(1)).mean()
+            # loss = (torch.as_tensor(importance_weights, device = self.net.device) * F.smooth_l1_loss(obtained_values, expected_values, reduction = 'none').squeeze(1)).mean()
+            loss = (torch.as_tensor(importance_weights, device = self.net.device) * ((obtained_values - expected_values)**2).squeeze(1)).mean()
        else:
+            # loss = F.smooth_l1_loss(obtained_values, expected_values)
            loss = F.mse_loss(obtained_values, expected_values)

        if self.logger is not None:
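The prioritized-replay branch above weights each sample's squared TD error by its importance-sampling weight before averaging. A self-contained sketch of that computation (shapes assumed: values are (batch, 1), weights are (batch,)):

# Sketch of the importance-weighted loss used with prioritized replay (shapes assumed).
import torch

def weighted_td_loss(obtained_values, expected_values, importance_weights, device="cpu"):
    weights = torch.as_tensor(importance_weights, device=device)
    squared_error = ((obtained_values - expected_values) ** 2).squeeze(1)
    return (weights * squared_error).mean()

obtained = torch.randn(8, 1)
expected = torch.randn(8, 1)
loss = weighted_td_loss(obtained, expected, [1.0] * 8)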

rltorch/agents/QEPAgent.py

@@ -20,9 +20,21 @@ class QEPAgent:
        self.config = deepcopy(config)
        self.logger = logger
        self.policy_skip = 4

+    def save(self, file_location):
+        torch.save({
+            'policy': self.policy_net.model.state_dict(),
+            'value': self.value_net.model.state_dict()
+        }, file_location)
+    def load(self, file_location):
+        checkpoint = torch.load(file_location)
+        self.value_net.model.state_dict(checkpoint['value'])
+        self.value_net.model.to(self.value_net.device)
+        self.policy_net.model.state_dict(checkpoint['policy'])
+        self.policy_net.model.to(self.policy_net.device)
+        self.target_net.sync()

    def fitness(self, policy_net, value_net, state_batch):
        # print("Worker started")
        batch_size = len(state_batch)
        action_probabilities = policy_net(state_batch)
        action_size = action_probabilities.shape[1]
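Since the checkpoint written by `save` is a plain dict with 'policy' and 'value' entries, restoring it is a matter of loading each entry into the matching module. A short sketch with placeholder `nn.Linear` models (the real policy/value architectures are not shown here):

# Sketch of the two-network checkpoint format; the nn.Linear models are placeholders.
import torch
import torch.nn as nn

policy_model = nn.Linear(4, 2)
value_model = nn.Linear(4, 2)

torch.save({'policy': policy_model.state_dict(),
            'value': value_model.state_dict()}, "qep.pt")

checkpoint = torch.load("qep.pt")
policy_model.load_state_dict(checkpoint['policy'])   # conventional restore call
value_model.load_state_dict(checkpoint['value'])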
@@ -44,8 +56,8 @@ class QEPAgent:
        value_importance = 1 - entropy_importance

        # entropy_loss = (action_probabilities * torch.log2(action_probabilities)).sum(1) # Standard entropy loss from information theory
-        entropy_loss = (action_probabilities - torch.tensor(1 / action_size).repeat(len(state_batch), action_size)).abs().sum(1)
        # print("END WORKER")
+        entropy_loss = (action_probabilities - torch.tensor(1 / action_size, device = state_batch.device).repeat(len(state_batch), action_size)).abs().sum(1)

        return (entropy_importance * entropy_loss - value_importance * obtained_values).mean().item()
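The penalty in this hunk is not Shannon entropy but the L1 distance from the uniform distribution (|p - 1/A| summed over actions); the change adds `device = state_batch.device` so the constant tensor lives on the same device as the probabilities. A small sketch of the same quantity:

# Sketch of the uniform-distance penalty; it is 0 when the policy is exactly uniform.
import torch

action_probabilities = torch.softmax(torch.randn(5, 3), dim=1)   # (batch, action_size)
batch_size, action_size = action_probabilities.shape
uniform = torch.full((batch_size, action_size), 1 / action_size,
                     device=action_probabilities.device)
entropy_loss = (action_probabilities - uniform).abs().sum(1)     # shape (batch,)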
@@ -119,7 +131,6 @@ class QEPAgent:
            self.policy_skip -= 1
            return
        self.policy_skip = 4

        if self.target_value_net is not None:
            self.policy_net.calc_gradients(self.target_value_net, state_batch)
        else:
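`policy_skip` implements delayed policy updates: the value network trains on every `learn` call while the policy is refreshed only once the counter runs out. A sketch of the pattern, assuming the surrounding (unshown) check is `if self.policy_skip > 0`; the method names are placeholders:

# Sketch of the delayed-policy-update counter; update_value/update_policy are placeholders.
class DelayedUpdater:
    def __init__(self, period=4):
        self.policy_skip = period

    def learn(self):
        self.update_value()              # runs every call
        if self.policy_skip > 0:
            self.policy_skip -= 1
            return
        self.policy_skip = 4
        self.update_policy()             # runs once the counter is exhausted

    def update_value(self):
        pass

    def update_policy(self):
        pass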

rltorch/env/simulate.py (vendored, 8 changes)

@@ -17,7 +17,7 @@ def simulateEnvEps(env, actor, config, total_episodes = 1, memory = None, logger

        if episode % config['print_stat_n_eps'] == 0:
            print("episode: {}/{}, score: {}"
-                .format(episode, total_episodes, episode_reward))
+                .format(episode, total_episodes, episode_reward), flush=True)

        if logger is not None:
            logger.append(name + '/EpisodeReward', episode_reward)
@@ -51,7 +51,7 @@ class EnvironmentRunSync():
            if done:
                if self.episode_num % self.config['print_stat_n_eps'] == 0:
                    print("episode: {}/{}, score: {}"
-                        .format(self.episode_num, self.config['total_training_episodes'], self.episode_reward))
+                        .format(self.episode_num, self.config['total_training_episodes'], self.episode_reward), flush=True)

                if self.logwriter is not None:
                    logger.append(self.name + '/EpisodeReward', self.episode_reward)
@@ -92,10 +92,10 @@ class EnvironmentEpisodeSync():

        if self.episode_num % self.config['print_stat_n_eps'] == 0:
            print("episode: {}/{}, score: {}"
-                .format(self.episode_num, self.config['total_training_episodes'], episodeReward))
+                .format(self.episode_num, self.config['total_training_episodes'], episodeReward), flush=True)

        if self.logwriter is not None:
            logger.append(self.name + '/EpisodeReward', episodeReward)
            self.logwriter.write(logger)

-        self.episode_num += 1
+        self.episode_num += 1
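All three print changes in this file only add `flush=True`, so episode statistics show up immediately even when stdout is block-buffered (for example, when output is piped or redirected to a file):

# flush=True pushes the line out right away instead of waiting for the buffer to fill.
print("episode: {}/{}, score: {}".format(10, 100, 21.0), flush=True)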

rltorch/env/wrappers.py (vendored, 132 changes)

@@ -3,6 +3,111 @@ import torch
from gym import spaces
import cv2
from collections import deque
import numpy as np

class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env=None):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        super(EpisodicLifeEnv, self).__init__(env)
        self.lives = 0
        self.was_real_done = True
        self.was_real_reset = False

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert somtimes we stay in lives == 0 condtion for a few frames
            # so its important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info

    def reset(self):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset()
            self.was_real_reset = True
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
            self.was_real_reset = False
        self.lives = self.env.unwrapped.ale.lives()
        return obs


class NoopResetEnv(gym.Wrapper):
    def __init__(self, env=None, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        super(NoopResetEnv, self).__init__(env)
        self.noop_max = noop_max
        self.override_num_noops = None
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def step(self, action):
        return self.env.step(action)

    def reset(self):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset()
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = np.random.randint(1, self.noop_max + 1)
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(0)
            if done:
                obs = self.env.reset()
        return obs


class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env=None, skip=4):
        """Return only every `skip`-th frame"""
        super(MaxAndSkipEnv, self).__init__(env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = deque(maxlen=2)
        self._skip = skip

    def step(self, action):
        total_reward = 0.0
        done = None
        for _ in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            self._obs_buffer.append(obs)
            total_reward += reward
            if done:
                break

        max_frame = np.max(np.stack(self._obs_buffer), axis=0)

        return max_frame, total_reward, done, info

    def reset(self):
        """Clear past frame buffer and init. to first obs. from inner env."""
        self._obs_buffer.clear()
        obs = self.env.reset()
        self._obs_buffer.append(obs)
        return obs

class ClippedRewardsWrapper(gym.RewardWrapper):
    def reward(self, reward):
        """Change all the positive rewards to 1, negative to -1 and keep zero."""
        return np.sign(reward)


# Mostly derived from OpenAI baselines
class FireResetEnv(gym.Wrapper):
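These wrappers follow the usual Atari preprocessing stack and assume the classic gym step API (4-tuple returns) plus an ALE environment exposing `unwrapped.ale`. A hedged usage sketch composing the classes defined above (the environment id is only an example):

# Sketch: stacking the wrappers above around an Atari env (old gym API assumed).
import gym

env = gym.make('PongNoFrameskip-v4')     # example id; any ALE env with a NOOP action works
env = NoopResetEnv(env, noop_max=30)     # randomize the starting state
env = MaxAndSkipEnv(env, skip=4)         # repeat each action, max-pool the last two frames
env = EpisodicLifeEnv(env)               # treat a lost life as end-of-episode for learning
env = ClippedRewardsWrapper(env)         # clip rewards to {-1, 0, +1}

obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())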
@@ -126,4 +231,29 @@ class TorchWrap(gym.Wrapper):

    def _convert(self, frame):
        frame = torch.from_numpy(frame).unsqueeze(0).float()
        return frame


class ProcessFrame84(gym.ObservationWrapper):
    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)

    def observation(self, obs):
        return ProcessFrame84.process(obs)

    @staticmethod
    def process(frame):
        if frame.size == 210 * 160 * 3:
            img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
        elif frame.size == 250 * 160 * 3:
            img = np.reshape(frame, [250, 160, 3]).astype(np.float32)
        else:
            assert False, "Unknown resolution."
        img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
        resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_AREA)
        x_t = resized_screen[18:102, :]
        x_t = np.reshape(x_t, [84, 84])
        return x_t.astype(np.uint8)
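ProcessFrame84 reduces a 210x160 RGB frame to a single 84x84 grayscale image: luminance weights 0.299/0.587/0.114, an area-interpolated resize to 84x110, then a crop of rows 18-101. A self-contained check of that pipeline on a synthetic frame:

# Sketch: reproduce the 210x160x3 -> 84x84 preprocessing on a synthetic frame.
import cv2
import numpy as np

frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)

img = frame.astype(np.float32)
gray = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
resized = cv2.resize(gray, (84, 110), interpolation=cv2.INTER_AREA)   # dsize is (width, height)
cropped = resized[18:102, :]                                          # keep the 84-row play area

assert cropped.shape == (84, 84)
processed = cropped.astype(np.uint8)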