From 912e3d42cb6b8021b015db3b3086e67ea7ac2a83 Mon Sep 17 00:00:00 2001
From: Brandon Rozek
Date: Fri, 13 Sep 2019 19:48:24 -0400
Subject: [PATCH] Added new OpenAI Baseline Wrappers

---
 rltorch/env/wrappers.py | 132 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 131 insertions(+), 1 deletion(-)

diff --git a/rltorch/env/wrappers.py b/rltorch/env/wrappers.py
index 065b931..2bd5b97 100644
--- a/rltorch/env/wrappers.py
+++ b/rltorch/env/wrappers.py
@@ -3,6 +3,111 @@ import torch
 from gym import spaces
 import cv2
 from collections import deque
+import numpy as np
+
+class EpisodicLifeEnv(gym.Wrapper):
+    def __init__(self, env=None):
+        """Make end-of-life == end-of-episode, but only reset on true game over.
+        Done by DeepMind for the DQN and co. since it helps value estimation.
+        """
+        super(EpisodicLifeEnv, self).__init__(env)
+        self.lives = 0
+        self.was_real_done = True
+        self.was_real_reset = False
+
+    def step(self, action):
+        obs, reward, done, info = self.env.step(action)
+        self.was_real_done = done
+        # check current lives, make loss of life terminal,
+        # then update lives to handle bonus lives
+        lives = self.env.unwrapped.ale.lives()
+        if lives < self.lives and lives > 0:
+            # for Qbert we sometimes stay in the lives == 0 condition for a few frames,
+            # so it's important to keep lives > 0, so that we only reset once
+            # the environment advertises done.
+            done = True
+        self.lives = lives
+        return obs, reward, done, info
+
+    def reset(self):
+        """Reset only when lives are exhausted.
+        This way all states are still reachable even though lives are episodic,
+        and the learner need not know about any of this behind-the-scenes.
+        """
+        if self.was_real_done:
+            obs = self.env.reset()
+            self.was_real_reset = True
+        else:
+            # no-op step to advance from the terminal/lost-life state
+            obs, _, _, _ = self.env.step(0)
+            self.was_real_reset = False
+        self.lives = self.env.unwrapped.ale.lives()
+        return obs
+
+
+class NoopResetEnv(gym.Wrapper):
+    def __init__(self, env=None, noop_max=30):
+        """Sample initial states by taking a random number of no-ops on reset.
+        No-op is assumed to be action 0.
+        """
+        super(NoopResetEnv, self).__init__(env)
+        self.noop_max = noop_max
+        self.override_num_noops = None
+        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
+
+    def step(self, action):
+        return self.env.step(action)
+
+    def reset(self):
+        """Do a no-op action for a number of steps in [1, noop_max]."""
+        self.env.reset()
+        if self.override_num_noops is not None:
+            noops = self.override_num_noops
+        else:
+            noops = np.random.randint(1, self.noop_max + 1)
+        assert noops > 0
+        obs = None
+        for _ in range(noops):
+            obs, _, done, _ = self.env.step(0)
+            if done:
+                obs = self.env.reset()
+        return obs
+
+
+class MaxAndSkipEnv(gym.Wrapper):
+    def __init__(self, env=None, skip=4):
+        """Return only every `skip`-th frame."""
+        super(MaxAndSkipEnv, self).__init__(env)
+        # most recent raw observations (for max pooling across time steps)
+        self._obs_buffer = deque(maxlen=2)
+        self._skip = skip
+
+    def step(self, action):
+        total_reward = 0.0
+        done = None
+        for _ in range(self._skip):
+            obs, reward, done, info = self.env.step(action)
+            self._obs_buffer.append(obs)
+            total_reward += reward
+            if done:
+                break
+
+        max_frame = np.max(np.stack(self._obs_buffer), axis=0)
+
+        return max_frame, total_reward, done, info
+
+    def reset(self):
+        """Clear past frame buffer and init. to first obs. from inner env."""
+        self._obs_buffer.clear()
+        obs = self.env.reset()
+        self._obs_buffer.append(obs)
+        return obs
+
+class ClippedRewardsWrapper(gym.RewardWrapper):
+    def reward(self, reward):
+        """Change all positive rewards to 1, negative to -1, and keep zero."""
+        return np.sign(reward)
+
 
 # Mostly derived from OpenAI baselines
 class FireResetEnv(gym.Wrapper):
@@ -126,4 +231,29 @@ class TorchWrap(gym.Wrapper):
 
     def _convert(self, frame):
         frame = torch.from_numpy(frame).unsqueeze(0).float()
-        return frame
\ No newline at end of file
+        return frame
+
+
+
+class ProcessFrame84(gym.ObservationWrapper):
+    def __init__(self, env=None):
+        super(ProcessFrame84, self).__init__(env)
+        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)
+
+    def observation(self, obs):
+        return ProcessFrame84.process(obs)
+
+    @staticmethod
+    def process(frame):
+        if frame.size == 210 * 160 * 3:
+            img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
+        elif frame.size == 250 * 160 * 3:
+            img = np.reshape(frame, [250, 160, 3]).astype(np.float32)
+        else:
+            assert False, "Unknown resolution."
+        img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
+        resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_AREA)
+        x_t = resized_screen[18:102, :]
+        x_t = np.reshape(x_t, [84, 84])
+        return x_t.astype(np.uint8)
+
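
Usage note (not part of the patch): a minimal sketch of how these wrappers are
typically composed, following the usual DeepMind Atari preprocessing order
(no-op starts, action repeat with max-pooling over frame pairs, episodic
lives, 84x84 grayscale frames, clipped rewards). The env id
"PongNoFrameskip-v4" and the helper name make_atari_env are assumptions for
illustration only; any ALE NoFrameskip environment should work.

import gym
import rltorch.env.wrappers as W

def make_atari_env(env_name="PongNoFrameskip-v4"):
    # env_name is an assumed example id, not something the patch prescribes
    env = gym.make(env_name)
    env = W.NoopResetEnv(env, noop_max=30)    # randomize the start state
    env = W.MaxAndSkipEnv(env, skip=4)        # repeat action, max-pool frame pairs
    env = W.EpisodicLifeEnv(env)              # treat life loss as episode end
    env = W.ProcessFrame84(env)               # grayscale, resize, crop to 84x84
    env = W.ClippedRewardsWrapper(env)        # clip rewards to {-1, 0, 1}
    return env

For reference, ProcessFrame84 converts RGB to luminance with the ITU-R BT.601
weights (0.299, 0.587, 0.114), resizes to 84x110, then crops rows 18-101 to
keep the playing field, and MaxAndSkipEnv max-pools the last two raw frames to
suppress Atari sprite flicker.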