From a59f84b446e1a4dd6684cbcb641b33fba4b00cbb Mon Sep 17 00:00:00 2001 From: Brandon Rozek Date: Mon, 4 Mar 2019 17:09:46 -0500 Subject: [PATCH] Cleaned up scripts, added more comments --- examples/acrobot_a2c.py | 46 ++------- examples/acrobot_es.py | 42 +++++---- examples/acrobot_ppo.py | 43 ++------- examples/acrobot_qep.py | 21 +++-- examples/acrobot_reinforce.py | 79 +++++++--------- examples/acrobot_single_process.py | 135 -------------------------- examples/pong.py | 147 ----------------------------- rltorch/agents/A2CSingleAgent.py | 13 ++- rltorch/agents/DQNAgent.py | 2 + rltorch/agents/PPOAgent.py | 8 +- rltorch/network/ESNetwork.py | 3 +- 11 files changed, 103 insertions(+), 436 deletions(-) delete mode 100644 examples/acrobot_single_process.py delete mode 100644 examples/pong.py diff --git a/examples/acrobot_a2c.py b/examples/acrobot_a2c.py index 29ce258..b59390c 100644 --- a/examples/acrobot_a2c.py +++ b/examples/acrobot_a2c.py @@ -1,5 +1,4 @@ import gym -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F @@ -9,10 +8,10 @@ import rltorch.memory as M import rltorch.env as E from rltorch.action_selector import StochasticSelector from tensorboardX import SummaryWriter -import torch.multiprocessing as mp -import signal -from copy import deepcopy +# +## Networks +# class Value(nn.Module): def __init__(self, state_size): super(Value, self).__init__() @@ -28,11 +27,8 @@ class Value(nn.Module): def forward(self, x): x = F.relu(self.fc_norm(self.fc1(x))) - x = F.relu(self.fc2_norm(self.fc2(x))) - x = self.fc3(x) - return x class Policy(nn.Module): @@ -48,50 +44,30 @@ class Policy(nn.Module): self.fc2_norm = nn.LayerNorm(64) self.fc3 = rn.NoisyLinear(64, action_size) - # self.fc3_norm = nn.LayerNorm(action_size) - - # self.value_fc = rn.NoisyLinear(64, 64) - # self.value_fc_norm = nn.LayerNorm(64) - # self.value = rn.NoisyLinear(64, 1) - - # self.advantage_fc = rn.NoisyLinear(64, 64) - # self.advantage_fc_norm = nn.LayerNorm(64) - # self.advantage = rn.NoisyLinear(64, action_size) def forward(self, x): x = F.relu(self.fc_norm(self.fc1(x))) - x = F.relu(self.fc2_norm(self.fc2(x))) - x = F.softmax(self.fc3(x), dim = 1) - - # state_value = F.relu(self.value_fc_norm(self.value_fc(x))) - # state_value = self.value(state_value) - - # advantage = F.relu(self.advantage_fc_norm(self.advantage_fc(x))) - # advantage = self.advantage(advantage) - - # x = F.softmax(state_value + advantage - advantage.mean(), dim = 1) - return x - +# +## Configuration +# config = {} config['seed'] = 901 config['environment_name'] = 'Acrobot-v1' -config['memory_size'] = 2000 config['total_training_episodes'] = 500 config['total_evaluation_episodes'] = 10 -config['batch_size'] = 32 config['learning_rate'] = 1e-3 -config['target_sync_tau'] = 1e-1 config['discount_rate'] = 0.99 -config['replay_skip'] = 0 # How many episodes between printing out the episode stats config['print_stat_n_eps'] = 1 config['disable_cuda'] = False - +# +## Training Loop +# def train(runner, agent, config, logger = None, logwriter = None): finished = False while not finished: @@ -103,9 +79,8 @@ def train(runner, agent, config, logger = None, logwriter = None): logwriter.write(logger) finished = runner.episode_num > config['total_training_episodes'] -if __name__ == "__main__": - torch.multiprocessing.set_sharing_strategy('file_system') # To not hit file descriptor memory limit +if __name__ == "__main__": # Setting up the environment rltorch.set_seed(config['seed']) print("Setting up environment...", end = " ") @@ 
-135,7 +110,6 @@ if __name__ == "__main__": actor = StochasticSelector(policy_net, action_size, memory, device = device) # Agent is what performs the training - # agent = rltorch.agents.REINFORCEAgent(net, memory, config, target_net = target_net, logger = logger) agent = rltorch.agents.A2CSingleAgent(policy_net, value_net, memory, config, logger = logger) # Runner performs one episode in the environment diff --git a/examples/acrobot_es.py b/examples/acrobot_es.py index 7b0249e..c8be856 100644 --- a/examples/acrobot_es.py +++ b/examples/acrobot_es.py @@ -1,5 +1,4 @@ import gym -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F @@ -10,8 +9,10 @@ import rltorch.memory as M import rltorch.env as E from rltorch.action_selector import StochasticSelector from tensorboardX import SummaryWriter -import torch.multiprocessing as mp +# +## Networks +# class Policy(nn.Module): def __init__(self, state_size, action_size): super(Policy, self).__init__() @@ -32,37 +33,37 @@ class Policy(nn.Module): x = F.softmax(self.action_prob(x), dim = 1) return x +# +## Configuration +# config = {} config['seed'] = 901 config['environment_name'] = 'Acrobot-v1' -config['memory_size'] = 2000 config['total_training_episodes'] = 50 config['total_evaluation_episodes'] = 5 -config['batch_size'] = 32 config['learning_rate'] = 1e-1 -config['target_sync_tau'] = 1e-1 config['discount_rate'] = 0.99 -config['replay_skip'] = 0 # How many episodes between printing out the episode stats config['print_stat_n_eps'] = 1 config['disable_cuda'] = False - - -def train(env, net, actor, config, logger = None, logwriter = None): +# +## Training Loop +# +def train(runner, net, config, logger = None, logwriter = None): finished = False - episode_num = 1 while not finished: - rltorch.env.simulateEnvEps(env, actor, config, logger = logger, name = "Training") - episode_num += 1 + runner.run() net.calc_gradients() net.step() - # When the episode number changes, log network paramters if logwriter is not None: net.log_named_parameters() logwriter.write(logger) - finished = episode_num > config['total_training_episodes'] + finished = runner.episode_num > config['total_training_episodes'] +# +## Loss function +# def fitness(model): env = gym.make("Acrobot-v1") state = torch.from_numpy(env.reset()).float().unsqueeze(0) @@ -75,9 +76,12 @@ def fitness(model): next_state, reward, done, _ = env.step(action) total_reward += reward state = torch.from_numpy(next_state).float().unsqueeze(0) - return total_reward + return -total_reward if __name__ == "__main__": + # Hide internal gym warnings + gym.logger.set_level(40) + # Setting up the environment rltorch.set_seed(config['seed']) print("Setting up environment...", end = " ") @@ -90,21 +94,21 @@ if __name__ == "__main__": # Logging logger = rltorch.log.Logger() - # logwriter = rltorch.log.LogWriter(logger, SummaryWriter()) logwriter = rltorch.log.LogWriter(SummaryWriter()) # Setting up the networks device = torch.device("cuda:0" if torch.cuda.is_available() and not config['disable_cuda'] else "cpu") net = rn.ESNetwork(Policy(state_size, action_size), torch.optim.Adam, 100, fitness, config, device = device, name = "ES", logger = logger) - net.model.share_memory() # Actor takes a net and uses it to produce actions from given states actor = StochasticSelector(net, action_size, device = device) - print("Training...") + # Runner performs an episode of the environment + runner = rltorch.env.EnvironmentEpisodeSync(env, actor, config, name = "Training", logwriter = logwriter) - 
train(env, net, actor, config, logger = logger, logwriter = logwriter) + print("Training...") + train(runner, net, config, logger = logger, logwriter = logwriter) # For profiling... # import cProfile diff --git a/examples/acrobot_ppo.py b/examples/acrobot_ppo.py index 0e5c5fa..d0f3bce 100644 --- a/examples/acrobot_ppo.py +++ b/examples/acrobot_ppo.py @@ -1,5 +1,4 @@ import gym -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F @@ -9,10 +8,10 @@ import rltorch.memory as M import rltorch.env as E from rltorch.action_selector import StochasticSelector from tensorboardX import SummaryWriter -import torch.multiprocessing as mp -import signal -from copy import deepcopy +# +## Networks +# class Value(nn.Module): def __init__(self, state_size): super(Value, self).__init__() @@ -28,11 +27,8 @@ class Value(nn.Module): def forward(self, x): x = F.relu(self.fc_norm(self.fc1(x))) - x = F.relu(self.fc2_norm(self.fc2(x))) - x = self.fc3(x) - return x class Policy(nn.Module): @@ -48,50 +44,30 @@ class Policy(nn.Module): self.fc2_norm = nn.LayerNorm(64) self.fc3 = rn.NoisyLinear(64, action_size) - # self.fc3_norm = nn.LayerNorm(action_size) - - # self.value_fc = rn.NoisyLinear(64, 64) - # self.value_fc_norm = nn.LayerNorm(64) - # self.value = rn.NoisyLinear(64, 1) - - # self.advantage_fc = rn.NoisyLinear(64, 64) - # self.advantage_fc_norm = nn.LayerNorm(64) - # self.advantage = rn.NoisyLinear(64, action_size) def forward(self, x): x = F.relu(self.fc_norm(self.fc1(x))) - x = F.relu(self.fc2_norm(self.fc2(x))) - x = F.softmax(self.fc3(x), dim = 1) - - # state_value = F.relu(self.value_fc_norm(self.value_fc(x))) - # state_value = self.value(state_value) - - # advantage = F.relu(self.advantage_fc_norm(self.advantage_fc(x))) - # advantage = self.advantage(advantage) - - # x = F.softmax(state_value + advantage - advantage.mean(), dim = 1) - return x - +# +## Configuration +# config = {} config['seed'] = 901 config['environment_name'] = 'Acrobot-v1' -config['memory_size'] = 2000 config['total_training_episodes'] = 500 config['total_evaluation_episodes'] = 10 -config['batch_size'] = 32 config['learning_rate'] = 1e-3 -config['target_sync_tau'] = 1e-1 config['discount_rate'] = 0.99 -config['replay_skip'] = 0 # How many episodes between printing out the episode stats config['print_stat_n_eps'] = 1 config['disable_cuda'] = False - +# +## Training Loop +# def train(runner, agent, config, logger = None, logwriter = None): finished = False while not finished: @@ -133,7 +109,6 @@ if __name__ == "__main__": actor = StochasticSelector(policy_net, action_size, memory, device = device) # Agent is what performs the training - # agent = rltorch.agents.REINFORCEAgent(net, memory, config, target_net = target_net, logger = logger) agent = rltorch.agents.PPOAgent(policy_net, value_net, memory, config, logger = logger) # Runner performs a certain number of steps in the environment diff --git a/examples/acrobot_qep.py b/examples/acrobot_qep.py index 80df660..9643f11 100644 --- a/examples/acrobot_qep.py +++ b/examples/acrobot_qep.py @@ -1,5 +1,4 @@ import gym -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F @@ -9,9 +8,11 @@ import rltorch.memory as M import rltorch.env as E from rltorch.action_selector import StochasticSelector from tensorboardX import SummaryWriter -import torch.multiprocessing as mp from copy import deepcopy +# +## Networks +# class Value(nn.Module): def __init__(self, state_size, action_size): super(Value, self).__init__() @@ -39,7 +40,6 @@ 
class Value(nn.Module): advantage = self.advantage(advantage) x = state_value + advantage - advantage.mean() - return x @@ -63,6 +63,9 @@ class Policy(nn.Module): x = F.softmax(self.action_prob(x), dim = 1) return x +# +## Configuration +# config = {} config['seed'] = 901 config['environment_name'] = 'Acrobot-v1' @@ -88,7 +91,9 @@ config['prioritized_replay_sampling_priority'] = 0.6 config['prioritized_replay_weight_importance'] = rltorch.scheduler.ExponentialScheduler(initial_value = 0.4, end_value = 1, iterations = 5000) - +# +## Training Loop +# def train(runner, agent, config, logger = None, logwriter = None): finished = False last_episode_num = 1 @@ -103,6 +108,7 @@ def train(runner, agent, config, logger = None, logwriter = None): logwriter.write(logger) finished = runner.episode_num > config['total_training_episodes'] + if __name__ == "__main__": # Setting up the environment rltorch.set_seed(config['seed']) @@ -116,7 +122,6 @@ if __name__ == "__main__": # Logging logger = rltorch.log.Logger() - # logwriter = rltorch.log.LogWriter(logger, SummaryWriter()) logwriter = rltorch.log.LogWriter(SummaryWriter()) # Setting up the networks @@ -127,13 +132,11 @@ if __name__ == "__main__": torch.optim.Adam, 500, None, config2, sigma = 0.1, device = device, name = "ES", logger = logger) value_net = rn.Network(Value(state_size, action_size), torch.optim.Adam, config, device = device, name = "DQN", logger = logger) - target_net = rn.TargetNetwork(value_net, device = device) - value_net.model.share_memory() - target_net.model.share_memory() # Actor takes a net and uses it to produce actions from given states actor = StochasticSelector(policy_net, action_size, device = device) + # Memory stores experiences for later training memory = M.PrioritizedReplayMemory(capacity = config['memory_size'], alpha = config['prioritized_replay_sampling_priority']) @@ -141,11 +144,9 @@ if __name__ == "__main__": runner = rltorch.env.EnvironmentRunSync(env, actor, config, name = "Training", memory = memory, logwriter = logwriter) # Agent is what performs the training - # agent = TestAgent(policy_net, value_net, memory, config, target_value_net = target_net, logger = logger) agent = rltorch.agents.QEPAgent(policy_net, value_net, memory, config, target_value_net = target_net, logger = logger) print("Training...") - train(runner, agent, config, logger = logger, logwriter = logwriter) # For profiling... 
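Both acrobot_es.py and acrobot_qep.py above drive their policy through rn.ESNetwork, and the fitness function in acrobot_es.py now returns -total_reward. The sketch below is an illustration only, not the rltorch ESNetwork API: the helper name es_gradient, the toy fitness, and the population/sigma values are made up, and it assumes the fitness is treated as a loss to be minimized, which is what the sign flip on total_reward suggests.

import torch

def es_gradient(params, fitness_fn, population=100, sigma=0.1):
    # Estimate the gradient of the expected fitness from Gaussian perturbations
    # of a flat parameter vector (in the spirit of https://arxiv.org/abs/1703.03864).
    noise = torch.randn(population, params.numel())
    fitnesses = torch.tensor([fitness_fn(params + sigma * eps) for eps in noise])
    fitnesses = fitnesses - fitnesses.mean()   # a mean baseline keeps the estimate low-variance
    # A fitness-weighted sum of the perturbations approximates the gradient
    return (noise * fitnesses.unsqueeze(1)).sum(dim=0) / (population * sigma)

if __name__ == "__main__":
    # Toy fitness acting as a loss: squared distance from the origin, minimized at zero
    fitness = lambda p: float((p ** 2).sum())
    theta = torch.ones(4)
    for _ in range(200):
        theta = theta - 0.1 * es_gradient(theta, fitness)  # descend, since fitness is a loss
    print(theta)  # should end up near zero

A larger population smooths the estimate at the cost of more fitness evaluations per update, which is presumably why the examples above pass 100 and 500 to ESNetwork.
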
diff --git a/examples/acrobot_reinforce.py b/examples/acrobot_reinforce.py index 44d1585..d0cd397 100644 --- a/examples/acrobot_reinforce.py +++ b/examples/acrobot_reinforce.py @@ -1,5 +1,4 @@ import gym -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F @@ -9,69 +8,57 @@ import rltorch.memory as M import rltorch.env as E from rltorch.action_selector import StochasticSelector from tensorboardX import SummaryWriter -import torch.multiprocessing as mp -import signal -from copy import deepcopy -class Value(nn.Module): +# +## Networks +# +class Policy(nn.Module): def __init__(self, state_size, action_size): - super(Value, self).__init__() + super(Policy, self).__init__() self.state_size = state_size self.action_size = action_size self.fc1 = rn.NoisyLinear(state_size, 64) self.fc_norm = nn.LayerNorm(64) - - self.value_fc = rn.NoisyLinear(64, 64) - self.value_fc_norm = nn.LayerNorm(64) - self.value = rn.NoisyLinear(64, 1) - - self.advantage_fc = rn.NoisyLinear(64, 64) - self.advantage_fc_norm = nn.LayerNorm(64) - self.advantage = rn.NoisyLinear(64, action_size) + + self.fc2 = rn.NoisyLinear(64, 64) + self.fc2_norm = nn.LayerNorm(64) + + self.fc3 = rn.NoisyLinear(64, action_size) def forward(self, x): x = F.relu(self.fc_norm(self.fc1(x))) - - state_value = F.relu(self.value_fc_norm(self.value_fc(x))) - state_value = self.value(state_value) - - advantage = F.relu(self.advantage_fc_norm(self.advantage_fc(x))) - advantage = self.advantage(advantage) - - x = F.softmax(state_value + advantage - advantage.mean(), dim = 1) - + x = F.relu(self.fc2_norm(self.fc2(x))) + x = F.softmax(self.fc3(x), dim = 1) return x - +# +## Configuration +# config = {} config['seed'] = 901 config['environment_name'] = 'Acrobot-v1' -config['memory_size'] = 2000 config['total_training_episodes'] = 500 config['total_evaluation_episodes'] = 10 -config['batch_size'] = 32 config['learning_rate'] = 1e-3 -config['target_sync_tau'] = 1e-1 config['discount_rate'] = 0.99 -config['replay_skip'] = 0 # How many episodes between printing out the episode stats config['print_stat_n_eps'] = 1 config['disable_cuda'] = False -def train(env, agent, actor, memory, config, logger = None, logwriter = None): - finished = False - episode_num = 1 - while not finished: - rltorch.env.simulateEnvEps(env, actor, config, memory = memory, logger = logger, name = "Training") - episode_num += 1 - agent.learn() - # When the episode number changes, log network paramters - if logwriter is not None: - agent.net.log_named_parameters() - logwriter.write(logger) - finished = episode_num > config['total_training_episodes'] - +# +## Training Loop +# +def train(runner, agent, config, logger = None, logwriter = None): + finished = False + while not finished: + runner.run() + agent.learn() + # When the episode number changes, log network paramters + if logwriter is not None: + agent.net.log_named_parameters() + logwriter.write(logger) + finished = runner.episode_num > config['total_training_episodes'] if __name__ == "__main__": @@ -93,11 +80,9 @@ if __name__ == "__main__": # Setting up the networks device = torch.device("cuda:0" if torch.cuda.is_available() and not config['disable_cuda'] else "cpu") - net = rn.Network(Value(state_size, action_size), + net = rn.Network(Policy(state_size, action_size), torch.optim.Adam, config, device = device, name = "DQN") target_net = rn.TargetNetwork(net, device = device) - net.model.share_memory() - target_net.model.share_memory() # Memory stores experiences for later training memory = 
M.EpisodeMemory() @@ -108,9 +93,11 @@ if __name__ == "__main__": # Agent is what performs the training agent = rltorch.agents.REINFORCEAgent(net, memory, config, target_net = target_net, logger = logger) - print("Training...") + # Runner performs one episode in the environment + runner = rltorch.env.EnvironmentEpisodeSync(env, actor, config, name = "Training", memory = memory, logwriter = logwriter) - train(env, agent, actor, memory, config, logger = logger, logwriter = logwriter) + print("Training...") + train(runner, agent, config, logger = logger, logwriter = logwriter) # For profiling... # import cProfile diff --git a/examples/acrobot_single_process.py b/examples/acrobot_single_process.py deleted file mode 100644 index cc54f11..0000000 --- a/examples/acrobot_single_process.py +++ /dev/null @@ -1,135 +0,0 @@ -import gym -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import rltorch -import rltorch.network as rn -import rltorch.memory as M -import rltorch.env as E -from rltorch.action_selector import ArgMaxSelector -from tensorboardX import SummaryWriter -import torch.multiprocessing as mp - -class Value(nn.Module): - def __init__(self, state_size, action_size): - super(Value, self).__init__() - self.state_size = state_size - self.action_size = action_size - - self.fc1 = rn.NoisyLinear(state_size, 255) - self.fc_norm = nn.LayerNorm(255) - - self.value_fc = rn.NoisyLinear(255, 255) - self.value_fc_norm = nn.LayerNorm(255) - self.value = rn.NoisyLinear(255, 1) - - self.advantage_fc = rn.NoisyLinear(255, 255) - self.advantage_fc_norm = nn.LayerNorm(255) - self.advantage = rn.NoisyLinear(255, action_size) - - def forward(self, x): - x = F.relu(self.fc_norm(self.fc1(x))) - - state_value = F.relu(self.value_fc_norm(self.value_fc(x))) - state_value = self.value(state_value) - - advantage = F.relu(self.advantage_fc_norm(self.advantage_fc(x))) - advantage = self.advantage(advantage) - - x = state_value + advantage - advantage.mean() - - return x - - -config = {} -config['seed'] = 901 -config['environment_name'] = 'Acrobot-v1' -config['memory_size'] = 2000 -config['total_training_episodes'] = 50 -config['total_evaluation_episodes'] = 5 -config['batch_size'] = 32 -config['learning_rate'] = 1e-3 -config['target_sync_tau'] = 1e-1 -config['discount_rate'] = 0.99 -config['replay_skip'] = 0 -# How many episodes between printing out the episode stats -config['print_stat_n_eps'] = 1 -config['disable_cuda'] = False -# Prioritized vs Random Sampling -# 0 - Random sampling -# 1 - Only the highest prioirities -config['prioritized_replay_sampling_priority'] = 0.6 -# How important are the weights for the loss? 
-# 0 - Treat all losses equally -# 1 - Lower the importance of high losses -# Should ideally start from 0 and move your way to 1 to prevent overfitting -config['prioritized_replay_weight_importance'] = rltorch.scheduler.ExponentialScheduler(initial_value = 0.4, end_value = 1, iterations = 5000) - -def train(runner, agent, config, logger = None, logwriter = None): - finished = False - last_episode_num = 1 - while not finished: - runner.run(config['replay_skip'] + 1) - agent.learn() - if logwriter is not None: - if last_episode_num < runner.episode_num: - last_episode_num = runner.episode_num - agent.net.log_named_parameters() - logwriter.write(logger) - finished = runner.episode_num > config['total_training_episodes'] - -if __name__ == "__main__": - torch.multiprocessing.set_sharing_strategy('file_system') # To not hit file descriptor memory limit - - # Setting up the environment - rltorch.set_seed(config['seed']) - print("Setting up environment...", end = " ") - env = E.TorchWrap(gym.make(config['environment_name'])) - env.seed(config['seed']) - print("Done.") - - state_size = env.observation_space.shape[0] - action_size = env.action_space.n - - # Logging - logger = rltorch.log.Logger() - # logwriter = rltorch.log.LogWriter(logger, SummaryWriter()) - logwriter = rltorch.log.LogWriter(SummaryWriter()) - - # Setting up the networks - device = torch.device("cuda:0" if torch.cuda.is_available() and not config['disable_cuda'] else "cpu") - net = rn.Network(Value(state_size, action_size), - torch.optim.Adam, config, device = device, name = "DQN", logger = logger) - target_net = rn.TargetNetwork(net, device = device) - net.model.share_memory() - target_net.model.share_memory() - - # Actor takes a net and uses it to produce actions from given states - actor = ArgMaxSelector(net, action_size, device = device) - # Memory stores experiences for later training - memory = M.PrioritizedReplayMemory(capacity = config['memory_size'], alpha = config['prioritized_replay_sampling_priority']) - # memory = M.ReplayMemory(capacity = config['memory_size']) - - # Runner performs a certain number of steps in the environment - runner = rltorch.env.EnvironmentRunSync(env, actor, config, name = "Training", memory = memory, logwriter = logwriter) - - # Agent is what performs the training - agent = rltorch.agents.DQNAgent(net, memory, config, target_net = target_net, logger = logger) - - print("Training...") - - train(runner, agent, config, logger = logger, logwriter = logwriter) - - # For profiling... - # import cProfile - # cProfile.run('train(runner, agent, config, logger = logger, logwriter = logwriter )') - # python -m torch.utils.bottleneck /path/to/source/script.py [args] is also a good solution... 
- - print("Training Finished.") - - print("Evaluating...") - rltorch.env.simulateEnvEps(env, actor, config, total_episodes = config['total_evaluation_episodes'], logger = logger, name = "Evaluation") - print("Evaulations Done.") - - logwriter.close() # We don't need to write anything out to disk anymore diff --git a/examples/pong.py b/examples/pong.py deleted file mode 100644 index c9820a2..0000000 --- a/examples/pong.py +++ /dev/null @@ -1,147 +0,0 @@ -import gym -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import rltorch -import rltorch.network as rn -import rltorch.memory as M -import rltorch.env as E -from rltorch.action_selector import ArgMaxSelector -from tensorboardX import SummaryWriter -import torch.multiprocessing as mp - -class Value(nn.Module): - def __init__(self, state_size, action_size): - super(Value, self).__init__() - self.state_size = state_size - self.action_size = action_size - - self.conv1 = nn.Conv2d(4, 32, kernel_size = (8, 8), stride = (4, 4)) - self.conv_norm1 = nn.LayerNorm([32, 19, 19]) - self.conv2 = nn.Conv2d(32, 64, kernel_size = (4, 4), stride = (2, 2)) - self.conv_norm2 = nn.LayerNorm([64, 8, 8]) - self.conv3 = nn.Conv2d(64, 64, kernel_size = (3, 3), stride = (1, 1)) - self.conv_norm3 = nn.LayerNorm([64, 6, 6]) - - self.fc1 = rn.NoisyLinear(64 * 6 * 6, 384) - self.fc_norm = nn.LayerNorm(384) - - self.value_fc = rn.NoisyLinear(384, 384) - self.value_fc_norm = nn.LayerNorm(384) - self.value = rn.NoisyLinear(384, 1) - - self.advantage_fc = rn.NoisyLinear(384, 384) - self.advantage_fc_norm = nn.LayerNorm(384) - self.advantage = rn.NoisyLinear(384, action_size) - - - def forward(self, x): - x = F.relu(self.conv_norm1(self.conv1(x))) - x = F.relu(self.conv_norm2(self.conv2(x))) - x = F.relu(self.conv_norm3(self.conv3(x))) - - # Makes batch_size dimension again - x = x.view(-1, 64 * 6 * 6) - x = F.relu(self.fc_norm(self.fc1(x))) - - state_value = F.relu(self.value_fc_norm(self.value_fc(x))) - state_value = self.value(state_value) - - advantage = F.relu(self.advantage_fc_norm(self.advantage_fc(x))) - advantage = self.advantage(advantage) - - x = state_value + advantage - advantage.mean() - - # For debugging purposes... - if torch.isnan(x).any().item(): - print("WARNING NAN IN MODEL DETECTED") - - return x - - - - - -config = {} -config['seed'] = 901 -config['environment_name'] = 'PongNoFrameskip-v4' -config['memory_size'] = 5000 -config['total_training_episodes'] = 500 -config['total_evaluation_episodes'] = 10 -config['learning_rate'] = 1e-4 -config['target_sync_tau'] = 1e-3 -config['discount_rate'] = 0.99 -config['exploration_rate'] = rltorch.scheduler.ExponentialScheduler(initial_value = 0.1, end_value = 0.01, iterations = 5000) -config['replay_skip'] = 4 -config['batch_size'] = 32 * (config['replay_skip'] + 1) -# How many episodes between printing out the episode stats -config['print_stat_n_eps'] = 1 -config['disable_cuda'] = False -# Prioritized vs Random Sampling -# 0 - Random sampling -# 1 - Only the highest prioirities -config['prioritized_replay_sampling_priority'] = 0.6 -# How important are the weights for the loss? 
-# 0 - Treat all losses equally -# 1 - Lower the importance of high losses -# Should ideally start from 0 and move your way to 1 to prevent overfitting -config['prioritized_replay_weight_importance'] = rltorch.scheduler.ExponentialScheduler(initial_value = 0.4, end_value = 1, iterations = 5000) - -if __name__ == "__main__": - torch.multiprocessing.set_sharing_strategy('file_system') # To not hit file descriptor memory limit - - # Setting up the environment - rltorch.set_seed(config['seed']) - print("Setting up environment...", end = " ") - env = E.FrameStack(E.TorchWrap( - E.ProcessFrame(E.FireResetEnv(gym.make(config['environment_name'])), - resize_shape = (80, 80), crop_bounds = [34, 194, 15, 145], grayscale = True)) - , 4) - env.seed(config['seed']) - print("Done.") - - state_size = env.observation_space.shape[0] - action_size = env.action_space.n - - # Logging - logger = rltorch.log.Logger() - logwriter = rltorch.log.LogWriter(SummaryWriter()) - - # Setting up the networks - device = torch.device("cuda:0" if torch.cuda.is_available() and not config['disable_cuda'] else "cpu") - net = rn.Network(Value(state_size, action_size), - torch.optim.Adam, config, device = device, name = "DQN") - target_net = rn.TargetNetwork(net, device = device) - net.model.share_memory() - target_net.model.share_memory() - - # Actor takes a net and uses it to produce actions from given states - actor = ArgMaxSelector(net, action_size, device = device) - # Memory stores experiences for later training - memory = M.PrioritizedReplayMemory(capacity = config['memory_size'], alpha = config['prioritized_replay_sampling_priority']) - # memory = M.ReplayMemory(capacity = config['memory_size']) - - # Runner performs a certain number of steps in the environment - runner = rltorch.mp.EnvironmentRun(env, actor, config, name = "Training", memory = memory, logwriter = logwriter) - - # Agent is what performs the training - agent = rltorch.agents.DQNAgent(net, memory, config, target_net = target_net, logger = logger) - - print("Training...") - - train(runner, agent, config, logger = logger, logwriter = logwriter) - - # For profiling... - # import cProfile - # cProfile.run('train(runner, agent, config, logger = logger, logwriter = logwriter )') - # python -m torch.utils.bottleneck /path/to/source/script.py [args] is also a good solution... 
- - print("Training Finished.") - runner.terminate() # We don't need the extra process anymore - - print("Evaluating...") - rltorch.env.simulateEnvEps(env, actor, config, total_episodes = config['total_evaluation_episodes'], logger = logger, name = "Evaluation") - print("Evaulations Done.") - - logwriter.close() # We don't need to write anything out to disk anymore diff --git a/rltorch/agents/A2CSingleAgent.py b/rltorch/agents/A2CSingleAgent.py index 4d04eab..305c543 100644 --- a/rltorch/agents/A2CSingleAgent.py +++ b/rltorch/agents/A2CSingleAgent.py @@ -1,12 +1,8 @@ from copy import deepcopy -import numpy as np import torch import torch.nn.functional as F -from torch.distributions import Categorical import rltorch import rltorch.memory as M -import collections -import random class A2CSingleAgent: def __init__(self, policy_net, value_net, memory, config, logger = None): @@ -25,11 +21,11 @@ class A2CSingleAgent: return discounted_rewards - def learn(self): episode_batch = self.memory.recall() state_batch, action_batch, reward_batch, next_state_batch, done_batch, log_prob_batch = zip(*episode_batch) + # Send batches to the appropriate device state_batch = torch.cat(state_batch).to(self.value_net.device) reward_batch = torch.tensor(reward_batch).to(self.value_net.device) not_done_batch = ~torch.tensor(done_batch).to(self.value_net.device) @@ -37,12 +33,16 @@ class A2CSingleAgent: log_prob_batch = torch.cat(log_prob_batch).to(self.value_net.device) ## Value Loss + # In A2C, the value loss is the difference between the discounted reward and the value from the first state + # The value of the first state is supposed to tell us the expected reward from the current policy of the whole episode value_loss = F.mse_loss(self._discount_rewards(reward_batch).sum(), self.value_net(state_batch[0])) self.value_net.zero_grad() value_loss.backward() self.value_net.step() ## Policy Loss + # Increase probabilities of advantageous states + # and decrease the probabilities of non-advantageous ones with torch.no_grad(): state_values = self.value_net(state_batch) next_state_values = torch.zeros_like(state_values) @@ -61,8 +61,7 @@ class A2CSingleAgent: policy_loss.backward() self.policy_net.step() - - # Memory is irrelevant for future training + # Memory under the old policy is not needed for future training self.memory.clear() diff --git a/rltorch/agents/DQNAgent.py b/rltorch/agents/DQNAgent.py index 2e3d407..a73391f 100644 --- a/rltorch/agents/DQNAgent.py +++ b/rltorch/agents/DQNAgent.py @@ -55,6 +55,7 @@ class DQNAgent: expected_values = (reward_batch + (self.config['discount_rate'] * best_next_state_value)).unsqueeze(1) + # If we're sampling by TD error, multiply loss by a importance weight which helps decrease overfitting if (isinstance(self.memory, M.PrioritizedReplayMemory)): loss = (torch.as_tensor(importance_weights, device = self.net.device) * ((obtained_values - expected_values)**2).squeeze(1)).mean() else: @@ -74,6 +75,7 @@ class DQNAgent: else: self.target_net.sync() + # If we're sampling by TD error, readjust the weights of the experiences if (isinstance(self.memory, M.PrioritizedReplayMemory)): td_error = (obtained_values - expected_values).detach().abs() self.memory.update_priorities(batch_indexes, td_error) diff --git a/rltorch/agents/PPOAgent.py b/rltorch/agents/PPOAgent.py index 8ea24f7..44c1f5d 100644 --- a/rltorch/agents/PPOAgent.py +++ b/rltorch/agents/PPOAgent.py @@ -31,6 +31,7 @@ class PPOAgent: episode_batch = self.memory.recall() state_batch, action_batch, reward_batch, next_state_batch, 
done_batch, log_prob_batch = zip(*episode_batch) + # Send batches to the appropriate device state_batch = torch.cat(state_batch).to(self.value_net.device) action_batch = torch.tensor(action_batch).to(self.value_net.device) reward_batch = torch.tensor(reward_batch).to(self.value_net.device) @@ -39,12 +40,16 @@ class PPOAgent: log_prob_batch = torch.cat(log_prob_batch).to(self.value_net.device) ## Value Loss + # In PPO, the value loss is the difference between the discounted reward and the value from the first state + # The value of the first state is supposed to tell us the expected reward from the current policy of the whole episode value_loss = F.mse_loss(self._discount_rewards(reward_batch).sum(), self.value_net(state_batch[0])) self.value_net.zero_grad() value_loss.backward() self.value_net.step() ## Policy Loss + # Increase probabilities of advantageous states + # and decrease the probabilities of non-advantageous ones with torch.no_grad(): state_values = self.value_net(state_batch) next_state_values = torch.zeros_like(state_values) @@ -56,6 +61,7 @@ class PPOAgent: distributions = list(map(Categorical, action_probabilities)) old_log_probs = torch.stack(list(map(lambda distribution, action: distribution.log_prob(action), distributions, action_batch))) + # For PPO we want to stay within a certain ratio of the old policy policy_ratio = torch.exp(log_prob_batch - old_log_probs) # Equivalent to (log_prob / old_log_prob) policy_loss1 = policy_ratio * advantages policy_loss2 = policy_ratio.clamp(min = 0.8, max = 1.2) * advantages # From original paper @@ -72,7 +78,7 @@ class PPOAgent: self.policy_net.step() - # Memory is irrelevant for future training + # Memory under the old policy is not needed for future training self.memory.clear() diff --git a/rltorch/network/ESNetwork.py b/rltorch/network/ESNetwork.py index b3f372c..be0970e 100644 --- a/rltorch/network/ESNetwork.py +++ b/rltorch/network/ESNetwork.py @@ -3,7 +3,8 @@ import torch from .Network import Network from copy import deepcopy -# [TODO] See if you need to move network to device +# [TODO] Should we torch.no_grad the __call__? +# What if we want to sometimes do gradient descent as well? class ESNetwork(Network): """ Network that functions from the paper Evolutionary Strategies (https://arxiv.org/abs/1703.03864)
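
The comments added to PPOAgent.py describe keeping the updated policy within a fixed ratio of the policy that collected the episode. The following is a minimal, self-contained sketch of that clipped surrogate loss, not the PPOAgent code itself; the tensor values in the usage example are made up, and the default clip of 0.2 reproduces the 0.8/1.2 bounds used in the clamp above.

import torch

def clipped_policy_loss(new_log_probs, old_log_probs, advantages, clip=0.2):
    ratio = torch.exp(new_log_probs - old_log_probs)            # pi_new(a|s) / pi_old(a|s)
    unclipped = ratio * advantages
    clipped = ratio.clamp(1.0 - clip, 1.0 + clip) * advantages  # same bounds as clamp(min=0.8, max=1.2)
    # Take the elementwise minimum (the pessimistic bound), then negate to turn it into a loss
    return -torch.min(unclipped, clipped).mean()

if __name__ == "__main__":
    new_lp = torch.tensor([-0.1, -2.0, -0.5])   # made-up log-probs under the updated policy
    old_lp = torch.tensor([-0.3, -0.4, -0.5])   # made-up log-probs under the data-collecting policy
    adv = torch.tensor([1.0, -0.5, 2.0])        # made-up advantage estimates
    print(clipped_policy_loss(new_lp, old_lp, adv))

Taking the elementwise minimum makes the objective pessimistic: pushing the ratio outside [0.8, 1.2] can never increase it, so a single learn() call cannot move the policy far from the one that gathered the data.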