Fixed parallel implementation of getting experiences by using a queue

Brandon Rozek 2019-02-13 00:36:23 -05:00
parent 5094ed53af
commit 115543d201
4 changed files with 33 additions and 22 deletions
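In short, the commit stops handing the replay memory object itself to the runner process and instead passes experiences back through a torch.multiprocessing.Queue: the runner's run() puts each (state, action, reward, next_state, done) tuple on the queue, and the training loop drains it into the replay memory around each learn() call. Below is a minimal, self-contained sketch of that pattern; the dummy transitions and the plain list stand in for the real environment and PrioritizedReplayMemory, and the function and variable names are mine, not rltorch's.

import torch.multiprocessing as mp

def rollout(queue, steps):
    # Worker process: generate placeholder transitions and push them onto the queue.
    for t in range(steps):
        queue.put((t, 0, 1.0, t + 1, False))  # (state, action, reward, next_state, done)

if __name__ == "__main__":
    steps = 4
    queue = mp.Queue(maxsize=steps)
    replay_memory = []  # stand-in for M.PrioritizedReplayMemory

    worker = mp.Process(target=rollout, args=(queue, steps))
    worker.start()
    for _ in range(steps):                  # drain exactly `steps` items so get() cannot block forever
        replay_memory.append(queue.get())
    worker.join()
    print(len(replay_memory), "transitions collected")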

View file

@@ -9,7 +9,7 @@ import rltorch.memory as M
 import rltorch.env as E
 from rltorch.action_selector import ArgMaxSelector
 from tensorboardX import SummaryWriter
+import torch.multiprocessing as mp
 class Value(nn.Module):
     def __init__(self, state_size, action_size):
@@ -28,7 +28,6 @@ class Value(nn.Module):
         self.advantage_fc_norm = nn.LayerNorm(64)
         self.advantage = rn.NoisyLinear(64, action_size)
     def forward(self, x):
         x = F.relu(self.fc_norm(self.fc1(x)))
@@ -67,13 +66,16 @@ config['prioritized_replay_sampling_priority'] = 0.6
 # Should ideally start from 0 and move your way to 1 to prevent overfitting
 config['prioritized_replay_weight_importance'] = rltorch.scheduler.ExponentialScheduler(initial_value = 0.4, end_value = 1, iterations = 5000)
-def train(runner, agent, config, logwriter = None):
+def train(runner, agent, config, logwriter = None, memory = None):
     finished = False
     episode_num = 1
+    memory_queue = mp.Queue(maxsize = config['replay_skip'] + 1)
     while not finished:
-        runner.run(config['replay_skip'] + 1, printstat = runner.episode_num % config['print_stat_n_eps'] == 0)
+        runner.run(config['replay_skip'] + 1, printstat = runner.episode_num % config['print_stat_n_eps'] == 0, memory = memory_queue)
         agent.learn()
         runner.join()
+        for i in range(config['replay_skip'] + 1):
+            memory.append(*memory_queue.get())
         # When the episode number changes, write out the weight histograms
         if logwriter is not None and episode_num < runner.episode_num:
             episode_num = runner.episode_num
@@ -84,6 +86,7 @@ def train(runner, agent, config, logwriter = None):
         finished = runner.episode_num > config['total_training_episodes']
+torch.multiprocessing.set_sharing_strategy('file_system') # To not hit file descriptor memory limit
 # Setting up the environment
 rltorch.set_seed(config['seed'])
 print("Setting up environment...", end = " ")
@@ -98,11 +101,14 @@ action_size = env.action_space.n
 logger = rltorch.log.Logger()
 logwriter = rltorch.log.LogWriter(logger, SummaryWriter())
 # Setting up the networks
 device = torch.device("cuda:0" if torch.cuda.is_available() and not config['disable_cuda'] else "cpu")
 net = rn.Network(Value(state_size, action_size),
     torch.optim.Adam, config, device = device, logger = logger, name = "DQN")
 target_net = rn.TargetNetwork(net, device = device)
+net.model.share_memory()
+target_net.model.share_memory()
 # Actor takes a net and uses it to produce actions from given states
 actor = ArgMaxSelector(net, action_size, device = device)
@@ -111,14 +117,14 @@ memory = M.PrioritizedReplayMemory(capacity = config['memory_size'], alpha = con
 # memory = M.ReplayMemory(capacity = config['memory_size'])
 # Runner performs a certain number of steps in the environment
-runner = rltorch.mp.EnvironmentRun(env, actor, config, memory = memory, logger = logger, name = "Training")
+runner = rltorch.mp.EnvironmentRun(env, actor, config, logger = logger, name = "Training")
 runner.start()
 # Agent is what performs the training
 agent = rltorch.agents.DQNAgent(net, memory, config, target_net = target_net, logger = logger)
 print("Training...")
-train(runner, agent, config, logwriter = logwriter)
+train(runner, agent, config, logwriter = logwriter, memory = memory)
 # For profiling...
 # import cProfile

View file

@@ -9,6 +9,7 @@ import rltorch.memory as M
 import rltorch.env as E
 from rltorch.action_selector import ArgMaxSelector
 from tensorboardX import SummaryWriter
+import torch.multiprocessing as mp
 class Value(nn.Module):
     def __init__(self, state_size, action_size):
@@ -87,13 +88,16 @@ config['prioritized_replay_sampling_priority'] = 0.6
 # Should ideally start from 0 and move your way to 1 to prevent overfitting
 config['prioritized_replay_weight_importance'] = rltorch.scheduler.ExponentialScheduler(initial_value = 0.4, end_value = 1, iterations = 5000)
-def train(runner, agent, config, logwriter = None):
+def train(runner, agent, config, logwriter = None, memory = None):
     finished = False
     episode_num = 1
+    memory_queue = mp.Queue(maxsize = config['replay_skip'] + 1)
     while not finished:
-        runner.run(config['replay_skip'] + 1, printstat = runner.episode_num % config['print_stat_n_eps'] == 0)
+        runner.run(config['replay_skip'] + 1, printstat = runner.episode_num % config['print_stat_n_eps'] == 0, memory = memory_queue)
         agent.learn()
         runner.join()
+        for i in range(config['replay_skip'] + 1):
+            memory.append(*memory_queue.get())
         # When the episode number changes, write out the weight histograms
         if logwriter is not None and episode_num < runner.episode_num:
             episode_num = runner.episode_num
@@ -104,6 +108,7 @@ def train(runner, agent, config, logwriter = None):
         finished = runner.episode_num > config['total_training_episodes']
+torch.multiprocessing.set_sharing_strategy('file_system') # To not hit file descriptor memory limit
 rltorch.set_seed(config['seed'])
 print("Setting up environment...", end = " ")
 env = E.FrameStack(E.TorchWrap(
@@ -125,6 +130,8 @@ device = torch.device("cuda:0" if torch.cuda.is_available() and not config['disa
 net = rn.Network(Value(state_size, action_size),
     torch.optim.Adam, config, device = device, logger = logger, name = "DQN")
 target_net = rn.TargetNetwork(net, device = device)
+net.model.share_memory()
+target_net.model.share_memory()
 # Actor takes a network and uses it to produce actions from given states
 actor = ArgMaxSelector(net, action_size, device = device)
@@ -132,14 +139,14 @@ actor = ArgMaxSelector(net, action_size, device = device)
 memory = M.PrioritizedReplayMemory(capacity = config['memory_size'], alpha = config['prioritized_replay_sampling_priority'])
 # Runner performs a certain number of steps in the environment
-runner = rltorch.mp.EnvironmentRun(env, actor, config, memory = memory, logger = logger, name = "Training")
+runner = rltorch.mp.EnvironmentRun(env, actor, config, logger = logger, name = "Training")
 runner.start()
 # Agent is what performs the training
 agent = rltorch.agents.DQNAgent(net, memory, config, target_net = target_net, logger = logger)
 print("Training...")
-train(runner, agent, config, logwriter = logwriter)
+train(runner, agent, config, logwriter = logwriter, memory = memory)
 # For profiling...
 # import cProfile
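A note on the two other additions in these example scripts (my reading, not stated in the commit): nn.Module.share_memory() moves the network parameters into shared memory so the spawned runner process can act with the same weights the trainer keeps updating in place, and set_sharing_strategy('file_system') backs shared tensors with temporary files instead of file descriptors so a long run does not exhaust the descriptor limit. Roughly, with a placeholder module in place of the dueling Value network:

import torch.nn as nn
import torch.multiprocessing as mp

mp.set_sharing_strategy('file_system')  # share tensors via temp files rather than file descriptors

model = nn.Linear(4, 2)                 # placeholder for the Value network
model.share_memory()                    # parameters now live in shared memory, visible to child processes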

View file

@@ -2,17 +2,16 @@ from copy import deepcopy
 import torch.multiprocessing as mp
 class EnvironmentEpisode(mp.Process):
-    def __init__(self, env, actor, config, memory = None, logger = None, name = ""):
+    def __init__(self, env, actor, config, logger = None, name = ""):
         super(EnvironmentEpisode, self).__init__()
         self.env = env
         self.actor = actor
-        self.memory = memory
         self.config = deepcopy(config)
         self.logger = logger
         self.name = name
         self.episode_num = 1
-    def run(self, printstat = False):
+    def run(self, printstat = False, memory = None):
         state = self.env.reset()
         done = False
         episode_reward = 0
@@ -21,8 +20,8 @@ class EnvironmentEpisode(mp.Process):
             next_state, reward, done, _ = self.env.step(action)
             episode_reward = episode_reward + reward
-            if self.memory is not None:
-                self.memory.append(state, action, reward, next_state, done)
+            if memory is not None:
+                memory.put((state, action, reward, next_state, done))
             state = next_state
         if printstat:

View file

@@ -2,11 +2,10 @@ from copy import deepcopy
 import torch.multiprocessing as mp
 class EnvironmentRun(mp.Process):
-    def __init__(self, env, actor, config, memory = None, logger = None, name = ""):
+    def __init__(self, env, actor, config, logger = None, name = ""):
         super(EnvironmentRun, self).__init__()
         self.env = env
         self.actor = actor
-        self.memory = memory
         self.config = deepcopy(config)
         self.logger = logger
         self.name = name
@@ -14,15 +13,15 @@ class EnvironmentRun(mp.Process):
         self.episode_reward = 0
         self.last_state = env.reset()
-    def run(self, iterations = 1, printstat = False):
+    def run(self, iterations = 1, printstat = False, memory = None):
         state = self.last_state
         for _ in range(iterations):
             action = self.actor.act(state)
             next_state, reward, done, _ = self.env.step(action)
             self.episode_reward = self.episode_reward + reward
-            if self.memory is not None:
-                self.memory.append(state, action, reward, next_state, done)
+            if memory is not None:
+                memory.put((state, action, reward, next_state, done))
             state = next_state
             if done:
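The likely motivation for passing the queue into run() rather than keeping a self.memory attribute (my reading of the commit message): anything appended to an ordinary Python object inside a child process only mutates that process's copy, so the parent's replay memory never sees the experiences, whereas a torch.multiprocessing.Queue is a real inter-process channel and items put() in the child come back out of get() in the parent. A tiny hedged demonstration of the difference, not part of the commit:

import torch.multiprocessing as mp

def child(plain_list, queue):
    plain_list.append("lost")    # mutates only the child's copy of the list
    queue.put("delivered")       # crosses the process boundary back to the parent

if __name__ == "__main__":
    items, q = [], mp.Queue()
    p = mp.Process(target=child, args=(items, q))
    p.start()
    print(q.get())               # prints: delivered
    p.join()
    print(items)                 # prints: []  -- the append never reached the parent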