Use self.env instead of defining self._env
Commit c051e5f (parent 89b2dc1), committed by seungjaeryanlee on Aug 3, 2019
1 changed file: tf_agents/environments/atari_preprocessing.py (2 additions, 3 deletions)
@@ -72,7 +72,6 @@ def __init__(self,
       ValueError: if frame_skip or screen_size are not strictly positive.
     """
     super(AtariPreprocessing, self).__init__(env)
-    self._env = env
 
     # Return the observation space adjusted to match the shape of the processed
     # observations.
@@ -111,7 +110,7 @@ def reset(self):
       observation: numpy array, the initial observation emitted by the
         environment.
     """
-    self._env.reset()
+    self.env.reset()
     self.lives = self.env.ale.lives()
     self.game_over = False
     self._fetch_grayscale_observation(self.screen_buffer[0])
@@ -144,7 +143,7 @@ def step(self, action):
     for time_step in range(self.frame_skip):
       # We bypass the Gym observation altogether and directly fetch the
       # grayscale image from the ALE. This is a little faster.
-      _, reward, game_over, info = self._env.step(action)
+      _, reward, game_over, info = self.env.step(action)
       accumulated_reward += reward
 
       if self.terminal_on_life_loss:
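
Why the deleted line was redundant: AtariPreprocessing calls super(AtariPreprocessing, self).__init__(env), and a gym.Wrapper base class already stores the wrapped environment as self.env, so keeping a separate self._env alias just duplicates state. A minimal sketch of the same pattern, assuming a gym.Wrapper subclass (MinimalWrapper is illustrative, not part of this commit):

import gym


class MinimalWrapper(gym.Wrapper):
  """Illustrative wrapper relying on gym.Wrapper's built-in self.env."""

  def __init__(self, env):
    # gym.Wrapper.__init__ already stores the wrapped environment as
    # self.env, so no extra self._env attribute is needed.
    super(MinimalWrapper, self).__init__(env)

  def reset(self):
    # Delegate through the inherited self.env, as the commit does.
    return self.env.reset()

  def step(self, action):
    return self.env.step(action)

With the base class holding the reference, every method can reach the wrapped environment through self.env, which is why the commit can drop the self._env assignment and switch reset() and step() over to self.env.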
