Updated single-agent env

Author: Vasilis Valatsos
Date:   2023-11-24 15:40:34 +01:00
parent d3e2a23535
commit fd320c3cf3


@@ -21,7 +21,7 @@ figure_file = 'plots/score_sp.png'
 game = Game()
-agent_list = [0]
+agent_list = [game.level.player_sprites[0].agent]
 score_history = np.zeros(shape=(game.max_num_players, n_episodes))
 best_score = np.zeros(game.max_num_players)
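The point of the change above: agent_list used to hold a placeholder 0, whereas it now holds a reference to the agent the Game already attached to its single player sprite. A small illustration of the aliasing this relies on, using only names that appear in the diff (the assert is mine, not part of the commit):

agent_list = [game.level.player_sprites[0].agent]
# ...after game.level.__init__(reset=True) rebuilds the sprites, the hunk below
# hands the same trained agent object back to the new player:
game.level.player_sprites[0].agent = agent_list[0]
assert game.level.player_sprites[0].agent is agent_list[0]  # same object, weights kept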
@@ -32,12 +32,12 @@ for i in tqdm(range(n_episodes)):
     if i != 0:
         game.level.__init__(reset=True)
         # TODO: Make game.level.reset_map() so we don't pull out and load the agent every time (There is -definitively- a better way)
         for player in game.level.player_sprites:
             player.stats.exp = score_history[player.player_id][i-1]
             player.agent = agent_list[0]
-    agent_list = [0]
+    agent_list = [game.level.player_sprites[0].agent]
     for j in range(game_len):
         if not game.level.done:
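The TODO in this hunk asks for a game.level.reset_map() that keeps the agent across resets, so the loop no longer has to pull it out and reattach it by hand. A minimal sketch of such a helper, assuming only the attribute names visible in the diff (player_sprites, player_id, stats.exp, agent) and that __init__(reset=True) rebuilds the sprites with stable player ids; the function itself is hypothetical and not part of this commit:

def reset_map(level, score_history, episode):
    # Hypothetical helper sketched from the TODO above; not in this commit.
    # Stash each player's live (trained) agent before the sprites are rebuilt.
    saved_agents = {p.player_id: p.agent for p in level.player_sprites}
    # Rebuild the map exactly as the loop does today.
    level.__init__(reset=True)
    # Reattach the stashed agents and carry the exp over from the previous episode.
    for player in level.player_sprites:
        player.agent = saved_agents[player.player_id]
        player.stats.exp = score_history[player.player_id][episode - 1]

With something like this in place, the body of "if i != 0:" above collapses to a single reset_map(game.level, score_history, i) call, and agent_list can keep pointing at the same agent object throughout training.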