import os
import torch as T  # the rest of the file refers to torch via the T alias
import torch.nn.functional as F
import numpy as np
from buffer import ReplayBuffer
from networks import ActorNetwork, CriticNetwork, ValueNetwork


class SoftActorCritic():
    def __init__(self, alpha=3e-4, beta=3e-4, input_dims=[8],
                 env=None, gamma=0.99, tau=5e-3, n_actions=2, max_size=1000000,
                 batch_size=256, reward_scale=2):
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions

        self.actor = ActorNetwork(alpha, input_dims, n_actions=n_actions,
                                  max_action=env.action_space.high)

        self.critic1 = CriticNetwork(beta, input_dims, n_actions=n_actions,
                                     name='critic1')
        self.critic2 = CriticNetwork(beta, input_dims, n_actions=n_actions,
                                     name='critic2')

        self.value = ValueNetwork(beta, input_dims, name='value')
        self.target_value = ValueNetwork(beta, input_dims, name='target_value')

        self.scale = reward_scale
        # tau=1 performs a hard copy so the target value network starts out
        # identical to the value network
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        state = T.Tensor([observation]).to(self.actor.device)
        # the keyword must match ActorNetwork.sample_normal; reparameterization
        # is not needed when only acting, since no gradient flows through here
        actions, _ = self.actor.sample_normal(state, reparameterize=False)

        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()

        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)

        # Polyak averaging: move the target network a fraction tau towards
        # the online value network
        for name in value_state_dict:
            value_state_dict[name] = tau*value_state_dict[name].clone() + \
                    (1-tau)*target_value_state_dict[name].clone()

        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        print('... saving models ...')
        self.actor.save()
        self.critic1.save()
        self.critic2.save()
        self.value.save()
        self.target_value.save()

    def load_models(self):
        print('... loading models ...')
        self.actor.load()
        self.critic1.load()
        self.critic2.load()
        self.value.load()
        self.target_value.load()

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return

        state, action, reward, new_state, done = \
                self.memory.sample_buffer(self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)
        done = T.tensor(done).to(self.actor.device)
        new_state = T.tensor(new_state, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)

        value = self.value(state).view(-1)
        target_value = self.target_value(new_state).view(-1)
        target_value[done] = 0.0    # terminal next states have zero value

        # value network update
        actions, log_probs = self.actor.sample_normal(state, reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic1.forward(state, actions)
        q2_new_policy = self.critic2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5*F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        # actor (policy) update
        actions, log_probs = self.actor.sample_normal(state, reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic1.forward(state, actions)
        q2_new_policy = self.critic2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        actor_loss = log_probs - critic_value
        actor_loss = T.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        # critic update: the TD target uses the target value network's
        # estimate of the next state (zeroed above for terminal states)
        self.critic1.optimizer.zero_grad()
        self.critic2.optimizer.zero_grad()
        q_hat = self.scale*reward + self.gamma*target_value

        q1_old_policy = self.critic1.forward(state, action).view(-1)
        q2_old_policy = self.critic2.forward(state, action).view(-1)
        critic1_loss = 0.5*F.mse_loss(q1_old_policy, q_hat)
        critic2_loss = 0.5*F.mse_loss(q2_old_policy, q_hat)

        critic_loss = critic1_loss + critic2_loss
        critic_loss.backward()
        self.critic1.optimizer.step()
        self.critic2.optimizer.step()

        self.update_network_parameters()
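# Example usage (not part of the original file): a minimal training-loop
# sketch, assuming a Gym-style continuous-action environment such as
# LunarLanderContinuous-v2 and the classic (obs, reward, done, info) step API;
# the environment name and episode count are placeholders.
if __name__ == '__main__':
    import gym

    env = gym.make('LunarLanderContinuous-v2')
    agent = SoftActorCritic(input_dims=env.observation_space.shape, env=env,
                            n_actions=env.action_space.shape[0])

    for episode in range(250):
        observation = env.reset()
        done = False
        score = 0
        while not done:
            action = agent.choose_action(observation)
            observation_, reward, done, info = env.step(action)
            agent.remember(observation, action, reward, observation_, done)
            agent.learn()
            score += reward
            observation = observation_
        print(f'episode {episode} score {score:.1f}')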