Use Tanh activation to preserve negative values

Mattéo Delabre 2019-12-14 23:03:02 -05:00
parent a01d83f339
commit d0af8fc3da
Signed by: matteo
GPG Key ID: AE3FBD02DC583ABB
1 changed file with 3 additions and 4 deletions


@@ -32,13 +32,12 @@ class ConstellationNet(nn.Module):
         for layer_size in encoder_layers_sizes:
             encoder_layers.append(nn.Linear(prev_layer_size, layer_size))
-            encoder_layers.append(nn.ReLU())
+            encoder_layers.append(nn.Tanh())
             prev_layer_size = layer_size
 
         encoder_layers += [
             nn.Linear(prev_layer_size, 2),
-            nn.ReLU(),
-            # TODO: Normalization step
+            nn.Tanh(),
         ]
 
         self.encoder = nn.Sequential(*encoder_layers)
@@ -55,7 +54,7 @@ class ConstellationNet(nn.Module):
         for layer_size in decoder_layers_sizes:
             decoder_layers.append(nn.Linear(prev_layer_size, layer_size))
-            decoder_layers.append(nn.ReLU())
+            decoder_layers.append(nn.Tanh())
             prev_layer_size = layer_size
 
         # Softmax is not used at the end of the network because the
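
For context on the commit message: ReLU clamps every negative pre-activation to zero, while Tanh maps its input into (-1, 1) and keeps the sign, so the encoder can place constellation points in all four quadrants. A minimal sketch of the difference, assuming only that torch is installed (the tensor values are made up for illustration):

import torch
import torch.nn as nn

# Hypothetical pre-activations, including negative values.
x = torch.tensor([-2.0, -0.5, 0.0, 0.5, 2.0])

print(nn.ReLU()(x))  # tensor([0.0000, 0.0000, 0.0000, 0.5000, 2.0000])   -- negatives clipped to zero
print(nn.Tanh()(x))  # tensor([-0.9640, -0.4621, 0.0000, 0.4621, 0.9640]) -- sign preserved, bounded in (-1, 1)

Because Tanh also bounds the encoder output to (-1, 1), it acts as a soft normalization of the constellation points, which is presumably why the "# TODO: Normalization step" line is dropped in the same change.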