Use Tanh activation to preserve negative values
parent a01d83f339
commit d0af8fc3da
@@ -32,13 +32,12 @@ class ConstellationNet(nn.Module):
         for layer_size in encoder_layers_sizes:
             encoder_layers.append(nn.Linear(prev_layer_size, layer_size))
-            encoder_layers.append(nn.ReLU())
+            encoder_layers.append(nn.Tanh())
             prev_layer_size = layer_size
 
         encoder_layers += [
             nn.Linear(prev_layer_size, 2),
-            nn.ReLU(),
-            # TODO: Normalization step
+            nn.Tanh(),
         ]
 
         self.encoder = nn.Sequential(*encoder_layers)
 
@@ -55,7 +54,7 @@ class ConstellationNet(nn.Module):
 
         for layer_size in decoder_layers_sizes:
             decoder_layers.append(nn.Linear(prev_layer_size, layer_size))
-            decoder_layers.append(nn.ReLU())
+            decoder_layers.append(nn.Tanh())
             prev_layer_size = layer_size
 
         # Softmax is not used at the end of the network because the
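For context, a standalone sketch of the encoder construction after this change; build_encoder and its arguments are hypothetical stand-ins for the surrounding __init__ code that the diff does not show.

import torch.nn as nn

def build_encoder(input_size, encoder_layers_sizes):
    # Hypothetical helper mirroring the patched encoder logic above.
    encoder_layers = []
    prev_layer_size = input_size
    for layer_size in encoder_layers_sizes:
        encoder_layers.append(nn.Linear(prev_layer_size, layer_size))
        encoder_layers.append(nn.Tanh())  # was nn.ReLU() before this commit
        prev_layer_size = layer_size
    encoder_layers += [
        nn.Linear(prev_layer_size, 2),  # one 2-D constellation point per symbol
        nn.Tanh(),
    ]
    return nn.Sequential(*encoder_layers)

encoder = build_encoder(4, [8, 8])  # e.g. one-hot symbols of size 4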