From 0769a61fcff72567eeeb7314d287cb0196f50380 Mon Sep 17 00:00:00 2001
From: Mattéo Delabre
Date: Wed, 18 Dec 2019 09:41:50 -0500
Subject: [PATCH] Use best parameters as found by experimentation

---
 train.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/train.py b/train.py
index e4d0948..24d0155 100644
--- a/train.py
+++ b/train.py
@@ -4,16 +4,19 @@ import torch
 from matplotlib import pyplot
 from mpl_toolkits.axisartist.axislines import SubplotZero
 
-torch.manual_seed(42)
+torch.manual_seed(57)
 
 # Number of symbols to learn
 order = 16
 
+# Initial value for the learning rate
+initial_learning_rate = 0.1
+
 # Number of batches to skip between every loss report
 loss_report_batch_skip = 50
 
 # Size of batches
-batch_size = 32
+batch_size = 2048
 
 # File in which the trained model is saved
 output_file = 'output/constellation-order-{}.pth'.format(order)
@@ -30,8 +33,8 @@ pyplot.show(block=False)
 # Train the model with random data
 model = constellation.ConstellationNet(
     order=order,
-    encoder_layers_sizes=(8, 4),
-    decoder_layers_sizes=(4, 8),
+    encoder_layers_sizes=(8, 4,),
+    decoder_layers_sizes=(4, 8,),
     channel_model=constellation.GaussianChannel()
 )
 
@@ -52,7 +55,7 @@ total_change = float('inf')
 
 # Optimizer settings
 criterion = torch.nn.CrossEntropyLoss()
-optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
+optimizer = torch.optim.Adam(model.parameters(), lr=initial_learning_rate)
 scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
     optimizer, verbose=True, factor=0.25,
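
As a reading aid only (not part of the patch), below is a sketch of the relevant slice of train.py once this patch applies, with the plotting setup and the rest of the training loop omitted. `constellation.ConstellationNet` and `constellation.GaussianChannel` are taken from this repository exactly as the hunks above show them; the closing training step is a hypothetical usage example, since the model's forward interface does not appear in this diff.

    import torch

    import constellation  # this repository's package, used throughout train.py

    torch.manual_seed(57)

    order = 16                   # number of symbols to learn
    initial_learning_rate = 0.1  # initial value for the learning rate
    batch_size = 2048            # size of batches

    # Constructor arguments exactly as set by this patch
    model = constellation.ConstellationNet(
        order=order,
        encoder_layers_sizes=(8, 4,),
        decoder_layers_sizes=(4, 8,),
        channel_model=constellation.GaussianChannel()
    )

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=initial_learning_rate)

    # Quarter the learning rate whenever the loss plateaus (factor=0.25 as in
    # the patch); the remaining scheduler arguments, cut off at the end of the
    # last hunk, are left at their PyTorch defaults here
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, verbose=True, factor=0.25,
    )

    # Hypothetical single step, assuming the net maps a batch of symbol
    # indices to logits over the `order` classes (the actual forward
    # interface is not shown in this diff)
    symbols = torch.randint(order, (batch_size,))
    loss = criterion(model(symbols), symbols)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step(loss)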