Fix networks

Corentin 2021-08-30 23:21:58 +09:00
commit 1704b7aad1
4 changed files with 343 additions and 145 deletions


@@ -2,6 +2,7 @@ from argparse import ArgumentParser
from pathlib import Path
import math
import shutil
import sys
import time
import numpy as np
@@ -9,7 +10,7 @@ import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from src.torch_networks import LSTMModel, LSTMCellModel, StackedLSTMModel
from src.torch_networks import TorchLSTMModel, TorchLSTMCellModel, CustomLSTMModel, ChainLSTMLayer
from src.torch_utils.utils.batch_generator import BatchGenerator
from src.torch_utils.train import parameter_summary
@@ -50,18 +51,21 @@ class DataGenerator:
def main():
parser = ArgumentParser()
parser.add_argument('--output', type=Path, default=Path('output', 'modulo'), help='Output dir')
parser.add_argument('--model', default='torch-lstm', help='Model to train')
parser.add_argument('--batch', type=int, default=32, help='Batch size')
parser.add_argument('--sequence', type=int, default=12, help='Max sequence length')
parser.add_argument('--hidden', type=int, default=16, help='Hidden size of the LSTM cells')
parser.add_argument('--step', type=int, default=2000, help='Number of steps to train')
parser.add_argument('--model', help='Model to train')
arguments = parser.parse_args()
output_dir: Path = arguments.output
model: str = arguments.model
batch_size: int = arguments.batch
sequence_size: int = arguments.sequence
hidden_size: int = arguments.hidden
max_step: int = arguments.step
model: str = arguments.model
output_dir = output_dir.parent / f'modulo_{model}_b{batch_size}_s{sequence_size}_h{hidden_size}'
if not output_dir.exists():
output_dir.mkdir(parents=True)
if (output_dir / 'train').exists():
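Note: with the flags added here, the run configuration is encoded directly in the output path. A quick illustration of the naming scheme (the script invocation is hypothetical; only the f-string above comes from the diff):

    # python train_modulo.py --model torch-lstm --hidden 32
    # -> output_dir == Path('output/modulo_torch-lstm_b32_s12_h32')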
@@ -71,15 +75,24 @@ def main():
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
torch.backends.cudnn.benchmark = True
if model == 'stack':
network = StackedLSTMModel(1, 16, 2).to(device)
elif model == 'cell':
network = LSTMCellModel(1, 16, 2).to(device)
network = CustomLSTMModel(1, hidden_size, 2).to(device)
elif model == 'stack-torchcell':
network = CustomLSTMModel(1, hidden_size, 2, cell_class=nn.LSTMCell).to(device)
elif model == 'chain':
network = CustomLSTMModel(1, hidden_size, 2, layer_class=ChainLSTMLayer).to(device)
elif model == 'torch-cell':
network = TorchLSTMCellModel(1, hidden_size, 2).to(device)
elif model == 'torch-lstm':
network = TorchLSTMModel(1, hidden_size, 2).to(device)
else:
network = LSTMModel(1, 16, 2).to(device)
print('Error: Unknown model')
sys.exit(1)
torch.save(network.state_dict(), output_dir / 'model_ini.pt')
input_sample = torch.from_numpy(generate_data(2, 4)[0]).to(device)
writer_train.add_graph(network, (input_sample,))
# Save parameters info
with open(output_dir / 'parameters.csv', 'w') as param_file:
with open(output_dir / 'parameters.csv', 'w', encoding='utf-8') as param_file:
param_summary = parameter_summary(network)
names = [len(name) for name, _, _ in param_summary]
shapes = [len(str(shape)) for _, shape, _ in param_summary]
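Note: the if/elif model dispatch earlier in this hunk grows with every new variant. Not part of this commit, but a table-driven sketch of the same dispatch, assuming the constructor signatures shown in the diff:

    # Hypothetical alternative to the if/elif chain; same classes and
    # (input_size, hidden_size, num_classes) signatures as in the diff.
    model_factories = {
        'stack': lambda: CustomLSTMModel(1, hidden_size, 2),
        'stack-torchcell': lambda: CustomLSTMModel(1, hidden_size, 2, cell_class=nn.LSTMCell),
        'chain': lambda: CustomLSTMModel(1, hidden_size, 2, layer_class=ChainLSTMLayer),
        'torch-cell': lambda: TorchLSTMCellModel(1, hidden_size, 2),
        'torch-lstm': lambda: TorchLSTMModel(1, hidden_size, 2),
    }
    if model not in model_factories:
        print('Error: Unknown model')
        sys.exit(1)
    network = model_factories[model]().to(device)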
@@ -88,7 +101,7 @@ def main():
[f'{name: <{max(names)}} {str(shape): <{max(shapes)}} {size}'
for name, shape, size in param_summary]))
optimizer = torch.optim.Adam(network.parameters(), lr=1e-3)
optimizer = torch.optim.Adam(network.parameters(), lr=1e-3, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.995)
criterion = nn.CrossEntropyLoss()
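Note: Adam's weight_decay is classic L2 regularization folded into the adaptive update (torch.optim.AdamW is the decoupled variant). For the decay, assuming scheduler.step() runs once per training step (the call site is not shown in this hunk), the learning rate follows lr_t = 1e-3 * 0.995 ** t:

    # Back-of-the-envelope check of the ExponentialLR schedule:
    for t in (0, 100, 2000):
        print(t, 1e-3 * 0.995 ** t)
    # 0 -> 1.0e-03, 100 -> ~6.1e-04, 2000 (the --step default) -> ~4.4e-08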
@@ -99,17 +112,10 @@ def main():
(batch_size, max_step)).transpose((1, 0)), (batch_size * max_step))
dummy_label = np.zeros((batch_size * max_step), dtype=np.uint8)
DataGenerator.MAX_LENGTH = sequence_size
if model in ['cell', 'stack']:
state = [(torch.zeros((batch_size, 16)).to(device),
torch.zeros((batch_size, 16)).to(device))] * network.NUM_LAYERS
else:
state = None
with BatchGenerator(sequence_data_reshaped, dummy_label, batch_size=batch_size,
pipeline=DataGenerator.pipeline, num_workers=8, shuffle=False) as batch_generator:
# data_np, _ = generate_data(batch_size, int(np.random.uniform(4, sequence_size + 1)))
data_np = batch_generator.batch_data
label_np = batch_generator.batch_label
# writer_train.add_graph(network, (torch.from_numpy(data_np).to(device),))
running_loss = 0.0
running_accuracy = 0.0
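Note: the hand-built zero state deleted above is redundant for the torch-backed models, since nn.LSTM and nn.LSTMCell fall back to zero-filled hidden/cell states when no state is passed; presumably the custom models now do the same internally. A minimal check using the file's existing torch/nn imports:

    # Passing no state to nn.LSTM is equivalent to passing explicit zeros.
    lstm = nn.LSTM(input_size=1, hidden_size=16)
    x = torch.randn(12, 32, 1)      # (seq_len, batch, input_size)
    h0 = torch.zeros(1, 32, 16)     # (num_layers, batch, hidden_size)
    c0 = torch.zeros(1, 32, 16)
    out_explicit, _ = lstm(x, (h0, c0))
    out_default, _ = lstm(x)        # state defaults to zeros
    assert torch.allclose(out_explicit, out_default)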
@@ -126,7 +132,7 @@ def main():
optimizer.zero_grad(set_to_none=True)
outputs, _states = network(data, state)
outputs, _states = network(data)
loss = criterion(outputs[-1], label)
running_loss += loss.item()
outputs_np = outputs[-1].detach().cpu().numpy()
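Note: a shape sketch for the loss call above (sizes are illustrative): outputs is (seq_len, batch, 2), so outputs[-1] is the last-timestep logits of shape (batch, 2); CrossEntropyLoss applies log-softmax internally and expects raw logits plus integer class labels of shape (batch,):

    logits = torch.randn(32, 2)          # stand-in for outputs[-1]
    labels = torch.randint(0, 2, (32,))  # class indices, not one-hot
    loss = nn.CrossEntropyLoss()(logits, labels)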
@@ -164,7 +170,7 @@ def main():
data = torch.from_numpy(data_np).to(device)
label = torch.from_numpy(label_np).to(device)
outputs, _states = network(data, state)
outputs, _states = network(data)
outputs_np = outputs[-1].detach().cpu().numpy()
running_accuracy += ((outputs_np[:, 1] > outputs_np[:, 0]).astype(np.int32) == label_np).astype(
np.float32).mean()
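Note: the comparison outputs_np[:, 1] > outputs_np[:, 0] is a two-class argmax in disguise; an equivalent and arguably clearer formulation:

    predictions = np.argmax(outputs_np, axis=1)
    running_accuracy += (predictions == label_np).astype(np.float32).mean()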
@@ -189,13 +195,9 @@ def main():
test_label = np.asarray([1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0], dtype=np.int32)
running_accuracy = 0.0
running_count = 0
if model in ['cell', 'stack']:
state = [(torch.zeros((1, 16)).to(device),
torch.zeros((1, 16)).to(device))] * network.NUM_LAYERS
for data, label in zip(test_data, test_label):
outputs, _states = network(
torch.from_numpy(np.expand_dims(np.asarray(data, dtype=np.float32), 1)).to(device),
state)
torch.from_numpy(np.expand_dims(np.asarray(data, dtype=np.float32), 1)).to(device))
outputs_np = outputs[-1].detach().cpu().numpy()
output_correct = int(outputs_np[0, 1] > outputs_np[0, 0]) == label
running_accuracy += 1.0 if output_correct else 0.0
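Note: the hunk does not show whether this evaluation loop runs under torch.no_grad(); if it does not, a hypothetical wrapper along these lines would skip autograd bookkeeping during inference:

    network.eval()
    with torch.no_grad():
        for data, label in zip(test_data, test_label):
            x = torch.from_numpy(np.expand_dims(np.asarray(data, dtype=np.float32), 1)).to(device)
            outputs, _states = network(x)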