From 7a6f5821bd6615eabb6bc91ba9169828eb00243a Mon Sep 17 00:00:00 2001
From: Hoel Bagard
Date: Thu, 21 Jan 2021 16:10:10 +0900
Subject: [PATCH] Introduce the use_batch_norm argument, remove old code

---
 layers.py | 47 +++++++++++++++++++----------------------------
 1 file changed, 19 insertions(+), 28 deletions(-)

diff --git a/layers.py b/layers.py
index 10df5f2..0d7ae78 100644
--- a/layers.py
+++ b/layers.py
@@ -7,13 +7,6 @@ import torch.nn.functional as F
 from .utils.logger import DummyLogger
 
 
-class LayerInfo():
-    def __init__(self):
-        self.memory = 0.0
-        self.ops = 0.0
-        self.output = 0.0
-
-
 class Layer(nn.Module):
     # Default layer arguments
     ACTIVATION = F.leaky_relu
@@ -27,14 +20,12 @@ class Layer(nn.Module):
     VERBOSE = 0
     LOGGER = DummyLogger()
 
-    def __init__(self, activation, batch_norm):
+    def __init__(self, activation):
         super().__init__()
         self.name = 'Layer'
-        self.info = LayerInfo()
 
         # Preload default
         self.activation = Layer.ACTIVATION if activation == 0 else activation
-        self.batch_norm = Layer.BATCH_NORM if batch_norm is None else batch_norm
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         output = input_data
@@ -46,14 +37,14 @@ class Layer(nn.Module):
 
 
 class Linear(Layer):
-    def __init__(self, in_channels: int, out_channels: int, activation=0, batch_norm=None, **kwargs):
-        super().__init__(activation, batch_norm)
+    def __init__(self, in_channels: int, out_channels: int, activation=0, use_batch_norm: bool = False, **kwargs):
+        super().__init__(activation)
 
         self.fc = nn.Linear(in_channels, out_channels, **kwargs)
         self.batch_norm = nn.BatchNorm1d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=Layer.BATCH_NORM_TRAINING) if use_batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         return super().forward(self.fc(input_data))
@@ -61,15 +52,15 @@ class Linear(Layer):
 
 class Conv1d(Layer):
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3,
-                 stride: Union[int, Tuple[int, int]] = 1, activation=0, batch_norm=None, **kwargs):
-        super().__init__(activation, batch_norm)
+                 stride: Union[int, Tuple[int, int]] = 1, activation=0, use_batch_norm: bool = False, **kwargs):
+        super().__init__(activation)
 
         self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride,
-                              bias=not self.batch_norm, **kwargs)
+                              bias=not use_batch_norm, **kwargs)
         self.batch_norm = nn.BatchNorm1d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=Layer.BATCH_NORM_TRAINING) if use_batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         return super().forward(self.conv(input_data))
@@ -77,15 +68,15 @@ class Conv1d(Layer):
 
 class Conv2d(Layer):
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3,
-                 stride: Union[int, Tuple[int, int]] = 1, activation=0, batch_norm=None, **kwargs):
-        super().__init__(activation, batch_norm)
+                 stride: Union[int, Tuple[int, int]] = 1, activation=0, use_batch_norm: bool = False, **kwargs):
+        super().__init__(activation)
 
         self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
-                              bias=not self.batch_norm, **kwargs)
+                              bias=not use_batch_norm, **kwargs)
         self.batch_norm = nn.BatchNorm2d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=not Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=not Layer.BATCH_NORM_TRAINING) if use_batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         return super().forward(self.conv(input_data))
@@ -93,15 +84,15 @@ class Conv2d(Layer):
 
 class Conv3d(Layer):
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3,
-                 stride: Union[int, Tuple[int, int]] = 1, activation=0, batch_norm=None, **kwargs):
-        super().__init__(activation, batch_norm)
+                 stride: Union[int, Tuple[int, int]] = 1, activation=0, use_batch_norm: bool = False, **kwargs):
+        super().__init__(activation)
 
         self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride,
-                              bias=not self.batch_norm, **kwargs)
+                              bias=not use_batch_norm, **kwargs)
         self.batch_norm = nn.BatchNorm3d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=Layer.BATCH_NORM_TRAINING) if use_batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         return super().forward(self.conv(input_data))
@@ -109,8 +100,8 @@ class Conv3d(Layer):
 
 class Deconv2d(Layer):
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3,
-                 stride: Union[int, Tuple[int, int]] = 1, activation=0, batch_norm=None, **kwargs):
-        super().__init__(activation, batch_norm)
+                 stride: Union[int, Tuple[int, int]] = 1, activation=0, use_batch_norm: bool = False, **kwargs):
+        super().__init__(activation)
 
         self.deconv = nn.ConvTranspose2d(
             in_channels, out_channels, kernel_size, stride=stride,
@@ -118,7 +109,7 @@ class Deconv2d(Layer):
         self.batch_norm = nn.BatchNorm2d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=not Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=not Layer.BATCH_NORM_TRAINING) if use_batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         return super().forward(self.deconv(input_data))
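
After this patch, batch normalization is opted into per layer through the new
use_batch_norm keyword rather than the old batch_norm constructor argument.
A minimal usage sketch follows; it assumes layers.py is importable from its
package (the import path below is hypothetical) and that Layer.forward applies
the batch norm and activation after the wrapped op, as suggested by the
truncated base-class forward in the diff:

    import torch
    from model.layers import Conv2d, Linear  # hypothetical package path

    # Conv block with batch norm enabled via the new keyword; the conv bias is
    # disabled (bias=not use_batch_norm) since the norm layer provides the shift.
    conv = Conv2d(3, 16, kernel_size=3, stride=1, padding=1, use_batch_norm=True)

    # Linear layer with the new default: no batch norm.
    fc = Linear(16, 10)

    x = torch.randn(4, 3, 32, 32)
    y = conv(x)      # conv -> BatchNorm2d -> Layer.ACTIVATION (leaky_relu by default)
    print(y.shape)   # expected: torch.Size([4, 16, 32, 32])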