Introduced the use_batch_norm variable, removed old code
commit 7a6f5821bd
parent fa9188ad75

1 changed file with 16 additions and 25 deletions

layers.py | 41 ++++++++++++++++-------------------------
@@ -7,13 +7,6 @@ import torch.nn.functional as F
 from .utils.logger import DummyLogger
 
 
-class LayerInfo():
-    def __init__(self):
-        self.memory = 0.0
-        self.ops = 0.0
-        self.output = 0.0
-
-
 class Layer(nn.Module):
     # Default layer arguments
     ACTIVATION = F.leaky_relu
@@ -27,14 +20,12 @@ class Layer(nn.Module):
     VERBOSE = 0
     LOGGER = DummyLogger()
 
-    def __init__(self, activation, batch_norm):
+    def __init__(self, activation):
         super().__init__()
         self.name = 'Layer'
-        self.info = LayerInfo()
 
         # Preload default
         self.activation = Layer.ACTIVATION if activation == 0 else activation
-        self.batch_norm = Layer.BATCH_NORM if batch_norm is None else batch_norm
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         output = input_data
@@ -46,14 +37,14 @@ class Layer(nn.Module):
 
 
 class Linear(Layer):
-    def __init__(self, in_channels: int, out_channels: int, activation=0, batch_norm=None, **kwargs):
-        super().__init__(activation, batch_norm)
+    def __init__(self, in_channels: int, out_channels: int, activation=0, use_batch_norm: bool = False, **kwargs):
+        super().__init__(activation)
 
         self.fc = nn.Linear(in_channels, out_channels, **kwargs)
         self.batch_norm = nn.BatchNorm1d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=Layer.BATCH_NORM_TRAINING) if use_batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         return super().forward(self.fc(input_data))
@@ -61,15 +52,15 @@ class Linear(Layer):
 
 class Conv1d(Layer):
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3,
-                 stride: Union[int, Tuple[int, int]] = 1, activation=0, batch_norm=None, **kwargs):
-        super().__init__(activation, batch_norm)
+                 stride: Union[int, Tuple[int, int]] = 1, activation=0, use_batch_norm: bool = False, **kwargs):
+        super().__init__(activation)
 
         self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride,
                               bias=not self.batch_norm, **kwargs)
         self.batch_norm = nn.BatchNorm1d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=Layer.BATCH_NORM_TRAINING) if use_batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         return super().forward(self.conv(input_data))
@@ -77,15 +68,15 @@ class Conv1d(Layer):
 
 class Conv2d(Layer):
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3,
-                 stride: Union[int, Tuple[int, int]] = 1, activation=0, batch_norm=None, **kwargs):
-        super().__init__(activation, batch_norm)
+                 stride: Union[int, Tuple[int, int]] = 1, activation=0, use_batch_norm: bool = False, **kwargs):
+        super().__init__(activation)
 
         self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
                               bias=not self.batch_norm, **kwargs)
         self.batch_norm = nn.BatchNorm2d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=not Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=not Layer.BATCH_NORM_TRAINING) if use_batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         return super().forward(self.conv(input_data))
@@ -93,15 +84,15 @@ class Conv2d(Layer):
 
 class Conv3d(Layer):
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3,
-                 stride: Union[int, Tuple[int, int]] = 1, activation=0, batch_norm=None, **kwargs):
-        super().__init__(activation, batch_norm)
+                 stride: Union[int, Tuple[int, int]] = 1, activation=0, use_batch_norm: bool = False, **kwargs):
+        super().__init__(activation)
 
         self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride,
                               bias=not self.batch_norm, **kwargs)
         self.batch_norm = nn.BatchNorm3d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=Layer.BATCH_NORM_TRAINING) if use_batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         return super().forward(self.conv(input_data))
@@ -109,8 +100,8 @@ class Conv3d(Layer):
 
 class Deconv2d(Layer):
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3,
-                 stride: Union[int, Tuple[int, int]] = 1, activation=0, batch_norm=None, **kwargs):
-        super().__init__(activation, batch_norm)
+                 stride: Union[int, Tuple[int, int]] = 1, activation=0, use_batch_norm: bool = False, **kwargs):
+        super().__init__(activation)
 
         self.deconv = nn.ConvTranspose2d(
             in_channels, out_channels, kernel_size, stride=stride,
@@ -118,7 +109,7 @@ class Deconv2d(Layer):
         self.batch_norm = nn.BatchNorm2d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=not Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=not Layer.BATCH_NORM_TRAINING) if use_batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
         return super().forward(self.deconv(input_data))
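
For reference, a minimal call-site sketch of the flag this commit introduces. This is an illustration, not code from the repository: the `layers` import path and the tensors are assumptions, `Layer.forward`'s body lies outside this diff, and only the `use_batch_norm: bool = False` signatures above are confirmed by the commit.

# Hypothetical usage of the new use_batch_norm flag (import path assumed).
import torch
from layers import Linear

# Batch norm is now opted into per layer with a plain boolean; previously a
# batch_norm value was threaded through Layer.__init__ and a class default.
hidden = Linear(in_channels=128, out_channels=64, use_batch_norm=True)
head = Linear(in_channels=64, out_channels=10)   # batch norm off by default

x = torch.randn(8, 128)      # (batch, features)
logits = head(hidden(x))     # activation applied via the Layer base class

One effect of the change is that each subclass now fully owns its norm module: the `nn.BatchNorm*` instance is built locally (`... if use_batch_norm else None`) instead of the base class seeding `self.batch_norm` from `Layer.BATCH_NORM`.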