Trainer implementation

* Add Deconv2d
* Fix BatchGenerator save option when using current directory
parent 144ff4a004
commit f2282e3216

3 changed files with 263 additions and 7 deletions
layers.py | 29
@@ -45,6 +45,20 @@ class Layer(nn.Module):
         return output
 
 
+class Linear(Layer):
+    def __init__(self, in_channels: int, out_channels: int, activation=0, batch_norm=None, **kwargs):
+        super().__init__(activation, batch_norm)
+
+        self.fc = nn.Linear(in_channels, out_channels, **kwargs)
+        self.batch_norm = nn.BatchNorm1d(
+            out_channels,
+            momentum=Layer.BATCH_NORM_MOMENTUM,
+            track_running_stats=Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+
+    def forward(self, input_data: torch.Tensor) -> torch.Tensor:
+        return super().forward(self.fc(input_data))
+
+
 class Conv1d(Layer):
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3,
                  stride: Union[int, Tuple[int, int]] = 1, activation=0, batch_norm=None, **kwargs):
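The moved Linear class (and the Deconv2d added in the next hunk) both route their result through Layer.forward, which this diff never shows. As a reading aid, here is a minimal, hypothetical sketch of what the base class presumably looks like, inferred only from the names visible in the hunks (BATCH_NORM_MOMENTUM, BATCH_NORM_TRAINING, and the activation/batch_norm constructor arguments); the real implementation may differ.

    # Hypothetical reconstruction of the Layer base class, inferred from this diff.
    # Only the class constants and the (activation, batch_norm) arguments are
    # attested; everything else is an assumption.
    import torch
    import torch.nn as nn

    class Layer(nn.Module):
        BATCH_NORM_MOMENTUM = 0.1    # assumed value
        BATCH_NORM_TRAINING = True   # assumed value

        def __init__(self, activation=0, batch_norm=None):
            super().__init__()
            # activation=0 is read here as a LeakyReLU slope (0 == plain ReLU);
            # this is a guess based on the numeric default in the subclasses.
            self.activation = nn.LeakyReLU(activation) if activation is not None else None
            self.batch_norm = batch_norm  # subclasses replace this flag with a module

        def forward(self, output: torch.Tensor) -> torch.Tensor:
            # Called by subclasses via super().forward(...) after their own op.
            if self.batch_norm is not None:
                output = self.batch_norm(output)
            if self.activation is not None:
                output = self.activation(output)
            return output

Under this reading, the `if self.batch_norm else None` pattern in the subclasses tests the constructor flag stored by super().__init__ before overwriting it with the actual norm module.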
@@ -93,15 +107,18 @@ class Conv3d(Layer):
         return super().forward(self.conv(input_data))
 
 
-class Linear(Layer):
-    def __init__(self, in_channels: int, out_channels: int, activation=0, batch_norm=None, **kwargs):
+class Deconv2d(Layer):
+    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3,
+                 stride: Union[int, Tuple[int, int]] = 1, activation=0, batch_norm=None, **kwargs):
         super().__init__(activation, batch_norm)
 
-        self.fc = nn.Linear(in_channels, out_channels, **kwargs)
-        self.batch_norm = nn.BatchNorm1d(
+        self.deconv = nn.ConvTranspose2d(
+            in_channels, out_channels, kernel_size, stride=stride,
+            bias=not self.batch_norm, **kwargs)
+        self.batch_norm = nn.BatchNorm2d(
             out_channels,
             momentum=Layer.BATCH_NORM_MOMENTUM,
-            track_running_stats=Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
+            track_running_stats=not Layer.BATCH_NORM_TRAINING) if self.batch_norm else None
 
     def forward(self, input_data: torch.Tensor) -> torch.Tensor:
-        return super().forward(self.fc(input_data))
+        return super().forward(self.deconv(input_data))
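For context, a quick usage sketch of the new Deconv2d layer follows. The constructor arguments come straight from the hunk above; the extra padding=1 is a hypothetical kwarg forwarded to nn.ConvTranspose2d via **kwargs, and the shape arithmetic is standard transposed-convolution output sizing.

    # Usage sketch for the new Deconv2d (assumes layers.py as patched above).
    import torch
    from layers import Deconv2d

    # kernel_size=4, stride=2, padding=1 is the classic 2x upsampling setup;
    # padding is not named in the diff and reaches ConvTranspose2d via **kwargs.
    up = Deconv2d(in_channels=64, out_channels=32, kernel_size=4,
                  stride=2, padding=1, batch_norm=True)

    x = torch.randn(8, 64, 16, 16)
    y = up(x)        # ConvTranspose2d -> BatchNorm2d -> activation (via Layer.forward)
    print(y.shape)   # torch.Size([8, 32, 32, 32]); H_out = (16-1)*2 - 2*1 + 4 = 32

Note the `bias=not self.batch_norm` choice in the hunk: when a BatchNorm2d follows the transposed convolution, the norm's learned shift makes the convolution's own bias redundant, so it is switched off.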