Several improvements
This commit is contained in:
parent
268429fa1a
commit
12afc7cc93
2 changed files with 16 additions and 1 deletions
|
|
@ -48,7 +48,7 @@ class Layer(nn.Module):
|
|||
|
||||
|
||||
class Conv2d(Layer):
|
||||
def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1,
|
||||
def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1,
|
||||
activation=0, batch_norm=None, **kwargs):
|
||||
super(Conv2d, self).__init__(activation, batch_norm)
|
||||
|
||||
|
|
|
|||
15
train.py
15
train.py
|
|
@ -1,3 +1,6 @@
|
|||
import os
|
||||
import resource
|
||||
import subprocess
|
||||
from typing import List, Tuple
|
||||
|
||||
import torch
|
||||
|
|
@ -18,3 +21,15 @@ def parameter_summary(network: torch.nn.Module) -> List[Tuple[str, Tuple[int], s
|
|||
numpy = param.detach().cpu().numpy()
|
||||
parameter_info.append((name, numpy.shape, human_size(numpy.size * numpy.dtype.itemsize)))
|
||||
return parameter_info
|
||||
|
||||
|
||||
def resource_usage() -> Tuple[int, str]:
    """Return this process's peak memory usage and current GPU memory usage.

    Returns:
        A tuple ``(memory_peak, gpu_memory)`` where ``memory_peak`` is the
        peak resident set size from ``getrusage`` (in the platform's
        ``ru_maxrss`` unit — kilobytes on Linux) and ``gpu_memory`` is a
        human-readable string taken from ``nvidia-smi``'s
        ``memory.used`` query (one entry per GPU, space-joined, unless
        narrowed to the first visible device — see below).

    Raises:
        subprocess.CalledProcessError: if ``nvidia-smi`` exits non-zero.
        FileNotFoundError: if ``nvidia-smi`` is not on PATH.
    """
    memory_peak = int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
    # List-form argv without shell=True: no shell parsing, same output.
    gpu_memory = subprocess.check_output(
        ['nvidia-smi', '--query-gpu=memory.used', '--format=csv,noheader']).decode()
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        # CUDA_VISIBLE_DEVICES may be a comma-separated list (e.g. "0,1");
        # the previous int(...) crashed on that form. Report the first
        # visible device's line, matching the old behavior for "N".
        first_device = int(os.environ['CUDA_VISIBLE_DEVICES'].split(',')[0])
        gpu_memory = gpu_memory.split('\n')[first_device]
    else:
        # No restriction set: report every GPU's usage on one line.
        gpu_memory = ' '.join(gpu_memory.split('\n'))

    return memory_peak, gpu_memory
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue