From 12afc7cc938b70b3d8d1ec981c06745912b62173 Mon Sep 17 00:00:00 2001 From: Corentin Risselin Date: Mon, 13 Apr 2020 17:30:35 +0900 Subject: [PATCH] Several improvements --- layers.py | 2 +- train.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/layers.py b/layers.py index 728f026..fa473f0 100644 --- a/layers.py +++ b/layers.py @@ -48,7 +48,7 @@ class Layer(nn.Module): class Conv2d(Layer): - def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, + def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation=0, batch_norm=None, **kwargs): super(Conv2d, self).__init__(activation, batch_norm) diff --git a/train.py b/train.py index 157704d..6d6b256 100644 --- a/train.py +++ b/train.py @@ -1,3 +1,6 @@ +import os +import resource +import subprocess from typing import List, Tuple import torch @@ -18,3 +21,15 @@ def parameter_summary(network: torch.nn.Module) -> List[Tuple[str, Tuple[int], s numpy = param.detach().cpu().numpy() parameter_info.append((name, numpy.shape, human_size(numpy.size * numpy.dtype.itemsize))) return parameter_info + + +def resource_usage() -> Tuple[int, str]: memory_peak = int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) gpu_memory = subprocess.check_output( 'nvidia-smi --query-gpu=memory.used --format=csv,noheader', shell=True).decode() if 'CUDA_VISIBLE_DEVICES' in os.environ: gpu_memory = gpu_memory.split('\n')[int(os.environ['CUDA_VISIBLE_DEVICES'])] else: gpu_memory = ' '.join(gpu_memory.split('\n')) + + return memory_peak, gpu_memory