import os
import resource
import subprocess
from typing import List, Tuple

import torch

from .utils.memory import human_size


def parameter_summary(network: torch.nn.Module) -> List[Tuple[str, Tuple[int, ...], str]]:
    """Summarizes the parameters and buffers of a network.

    Returns a list of tuples: name, shape (tuple of ints), and size
    (human-readable string).

    Args:
        network (torch.nn.Module): network to parse
    """
    parameter_info = []
    # Parameters and buffers are reported identically: move each tensor to
    # the CPU and derive its memory footprint from the numpy view.
    for name, param in network.named_parameters():
        array = param.detach().cpu().numpy()
        parameter_info.append((name, array.shape, human_size(array.size * array.dtype.itemsize)))
    for name, buffer in network.named_buffers():
        array = buffer.detach().cpu().numpy()
        parameter_info.append((name, array.shape, human_size(array.size * array.dtype.itemsize)))
    return parameter_info
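

# Example usage (a minimal sketch; the two-layer model is illustrative and
# the size strings depend on how `human_size` formats byte counts):
#
#     net = torch.nn.Sequential(torch.nn.Linear(3, 10), torch.nn.BatchNorm1d(10))
#     for name, shape, size in parameter_summary(net):
#         print(name, shape, size)
#     # 0.weight (10, 3) ...         <- parameter of the Linear layer
#     # 1.running_mean (10,) ...     <- buffer of the BatchNorm1d layer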


def resource_usage() -> Tuple[int, str]:
    """Returns the peak resident memory of this process (in kilobytes on
    Linux, bytes on macOS) and the GPU memory currently in use."""
    memory_peak = int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
    return memory_peak, gpu_used_memory()
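

# Example usage (a sketch): report both numbers after a training step.
# ru_maxrss is in kilobytes on Linux but in bytes on macOS, so the
# conversion below assumes Linux:
#
#     peak, gpu = resource_usage()
#     print(f'peak RSS: {human_size(peak * 1024)}, GPU used: {gpu}')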


def gpu_used_memory() -> str:
    """Returns the used GPU memory as reported by nvidia-smi."""
    gpu_memory = subprocess.check_output(
        'nvidia-smi --query-gpu=memory.used --format=csv,noheader', shell=True).decode().strip()
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        # Note: this assumes CUDA_VISIBLE_DEVICES holds a single device index.
        gpu_memory = gpu_memory.split('\n')[int(os.environ['CUDA_VISIBLE_DEVICES'])]
    else:
        gpu_memory = ','.join(gpu_memory.split('\n'))

    return gpu_memory
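

# With --format=csv,noheader, nvidia-smi prints one line per device such as
# "1024 MiB", so on a two-GPU machine without CUDA_VISIBLE_DEVICES set the
# join above yields a string like "1024 MiB,0 MiB".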


def gpu_total_memory() -> str:
    """Returns the total GPU memory as reported by nvidia-smi."""
    gpu_memory = subprocess.check_output(
        'nvidia-smi --query-gpu=memory.total --format=csv,noheader', shell=True).decode().strip()
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        # Note: this assumes CUDA_VISIBLE_DEVICES holds a single device index.
        gpu_memory = gpu_memory.split('\n')[int(os.environ['CUDA_VISIBLE_DEVICES'])]
    else:
        gpu_memory = ','.join(gpu_memory.split('\n'))

    return gpu_memory
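

# Possible refactoring (a sketch, not part of the original API): the two
# queries above differ only in the nvidia-smi field they request, and both
# assume CUDA_VISIBLE_DEVICES holds a single device index. The hypothetical
# helper below factors out the shared logic and also accepts a
# comma-separated list such as CUDA_VISIBLE_DEVICES=0,1.
def _gpu_memory_query(field: str) -> str:
    report = subprocess.check_output(
        ['nvidia-smi', '--query-gpu={}'.format(field), '--format=csv,noheader']).decode().strip()
    lines = report.split('\n')
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        # Keep only the rows for the devices this process is allowed to see.
        lines = [lines[int(i)] for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
    return ','.join(lines)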