Refactor system architecture (#82)
This commit is contained in:
@ -1,13 +1,8 @@
|
||||
import enum
|
||||
import random
|
||||
|
||||
import psutil
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
from cacheflow.parallel_utils.parallel_state import model_parallel_is_initialized
|
||||
from cacheflow.parallel_utils.tensor_parallel import model_parallel_cuda_manual_seed
|
||||
|
||||
|
||||
class Device(enum.Enum):
|
||||
GPU = enum.auto()
|
||||
@ -28,17 +23,6 @@ class Counter:
|
||||
self.counter = 0
|
||||
|
||||
|
||||
def set_random_seed(seed: int):
    """Seed every RNG this process uses with the same value.

    Covers Python's `random`, NumPy, and Torch; additionally seeds all CUDA
    devices when CUDA is available, and the model-parallel CUDA RNG when the
    model-parallel state has been initialized.

    Args:
        seed: The seed value propagated to every RNG.
    """
    # Seed the framework-level generators in a fixed order.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    # Model-parallel workers keep a separate CUDA RNG; seed it too when set up.
    if model_parallel_is_initialized():
        model_parallel_cuda_manual_seed(seed)
|
||||
|
||||
|
||||
def get_gpu_memory(gpu: int = 0) -> int:
    """Return the total memory of CUDA device *gpu*, in bytes.

    Args:
        gpu: Index of the CUDA device to query (defaults to device 0).

    Returns:
        The device's total memory in bytes.
    """
    device_props = torch.cuda.get_device_properties(gpu)
    return device_props.total_memory
|
||||
|
||||
|
||||
Reference in New Issue
Block a user