Compare commits

..

1 Commits

5 changed files with 13 additions and 32 deletions

View File

@@ -1082,20 +1082,8 @@ def cast_to_device(tensor, device, dtype, copy=False):
non_blocking = device_supports_non_blocking(device)
return cast_to(tensor, dtype=dtype, device=device, non_blocking=non_blocking, copy=copy)
PINNED_MEMORY = {}
TOTAL_PINNED_MEMORY = 0
if PerformanceFeature.PinnedMem in args.fast:
if WINDOWS:
MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50%
else:
MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95
else:
MAX_PINNED_MEMORY = -1
def pin_memory(tensor):
global TOTAL_PINNED_MEMORY
if MAX_PINNED_MEMORY <= 0:
if PerformanceFeature.PinnedMem not in args.fast:
return False
if not is_nvidia():
@@ -1104,21 +1092,13 @@ def pin_memory(tensor):
if not is_device_cpu(tensor.device):
return False
size = tensor.numel() * tensor.element_size()
if (TOTAL_PINNED_MEMORY + size) > MAX_PINNED_MEMORY:
return False
ptr = tensor.data_ptr()
if torch.cuda.cudart().cudaHostRegister(ptr, size, 1) == 0:
PINNED_MEMORY[ptr] = size
TOTAL_PINNED_MEMORY += size
if torch.cuda.cudart().cudaHostRegister(tensor.data_ptr(), tensor.numel() * tensor.element_size(), 1) == 0:
return True
return False
def unpin_memory(tensor):
global TOTAL_PINNED_MEMORY
if MAX_PINNED_MEMORY <= 0:
if PerformanceFeature.PinnedMem not in args.fast:
return False
if not is_nvidia():
@@ -1127,11 +1107,7 @@ def unpin_memory(tensor):
if not is_device_cpu(tensor.device):
return False
ptr = tensor.data_ptr()
if torch.cuda.cudart().cudaHostUnregister(ptr) == 0:
TOTAL_PINNED_MEMORY -= PINNED_MEMORY.pop(ptr)
if len(PINNED_MEMORY) == 0:
TOTAL_PINNED_MEMORY = 0
if torch.cuda.cudart().cudaHostUnregister(tensor.data_ptr()) == 0:
return True
return False

View File

@@ -399,8 +399,6 @@ class RAMPressureCache(LRUCache):
ram_usage = RAM_CACHE_DEFAULT_RAM_USAGE
def scan_list_for_ram_usage(outputs):
nonlocal ram_usage
if outputs is None:
return
for output in outputs:
if isinstance(output, list):
scan_list_for_ram_usage(output)

View File

@@ -23,6 +23,13 @@ def validate_node_input(
if not received_type != input_type:
return True
# If input_type is a Combo, frontend permits a Combo output to be connected,
# but it is defined as a tuple of values with V3 schema.
# This probably should be dealt with sending one thing to the frontend and another to the backend,
# but this will do for now.
if input_type == "COMBO" and isinstance(received_type, tuple):
return True
# Not equal, and not strings
if not isinstance(received_type, str) or not isinstance(input_type, str):
return False

View File

@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.3.68"
__version__ = "0.3.67"

View File

@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.3.68"
version = "0.3.67"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.9"